Merge branch 'main' into feat/test-phase

This commit is contained in:
bryan
2026-01-20 16:36:27 -08:00
48 changed files with 4333 additions and 110 deletions
+1
View File
@@ -0,0 +1 @@
../../core/.claude/skills/building-agents
+3
View File
@@ -5,6 +5,7 @@ node_modules/
# Build outputs
dist/
build/
workdir/
.next/
out/
@@ -64,3 +65,5 @@ core/data/
.cache/
tmp/
temp/
exports/*
+9
View File
@@ -0,0 +1,9 @@
{
"mcpServers": {
"agent-builder": {
"command": "python",
"args": ["-m", "framework.mcp.agent_builder_server"],
"cwd": "/home/timothy/oss/hive/core"
}
}
}
+41 -69
View File
@@ -81,80 +81,48 @@ docker compose up
Traditional agent frameworks require you to manually design workflows, define agent interactions, and handle failures reactively. Aden flips this paradigm—**you describe outcomes, and the system builds itself**.
```mermaid
flowchart TB
subgraph USER["👤 User"]
GOAL[("🎯 Define Goal<br/>(Natural Language)")]
flowchart LR
subgraph BUILD["🏗️ BUILD"]
GOAL["Define Goal<br/>+ Success Criteria"] --> NODES["Add Nodes<br/>LLM/Router/Function"]
NODES --> EDGES["Connect Edges<br/>on_success/failure/conditional"]
EDGES --> TEST["Test & Validate"] --> APPROVE["Approve & Export"]
end
subgraph CODING["🤖 Coding Agent"]
subgraph EXPORT["📦 EXPORT"]
direction TB
GENERATE["Generate Agent Graph"]
CONNECTION["Create Connection Code"]
TESTGEN["Generate Test Cases"]
EVOLVE["Evolve on Failure"]
JSON["agent.json<br/>(GraphSpec)"]
TOOLS["tools.py<br/>(Functions)"]
MCP["mcp_servers.json<br/>(Integrations)"]
end
subgraph WORKERS["⚙️ Worker Agents"]
direction TB
subgraph NODE1["SDK-Wrapped Node"]
N1_MEM["Memory (STM/LTM)"]
N1_TOOLS["Tools Access"]
N1_LLM["LLM Integration"]
N1_MON["Monitoring"]
subgraph RUN["🚀 RUNTIME"]
LOAD["AgentRunner<br/>Load + Parse"] --> SETUP["Setup Runtime<br/>+ ToolRegistry"]
SETUP --> EXEC["GraphExecutor<br/>Execute Nodes"]
subgraph DECISION["Decision Recording"]
DEC1["runtime.decide()<br/>intent → options → choice"]
DEC2["runtime.record_outcome()<br/>success, result, metrics"]
end
subgraph NODE2["SDK-Wrapped Node"]
N2_MEM["Memory (STM/LTM)"]
N2_TOOLS["Tools Access"]
N2_LLM["LLM Integration"]
N2_MON["Monitoring"]
end
HITL["🙋 Human-in-the-Loop<br/>Intervention Points"]
end
subgraph CONTROL["🎛 Hive Control Plane"]
direction TB
BUDGET["Budget & Cost Control"]
POLICY["Policy Management"]
METRICS["Real-time Metrics"]
MCP["19 MCP Tools"]
subgraph INFRA[" INFRASTRUCTURE"]
CTX["NodeContext<br/>memory • llm • tools"]
STORE[("FileStorage<br/>Runs & Decisions")]
end
subgraph STORAGE["💾 Storage Layer"]
TSDB[("TimescaleDB<br/>Metrics & Events")]
MONGO[("MongoDB<br/>Policies")]
POSTGRES[("PostgreSQL<br/>Users & Config")]
end
APPROVE --> EXPORT
EXPORT --> LOAD
EXEC --> DECISION
EXEC --> CTX
DECISION --> STORE
STORE -.->|"Analyze & Improve"| NODES
subgraph DASHBOARD["📊 Dashboard (Honeycomb)"]
ANALYTICS["Analytics & KPIs"]
AGENTS["Agent Monitoring"]
COSTS["Cost Tracking"]
end
GOAL --> GENERATE
GENERATE --> CONNECTION
CONNECTION --> TESTGEN
TESTGEN --> NODE1
TESTGEN --> NODE2
NODE1 <--> NODE2
NODE1 & NODE2 --> HITL
NODE1 & NODE2 -->|Events| CONTROL
CONTROL -->|Policies| NODE1 & NODE2
CONTROL <-->|WebSocket| DASHBOARD
CONTROL --> STORAGE
NODE1 & NODE2 -->|Failure Data| EVOLVE
EVOLVE -->|Updated Graph| GENERATE
style USER fill:#e8f5e9,stroke:#2e7d32
style CODING fill:#e3f2fd,stroke:#1565c0
style WORKERS fill:#fff3e0,stroke:#ef6c00
style CONTROL fill:#fce4ec,stroke:#c2185b
style STORAGE fill:#f3e5f5,stroke:#7b1fa2
style DASHBOARD fill:#e0f7fa,stroke:#00838f
style BUILD fill:#ffbe42,stroke:#cc5d00,stroke-width:3px,color:#333
style EXPORT fill:#fff59d,stroke:#ed8c00,stroke-width:2px,color:#333
style RUN fill:#ffb100,stroke:#cc5d00,stroke-width:3px,color:#333
style DECISION fill:#ffcc80,stroke:#ed8c00,stroke-width:2px,color:#333
style INFRA fill:#e8763d,stroke:#cc5d00,stroke-width:3px,color:#fff
style STORE fill:#ed8c00,stroke:#cc5d00,stroke-width:2px,color:#fff
```
### The Aden Advantage
@@ -213,12 +181,16 @@ Choose other frameworks when you need:
```
hive/
├── honeycomb/ # Frontend (React + TypeScript + Vite)
├── hive/ # Backend (Node.js + TypeScript + Express)
├── docs/ # Documentation
├── scripts/ # Build and utility scripts
├── config.yaml.example # Configuration template
── docker-compose.yml # Container orchestration
├── honeycomb/ # Frontend Dashboard
├── hive/ # Backend API Server
├── aden-tools/ # MCP Tools Package - 19 tools for agent capabilities
├── docs/ # Documentation and guides
├── scripts/ # Build and utility scripts
├── config.yaml.example     # Configuration template
├── docker-compose.yml # Container orchestration
├── DEVELOPER.md # Developer guide
├── CONTRIBUTING.md # Contribution guidelines
└── ROADMAP.md # Product roadmap
```
## Development
+2 -2
View File
@@ -146,13 +146,13 @@ Every tool folder needs a `README.md` with:
## Testing
Place tests in `tests/tools/test_my_tool.py`:
Place tests in `tests/tools/test_{{tool_name}}.py`:
```python
import pytest
from fastmcp import FastMCP
from aden_tools.tools.my_tool import register_tools
from aden_tools.tools.{{tool_name}} import register_tools
@pytest.fixture
+10 -1
View File
@@ -15,9 +15,18 @@ COPY mcp_server.py ./
RUN pip install --no-cache-dir -e .
# Create non-root user for security
RUN useradd -m -u 1001 appuser && chown -R appuser:appuser /app
RUN useradd -m -u 1001 appuser
# Create workspaces directory for file system tools persistence
# This directory will be mounted as a volume
RUN mkdir -p /app/workdir/workspaces && \
chown -R appuser:appuser /app
USER appuser
# Declare volume for workspace persistence across container runs
VOLUME ["/app/workdir/workspaces"]
# Expose MCP server port
EXPOSE 4001
+1
View File
@@ -27,6 +27,7 @@ dependencies = [
"pandas>=2.0.0",
"jsonpath-ng>=1.6.0",
"fastmcp>=2.0.0",
"diff-match-patch>=20230430",
]
[project.optional-dependencies]
@@ -20,6 +20,16 @@ from .web_search_tool import register_tools as register_web_search
from .web_scrape_tool import register_tools as register_web_scrape
from .pdf_read_tool import register_tools as register_pdf_read
# Import file system toolkits
from .file_system_toolkits.view_file import register_tools as register_view_file
from .file_system_toolkits.write_to_file import register_tools as register_write_to_file
from .file_system_toolkits.list_dir import register_tools as register_list_dir
from .file_system_toolkits.replace_file_content import register_tools as register_replace_file_content
from .file_system_toolkits.apply_diff import register_tools as register_apply_diff
from .file_system_toolkits.apply_patch import register_tools as register_apply_patch
from .file_system_toolkits.grep_search import register_tools as register_grep_search
from .file_system_toolkits.execute_command_tool import register_tools as register_execute_command
def register_all_tools(mcp: FastMCP) -> List[str]:
"""
@@ -38,6 +48,16 @@ def register_all_tools(mcp: FastMCP) -> List[str]:
register_web_scrape(mcp)
register_pdf_read(mcp)
# Register file system toolkits
register_view_file(mcp)
register_write_to_file(mcp)
register_list_dir(mcp)
register_replace_file_content(mcp)
register_apply_diff(mcp)
register_apply_patch(mcp)
register_grep_search(mcp)
register_execute_command(mcp)
return [
"example_tool",
"file_read",
@@ -45,6 +65,14 @@ def register_all_tools(mcp: FastMCP) -> List[str]:
"web_search",
"web_scrape",
"pdf_read",
"view_file",
"write_to_file",
"list_dir",
"replace_file_content",
"apply_diff",
"apply_patch",
"grep_search",
"execute_command_tool",
]
@@ -0,0 +1,109 @@
# Apply Diff Tool
Applies a unified diff patch to a file within the secure session sandbox.
## Description
The `apply_diff` tool applies structured diff patches to files, enabling precise modifications using the diff-match-patch algorithm. It can apply multiple patches in a single operation and reports success status for each patch.
## Use Cases
- Applying code review suggestions
- Implementing automated refactoring
- Synchronizing file changes from version control
- Making precise, contextual file modifications
## Usage
```python
apply_diff(
path="src/main.py",
diff_text="@@ -1,3 +1,3 @@\n import os\n-import sys\n+import json\n from typing import List",
workspace_id="workspace-123",
agent_id="agent-456",
session_id="session-789"
)
```
## Arguments
| Argument | Type | Required | Default | Description |
|----------|------|----------|---------|-------------|
| `path` | str | Yes | - | The path to the file (relative to session root) |
| `diff_text` | str | Yes | - | The diff patch text to apply |
| `workspace_id` | str | Yes | - | The ID of the workspace |
| `agent_id` | str | Yes | - | The ID of the agent |
| `session_id` | str | Yes | - | The ID of the current session |
## Returns
Returns a dictionary with the following structure:
**Success (all patches applied):**
```python
{
"success": True,
"path": "src/main.py",
"patches_applied": 3,
"all_successful": True
}
```
**Partial success (some patches failed):**
```python
{
"success": False,
"path": "src/main.py",
"patches_applied": 2,
"patches_failed": 1,
"error": "Failed to apply 1 of 3 patches"
}
```
**Error:**
```python
{
"error": "File not found at src/main.py"
}
```
## Error Handling
- Returns an error dict if the file doesn't exist
- Returns partial success if some patches fail to apply
- Returns an error dict if the diff text is malformed
- Uses diff-match-patch library for intelligent fuzzy matching
## Examples
### Applying a single-line change
```python
diff = "@@ -10,1 +10,1 @@\n- old_code()\n+ new_code()"
result = apply_diff(
path="module.py",
diff_text=diff,
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": True, "path": "module.py", "patches_applied": 1, "all_successful": True}
```
### Handling patch failures
```python
result = apply_diff(
path="outdated.py",
diff_text="@@ -1,1 +1,1 @@\n-nonexistent line\n+new line",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": False, "path": "outdated.py", "patches_applied": 0, "patches_failed": 1, ...}
```
## Notes
- Uses the diff-match-patch library for patch application
- Supports fuzzy matching for more robust patching
- Patches are applied atomically (all or nothing for file write)
- The file is only modified if all patches succeed (otherwise it is left untouched)
@@ -0,0 +1,3 @@
from .apply_diff import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,58 @@
import os
import diff_match_patch as dmp_module
from mcp.server.fastmcp import FastMCP
from ..security import get_secure_path
def register_tools(mcp: FastMCP) -> None:
    """Register the apply_diff tool on the given MCP server."""

    @mcp.tool()
    def apply_diff(path: str, diff_text: str, workspace_id: str, agent_id: str, session_id: str) -> dict:
        """
        Apply a diff to a file within the session sandbox.

        Use this when you need to apply structured diff patches to modify file content.

        Args:
            path: The path to the file (relative to session root)
            diff_text: The diff patch text to apply
            workspace_id: The ID of the workspace
            agent_id: The ID of the agent
            session_id: The ID of the current session

        Returns:
            Dict with application status and patch results, or error dict
        """
        try:
            target = get_secure_path(path, workspace_id, agent_id, session_id)
            if not os.path.exists(target):
                return {"error": f"File not found at {path}"}

            engine = dmp_module.diff_match_patch()
            patch_list = engine.patch_fromText(diff_text)

            with open(target, "r", encoding="utf-8") as src:
                original = src.read()

            patched, outcomes = engine.patch_apply(patch_list, original)
            applied = sum(1 for ok in outcomes if ok)
            failed = len(outcomes) - applied

            if failed:
                # All-or-nothing: leave the file untouched unless every
                # patch applied cleanly.
                return {
                    "success": False,
                    "path": path,
                    "patches_applied": applied,
                    "patches_failed": failed,
                    "error": f"Failed to apply {failed} of {len(patch_list)} patches"
                }

            with open(target, "w", encoding="utf-8") as dst:
                dst.write(patched)
            return {
                "success": True,
                "path": path,
                "patches_applied": len(patch_list),
                "all_successful": True
            }
        except Exception as e:
            return {"error": f"Failed to apply diff: {str(e)}"}
@@ -0,0 +1,97 @@
# Apply Patch Tool
Applies a patch (unified diff) to a file within the secure session sandbox.
## Description
The `apply_patch` tool is an alias for `apply_diff` that applies structured diff patches to files. It provides the same functionality with alternative naming for user preference.
## Use Cases
- Applying code review suggestions
- Implementing automated refactoring
- Synchronizing file changes from version control
- Making precise, contextual file modifications
## Usage
```python
apply_patch(
path="src/main.py",
patch_text="@@ -1,3 +1,3 @@\n import os\n-import sys\n+import json\n from typing import List",
workspace_id="workspace-123",
agent_id="agent-456",
session_id="session-789"
)
```
## Arguments
| Argument | Type | Required | Default | Description |
|----------|------|----------|---------|-------------|
| `path` | str | Yes | - | The path to the file (relative to session root) |
| `patch_text` | str | Yes | - | The patch text to apply |
| `workspace_id` | str | Yes | - | The ID of the workspace |
| `agent_id` | str | Yes | - | The ID of the agent |
| `session_id` | str | Yes | - | The ID of the current session |
## Returns
Returns a dictionary with the following structure:
**Success (all patches applied):**
```python
{
"success": True,
"path": "src/main.py",
"patches_applied": 3,
"all_successful": True
}
```
**Partial success (some patches failed):**
```python
{
"success": False,
"path": "src/main.py",
"patches_applied": 2,
"patches_failed": 1,
"error": "Failed to apply 1 of 3 patches"
}
```
**Error:**
```python
{
"error": "File not found at src/main.py"
}
```
## Error Handling
- Returns an error dict if the file doesn't exist
- Returns partial success if some patches fail to apply
- Returns an error dict if the patch text is malformed
- Uses diff-match-patch library for intelligent fuzzy matching
## Examples
### Applying a patch
```python
patch = "@@ -10,1 +10,1 @@\n- old_code()\n+ new_code()"
result = apply_patch(
path="module.py",
patch_text=patch,
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": True, "path": "module.py", "patches_applied": 1, "all_successful": True}
```
## Notes
- This is an alias for the `apply_diff` tool with identical functionality
- Uses the diff-match-patch library for patch application
- Supports fuzzy matching for more robust patching
- The implementation is duplicated for atomic isolation (not a simple function call)
@@ -0,0 +1,3 @@
from .apply_patch import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,60 @@
import os
import diff_match_patch as dmp_module
from mcp.server.fastmcp import FastMCP
from ..security import get_secure_path
def register_tools(mcp: FastMCP) -> None:
    """Register patch application tools with the MCP server."""

    @mcp.tool()
    def apply_patch(path: str, patch_text: str, workspace_id: str, agent_id: str, session_id: str) -> dict:
        """
        Apply a patch to a file within the session sandbox.

        Use this when you need to apply patch-formatted changes to a file.
        This is an alias for apply_diff with the same functionality.

        Args:
            path: The path to the file (relative to session root)
            patch_text: The patch text to apply
            workspace_id: The ID of the workspace
            agent_id: The ID of the agent
            session_id: The ID of the current session

        Returns:
            Dict with application status and patch results, or error dict
        """
        # Logic intentionally duplicated from apply_diff so each tool module
        # stays self-contained.
        try:
            file_path = get_secure_path(path, workspace_id, agent_id, session_id)
            if not os.path.exists(file_path):
                return {"error": f"File not found at {path}"}

            engine = dmp_module.diff_match_patch()
            patch_list = engine.patch_fromText(patch_text)

            with open(file_path, "r", encoding="utf-8") as handle:
                before = handle.read()

            after, outcomes = engine.patch_apply(patch_list, before)
            ok_count = sum(1 for ok in outcomes if ok)
            bad_count = len(outcomes) - ok_count

            if bad_count:
                # File is only rewritten when every patch succeeds.
                return {
                    "success": False,
                    "path": path,
                    "patches_applied": ok_count,
                    "patches_failed": bad_count,
                    "error": f"Failed to apply {bad_count} of {len(patch_list)} patches"
                }

            with open(file_path, "w", encoding="utf-8") as handle:
                handle.write(after)
            return {
                "success": True,
                "path": path,
                "patches_applied": len(patch_list),
                "all_successful": True
            }
        except Exception as e:
            return {"error": f"Failed to apply patch: {str(e)}"}
@@ -0,0 +1,152 @@
# Execute Command Tool
Executes shell commands within the secure session sandbox.
## Description
The `execute_command_tool` allows you to run arbitrary shell commands in a sandboxed environment. Commands are executed with a 60-second timeout and capture both stdout and stderr output.
## Use Cases
- Running build commands (npm build, make, etc.)
- Executing tests
- Running linters or formatters
- Performing git operations
- Installing dependencies
## Usage
```python
execute_command_tool(
command="npm install",
workspace_id="workspace-123",
agent_id="agent-456",
session_id="session-789",
cwd="project"
)
```
## Arguments
| Argument | Type | Required | Default | Description |
|----------|------|----------|---------|-------------|
| `command` | str | Yes | - | The shell command to execute |
| `workspace_id` | str | Yes | - | The ID of the workspace |
| `agent_id` | str | Yes | - | The ID of the agent |
| `session_id` | str | Yes | - | The ID of the current session |
| `cwd` | str | No | "." | The working directory for the command (relative to session root) |
## Returns
Returns a dictionary with the following structure:
**Success:**
```python
{
"success": True,
"command": "npm install",
"return_code": 0,
"stdout": "added 42 packages in 3s",
"stderr": "",
"cwd": "project"
}
```
**Command failure (non-zero exit):**
```python
{
"success": True, # Command executed successfully, but exited with error code
"command": "npm test",
"return_code": 1,
"stdout": "",
"stderr": "Error: Tests failed",
"cwd": "."
}
```
**Timeout:**
```python
{
"error": "Command timed out after 60 seconds"
}
```
**Error:**
```python
{
"error": "Failed to execute command: [error message]"
}
```
## Error Handling
- Returns an error dict if the command times out (60 second limit)
- Returns an error dict if the command cannot be executed
- Returns success with non-zero return_code if command runs but fails
- Commands are executed in a sandboxed session environment
- Working directory defaults to session root if not specified
## Security Considerations
- Commands are executed within the session sandbox only
- File access is restricted to the session directory
- Network access depends on sandbox configuration
- Commands run with the permissions of the session user
- Use with caution as shell injection is possible
## Examples
### Running a build command
```python
result = execute_command_tool(
command="npm run build",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1",
cwd="frontend"
)
# Returns: {"success": True, "return_code": 0, "stdout": "Build complete", ...}
```
### Running tests with output
```python
result = execute_command_tool(
command="pytest -v",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": True, "return_code": 0, "stdout": "test output...", "stderr": ""}
```
### Handling command failures
```python
result = execute_command_tool(
command="nonexistent-command",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": True, "return_code": 127, "stderr": "command not found", ...}
```
### Running git commands
```python
result = execute_command_tool(
command="git status",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1",
cwd="repo"
)
# Returns: {"success": True, "return_code": 0, "stdout": "On branch main...", ...}
```
## Notes
- 60-second timeout for all commands
- Commands are executed using shell=True (supports pipes, redirects, etc.)
- Both stdout and stderr are captured separately
- Return code 0 typically indicates success
- Working directory is created if it doesn't exist
- Command output is returned as text (UTF-8 encoding)
@@ -0,0 +1,3 @@
from .execute_command_tool import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,58 @@
import os
import subprocess
from typing import Optional
from mcp.server.fastmcp import FastMCP
from ..security import get_secure_path, WORKSPACES_DIR
def register_tools(mcp: FastMCP) -> None:
    """Register command execution tools with the MCP server."""

    # Single wall-clock limit shared by every command run through this tool.
    COMMAND_TIMEOUT_SECONDS = 60

    @mcp.tool()
    def execute_command_tool(command: str, workspace_id: str, agent_id: str, session_id: str, cwd: Optional[str] = None) -> dict:
        """
        Execute a shell command within the session sandbox.

        Use this when you need to run shell commands safely within the sandboxed environment.
        Commands are executed with a 60-second timeout.

        Args:
            command: The shell command to execute
            workspace_id: The ID of the workspace
            agent_id: The ID of the agent
            session_id: The ID of the current session
            cwd: The working directory for the command (relative to session root, optional)

        Returns:
            Dict with command output and execution details, or error dict
        """
        try:
            # Default cwd is the session root; make sure it exists.
            session_root = os.path.join(WORKSPACES_DIR, workspace_id, agent_id, session_id)
            os.makedirs(session_root, exist_ok=True)
            if cwd:
                secure_cwd = get_secure_path(cwd, workspace_id, agent_id, session_id)
                # Fix: the documented contract is that the working directory is
                # created if it doesn't exist; previously only the session root
                # was created, so a fresh relative cwd made subprocess.run fail.
                os.makedirs(secure_cwd, exist_ok=True)
            else:
                secure_cwd = session_root

            # NOTE(security): shell=True supports pipes/redirects but permits
            # shell injection; callers are sandboxed agents by design.
            result = subprocess.run(
                command,
                shell=True,
                cwd=secure_cwd,
                capture_output=True,
                text=True,
                timeout=COMMAND_TIMEOUT_SECONDS
            )
            # success=True means the command *executed*; callers must inspect
            # return_code to see whether the command itself failed.
            return {
                "success": True,
                "command": command,
                "return_code": result.returncode,
                "stdout": result.stdout,
                "stderr": result.stderr,
                "cwd": cwd or "."
            }
        except subprocess.TimeoutExpired:
            return {"error": f"Command timed out after {COMMAND_TIMEOUT_SECONDS} seconds"}
        except Exception as e:
            return {"error": f"Failed to execute command: {str(e)}"}
@@ -0,0 +1,140 @@
# Grep Search Tool
Searches for regex patterns in files or directories within the secure session sandbox.
## Description
The `grep_search` tool provides powerful pattern matching capabilities across files and directories. It uses Python's regex engine to find matches and returns detailed results including file paths, line numbers, and matched content.
## Use Cases
- Finding function or variable definitions
- Searching for TODO comments or specific patterns
- Analyzing code for security issues or patterns
- Locating configuration values across multiple files
## Usage
```python
grep_search(
path="src",
pattern="def \\w+\\(",
workspace_id="workspace-123",
agent_id="agent-456",
session_id="session-789",
recursive=True
)
```
## Arguments
| Argument | Type | Required | Default | Description |
|----------|------|----------|---------|-------------|
| `path` | str | Yes | - | The path to search in (file or directory, relative to session root) |
| `pattern` | str | Yes | - | The regex pattern to search for |
| `workspace_id` | str | Yes | - | The ID of the workspace |
| `agent_id` | str | Yes | - | The ID of the agent |
| `session_id` | str | Yes | - | The ID of the current session |
| `recursive` | bool | No | False | Whether to search recursively in subdirectories |
## Returns
Returns a dictionary with the following structure:
**Success:**
```python
{
"success": True,
"pattern": "def \\w+\\(",
"path": "src",
"recursive": True,
"matches": [
{
"file": "src/main.py",
"line_number": 10,
"line_content": "def process_data(args):"
},
{
"file": "src/utils.py",
"line_number": 5,
"line_content": "def helper_function():"
}
],
"total_matches": 2
}
```
**No matches:**
```python
{
"success": True,
"pattern": "nonexistent",
"path": "src",
"recursive": False,
"matches": [],
"total_matches": 0
}
```
**Error:**
```python
{
"error": "Failed to perform grep search: [error message]"
}
```
## Error Handling
- Returns an error dict if the path doesn't exist
- Skips files that cannot be decoded (binary files, encoding errors)
- Skips files with permission errors
- Returns empty matches list if no matches found
- Handles invalid regex patterns with error message
## Examples
### Searching for function definitions
```python
result = grep_search(
path="src",
pattern="^def ",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1",
recursive=True
)
# Returns: {"success": True, "pattern": "^def ", "matches": [...], "total_matches": 15}
```
### Searching a single file
```python
result = grep_search(
path="config.py",
pattern="API_KEY",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": True, "pattern": "API_KEY", "matches": [{...}], "total_matches": 1}
```
### Case-insensitive search using regex flags
```python
result = grep_search(
path="docs",
pattern="(?i)todo",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1",
recursive=True
)
# Finds "TODO", "todo", "Todo", etc.
```
## Notes
- Uses Python's `re` module for regex matching
- Binary files and files with encoding errors are automatically skipped
- Line numbers start at 1
- Returned file paths are relative to the session root
- For non-recursive directory searches, only files in the immediate directory are searched
@@ -0,0 +1,3 @@
from .grep_search import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,70 @@
import os
import re
from mcp.server.fastmcp import FastMCP
from ..security import get_secure_path, WORKSPACES_DIR
def register_tools(mcp: FastMCP) -> None:
    """Register grep search tools with the MCP server."""

    @mcp.tool()
    def grep_search(path: str, pattern: str, workspace_id: str, agent_id: str, session_id: str, recursive: bool = False) -> dict:
        """
        Search for a pattern in a file or directory within the session sandbox.

        Use this when you need to find specific content or patterns in files using regex.
        Set recursive=True to search through all subdirectories.

        Args:
            path: The path to search in (file or directory, relative to session root)
            pattern: The regex pattern to search for
            workspace_id: The ID of the workspace
            agent_id: The ID of the agent
            session_id: The ID of the current session
            recursive: Whether to search recursively in directories (default: False)

        Returns:
            Dict with search results and match details, or error dict
        """
        try:
            secure_path = get_secure_path(path, workspace_id, agent_id, session_id)
            # Fix: fail fast with the documented clear error instead of
            # surfacing a raw listdir/open OSError message for missing paths.
            if not os.path.exists(secure_path):
                return {"error": f"Path not found at {path}"}

            # Session root anchors the relative paths shown in results.
            session_root = os.path.join(WORKSPACES_DIR, workspace_id, agent_id, session_id)

            # Invalid regex raises here and is reported via the except below.
            regex = re.compile(pattern)

            # Candidate files: a single file, a full tree walk, or just the
            # immediate directory entries.
            if os.path.isfile(secure_path):
                files = [secure_path]
            elif recursive:
                files = [
                    os.path.join(root, filename)
                    for root, _, filenames in os.walk(secure_path)
                    for filename in filenames
                ]
            else:
                files = [
                    os.path.join(secure_path, entry)
                    for entry in os.listdir(secure_path)
                    if os.path.isfile(os.path.join(secure_path, entry))
                ]

            matches = []
            for file_path in files:
                display_path = os.path.relpath(file_path, session_root)
                try:
                    with open(file_path, "r", encoding="utf-8") as f:
                        for line_number, line in enumerate(f, 1):
                            if regex.search(line):
                                matches.append({
                                    "file": display_path,
                                    "line_number": line_number,
                                    "line_content": line.strip()
                                })
                except (UnicodeDecodeError, PermissionError):
                    # Binary or unreadable files are skipped silently.
                    continue

            return {
                "success": True,
                "pattern": pattern,
                "path": path,
                "recursive": recursive,
                "matches": matches,
                "total_matches": len(matches)
            }
        except Exception as e:
            return {"error": f"Failed to perform grep search: {str(e)}"}
@@ -0,0 +1,88 @@
# List Dir Tool
Lists the contents of a directory within the secure session sandbox.
## Description
The `list_dir` tool allows you to explore directory contents, viewing all files and subdirectories with their metadata. It provides a structured view of the filesystem hierarchy.
## Use Cases
- Exploring project structure
- Finding specific files
- Checking for file existence
- Understanding directory organization
## Usage
```python
list_dir(
path="src",
workspace_id="workspace-123",
agent_id="agent-456",
session_id="session-789"
)
```
## Arguments
| Argument | Type | Required | Default | Description |
|----------|------|----------|---------|-------------|
| `path` | str | Yes | - | The directory path (relative to session root) |
| `workspace_id` | str | Yes | - | The ID of the workspace |
| `agent_id` | str | Yes | - | The ID of the agent |
| `session_id` | str | Yes | - | The ID of the current session |
## Returns
Returns a dictionary with the following structure:
**Success:**
```python
{
"success": True,
"path": "src",
"entries": [
{"name": "main.py", "type": "file", "size_bytes": 1024},
{"name": "utils", "type": "directory", "size_bytes": null}
],
"total_count": 2
}
```
**Error:**
```python
{
"error": "Directory not found at src"
}
```
## Error Handling
- Returns an error dict if the directory doesn't exist
- Returns an error dict if the path points to a file instead of a directory
- Returns an error dict if the directory cannot be read (permission issues, etc.)
## Examples
### Listing directory contents
```python
result = list_dir(
path=".",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": True, "path": ".", "entries": [...], "total_count": 5}
```
### Checking an empty directory
```python
result = list_dir(
path="empty_folder",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": True, "path": "empty_folder", "entries": [], "total_count": 0}
```
@@ -0,0 +1,3 @@
from .list_dir import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,49 @@
import os
from mcp.server.fastmcp import FastMCP
from ..security import get_secure_path
def register_tools(mcp: FastMCP) -> None:
    """Register directory listing tools with the MCP server."""

    @mcp.tool()
    def list_dir(path: str, workspace_id: str, agent_id: str, session_id: str) -> dict:
        """
        List the contents of a directory within the session sandbox.

        Use this when you need to explore directory contents and see what files
        and subdirectories exist.

        Args:
            path: The directory path (relative to session root)
            workspace_id: The ID of the workspace
            agent_id: The ID of the agent
            session_id: The ID of the current session

        Returns:
            Dict with directory contents and metadata, or error dict
        """
        try:
            secure_path = get_secure_path(path, workspace_id, agent_id, session_id)
            if not os.path.exists(secure_path):
                return {"error": f"Directory not found at {path}"}
            # Fix: the documented contract rejects file paths explicitly;
            # previously os.listdir raised NotADirectoryError and the caller
            # got an opaque "Failed to list directory" message.
            if not os.path.isdir(secure_path):
                return {"error": f"Path is not a directory: {path}"}

            entries = []
            for item in os.listdir(secure_path):
                full_path = os.path.join(secure_path, item)
                is_dir = os.path.isdir(full_path)
                entries.append({
                    "name": item,
                    "type": "directory" if is_dir else "file",
                    # Size is only meaningful for regular files.
                    "size_bytes": os.path.getsize(full_path) if not is_dir else None
                })
            return {
                "success": True,
                "path": path,
                "entries": entries,
                "total_count": len(entries)
            }
        except Exception as e:
            return {"error": f"Failed to list directory: {str(e)}"}
@@ -0,0 +1,102 @@
# Replace File Content Tool
Replaces specific string occurrences in a file within the secure session sandbox.
## Description
The `replace_file_content` tool performs find-and-replace operations on file content. It replaces all occurrences of a target string with a replacement string, providing details about the number of replacements made.
## Use Cases
- Updating configuration values
- Refactoring code (renaming variables, functions)
- Batch text replacements
- Updating version numbers or URLs
## Usage
```python
replace_file_content(
path="config/settings.json",
target='"debug": false',
replacement='"debug": true',
workspace_id="workspace-123",
agent_id="agent-456",
session_id="session-789"
)
```
## Arguments
| Argument | Type | Required | Default | Description |
|----------|------|----------|---------|-------------|
| `path` | str | Yes | - | The path to the file (relative to session root) |
| `target` | str | Yes | - | The string to search for and replace |
| `replacement` | str | Yes | - | The string to replace it with |
| `workspace_id` | str | Yes | - | The ID of the workspace |
| `agent_id` | str | Yes | - | The ID of the agent |
| `session_id` | str | Yes | - | The ID of the current session |
## Returns
Returns a dictionary with the following structure:
**Success:**
```python
{
"success": True,
"path": "config/settings.json",
"occurrences_replaced": 3,
"target_length": 15,
"replacement_length": 14
}
```
**Error:**
```python
{
"error": "Target string not found in config/settings.json"
}
```
## Error Handling
- Returns an error dict if the file doesn't exist
- Returns an error dict if the target string is not found in the file
- Returns an error dict if the file cannot be read or written
- All occurrences of the target string are replaced
## Examples
### Replacing a configuration value
```python
result = replace_file_content(
path="app.config",
target="localhost",
replacement="production.example.com",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": True, "path": "app.config", "occurrences_replaced": 2, "target_length": 9, "replacement_length": 23}
```
### Handling missing target string
```python
result = replace_file_content(
path="README.md",
target="nonexistent text",
replacement="new text",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"error": "Target string not found in README.md"}
```
## Notes
- This operation replaces **all** occurrences of the target string
- The replacement is case-sensitive
- For regex-based replacements, consider using a different tool
- The file is overwritten with the new content
@@ -0,0 +1,3 @@
# Re-export the MCP tool registration hook so the toolkit package can be
# imported as `from ...replace_file_content import register_tools`.
from .replace_file_content import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,51 @@
import os
from mcp.server.fastmcp import FastMCP
from ..security import get_secure_path
def register_tools(mcp: FastMCP) -> None:
    """Register file content replacement tools with the MCP server."""

    @mcp.tool()
    def replace_file_content(path: str, target: str, replacement: str, workspace_id: str, agent_id: str, session_id: str) -> dict:
        """
        Replace content in a file within the session sandbox.
        Use this when you need to perform find-and-replace operations on file content.
        All occurrences of the target string will be replaced.
        Args:
            path: The path to the file (relative to session root)
            target: The string to search for and replace
            replacement: The string to replace it with
            workspace_id: The ID of the workspace
            agent_id: The ID of the agent
            session_id: The ID of the current session
        Returns:
            Dict with replacement count and status, or error dict
        """
        try:
            # Resolve the caller-supplied path inside the sandbox first; this
            # raises ValueError for escape attempts, caught below.
            resolved = get_secure_path(path, workspace_id, agent_id, session_id)
            if not os.path.exists(resolved):
                return {"error": f"File not found at {path}"}

            with open(resolved, "r", encoding="utf-8") as handle:
                original = handle.read()

            hits = original.count(target)
            if hits == 0:
                return {"error": f"Target string not found in {path}"}

            # Replace every occurrence and rewrite the file in place.
            with open(resolved, "w", encoding="utf-8") as handle:
                handle.write(original.replace(target, replacement))

            return {
                "success": True,
                "path": path,
                "occurrences_replaced": hits,
                "target_length": len(target),
                "replacement_length": len(replacement),
            }
        except Exception as e:
            # Errors are reported as data, never raised to the MCP layer.
            return {"error": f"Failed to replace content: {str(e)}"}
@@ -0,0 +1,27 @@
import os
# Root of every sandbox, anchored at the process working directory.
WORKSPACES_DIR = os.path.abspath(os.path.join(os.getcwd(), "workdir/workspaces"))


def get_secure_path(path: str, workspace_id: str, agent_id: str, session_id: str) -> str:
    """Resolve and verify a path within a 3-layer sandbox (workspace/agent/session).

    Args:
        path: Caller-supplied file path. Absolute paths (including Windows
            drive-prefixed ones like ``C:\\x``) are reinterpreted as relative
            to the session root rather than rejected.
        workspace_id: Outermost sandbox layer identifier.
        agent_id: Middle sandbox layer identifier.
        session_id: Innermost sandbox layer identifier.

    Returns:
        An absolute, symlink-resolved path guaranteed to be inside the
        session directory (which is created if missing).

    Raises:
        ValueError: If any identifier is empty, or the resolved path would
            escape the session sandbox.
    """
    if not workspace_id or not agent_id or not session_id:
        raise ValueError("workspace_id, agent_id, and session_id are all required")
    # Ensure session directory exists: runtime/workspace_id/agent_id/session_id
    session_dir = os.path.join(WORKSPACES_DIR, workspace_id, agent_id, session_id)
    os.makedirs(session_dir, exist_ok=True)
    # Treat absolute paths as relative to the session root. splitdrive also
    # strips a Windows drive prefix ("C:"), which lstrip(os.sep) alone would
    # miss and which would otherwise let the join escape the sandbox.
    _, tail = os.path.splitdrive(path)
    rel_path = tail.lstrip("/\\")
    # realpath (not abspath) so symlinks inside the sandbox cannot be used
    # to point outside of it; nonexistent trailing components are kept as-is.
    session_root = os.path.realpath(session_dir)
    final_path = os.path.realpath(os.path.join(session_root, rel_path))
    # Verify containment only after full resolution.
    if os.path.commonpath([final_path, session_root]) != session_root:
        raise ValueError(f"Access denied: Path '{path}' is outside the session sandbox.")
    return final_path
@@ -0,0 +1,86 @@
# View File Tool
Reads the content of a file within the secure session sandbox.
## Description
The `view_file` tool allows you to read and retrieve the complete content of files within a sandboxed session environment. It provides metadata about the file along with its content.
## Use Cases
- Reading configuration files
- Viewing source code
- Inspecting log files
- Retrieving data files for processing
## Usage
```python
view_file(
path="config/settings.json",
workspace_id="workspace-123",
agent_id="agent-456",
session_id="session-789"
)
```
## Arguments
| Argument | Type | Required | Default | Description |
|----------|------|----------|---------|-------------|
| `path` | str | Yes | - | The path to the file (relative to session root) |
| `workspace_id` | str | Yes | - | The ID of the workspace |
| `agent_id` | str | Yes | - | The ID of the agent |
| `session_id` | str | Yes | - | The ID of the current session |
## Returns
Returns a dictionary with the following structure:
**Success:**
```python
{
"success": True,
"path": "config/settings.json",
"content": "{\"debug\": true}",
    "size_bytes": 15,
"lines": 1
}
```
**Error:**
```python
{
"error": "File not found at config/settings.json"
}
```
## Error Handling
- Returns an error dict if the file doesn't exist
- Returns an error dict if the file cannot be read (permission issues, encoding errors, etc.)
- Handles binary files gracefully by returning appropriate error messages
## Examples
### Reading a text file
```python
result = view_file(
path="README.md",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": True, "path": "README.md", "content": "# My Project\n...", "size_bytes": 1024, "lines": 42}
```
### Handling missing files
```python
result = view_file(
path="nonexistent.txt",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"error": "File not found at nonexistent.txt"}
```
@@ -0,0 +1,3 @@
# Re-export the MCP tool registration hook so the toolkit package can be
# imported as `from ...view_file import register_tools`.
from .view_file import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,40 @@
import os
from mcp.server.fastmcp import FastMCP
from ..security import get_secure_path
def register_tools(mcp: FastMCP) -> None:
    """Register file view tools with the MCP server."""

    @mcp.tool()
    def view_file(path: str, workspace_id: str, agent_id: str, session_id: str) -> dict:
        """
        Read the content of a file within the session sandbox.
        Use this when you need to view the contents of an existing file.
        Args:
            path: The path to the file (relative to session root)
            workspace_id: The ID of the workspace
            agent_id: The ID of the agent
            session_id: The ID of the current session
        Returns:
            Dict with file content and metadata, or error dict
        """
        try:
            # Sandbox resolution happens before any filesystem access.
            resolved = get_secure_path(path, workspace_id, agent_id, session_id)
            if not os.path.exists(resolved):
                return {"error": f"File not found at {path}"}

            with open(resolved, "r", encoding="utf-8") as handle:
                text = handle.read()

            # size_bytes reflects the UTF-8 encoding, not the character count.
            return {
                "success": True,
                "path": path,
                "content": text,
                "size_bytes": len(text.encode("utf-8")),
                "lines": len(text.splitlines()),
            }
        except Exception as e:
            # Errors are reported as data, never raised to the MCP layer.
            return {"error": f"Failed to read file: {str(e)}"}
@@ -0,0 +1,92 @@
# Write to File Tool
Writes content to a file within the secure session sandbox. Supports both overwriting and appending modes.
## Description
The `write_to_file` tool allows you to create new files or modify existing files within a sandboxed session environment. It automatically creates parent directories if they don't exist and provides flexible write modes.
## Use Cases
- Creating new configuration files
- Writing generated code or data
- Appending logs or output to existing files
- Saving processed results to disk
## Usage
```python
write_to_file(
path="config/settings.json",
content='{"debug": true}',
workspace_id="workspace-123",
agent_id="agent-456",
session_id="session-789",
append=False
)
```
## Arguments
| Argument | Type | Required | Default | Description |
|----------|------|----------|---------|-------------|
| `path` | str | Yes | - | The path to the file (relative to session root) |
| `content` | str | Yes | - | The content to write to the file |
| `workspace_id` | str | Yes | - | The ID of the workspace |
| `agent_id` | str | Yes | - | The ID of the agent |
| `session_id` | str | Yes | - | The ID of the current session |
| `append` | bool | No | False | Whether to append to the file instead of overwriting |
## Returns
Returns a dictionary with the following structure:
**Success:**
```python
{
"success": True,
"path": "config/settings.json",
"mode": "written", # or "appended"
    "bytes_written": 15
}
```
**Error:**
```python
{
"error": "Failed to write to file: [error message]"
}
```
## Error Handling
- Returns an error dict if the file cannot be written (permission issues, invalid path, etc.)
- Automatically creates parent directories if they don't exist
- Handles encoding errors gracefully
## Examples
### Creating a new file
```python
result = write_to_file(
path="data/output.txt",
content="Hello, world!",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1"
)
# Returns: {"success": True, "path": "data/output.txt", "mode": "written", "bytes_written": 13}
```
### Appending to a file
```python
result = write_to_file(
path="logs/activity.log",
content="\n[INFO] Task completed",
workspace_id="ws-1",
agent_id="agent-1",
session_id="session-1",
append=True
)
# Returns: {"success": True, "path": "logs/activity.log", "mode": "appended", "bytes_written": 22}
```
@@ -0,0 +1,3 @@
# Re-export the MCP tool registration hook so the toolkit package can be
# imported as `from ...write_to_file import register_tools`.
from .write_to_file import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,40 @@
import os
from mcp.server.fastmcp import FastMCP
from ..security import get_secure_path
def register_tools(mcp: FastMCP) -> None:
    """Register file write tools with the MCP server."""

    @mcp.tool()
    def write_to_file(path: str, content: str, workspace_id: str, agent_id: str, session_id: str, append: bool = False) -> dict:
        """
        Write content to a file within the session sandbox.
        Use this when you need to create a new file or overwrite an existing file.
        Set append=True to add content to the end of an existing file.
        Args:
            path: The path to the file (relative to session root)
            content: The content to write to the file
            workspace_id: The ID of the workspace
            agent_id: The ID of the agent
            session_id: The ID of the current session
            append: Whether to append to the file instead of overwriting (default: False)
        Returns:
            Dict with success status and path, or error dict
        """
        try:
            resolved = get_secure_path(path, workspace_id, agent_id, session_id)
            # Parent directories are created on demand so nested paths work.
            os.makedirs(os.path.dirname(resolved), exist_ok=True)

            file_mode = "a" if append else "w"
            with open(resolved, file_mode, encoding="utf-8") as handle:
                handle.write(content)

            # bytes_written reflects the UTF-8 encoding of the payload.
            return {
                "success": True,
                "path": path,
                "mode": "appended" if append else "written",
                "bytes_written": len(content.encode("utf-8")),
            }
        except Exception as e:
            # Errors are reported as data, never raised to the MCP layer.
            return {"error": f"Failed to write to file: {str(e)}"}
@@ -0,0 +1,731 @@
"""Tests for file_system_toolkits tools (FastMCP)."""
import os
import pytest
from pathlib import Path
from unittest.mock import Mock, patch
from fastmcp import FastMCP
@pytest.fixture
def mcp():
    """Create a FastMCP instance.

    Function-scoped (pytest default), so each test gets a fresh server and
    tool registrations cannot leak between tests.
    """
    return FastMCP("test-server")
@pytest.fixture
def mock_workspace():
    """Mock workspace, agent, and session IDs.

    Returned as a dict so tests can splat it into tool calls
    via **mock_workspace.
    """
    return {
        "workspace_id": "test-workspace",
        "agent_id": "test-agent",
        "session_id": "test-session"
    }
@pytest.fixture
def mock_secure_path(tmp_path):
    """Mock get_secure_path to return temp directory paths.

    Each toolkit module imports get_secure_path into its own namespace, so
    every copy must be patched individually; grep_search and
    execute_command_tool also read WORKSPACES_DIR directly, so those
    constants are patched too. An ExitStack keeps the patches flat instead
    of ten nested `with` blocks.
    """
    from contextlib import ExitStack

    def _get_secure_path(path, workspace_id, agent_id, session_id):
        # Bypass sandbox resolution entirely: everything lives under tmp_path.
        return os.path.join(tmp_path, path)

    base = "aden_tools.tools.file_system_toolkits"
    modules = [
        "view_file.view_file",
        "write_to_file.write_to_file",
        "list_dir.list_dir",
        "replace_file_content.replace_file_content",
        "apply_diff.apply_diff",
        "apply_patch.apply_patch",
        "grep_search.grep_search",
        "execute_command_tool.execute_command_tool",
    ]
    with ExitStack() as stack:
        for module in modules:
            stack.enter_context(
                patch(f"{base}.{module}.get_secure_path", side_effect=_get_secure_path)
            )
        # These two modules consult WORKSPACES_DIR at call time as well.
        stack.enter_context(patch(f"{base}.grep_search.grep_search.WORKSPACES_DIR", str(tmp_path)))
        stack.enter_context(patch(f"{base}.execute_command_tool.execute_command_tool.WORKSPACES_DIR", str(tmp_path)))
        yield
class TestViewFileTool:
    """Tests for view_file tool.

    The view_file_fn fixture pulls the raw tool function out of FastMCP's
    private tool manager so tests invoke it directly, without a transport.
    """
    @pytest.fixture
    def view_file_fn(self, mcp):
        from aden_tools.tools.file_system_toolkits.view_file import register_tools
        register_tools(mcp)
        # Private FastMCP internals: grab the undecorated callable by name.
        return mcp._tool_manager._tools["view_file"].fn
    def test_view_existing_file(self, view_file_fn, mock_workspace, mock_secure_path, tmp_path):
        """Viewing an existing file returns content and metadata."""
        test_file = tmp_path / "test.txt"
        test_file.write_text("Hello, World!")
        result = view_file_fn(path="test.txt", **mock_workspace)
        assert result["success"] is True
        assert result["content"] == "Hello, World!"
        assert result["size_bytes"] == len("Hello, World!".encode("utf-8"))
        assert result["lines"] == 1
    def test_view_nonexistent_file(self, view_file_fn, mock_workspace, mock_secure_path):
        """Viewing a non-existent file returns an error."""
        result = view_file_fn(path="nonexistent.txt", **mock_workspace)
        assert "error" in result
        assert "not found" in result["error"].lower()
    def test_view_multiline_file(self, view_file_fn, mock_workspace, mock_secure_path, tmp_path):
        """Viewing a multiline file returns correct line count."""
        test_file = tmp_path / "multiline.txt"
        content = "Line 1\nLine 2\nLine 3\nLine 4\n"
        test_file.write_text(content)
        result = view_file_fn(path="multiline.txt", **mock_workspace)
        assert result["success"] is True
        assert result["content"] == content
        assert result["lines"] == 4
    def test_view_empty_file(self, view_file_fn, mock_workspace, mock_secure_path, tmp_path):
        """Viewing an empty file returns empty content."""
        test_file = tmp_path / "empty.txt"
        test_file.write_text("")
        result = view_file_fn(path="empty.txt", **mock_workspace)
        assert result["success"] is True
        assert result["content"] == ""
        assert result["size_bytes"] == 0
        assert result["lines"] == 0
    def test_view_file_with_unicode(self, view_file_fn, mock_workspace, mock_secure_path, tmp_path):
        """Viewing a file with unicode characters works correctly."""
        test_file = tmp_path / "unicode.txt"
        content = "Hello 世界! 🌍 émoji"
        test_file.write_text(content, encoding="utf-8")
        result = view_file_fn(path="unicode.txt", **mock_workspace)
        assert result["success"] is True
        assert result["content"] == content
        # size_bytes is the UTF-8 byte length, which exceeds the char count here.
        assert result["size_bytes"] == len(content.encode("utf-8"))
    def test_view_nested_file(self, view_file_fn, mock_workspace, mock_secure_path, tmp_path):
        """Viewing a file in a nested directory works correctly."""
        nested = tmp_path / "nested" / "dir"
        nested.mkdir(parents=True)
        test_file = nested / "file.txt"
        test_file.write_text("nested content")
        result = view_file_fn(path="nested/dir/file.txt", **mock_workspace)
        assert result["success"] is True
        assert result["content"] == "nested content"
class TestWriteToFileTool:
    """Tests for write_to_file tool.

    Covers overwrite (default), append mode, implicit parent-directory
    creation, and empty payloads.
    """
    @pytest.fixture
    def write_to_file_fn(self, mcp):
        from aden_tools.tools.file_system_toolkits.write_to_file import register_tools
        register_tools(mcp)
        # Private FastMCP internals: grab the undecorated callable by name.
        return mcp._tool_manager._tools["write_to_file"].fn
    def test_write_new_file(self, write_to_file_fn, mock_workspace, mock_secure_path, tmp_path):
        """Writing to a new file creates it successfully."""
        result = write_to_file_fn(
            path="new_file.txt",
            content="Test content",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["mode"] == "written"
        assert result["bytes_written"] > 0
        # Verify file was created
        created_file = tmp_path / "new_file.txt"
        assert created_file.exists()
        assert created_file.read_text() == "Test content"
    def test_write_append_mode(self, write_to_file_fn, mock_workspace, mock_secure_path, tmp_path):
        """Writing with append=True appends to existing file."""
        test_file = tmp_path / "append_test.txt"
        test_file.write_text("Line 1\n")
        result = write_to_file_fn(
            path="append_test.txt",
            content="Line 2\n",
            append=True,
            **mock_workspace
        )
        assert result["success"] is True
        assert result["mode"] == "appended"
        assert test_file.read_text() == "Line 1\nLine 2\n"
    def test_write_overwrite_existing(self, write_to_file_fn, mock_workspace, mock_secure_path, tmp_path):
        """Writing to existing file overwrites it by default."""
        test_file = tmp_path / "overwrite.txt"
        test_file.write_text("Original content")
        result = write_to_file_fn(
            path="overwrite.txt",
            content="New content",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["mode"] == "written"
        assert test_file.read_text() == "New content"
    def test_write_creates_parent_directories(self, write_to_file_fn, mock_workspace, mock_secure_path, tmp_path):
        """Writing creates parent directories if they don't exist."""
        result = write_to_file_fn(
            path="nested/dir/file.txt",
            content="Test",
            **mock_workspace
        )
        assert result["success"] is True
        created_file = tmp_path / "nested" / "dir" / "file.txt"
        assert created_file.exists()
        assert created_file.read_text() == "Test"
    def test_write_empty_content(self, write_to_file_fn, mock_workspace, mock_secure_path, tmp_path):
        """Writing empty content creates empty file."""
        result = write_to_file_fn(
            path="empty.txt",
            content="",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["bytes_written"] == 0
        created_file = tmp_path / "empty.txt"
        assert created_file.exists()
        assert created_file.read_text() == ""
class TestListDirTool:
    """Tests for list_dir tool.

    Asserts entry structure (name/type/size_bytes), empty-directory and
    missing-directory behavior.
    """
    @pytest.fixture
    def list_dir_fn(self, mcp):
        from aden_tools.tools.file_system_toolkits.list_dir import register_tools
        register_tools(mcp)
        # Private FastMCP internals: grab the undecorated callable by name.
        return mcp._tool_manager._tools["list_dir"].fn
    def test_list_directory(self, list_dir_fn, mock_workspace, mock_secure_path, tmp_path):
        """Listing a directory returns all entries."""
        # Create test files and directories
        (tmp_path / "file1.txt").write_text("content")
        (tmp_path / "file2.txt").write_text("content")
        (tmp_path / "subdir").mkdir()
        result = list_dir_fn(path=".", **mock_workspace)
        assert result["success"] is True
        assert result["total_count"] == 3
        assert len(result["entries"]) == 3
        # Check that entries have correct structure
        for entry in result["entries"]:
            assert "name" in entry
            assert "type" in entry
            assert entry["type"] in ["file", "directory"]
    def test_list_empty_directory(self, list_dir_fn, mock_workspace, mock_secure_path, tmp_path):
        """Listing an empty directory returns empty list."""
        empty_dir = tmp_path / "empty"
        empty_dir.mkdir()
        result = list_dir_fn(path="empty", **mock_workspace)
        assert result["success"] is True
        assert result["total_count"] == 0
        assert result["entries"] == []
    def test_list_nonexistent_directory(self, list_dir_fn, mock_workspace, mock_secure_path):
        """Listing a non-existent directory returns error."""
        result = list_dir_fn(path="nonexistent_dir", **mock_workspace)
        assert "error" in result
        assert "not found" in result["error"].lower()
    def test_list_directory_with_file_sizes(self, list_dir_fn, mock_workspace, mock_secure_path, tmp_path):
        """Listing a directory returns file sizes for files."""
        (tmp_path / "small.txt").write_text("hi")
        (tmp_path / "larger.txt").write_text("hello world")
        (tmp_path / "subdir").mkdir()
        result = list_dir_fn(path=".", **mock_workspace)
        assert result["success"] is True
        # Find entries by name
        entries_by_name = {e["name"]: e for e in result["entries"]}
        # Files should have size_bytes
        assert entries_by_name["small.txt"]["type"] == "file"
        assert entries_by_name["small.txt"]["size_bytes"] == 2
        assert entries_by_name["larger.txt"]["type"] == "file"
        assert entries_by_name["larger.txt"]["size_bytes"] == 11
        # Directories should have None for size_bytes
        assert entries_by_name["subdir"]["type"] == "directory"
        assert entries_by_name["subdir"]["size_bytes"] is None
class TestReplaceFileContentTool:
    """Tests for replace_file_content tool.

    Covers single/multiple occurrences, multiline content, and the two
    error paths (missing file, missing target string).
    """
    @pytest.fixture
    def replace_file_content_fn(self, mcp):
        from aden_tools.tools.file_system_toolkits.replace_file_content import register_tools
        register_tools(mcp)
        # Private FastMCP internals: grab the undecorated callable by name.
        return mcp._tool_manager._tools["replace_file_content"].fn
    def test_replace_content(self, replace_file_content_fn, mock_workspace, mock_secure_path, tmp_path):
        """Replacing content in a file works correctly."""
        test_file = tmp_path / "replace_test.txt"
        test_file.write_text("Hello World! Hello again!")
        result = replace_file_content_fn(
            path="replace_test.txt",
            target="Hello",
            replacement="Hi",
            **mock_workspace
        )
        assert result["success"] is True
        # All occurrences are replaced, not just the first.
        assert result["occurrences_replaced"] == 2
        assert test_file.read_text() == "Hi World! Hi again!"
    def test_replace_target_not_found(self, replace_file_content_fn, mock_workspace, mock_secure_path, tmp_path):
        """Replacing non-existent target returns error."""
        test_file = tmp_path / "test.txt"
        test_file.write_text("Hello World")
        result = replace_file_content_fn(
            path="test.txt",
            target="nonexistent",
            replacement="new",
            **mock_workspace
        )
        assert "error" in result
        assert "not found" in result["error"].lower()
    def test_replace_file_not_found(self, replace_file_content_fn, mock_workspace, mock_secure_path):
        """Replacing content in non-existent file returns error."""
        result = replace_file_content_fn(
            path="nonexistent.txt",
            target="foo",
            replacement="bar",
            **mock_workspace
        )
        assert "error" in result
        assert "not found" in result["error"].lower()
    def test_replace_single_occurrence(self, replace_file_content_fn, mock_workspace, mock_secure_path, tmp_path):
        """Replacing content with single occurrence works correctly."""
        test_file = tmp_path / "single.txt"
        test_file.write_text("Hello World")
        result = replace_file_content_fn(
            path="single.txt",
            target="Hello",
            replacement="Hi",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["occurrences_replaced"] == 1
        assert test_file.read_text() == "Hi World"
    def test_replace_multiline_content(self, replace_file_content_fn, mock_workspace, mock_secure_path, tmp_path):
        """Replacing content across multiple lines works correctly."""
        test_file = tmp_path / "multiline.txt"
        test_file.write_text("Line 1\nTODO: fix this\nLine 3\nTODO: add tests\n")
        result = replace_file_content_fn(
            path="multiline.txt",
            target="TODO:",
            replacement="DONE:",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["occurrences_replaced"] == 2
        assert test_file.read_text() == "Line 1\nDONE: fix this\nLine 3\nDONE: add tests\n"
class TestGrepSearchTool:
    """Tests for grep_search tool.

    Covers single-file and directory search (recursive and not), regex
    patterns, and per-line match reporting.
    """
    @pytest.fixture
    def grep_search_fn(self, mcp):
        from aden_tools.tools.file_system_toolkits.grep_search import register_tools
        register_tools(mcp)
        # Private FastMCP internals: grab the undecorated callable by name.
        return mcp._tool_manager._tools["grep_search"].fn
    def test_grep_search_single_file(self, grep_search_fn, mock_workspace, mock_secure_path, tmp_path):
        """Searching a single file returns matches."""
        test_file = tmp_path / "search_test.txt"
        test_file.write_text("Line 1\nLine 2 with pattern\nLine 3")
        result = grep_search_fn(
            path="search_test.txt",
            pattern="pattern",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["total_matches"] == 1
        assert len(result["matches"]) == 1
        # Line numbers are 1-based.
        assert result["matches"][0]["line_number"] == 2
        assert "pattern" in result["matches"][0]["line_content"]
    def test_grep_search_no_matches(self, grep_search_fn, mock_workspace, mock_secure_path, tmp_path):
        """Searching with no matches returns empty list."""
        test_file = tmp_path / "test.txt"
        test_file.write_text("Hello World")
        result = grep_search_fn(
            path="test.txt",
            pattern="nonexistent",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["total_matches"] == 0
        assert result["matches"] == []
    def test_grep_search_directory_non_recursive(self, grep_search_fn, mock_workspace, mock_secure_path, tmp_path):
        """Searching directory non-recursively only searches immediate files."""
        # Create files in root
        (tmp_path / "file1.txt").write_text("pattern here")
        (tmp_path / "file2.txt").write_text("no match here")
        # Create nested directory with file
        nested = tmp_path / "nested"
        nested.mkdir()
        (nested / "nested_file.txt").write_text("pattern in nested")
        result = grep_search_fn(
            path=".",
            pattern="pattern",
            recursive=False,
            **mock_workspace
        )
        assert result["success"] is True
        assert result["total_matches"] == 1  # Only finds pattern in root, not in nested
        assert result["recursive"] is False
    def test_grep_search_directory_recursive(self, grep_search_fn, mock_workspace, mock_secure_path, tmp_path):
        """Searching directory recursively finds matches in subdirectories."""
        # Create files in root
        (tmp_path / "file1.txt").write_text("pattern here")
        # Create nested directory with file
        nested = tmp_path / "nested"
        nested.mkdir()
        (nested / "nested_file.txt").write_text("pattern in nested")
        result = grep_search_fn(
            path=".",
            pattern="pattern",
            recursive=True,
            **mock_workspace
        )
        assert result["success"] is True
        assert result["total_matches"] == 2  # Finds pattern in both files
        assert result["recursive"] is True
    def test_grep_search_regex_pattern(self, grep_search_fn, mock_workspace, mock_secure_path, tmp_path):
        """Searching with regex pattern finds complex matches."""
        test_file = tmp_path / "regex_test.txt"
        test_file.write_text("foo123bar\nfoo456bar\nbaz789baz\n")
        result = grep_search_fn(
            path="regex_test.txt",
            pattern=r"foo\d+bar",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["total_matches"] == 2
        assert result["matches"][0]["line_number"] == 1
        assert result["matches"][1]["line_number"] == 2
    def test_grep_search_multiple_matches_per_line(self, grep_search_fn, mock_workspace, mock_secure_path, tmp_path):
        """Searching returns one match per line even with multiple occurrences."""
        test_file = tmp_path / "multi_match.txt"
        test_file.write_text("hello hello hello\nworld\nhello again")
        result = grep_search_fn(
            path="multi_match.txt",
            pattern="hello",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["total_matches"] == 2  # Line 1 and Line 3
class TestExecuteCommandTool:
    """Tests for execute_command_tool.

    Note: "success" reports that the command was executed, not that it
    exited 0 — the exit status is surfaced separately as return_code.
    These tests assume a POSIX shell (echo, tr, ls, >&2).
    """
    @pytest.fixture
    def execute_command_fn(self, mcp):
        from aden_tools.tools.file_system_toolkits.execute_command_tool import register_tools
        register_tools(mcp)
        # Private FastMCP internals: grab the undecorated callable by name.
        return mcp._tool_manager._tools["execute_command_tool"].fn
    def test_execute_simple_command(self, execute_command_fn, mock_workspace, mock_secure_path):
        """Executing a simple command returns output."""
        result = execute_command_fn(
            command="echo 'Hello World'",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["return_code"] == 0
        assert "Hello World" in result["stdout"]
    def test_execute_failing_command(self, execute_command_fn, mock_workspace, mock_secure_path):
        """Executing a failing command returns non-zero exit code."""
        result = execute_command_fn(
            command="exit 1",
            **mock_workspace
        )
        # Still "success": the command ran; only its exit status is non-zero.
        assert result["success"] is True
        assert result["return_code"] == 1
    def test_execute_command_with_stderr(self, execute_command_fn, mock_workspace, mock_secure_path):
        """Executing a command that writes to stderr captures it."""
        result = execute_command_fn(
            command="echo 'error message' >&2",
            **mock_workspace
        )
        assert result["success"] is True
        assert "error message" in result.get("stderr", "")
    def test_execute_command_list_files(self, execute_command_fn, mock_workspace, mock_secure_path, tmp_path):
        """Executing ls command lists files."""
        # Create a test file
        (tmp_path / "testfile.txt").write_text("content")
        result = execute_command_fn(
            command=f"ls {tmp_path}",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["return_code"] == 0
        assert "testfile.txt" in result["stdout"]
    def test_execute_command_with_pipe(self, execute_command_fn, mock_workspace, mock_secure_path):
        """Executing a command with pipe works correctly."""
        result = execute_command_fn(
            command="echo 'hello world' | tr 'a-z' 'A-Z'",
            **mock_workspace
        )
        assert result["success"] is True
        assert result["return_code"] == 0
        assert "HELLO WORLD" in result["stdout"]
class TestApplyDiffTool:
    """Tests for apply_diff tool.

    Valid patches are generated with the diff_match_patch library so the
    tests exercise the same patch format the tool consumes.
    """
    @pytest.fixture
    def apply_diff_fn(self, mcp):
        from aden_tools.tools.file_system_toolkits.apply_diff import register_tools
        register_tools(mcp)
        # Private FastMCP internals: grab the undecorated callable by name.
        return mcp._tool_manager._tools["apply_diff"].fn
    def test_apply_diff_file_not_found(self, apply_diff_fn, mock_workspace, mock_secure_path):
        """Applying diff to non-existent file returns error."""
        result = apply_diff_fn(
            path="nonexistent.txt",
            diff_text="some diff",
            **mock_workspace
        )
        assert "error" in result
        assert "not found" in result["error"].lower()
    def test_apply_diff_successful(self, apply_diff_fn, mock_workspace, mock_secure_path, tmp_path):
        """Applying a valid diff successfully modifies the file."""
        test_file = tmp_path / "diff_test.txt"
        test_file.write_text("Hello World")
        # Create a simple diff using diff_match_patch format
        import diff_match_patch as dmp_module
        dmp = dmp_module.diff_match_patch()
        patches = dmp.patch_make("Hello World", "Hello Universe")
        diff_text = dmp.patch_toText(patches)
        result = apply_diff_fn(
            path="diff_test.txt",
            diff_text=diff_text,
            **mock_workspace
        )
        assert result["success"] is True
        assert result["all_successful"] is True
        assert result["patches_applied"] > 0
        assert test_file.read_text() == "Hello Universe"
    def test_apply_diff_multiline(self, apply_diff_fn, mock_workspace, mock_secure_path, tmp_path):
        """Applying diff to multiline content works correctly."""
        test_file = tmp_path / "multiline.txt"
        original = "Line 1\nLine 2\nLine 3\n"
        test_file.write_text(original)
        import diff_match_patch as dmp_module
        dmp = dmp_module.diff_match_patch()
        modified = "Line 1\nModified Line 2\nLine 3\n"
        patches = dmp.patch_make(original, modified)
        diff_text = dmp.patch_toText(patches)
        result = apply_diff_fn(
            path="multiline.txt",
            diff_text=diff_text,
            **mock_workspace
        )
        assert result["success"] is True
        assert result["all_successful"] is True
        assert test_file.read_text() == modified
    def test_apply_diff_invalid_patch(self, apply_diff_fn, mock_workspace, mock_secure_path, tmp_path):
        """Applying an invalid diff handles gracefully."""
        test_file = tmp_path / "test.txt"
        original_content = "Original content"
        test_file.write_text(original_content)
        # Invalid diff text
        result = apply_diff_fn(
            path="test.txt",
            diff_text="invalid diff format",
            **mock_workspace
        )
        # Should either error or show no patches applied
        if "error" not in result:
            assert result.get("patches_applied", 0) == 0
        # File should remain unchanged
        assert test_file.read_text() == original_content
class TestApplyPatchTool:
"""Tests for apply_patch tool."""
@pytest.fixture
def apply_patch_fn(self, mcp):
from aden_tools.tools.file_system_toolkits.apply_patch import register_tools
register_tools(mcp)
return mcp._tool_manager._tools["apply_patch"].fn
def test_apply_patch_file_not_found(self, apply_patch_fn, mock_workspace, mock_secure_path):
"""Applying patch to non-existent file returns error."""
result = apply_patch_fn(
path="nonexistent.txt",
patch_text="some patch",
**mock_workspace
)
assert "error" in result
assert "not found" in result["error"].lower()
def test_apply_patch_successful(self, apply_patch_fn, mock_workspace, mock_secure_path, tmp_path):
"""Applying a valid patch successfully modifies the file."""
test_file = tmp_path / "patch_test.txt"
test_file.write_text("Hello World")
# Create a simple patch using diff_match_patch format
import diff_match_patch as dmp_module
dmp = dmp_module.diff_match_patch()
patches = dmp.patch_make("Hello World", "Hello Python")
patch_text = dmp.patch_toText(patches)
result = apply_patch_fn(
path="patch_test.txt",
patch_text=patch_text,
**mock_workspace
)
assert result["success"] is True
assert result["all_successful"] is True
assert result["patches_applied"] > 0
assert test_file.read_text() == "Hello Python"
def test_apply_patch_multiline(self, apply_patch_fn, mock_workspace, mock_secure_path, tmp_path):
"""Applying patch to multiline content works correctly."""
test_file = tmp_path / "multiline.txt"
original = "Line 1\nLine 2\nLine 3\n"
test_file.write_text(original)
import diff_match_patch as dmp_module
dmp = dmp_module.diff_match_patch()
modified = "Line 1\nModified Line 2\nLine 3\n"
patches = dmp.patch_make(original, modified)
patch_text = dmp.patch_toText(patches)
result = apply_patch_fn(
path="multiline.txt",
patch_text=patch_text,
**mock_workspace
)
assert result["success"] is True
assert result["all_successful"] is True
assert test_file.read_text() == modified
def test_apply_patch_invalid_patch(self, apply_patch_fn, mock_workspace, mock_secure_path, tmp_path):
"""Applying an invalid patch handles gracefully."""
test_file = tmp_path / "test.txt"
original_content = "Original content"
test_file.write_text(original_content)
# Invalid patch text
result = apply_patch_fn(
path="test.txt",
patch_text="invalid patch format",
**mock_workspace
)
# Should either error or show no patches applied
if "error" not in result:
assert result.get("patches_applied", 0) == 0
# File should remain unchanged
assert test_file.read_text() == original_content
def test_apply_patch_multiple_changes(self, apply_patch_fn, mock_workspace, mock_secure_path, tmp_path):
    """A patch touching several spans (name and return value) applies fully."""
    before = "Function foo() {\n return 42;\n}\n"
    after = "Function bar() {\n return 100;\n}\n"

    target = tmp_path / "complex.txt"
    target.write_text(before)

    import diff_match_patch as dmp_module

    engine = dmp_module.diff_match_patch()
    serialized = engine.patch_toText(engine.patch_make(before, after))

    outcome = apply_patch_fn(path="complex.txt", patch_text=serialized, **mock_workspace)

    assert outcome["success"] is True
    assert outcome["all_successful"] is True
    assert target.read_text() == after
@@ -563,6 +563,161 @@ Constraint Test Review:
"""
```
## Integrating External Tools (MCP Servers)
Before adding nodes, you can register MCP servers to make their tools available to your agent.
### Using aden-tools in the Hive Monorepo
The hive monorepo includes `aden-tools` which provides web search, web scraping, and file operations.
**Step 1: Register the MCP Server**
After creating your session, register aden-tools:
```python
# Using MCP tools
add_mcp_server(
name="aden-tools",
transport="stdio",
command="python",
args='["mcp_server.py", "--stdio"]',
cwd="../aden-tools" # Relative to core/ directory
)
```
**Expected response:**
```json
{
"success": true,
"server": {
"name": "aden-tools",
"transport": "stdio",
"command": "python",
"args": ["-m", "aden_tools.server"],
"cwd": "../aden-tools"
},
"tools_discovered": 6,
"tools": [
"web_search",
"web_scrape",
"file_read",
"file_write",
"pdf_read",
"example_tool"
],
"note": "MCP server 'aden-tools' registered with 6 tools..."
}
```
**Step 2: List Available Tools** (optional verification)
```python
list_mcp_tools(server_name="aden-tools")
```
This shows detailed information about each tool including parameters.
**Step 3: Use Tools in Your Nodes**
Now you can reference these tools in `llm_tool_use` nodes:
```python
add_node(
node_id="web_searcher",
name="Web Searcher",
description="Search the web for information",
node_type="llm_tool_use",
input_keys='["query"]',
output_keys='["search_results"]',
tools='["web_search"]', # ← Tool from aden-tools
system_prompt="Search for {query} using web_search tool"
)
```
**Step 4: Export Creates mcp_servers.json**
When you export your agent with `export_graph()`, the MCP server configuration is automatically saved:
```
exports/my-agent/
├── agent.json # Agent specification
├── README.md # Documentation
└── mcp_servers.json # ← MCP configuration (auto-generated)
```
The `mcp_servers.json` file ensures the agent can access aden-tools when run later.
### Available aden-tools
| Tool | Description | Key Parameters |
|------|-------------|----------------|
| `web_search` | Search the web using Brave Search API | `query`, `num_results`, `country` |
| `web_scrape` | Extract text content from a webpage | `url`, `selector`, `include_links` |
| `file_read` | Read file contents | `path` |
| `file_write` | Write content to files | `path`, `content` |
| `pdf_read` | Extract text from PDF files | `path` |
### MCP Server Management
List registered servers:
```python
list_mcp_servers()
```
Remove a server:
```python
remove_mcp_server(name="aden-tools")
```
### Best Practices
1. **Register early**: Call `add_mcp_server` right after `create_session` and before defining nodes
2. **Verify tools**: Use `list_mcp_tools` to see available tools and their parameters
3. **Minimal tools**: Only include tools a node actually needs in its `tools` list
4. **Test nodes**: Use `test_node` to verify tool access works before building the full graph
### Example: Research Agent with aden-tools
```python
# 1. Create session
create_session(name="research-agent")
# 2. Register aden-tools
add_mcp_server(
name="aden-tools",
transport="stdio",
command="python",
args='["mcp_server.py", "--stdio"]',
cwd="../aden-tools"
)
# 3. Verify tools
list_mcp_tools(server_name="aden-tools")
# 4. Define goal
set_goal(
goal_id="research",
name="Research Agent",
description="Gather and synthesize information",
success_criteria='[...]',
constraints='[...]'
)
# 5. Add node that uses web_search
add_node(
node_id="searcher",
name="Information Searcher",
node_type="llm_tool_use",
input_keys='["topic"]',
output_keys='["search_results"]',
tools='["web_search"]', # From aden-tools
system_prompt="Search for information about {topic}"
)
# 6. Continue building...
```
## Adding Nodes
Each node does one thing:
+6 -1
View File
@@ -3,7 +3,12 @@
"agent-builder": {
"command": "python",
"args": ["-m", "framework.mcp.agent_builder_server"],
"cwd": "/home/timothy/aden/worker-bee"
"cwd": "/home/timothy/oss/hive/core"
},
"aden-tools": {
"command": "python",
"args": ["-m", "aden_tools.mcp_server", "--stdio"],
"cwd": "/home/timothy/oss/hive/aden-tools"
}
}
}
+334
View File
@@ -0,0 +1,334 @@
# Agent Builder MCP Tools - MCP Integration Guide
This guide explains how to use the new MCP integration tools in the agent builder MCP server.
## Overview
The agent builder now supports registering external MCP servers as tool sources. This allows you to:
1. Register MCP servers (like aden-tools) during agent building
2. Discover available tools from those servers
3. Use those tools in your agent nodes
4. Automatically generate `mcp_servers.json` configuration on export
## New MCP Tools
### `add_mcp_server`
Register an MCP server as a tool source for your agent.
**Parameters:**
- `name` (string, required): Unique name for the MCP server
- `transport` (string, required): Transport type - "stdio" or "http"
- `command` (string): Command to run (for stdio transport)
- `args` (string): JSON array of command arguments (for stdio)
- `cwd` (string): Working directory (for stdio)
- `env` (string): JSON object of environment variables (for stdio)
- `url` (string): Server URL (for http transport)
- `headers` (string): JSON object of HTTP headers (for http)
- `description` (string): Description of the MCP server
**Example - STDIO:**
```json
{
"name": "add_mcp_server",
"arguments": {
"name": "aden-tools",
"transport": "stdio",
"command": "python",
"args": "[\"mcp_server.py\", \"--stdio\"]",
"cwd": "../aden-tools",
"description": "Aden tools for web search and file operations"
}
}
```
**Example - HTTP:**
```json
{
"name": "add_mcp_server",
"arguments": {
"name": "remote-tools",
"transport": "http",
"url": "http://localhost:4001",
"description": "Remote tool server"
}
}
```
**Response:**
```json
{
"success": true,
"server": {
"name": "aden-tools",
"transport": "stdio",
"command": "python",
"args": ["mcp_server.py", "--stdio"],
"cwd": "../aden-tools",
"description": "Aden tools..."
},
"tools_discovered": 6,
"tools": [
"web_search",
"web_scrape",
"file_read",
"file_write",
"pdf_read",
"example_tool"
],
"total_mcp_servers": 1,
"note": "MCP server 'aden-tools' registered with 6 tools. These tools can now be used in llm_tool_use nodes."
}
```
### `list_mcp_servers`
List all registered MCP servers.
**Parameters:** None
**Response:**
```json
{
"mcp_servers": [
{
"name": "aden-tools",
"transport": "stdio",
"command": "python",
"args": ["mcp_server.py", "--stdio"],
"cwd": "../aden-tools",
"description": "Aden tools..."
}
],
"total": 1
}
```
### `list_mcp_tools`
List tools available from registered MCP servers.
**Parameters:**
- `server_name` (string, optional): Name of specific server to list tools from. If omitted, lists tools from all servers.
**Example:**
```json
{
"name": "list_mcp_tools",
"arguments": {
"server_name": "aden-tools"
}
}
```
**Response:**
```json
{
"success": true,
"tools_by_server": {
"aden-tools": [
{
"name": "web_search",
"description": "Search the web for information using Brave Search API...",
"parameters": ["query", "num_results", "country"]
},
{
"name": "web_scrape",
"description": "Scrape and extract text content from a webpage...",
"parameters": ["url", "selector", "include_links", "max_length"]
}
]
},
"total_tools": 6,
"note": "Use these tool names in the 'tools' parameter when adding llm_tool_use nodes"
}
```
### `remove_mcp_server`
Remove a registered MCP server.
**Parameters:**
- `name` (string, required): Name of the MCP server to remove
**Example:**
```json
{
"name": "remove_mcp_server",
"arguments": {
"name": "aden-tools"
}
}
```
**Response:**
```json
{
"success": true,
"removed": "aden-tools",
"remaining_servers": 0
}
```
## Workflow Example
Here's a complete workflow for building an agent with MCP tools:
### 1. Create Session
```json
{
"name": "create_session",
"arguments": {
"name": "web-research-agent"
}
}
```
### 2. Register MCP Server
```json
{
"name": "add_mcp_server",
"arguments": {
"name": "aden-tools",
"transport": "stdio",
"command": "python",
"args": "[\"mcp_server.py\", \"--stdio\"]",
"cwd": "../aden-tools"
}
}
```
### 3. List Available Tools
```json
{
"name": "list_mcp_tools",
"arguments": {
"server_name": "aden-tools"
}
}
```
### 4. Set Goal
```json
{
"name": "set_goal",
"arguments": {
"goal_id": "web-research",
"name": "Web Research Agent",
"description": "Search the web and summarize findings",
"success_criteria": "[{\"id\": \"search-success\", \"description\": \"Successfully retrieve search results\", \"metric\": \"results_count\", \"target\": \">= 3\", \"weight\": 1.0}]"
}
}
```
### 5. Add Node with MCP Tool
```json
{
"name": "add_node",
"arguments": {
"node_id": "web-searcher",
"name": "Web Search",
"description": "Search the web for information",
"node_type": "llm_tool_use",
"input_keys": "[\"query\"]",
"output_keys": "[\"search_results\"]",
"system_prompt": "Search for {query} using the web_search tool",
"tools": "[\"web_search\"]"
}
}
```
Note: `web_search` is now available because we registered the aden-tools MCP server!
### 6. Export Agent
```json
{
"name": "export_graph",
"arguments": {}
}
```
The export will create:
- `exports/web-research-agent/agent.json` - Agent specification
- `exports/web-research-agent/README.md` - Documentation
- `exports/web-research-agent/mcp_servers.json` - **MCP server configuration**
## MCP Configuration File
When you export an agent with registered MCP servers, an `mcp_servers.json` file is automatically created:
```json
{
"servers": [
{
"name": "aden-tools",
"transport": "stdio",
"command": "python",
"args": ["mcp_server.py", "--stdio"],
"cwd": "../aden-tools",
"description": "Aden tools for web search and file operations"
}
]
}
```
This file is automatically loaded by the AgentRunner when the agent is executed, making the MCP tools available at runtime.
## Using the Exported Agent
Once exported, load and run the agent normally:
```python
from framework.runner.runner import AgentRunner
# Load agent - MCP servers auto-load from mcp_servers.json
runner = AgentRunner.load("exports/web-research-agent")
# Run with input
result = await runner.run({"query": "latest AI breakthroughs"})
# The web_search tool from aden-tools is automatically available!
```
## Benefits
1. **Discoverable Tools**: See what tools are available before using them
2. **Validation**: Connection is tested when registering the server
3. **Automatic Configuration**: No manual file editing required
4. **Documentation**: README includes MCP server information
5. **Runtime Ready**: Exported agents work immediately with configured tools
## Common MCP Servers
### aden-tools
Provides:
- `web_search` - Brave Search API integration
- `web_scrape` - Web page content extraction
- `file_read` / `file_write` - File operations
- `pdf_read` - PDF text extraction
### Custom MCP Servers
You can register any MCP server that follows the Model Context Protocol specification.
## Troubleshooting
### "Failed to connect to MCP server"
- Verify the `command` and `args` are correct
- Check that the server is accessible at the specified path/URL
- Ensure any required environment variables are set
- For STDIO: verify the command can be executed from the `cwd`
- For HTTP: verify the server is running and accessible
### Tools not appearing
- Use `list_mcp_tools` to verify tools were discovered
- Check the tool names match exactly (case-sensitive)
- Ensure the MCP server is still registered (`list_mcp_servers`)
### Export doesn't include mcp_servers.json
- Verify you registered at least one MCP server
- Check `get_session_status` to see `mcp_servers_count > 0`
- Re-export the agent after registering servers
+361
View File
@@ -0,0 +1,361 @@
# MCP Integration Guide
This guide explains how to integrate Model Context Protocol (MCP) servers with the Hive Core Framework, enabling agents to use tools from external MCP servers.
## Overview
The framework provides built-in support for MCP servers, allowing you to:
- **Register MCP servers** via STDIO or HTTP transport
- **Auto-discover tools** from registered servers
- **Use MCP tools** seamlessly in your agents
- **Manage multiple MCP servers** simultaneously
## Quick Start
### 1. Register an MCP Server Programmatically
```python
from framework.runner.runner import AgentRunner
# Load your agent
runner = AgentRunner.load("exports/my-agent")
# Register aden-tools MCP server
runner.register_mcp_server(
name="aden-tools",
transport="stdio",
command="python",
args=["-m", "aden_tools.mcp_server", "--stdio"],
cwd="/path/to/aden-tools"
)
# Tools are now available to your agent
result = await runner.run({"input": "data"})
```
### 2. Use Configuration File
Create `mcp_servers.json` in your agent folder:
```json
{
"servers": [
{
"name": "aden-tools",
"transport": "stdio",
"command": "python",
"args": ["-m", "aden_tools.mcp_server", "--stdio"],
"cwd": "../aden-tools"
}
]
}
```
The framework will automatically load and register these servers when you load the agent:
```python
runner = AgentRunner.load("exports/my-agent") # MCP servers auto-loaded
```
## Transport Types
### STDIO Transport
Best for local MCP servers running as subprocesses:
```python
runner.register_mcp_server(
name="local-tools",
transport="stdio",
command="python",
args=["-m", "my_tools.server", "--stdio"],
cwd="/path/to/my-tools",
env={
"API_KEY": "your-key-here"
}
)
```
**Configuration:**
- `command`: Executable to run (e.g., "python", "node")
- `args`: List of command-line arguments
- `cwd`: Working directory for the process
- `env`: Environment variables (optional)
### HTTP Transport
Best for remote MCP servers or containerized deployments:
```python
runner.register_mcp_server(
name="remote-tools",
transport="http",
url="http://localhost:4001",
headers={
"Authorization": "Bearer token"
}
)
```
**Configuration:**
- `url`: Base URL of the MCP server
- `headers`: HTTP headers to include (optional)
## Using MCP Tools in Agents
Once registered, MCP tools are available just like any other tool:
### In Node Specifications
```python
from framework.builder.workflow import WorkflowBuilder
builder = WorkflowBuilder()
# Add a node that uses MCP tools
builder.add_node(
node_id="researcher",
name="Web Researcher",
node_type="llm_tool_use",
system_prompt="Research the topic using web_search",
tools=["web_search"], # Tool from aden-tools MCP server
input_keys=["topic"],
output_keys=["findings"]
)
```
### In Agent.json
Tools from MCP servers can be referenced in your agent.json just like built-in tools:
```json
{
"nodes": [
{
"id": "searcher",
"name": "Web Searcher",
"node_type": "llm_tool_use",
"system_prompt": "Search for information about {topic}",
"tools": ["web_search", "web_scrape"],
"input_keys": ["topic"],
"output_keys": ["results"]
}
]
}
```
## Available Tools from aden-tools
When you register the `aden-tools` MCP server, the following tools become available:
- **web_search**: Search the web using Brave Search API
- **web_scrape**: Scrape content from a URL
- **file_read**: Read file contents
- **file_write**: Write content to a file
- **pdf_read**: Extract text from PDF files
## Environment Variables
Some MCP tools require environment variables. You can pass them in the configuration:
### Via Programmatic Registration
```python
runner.register_mcp_server(
name="aden-tools",
transport="stdio",
command="python",
args=["-m", "aden_tools.mcp_server", "--stdio"],
cwd="../aden-tools",
env={
"BRAVE_SEARCH_API_KEY": os.environ["BRAVE_SEARCH_API_KEY"]
}
)
```
### Via Configuration File
```json
{
"servers": [
{
"name": "aden-tools",
"transport": "stdio",
"command": "python",
"args": ["-m", "aden_tools.mcp_server", "--stdio"],
"cwd": "../aden-tools",
"env": {
"BRAVE_SEARCH_API_KEY": "${BRAVE_SEARCH_API_KEY}"
}
}
]
}
```
The framework will substitute `${VAR_NAME}` with values from the environment.
## Multiple MCP Servers
You can register multiple MCP servers to access different sets of tools:
```json
{
"servers": [
{
"name": "aden-tools",
"transport": "stdio",
"command": "python",
"args": ["-m", "aden_tools.mcp_server", "--stdio"],
"cwd": "../aden-tools"
},
{
"name": "database-tools",
"transport": "http",
"url": "http://localhost:5001"
},
{
"name": "analytics-tools",
"transport": "http",
"url": "http://analytics-server:6001"
}
]
}
```
All tools from all servers will be available to your agent.
## Best Practices
### 1. Use STDIO for Development
STDIO transport is easier to debug and doesn't require managing server processes:
```python
runner.register_mcp_server(
name="dev-tools",
transport="stdio",
command="python",
args=["-m", "my_tools.server", "--stdio"]
)
```
### 2. Use HTTP for Production
HTTP transport is better for:
- Containerized deployments
- Shared tools across multiple agents
- Remote tool execution
```python
runner.register_mcp_server(
name="prod-tools",
transport="http",
url="http://tools-service:8000"
)
```
### 3. Handle Cleanup
Always clean up MCP connections when done:
```python
try:
runner = AgentRunner.load("exports/my-agent")
runner.register_mcp_server(...)
result = await runner.run(input_data)
finally:
runner.cleanup() # Disconnects all MCP servers
```
Or use context manager:
```python
async with AgentRunner.load("exports/my-agent") as runner:
runner.register_mcp_server(...)
result = await runner.run(input_data)
# Automatic cleanup
```
### 4. Tool Name Conflicts
If multiple MCP servers provide tools with the same name, the last registered server wins. To avoid conflicts:
- Use unique tool names in your MCP servers
- Register servers in priority order (most important last)
- Use separate agents for different tool sets
## Troubleshooting
### Connection Errors
If you get connection errors with STDIO transport:
1. Check that the command and path are correct
2. Verify the MCP server starts successfully standalone
3. Check environment variables are set correctly
4. Look at stderr output for error messages
### Tool Not Found
If a tool is registered but not found:
1. Verify the server registered successfully (check logs)
2. List available tools: `runner._tool_registry.get_registered_names()`
3. Check tool name spelling in your node configuration
### HTTP Server Not Responding
If HTTP transport fails:
1. Verify the server is running: `curl http://localhost:4001/health`
2. Check firewall settings
3. Verify the URL and port are correct
## Example: Full Agent with MCP Tools
Here's a complete example of an agent that uses MCP tools:
```python
import asyncio
from pathlib import Path
from framework.runner.runner import AgentRunner
async def main():
# Create agent path
agent_path = Path("exports/web-research-agent")
# Load agent
runner = AgentRunner.load(agent_path)
# Register MCP server
runner.register_mcp_server(
name="aden-tools",
transport="stdio",
command="python",
args=["-m", "aden_tools.mcp_server", "--stdio"],
cwd="../aden-tools",
env={
"BRAVE_SEARCH_API_KEY": "your-api-key"
}
)
# Run agent
result = await runner.run({
"query": "latest developments in quantum computing"
})
print(f"Research complete: {result}")
# Cleanup
runner.cleanup()
if __name__ == "__main__":
asyncio.run(main())
```
## See Also
- [MCP_SERVER_GUIDE.md](MCP_SERVER_GUIDE.md) - Building your own MCP servers
- [examples/mcp_integration_example.py](examples/mcp_integration_example.py) - More examples
- [examples/mcp_servers.json](examples/mcp_servers.json) - Example configuration
+3 -1
View File
@@ -151,7 +151,9 @@ python -m framework test-list <goal_id>
For detailed testing workflows, see the [testing-agent skill](.claude/skills/testing-agent/SKILL.md).
### Analyzing with Builder
### Analyzing Agent Behavior with Builder
The BuilderQuery interface allows you to analyze agent runs and identify improvements:
```python
from framework import BuilderQuery
+199
View File
@@ -0,0 +1,199 @@
#!/usr/bin/env python3
"""
Example: Integrating MCP Servers with the Core Framework
This example demonstrates how to:
1. Register MCP servers programmatically
2. Use MCP tools in agents
3. Load MCP servers from configuration files
"""
import asyncio
from pathlib import Path
from framework.runner.runner import AgentRunner
async def example_1_programmatic_registration():
    """Example 1: Register MCP server programmatically.

    Loads a previously exported agent, attaches the aden-tools MCP server
    over STDIO, runs one objective, and disconnects.
    """
    print("\n=== Example 1: Programmatic MCP Server Registration ===\n")
    # Load an existing exported agent from disk.
    runner = AgentRunner.load("exports/task-planner")
    # Register the aden-tools MCP server via STDIO; returns the number of
    # tools discovered from the server.
    num_tools = runner.register_mcp_server(
        name="aden-tools",
        transport="stdio",
        command="python",
        args=["-m", "aden_tools.mcp_server", "--stdio"],
        cwd="../aden-tools",
    )
    print(f"Registered {num_tools} tools from aden-tools MCP server")
    # List all available tools.
    # NOTE(review): reaches into the private `_tool_registry` attribute —
    # presumably there is (or should be) a public accessor; confirm against
    # the AgentRunner API.
    tools = runner._tool_registry.get_tools()
    print(f"\nAvailable tools: {list(tools.keys())}")
    # Run the agent with the MCP tools available in its registry.
    result = await runner.run({
        "objective": "Search for 'Claude AI' and summarize the top 3 results"
    })
    print(f"\nAgent result: {result}")
    # Disconnect all registered MCP servers.
    runner.cleanup()
async def example_2_http_transport():
    """Example 2: Connect to MCP server via HTTP.

    Requires the aden-tools server to already be running in HTTP mode.
    """
    print("\n=== Example 2: HTTP MCP Server Connection ===\n")
    # First, start the aden-tools MCP server in HTTP mode:
    #   cd aden-tools && python mcp_server.py --port 4001
    runner = AgentRunner.load("exports/task-planner")
    # Register aden-tools via HTTP; returns the number of tools discovered.
    num_tools = runner.register_mcp_server(
        name="aden-tools-http",
        transport="http",
        url="http://localhost:4001",
    )
    print(f"Registered {num_tools} tools from HTTP MCP server")
    # Disconnect the registered MCP server.
    runner.cleanup()
async def example_3_config_file():
    """Example 3: Load MCP servers from configuration file.

    Copies the example `mcp_servers.json` into an agent folder, loads the
    agent so the servers auto-register, then removes the temporary config.
    """
    print("\n=== Example 3: Load from Configuration File ===\n")

    # Create a test agent folder with mcp_servers.json
    test_agent_path = Path("exports/task-planner")

    # Copy example config (in practice, you'd place this in your agent folder)
    import shutil
    shutil.copy(
        "examples/mcp_servers.json",
        test_agent_path / "mcp_servers.json"
    )

    runner = None
    try:
        # Load agent - MCP servers will be auto-discovered from the config file.
        runner = AgentRunner.load(test_agent_path)

        # Tools from the configured servers are automatically available.
        tools = runner._tool_registry.get_tools()
        print(f"Available tools: {list(tools.keys())}")
    finally:
        # Fix: always disconnect and remove the temporary config, even when
        # loading or listing raises; previously a failure leaked both the
        # MCP connections and the copied mcp_servers.json.
        if runner is not None:
            runner.cleanup()
        (test_agent_path / "mcp_servers.json").unlink()
async def example_4_custom_agent_with_mcp_tools():
    """Example 4: Build custom agent that uses MCP tools.

    Builds a two-node search-and-summarize workflow with WorkflowBuilder,
    exports it, registers the aden-tools MCP server, and runs one query.
    """
    print("\n=== Example 4: Custom Agent with MCP Tools ===\n")
    from framework.builder.workflow import WorkflowBuilder
    # Create a workflow builder
    builder = WorkflowBuilder()
    # Define the agent's goal.
    builder.set_goal(
        goal_id="web-researcher",
        name="Web Research Agent",
        description="Search the web and summarize findings"
    )
    # Add success criteria the agent is judged against.
    builder.add_success_criterion(
        "search-results",
        "Successfully retrieve at least 3 web search results"
    )
    builder.add_success_criterion(
        "summary",
        "Provide a clear, concise summary of the findings"
    )
    # Add nodes that will use MCP tools.
    builder.add_node(
        node_id="web-searcher",
        name="Web Search",
        description="Search the web for information",
        node_type="llm_tool_use",
        system_prompt="Search for {query} and return the top results. Use the web_search tool.",
        tools=["web_search"],  # This tool comes from the aden-tools MCP server
        input_keys=["query"],
        output_keys=["search_results"],
    )
    builder.add_node(
        node_id="summarizer",
        name="Summarize Results",
        description="Summarize the search results",
        node_type="llm_generate",
        system_prompt="Summarize the following search results in 2-3 sentences: {search_results}",
        input_keys=["search_results"],
        output_keys=["summary"],
    )
    # Connect the searcher's output into the summarizer.
    builder.add_edge("web-searcher", "summarizer")
    # Mark graph entry and terminal nodes.
    builder.set_entry("web-searcher")
    builder.set_terminal("summarizer")
    # Export the agent specification to disk.
    export_path = Path("exports/web-research-agent")
    export_path.mkdir(parents=True, exist_ok=True)
    builder.export(export_path)
    # Load the exported agent and register the MCP server providing web_search.
    runner = AgentRunner.load(export_path)
    runner.register_mcp_server(
        name="aden-tools",
        transport="stdio",
        command="python",
        args=["-m", "aden_tools.mcp_server", "--stdio"],
        cwd="../aden-tools",
    )
    # Run the agent end to end.
    result = await runner.run({"query": "latest AI breakthroughs 2026"})
    print(f"\nAgent completed with result:\n{result}")
    # Disconnect all registered MCP servers.
    runner.cleanup()
async def main():
    """Run all examples."""
    banner = "=" * 60
    print(banner)
    print("MCP Integration Examples")
    print(banner)
    try:
        # Only the first example runs by default; the others need extra setup.
        await example_1_programmatic_registration()
        # await example_2_http_transport()  # Requires HTTP server running
        # await example_3_config_file()
        # await example_4_custom_agent_with_mcp_tools()
    except Exception as exc:
        print(f"\nError running example: {exc}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    asyncio.run(main())
+22
View File
@@ -0,0 +1,22 @@
{
"servers": [
{
"name": "aden-tools",
"description": "Aden tools including web search, file operations, and PDF reading",
"transport": "stdio",
"command": "python",
"args": ["mcp_server.py", "--stdio"],
"cwd": "../aden-tools",
"env": {
"BRAVE_SEARCH_API_KEY": "${BRAVE_SEARCH_API_KEY}"
}
},
{
"name": "aden-tools-http",
"description": "Aden tools via HTTP (for Docker deployments)",
"transport": "http",
"url": "http://localhost:4001",
"headers": {}
}
]
}
+3 -3
View File
@@ -392,10 +392,10 @@ class LLMNode(NodeProtocol):
def executor(tool_use: ToolUse) -> ToolResult:
logger.info(f" 🔧 Tool call: {tool_use.name}({', '.join(f'{k}={v}' for k, v in tool_use.input.items())})")
result = self.tool_executor(ctx, tool_use)
result = self.tool_executor(tool_use)
# Truncate long results
result_str = str(result.output)[:150]
if len(str(result.output)) > 150:
result_str = str(result.content)[:150]
if len(str(result.content)) > 150:
result_str += "..."
logger.info(f" ✓ Tool result: {result_str}")
return result
+526 -31
View File
@@ -41,6 +41,7 @@ class BuildSession:
self.goal: Goal | None = None
self.nodes: list[NodeSpec] = []
self.edges: list[EdgeSpec] = []
self.mcp_servers: list[dict] = [] # MCP server configurations
# Global session
@@ -319,6 +320,155 @@ def add_edge(
}, default=str)
@mcp.tool()
def update_node(
    node_id: Annotated[str, "ID of the node to update"],
    name: Annotated[str, "Updated human-readable name"] = "",
    description: Annotated[str, "Updated description"] = "",
    node_type: Annotated[str, "Updated type: llm_generate, llm_tool_use, router, or function"] = "",
    input_keys: Annotated[str, "Updated JSON array of input keys"] = "",
    output_keys: Annotated[str, "Updated JSON array of output keys"] = "",
    system_prompt: Annotated[str, "Updated instructions for LLM nodes"] = "",
    tools: Annotated[str, "Updated JSON array of tool names"] = "",
    routes: Annotated[str, "Updated JSON object mapping conditions to target node IDs"] = "",
) -> str:
    """Update an existing node in the agent graph. Only provided fields will be updated.

    Empty string is the "not provided" sentinel for every field, so a field
    cannot be cleared through this tool — only replaced.  Returns a JSON
    payload with validation results and an approval question.
    """
    session = get_session()

    # Locate the node to update.
    node = None
    for n in session.nodes:
        if n.id == node_id:
            node = n
            break
    if not node:
        return json.dumps({"valid": False, "errors": [f"Node '{node_id}' not found"]})

    # Parse all JSON-typed fields BEFORE mutating the node, so malformed
    # input returns an error payload (like every other error path) instead
    # of raising mid-update and leaving the node partially modified.
    try:
        parsed_input_keys = json.loads(input_keys) if input_keys else None
        parsed_output_keys = json.loads(output_keys) if output_keys else None
        parsed_tools = json.loads(tools) if tools else None
        parsed_routes = json.loads(routes) if routes else None
    except json.JSONDecodeError as exc:
        return json.dumps({"valid": False, "errors": [f"Invalid JSON argument: {exc}"]})

    # Apply only the fields the caller provided.
    if name:
        node.name = name
    if description:
        node.description = description
    if node_type:
        node.node_type = node_type
    if parsed_input_keys is not None:
        node.input_keys = parsed_input_keys
    if parsed_output_keys is not None:
        node.output_keys = parsed_output_keys
    if system_prompt:
        node.system_prompt = system_prompt
    if parsed_tools is not None:
        node.tools = parsed_tools
    if parsed_routes is not None:
        node.routes = parsed_routes

    # Re-validate the node after the update (same rules as node creation).
    errors = []
    warnings = []
    if node.node_type == "llm_tool_use" and not node.tools:
        errors.append(f"Node '{node_id}' of type llm_tool_use must specify tools")
    if node.node_type == "router" and not node.routes:
        errors.append(f"Router node '{node_id}' must specify routes")
    if node.node_type in ("llm_generate", "llm_tool_use") and not node.system_prompt:
        warnings.append(f"LLM node '{node_id}' should have a system_prompt")

    return json.dumps({
        "valid": len(errors) == 0,
        "errors": errors,
        "warnings": warnings,
        "node": node.model_dump(),
        "total_nodes": len(session.nodes),
        "approval_required": True,
        "approval_question": {
            "component_type": "node",
            "component_name": node.name,
            "question": f"Do you approve this updated {node.node_type} node: {node.name}?",
            "header": "Approve Node Update",
            "options": [
                {
                    "label": "✓ Approve (Recommended)",
                    "description": f"Updated node '{node.name}' looks good"
                },
                {
                    "label": "✗ Reject & Modify",
                    "description": "Need to change node configuration"
                },
                {
                    "label": "⏸ Pause & Review",
                    "description": "I need more time to review this update"
                }
            ]
        }
    }, default=str)
@mcp.tool()
def delete_node(
    node_id: Annotated[str, "ID of the node to delete"],
) -> str:
    """Delete a node from the agent graph. Also removes all edges connected to this node."""
    session = get_session()

    # Locate the node by id; report an error payload if it does not exist.
    target = next((n for n in session.nodes if n.id == node_id), None)
    if target is None:
        return json.dumps({"valid": False, "errors": [f"Node '{node_id}' not found"]})

    session.nodes.remove(target)

    # Drop every edge touching the deleted node, remembering their ids.
    removed_edges = [e.id for e in session.edges if node_id in (e.source, e.target)]
    session.edges = [e for e in session.edges if node_id not in (e.source, e.target)]

    return json.dumps({
        "valid": True,
        "deleted_node": target.model_dump(),
        "removed_edges": removed_edges,
        "total_nodes": len(session.nodes),
        "total_edges": len(session.edges),
        "message": f"Node '{node_id}' and {len(removed_edges)} connected edge(s) removed"
    }, default=str)
@mcp.tool()
def delete_edge(
    edge_id: Annotated[str, "ID of the edge to delete"],
) -> str:
    """Delete an edge from the agent graph.

    Returns a JSON payload with the deleted edge, the remaining edge count,
    and a human-readable message, or an error payload if the id is unknown.
    """
    session = get_session()

    # Find the edge by id.
    edge_idx = None
    for i, e in enumerate(session.edges):
        if e.id == edge_id:
            edge_idx = i
            break
    if edge_idx is None:
        return json.dumps({"valid": False, "errors": [f"Edge '{edge_id}' not found"]})

    # Remove the edge
    removed_edge = session.edges.pop(edge_idx)

    return json.dumps({
        "valid": True,
        "deleted_edge": removed_edge.model_dump(),
        "total_edges": len(session.edges),
        # Fix: separate source and target explicitly; previously the two ids
        # were concatenated with no separator, producing an unreadable message.
        "message": f"Edge '{edge_id}' removed: {removed_edge.source} -> {removed_edge.target}"
    }, default=str)
@mcp.tool()
def validate_graph() -> str:
"""Validate the complete graph. Checks for unreachable nodes, missing connections, and context flow."""
@@ -334,6 +484,18 @@ def validate_graph() -> str:
errors.append("No nodes defined")
return json.dumps({"valid": False, "errors": errors})
# === DETECT PAUSE/RESUME ARCHITECTURE ===
# Identify pause nodes (nodes marked as PAUSE in description)
pause_nodes = [n.id for n in session.nodes if "PAUSE" in n.description.upper()]
# Identify resume entry points (nodes marked as RESUME ENTRY POINT in description)
resume_entry_points = [n.id for n in session.nodes if "RESUME" in n.description.upper() and "ENTRY" in n.description.upper()]
is_pause_resume_agent = len(pause_nodes) > 0 or len(resume_entry_points) > 0
if is_pause_resume_agent:
warnings.append(f"Pause/resume architecture detected. Pause nodes: {pause_nodes}, Resume entry points: {resume_entry_points}")
# Find entry node (no incoming edges)
entry_candidates = []
for node in session.nodes:
@@ -342,7 +504,8 @@ def validate_graph() -> str:
if not entry_candidates:
errors.append("No entry node found (all nodes have incoming edges)")
elif len(entry_candidates) > 1:
elif len(entry_candidates) > 1 and not is_pause_resume_agent:
# Multiple entry points are expected for pause/resume agents
warnings.append(f"Multiple entry candidates: {entry_candidates}")
# Find terminal nodes (no outgoing edges)
@@ -357,7 +520,13 @@ def validate_graph() -> str:
# Check reachability
if entry_candidates:
reachable = set()
to_visit = [entry_candidates[0]]
# For pause/resume agents, start from ALL entry points (including resume)
if is_pause_resume_agent:
to_visit = list(entry_candidates) # All nodes without incoming edges
else:
to_visit = [entry_candidates[0]] # Just the primary entry
while to_visit:
current = to_visit.pop()
if current in reachable:
@@ -373,7 +542,14 @@ def validate_graph() -> str:
unreachable = [n.id for n in session.nodes if n.id not in reachable]
if unreachable:
errors.append(f"Unreachable nodes: {unreachable}")
# For pause/resume agents, nodes might be reachable only from resume entry points
if is_pause_resume_agent:
# Filter out resume entry points from unreachable list
unreachable_non_resume = [n for n in unreachable if n not in resume_entry_points]
if unreachable_non_resume:
warnings.append(f"Nodes unreachable from primary entry (may be resume-only nodes): {unreachable_non_resume}")
else:
errors.append(f"Unreachable nodes: {unreachable}")
# === CONTEXT FLOW VALIDATION ===
# Build dependency map (node_id -> list of nodes it depends on)
@@ -443,27 +619,64 @@ def validate_graph() -> str:
node = nodes_by_id.get(node_id)
deps = dependencies.get(node_id, [])
# Check if this is a resume entry point
is_resume_entry = node_id in resume_entry_points
if not deps:
# Entry node - inputs must come from initial runtime context
context_warnings.append(
f"Node '{node_id}' requires inputs {missing} from initial context. "
f"Ensure these are provided when running the agent."
)
if is_resume_entry:
context_warnings.append(
f"Resume entry node '{node_id}' requires inputs {missing} from resumed invocation context. "
f"These will be provided by the runtime when resuming (e.g., user's answers)."
)
else:
context_warnings.append(
f"Node '{node_id}' requires inputs {missing} from initial context. "
f"Ensure these are provided when running the agent."
)
else:
# Find which dependency could provide each missing input
suggestions = []
for key in missing:
# Check if any existing node produces this
producers = [n.id for n in session.nodes if key in n.output_keys]
if producers:
suggestions.append(f"'{key}' is produced by {producers} - add dependency edge")
else:
suggestions.append(f"'{key}' is not produced by any node - add a node that outputs it")
# Check if this is a common external input key for resume nodes
external_input_keys = ["input", "user_response", "user_input", "answer", "answers"]
unproduced_external = [k for k in missing if k in external_input_keys]
context_errors.append(
f"Node '{node_id}' requires {missing} but dependencies {deps} don't provide them. "
f"Suggestions: {'; '.join(suggestions)}"
)
if is_resume_entry and unproduced_external:
# Resume entry points can receive external inputs from resumed invocations
other_missing = [k for k in missing if k not in external_input_keys]
if unproduced_external:
context_warnings.append(
f"Resume entry node '{node_id}' expects external inputs {unproduced_external} from resumed invocation. "
f"These will be injected by the runtime when the user responds."
)
if other_missing:
# Still need to check other keys
suggestions = []
for key in other_missing:
producers = [n.id for n in session.nodes if key in n.output_keys]
if producers:
suggestions.append(f"'{key}' is produced by {producers} - ensure edge exists")
else:
suggestions.append(f"'{key}' is not produced - add node or include in external inputs")
context_errors.append(
f"Resume node '{node_id}' requires {other_missing} but dependencies {deps} don't provide them. "
f"Suggestions: {'; '.join(suggestions)}"
)
else:
# Non-resume node or no external input keys - standard validation
suggestions = []
for key in missing:
producers = [n.id for n in session.nodes if key in n.output_keys]
if producers:
suggestions.append(f"'{key}' is produced by {producers} - add dependency edge")
else:
suggestions.append(f"'{key}' is not produced by any node - add a node that outputs it")
context_errors.append(
f"Node '{node_id}' requires {missing} but dependencies {deps} don't provide them. "
f"Suggestions: {'; '.join(suggestions)}"
)
errors.extend(context_errors)
warnings.extend(context_warnings)
@@ -476,6 +689,10 @@ def validate_graph() -> str:
"terminal_nodes": terminal_candidates,
"node_count": len(session.nodes),
"edge_count": len(session.edges),
"pause_resume_detected": is_pause_resume_agent,
"pause_nodes": pause_nodes,
"resume_entry_points": resume_entry_points,
"all_entry_points": entry_candidates,
"context_flow": {
node_id: list(keys) for node_id, keys in available_context.items()
} if available_context else None,
@@ -594,6 +811,18 @@ def _generate_readme(session: BuildSession, export_data: dict, all_tools: set) -
{chr(10).join(f"- `{tool}`" for tool in sorted(all_tools)) if all_tools else "No tools required"}
{"## MCP Tool Sources" if session.mcp_servers else ""}
{chr(10).join(f'''### {s["name"]} ({s["transport"]})
{s.get("description", "")}
**Configuration:**
''' + (f'''- Command: `{s.get("command")}`
- Args: `{s.get("args")}`
- Working Directory: `{s.get("cwd")}`''' if s["transport"] == "stdio" else f'''- URL: `{s.get("url")}`''') for s in session.mcp_servers) if session.mcp_servers else ""}
{"Tools from these MCP servers are automatically loaded when the agent runs." if session.mcp_servers else ""}
## Usage
### Basic Usage
@@ -764,30 +993,51 @@ def export_graph() -> str:
with open(readme_path, "w") as f:
f.write(readme_content)
# Write mcp_servers.json if MCP servers are configured
mcp_servers_path = None
mcp_servers_size = 0
if session.mcp_servers:
mcp_config = {
"servers": session.mcp_servers
}
mcp_servers_path = exports_dir / "mcp_servers.json"
with open(mcp_servers_path, "w") as f:
json.dump(mcp_config, f, indent=2)
mcp_servers_size = mcp_servers_path.stat().st_size
# Get file sizes
agent_json_size = agent_json_path.stat().st_size
readme_size = readme_path.stat().st_size
files_written = {
"agent_json": {
"path": str(agent_json_path),
"size_bytes": agent_json_size,
},
"readme": {
"path": str(readme_path),
"size_bytes": readme_size,
},
}
if mcp_servers_path:
files_written["mcp_servers"] = {
"path": str(mcp_servers_path),
"size_bytes": mcp_servers_size,
}
return json.dumps({
"success": True,
"agent": export_data["agent"],
"files_written": {
"agent_json": {
"path": str(agent_json_path),
"size_bytes": agent_json_size,
},
"readme": {
"path": str(readme_path),
"size_bytes": readme_size,
},
},
"files_written": files_written,
"graph": graph_spec,
"goal": session.goal.model_dump(),
"evaluation_rules": _evaluation_rules,
"required_tools": list(all_tools),
"node_count": len(session.nodes),
"edge_count": len(edges_list),
"note": f"Agent exported to {exports_dir}. Files: agent.json, README.md",
"mcp_servers_count": len(session.mcp_servers),
"note": f"Agent exported to {exports_dir}. Files: agent.json, README.md" + (", mcp_servers.json" if session.mcp_servers else ""),
}, default=str, indent=2)
@@ -802,8 +1052,253 @@ def get_session_status() -> str:
"goal_name": session.goal.name if session.goal else None,
"node_count": len(session.nodes),
"edge_count": len(session.edges),
"mcp_servers_count": len(session.mcp_servers),
"nodes": [n.id for n in session.nodes],
"edges": [(e.source, e.target) for e in session.edges],
"mcp_servers": [s["name"] for s in session.mcp_servers],
})
@mcp.tool()
def add_mcp_server(
    name: Annotated[str, "Unique name for the MCP server"],
    transport: Annotated[str, "Transport type: 'stdio' or 'http'"],
    command: Annotated[str, "Command to run (for stdio transport)"] = "",
    args: Annotated[str, "JSON array of command arguments (for stdio)"] = "[]",
    cwd: Annotated[str, "Working directory (for stdio)"] = "",
    env: Annotated[str, "JSON object of environment variables (for stdio)"] = "{}",
    url: Annotated[str, "Server URL (for http transport)"] = "",
    headers: Annotated[str, "JSON object of HTTP headers (for http)"] = "{}",
    description: Annotated[str, "Description of the MCP server"] = "",
) -> str:
    """
    Register an MCP server as a tool source for this agent.

    On export the server is written into mcp_servers.json, and its tools
    become available to the agent at runtime. The server is contacted once
    here to verify connectivity and to discover its tools; registration is
    rejected if the connection fails.

    Example for stdio:
        add_mcp_server(
            name="aden-tools",
            transport="stdio",
            command="python",
            args='["mcp_server.py", "--stdio"]',
            cwd="../aden-tools"
        )

    Example for http:
        add_mcp_server(
            name="remote-tools",
            transport="http",
            url="http://localhost:4001"
        )
    """
    session = get_session()

    # Guard: only the two supported transports are accepted.
    if transport not in ("stdio", "http"):
        return json.dumps({
            "success": False,
            "error": f"Invalid transport '{transport}'. Must be 'stdio' or 'http'"
        })

    # Guard: server names must be unique within the session.
    if any(existing["name"] == name for existing in session.mcp_servers):
        return json.dumps({
            "success": False,
            "error": f"MCP server '{name}' already registered"
        })

    # Decode the JSON-encoded string parameters into real structures.
    try:
        parsed_args = json.loads(args)
        parsed_env = json.loads(env)
        parsed_headers = json.loads(headers)
    except json.JSONDecodeError as e:
        return json.dumps({
            "success": False,
            "error": f"Invalid JSON: {e}"
        })

    # Guard: each transport has one mandatory field.
    problems = []
    if transport == "stdio" and not command:
        problems.append("command is required for stdio transport")
    if transport == "http" and not url:
        problems.append("url is required for http transport")
    if problems:
        return json.dumps({"success": False, "errors": problems})

    # Assemble the persisted server configuration, keeping only the keys
    # relevant to the chosen transport (and omitting empty optionals).
    server_config = {
        "name": name,
        "transport": transport,
        "description": description,
    }
    if transport == "stdio":
        server_config["command"] = command
        server_config["args"] = parsed_args
        if cwd:
            server_config["cwd"] = cwd
        if parsed_env:
            server_config["env"] = parsed_env
    else:  # http
        server_config["url"] = url
        if parsed_headers:
            server_config["headers"] = parsed_headers

    # Probe the server: connect, enumerate tools, then register on success.
    try:
        from framework.runner.mcp_client import MCPClient, MCPServerConfig

        probe_config = MCPServerConfig(
            name=name,
            transport=transport,
            command=command if transport == "stdio" else None,
            args=parsed_args if transport == "stdio" else [],
            env=parsed_env,
            cwd=cwd if cwd else None,
            url=url if transport == "http" else None,
            headers=parsed_headers,
            description=description,
        )
        with MCPClient(probe_config) as client:
            discovered = [t.name for t in client.list_tools()]

        session.mcp_servers.append(server_config)
        return json.dumps({
            "success": True,
            "server": server_config,
            "tools_discovered": len(discovered),
            "tools": discovered,
            "total_mcp_servers": len(session.mcp_servers),
            "note": f"MCP server '{name}' registered with {len(discovered)} tools. These tools can now be used in llm_tool_use nodes.",
        }, indent=2)
    except Exception as e:
        return json.dumps({
            "success": False,
            "error": f"Failed to connect to MCP server: {str(e)}",
            "suggestion": "Check that the command/url is correct and the server is accessible"
        })
@mcp.tool()
def list_mcp_servers() -> str:
    """Report every MCP server registered in the current build session."""
    session = get_session()
    registered = session.mcp_servers
    if registered:
        return json.dumps(
            {"mcp_servers": registered, "total": len(registered)},
            indent=2,
        )
    # Empty session: include a hint pointing at the registration tool.
    return json.dumps({
        "mcp_servers": [],
        "total": 0,
        "note": "No MCP servers registered. Use add_mcp_server to add tool sources."
    })
@mcp.tool()
def list_mcp_tools(
    server_name: Annotated[str, "Name of the MCP server to list tools from"] = "",
) -> str:
    """
    List tools available from registered MCP servers.

    With server_name, only that server is queried; otherwise every
    registered server is queried. Connection failures are reported
    per-server rather than aborting the whole listing.
    """
    session = get_session()
    if not session.mcp_servers:
        return json.dumps({
            "success": False,
            "error": "No MCP servers registered"
        })

    # Narrow the query to a single server when a name was supplied.
    if server_name:
        selected = [s for s in session.mcp_servers if s["name"] == server_name]
        if not selected:
            return json.dumps({
                "success": False,
                "error": f"MCP server '{server_name}' not found"
            })
    else:
        selected = session.mcp_servers

    tools_by_server = {}
    for cfg in selected:
        try:
            from framework.runner.mcp_client import MCPClient, MCPServerConfig
            probe = MCPServerConfig(
                name=cfg["name"],
                transport=cfg["transport"],
                command=cfg.get("command"),
                args=cfg.get("args", []),
                env=cfg.get("env", {}),
                cwd=cfg.get("cwd"),
                url=cfg.get("url"),
                headers=cfg.get("headers", {}),
                description=cfg.get("description", ""),
            )
            with MCPClient(probe) as client:
                tools_by_server[cfg["name"]] = [
                    {
                        "name": t.name,
                        "description": t.description,
                        "parameters": list(t.input_schema.get("properties", {}).keys()),
                    }
                    for t in client.list_tools()
                ]
        except Exception as e:
            # A failed server yields an error dict instead of a tool list.
            tools_by_server[cfg["name"]] = {
                "error": f"Failed to connect: {str(e)}"
            }

    # Error entries are dicts, not lists, so they contribute zero here.
    total = sum(len(v) for v in tools_by_server.values() if isinstance(v, list))
    return json.dumps({
        "success": True,
        "tools_by_server": tools_by_server,
        "total_tools": total,
        "note": "Use these tool names in the 'tools' parameter when adding llm_tool_use nodes",
    }, indent=2)
@mcp.tool()
def remove_mcp_server(
    name: Annotated[str, "Name of the MCP server to remove"],
) -> str:
    """Remove a registered MCP server by name."""
    session = get_session()
    # Locate the first (and, by uniqueness, only) matching server.
    match_idx = next(
        (i for i, cfg in enumerate(session.mcp_servers) if cfg["name"] == name),
        None,
    )
    if match_idx is None:
        return json.dumps({
            "success": False,
            "error": f"MCP server '{name}' not found"
        })
    session.mcp_servers.pop(match_idx)
    return json.dumps({
        "success": True,
        "removed": name,
        "remaining_servers": len(session.mcp_servers)
    })
+353
View File
@@ -0,0 +1,353 @@
"""MCP Client for connecting to Model Context Protocol servers.
This module provides a client for connecting to MCP servers and invoking their tools.
Supports both STDIO and HTTP transports using the official MCP Python SDK.
"""
import asyncio
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Literal
import httpx
logger = logging.getLogger(__name__)
@dataclass
class MCPServerConfig:
    """Configuration for an MCP server connection.

    Exactly one transport applies per server: "stdio" launches a local
    subprocess, "http" talks to an already-running server. Fields belonging
    to the other transport are ignored by the client.
    """
    name: str
    transport: Literal["stdio", "http"]
    # For STDIO transport
    command: str | None = None  # executable to launch
    args: list[str] = field(default_factory=list)  # argv passed to the command
    env: dict[str, str] = field(default_factory=dict)  # extra environment variables
    cwd: str | None = None  # working directory for the subprocess
    # For HTTP transport
    url: str | None = None  # base URL of the server
    headers: dict[str, str] = field(default_factory=dict)  # extra request headers
    # Optional metadata
    description: str = ""
@dataclass
class MCPTool:
    """A tool available from an MCP server.

    input_schema is the JSON-schema "inputSchema" object as reported by the
    server; server_name records which configured server exposes the tool.
    """
    name: str
    description: str
    input_schema: dict[str, Any]
    server_name: str
class MCPClient:
    """
    Client for communicating with MCP servers.

    Supports both STDIO and HTTP transports using the official MCP SDK.
    Manages the connection lifecycle and provides methods to list and invoke
    tools. For STDIO, connect() only prepares server parameters; each
    list/call operation opens its own short-lived session (see the async
    helpers below). Usable as a context manager.
    """
    def __init__(self, config: MCPServerConfig):
        """
        Initialize the MCP client.
        Args:
            config: Server configuration
        """
        self.config = config
        # Reserved for a persistent MCP session; the STDIO paths below open a
        # fresh session per operation instead, so these remain None.
        self._session = None
        self._read_stream = None
        self._write_stream = None
        # Populated only for HTTP transport.
        self._http_client: httpx.Client | None = None
        # Tool name -> MCPTool, filled by _discover_tools().
        self._tools: dict[str, MCPTool] = {}
        # Guards connect() so repeated calls are no-ops.
        self._connected = False
    def _run_async(self, coro):
        """
        Run an async coroutine, handling both sync and async contexts.

        If called from inside a running event loop, the coroutine is run on a
        fresh loop in a worker thread to avoid "loop already running" errors;
        otherwise asyncio.run is used directly.

        Args:
            coro: Coroutine to run
        Returns:
            Result of the coroutine
        """
        try:
            # Try to get the current event loop
            asyncio.get_running_loop()
            # If we're here, we're in an async context
            # Create a new thread to run the coroutine
            import threading
            result = None
            exception = None
            def run_in_thread():
                nonlocal result, exception
                try:
                    new_loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(new_loop)
                    try:
                        result = new_loop.run_until_complete(coro)
                    finally:
                        new_loop.close()
                except Exception as e:
                    exception = e
            thread = threading.Thread(target=run_in_thread)
            thread.start()
            thread.join()
            if exception:
                # NOTE(review): if the coroutine raised a RuntimeError, this
                # re-raise is caught by the except RuntimeError below and the
                # spent coroutine is handed to asyncio.run — confirm intended.
                raise exception
            return result
        except RuntimeError:
            # No event loop running, we can use asyncio.run
            return asyncio.run(coro)
    def connect(self) -> None:
        """Connect to the MCP server (idempotent) and discover its tools."""
        if self._connected:
            return
        if self.config.transport == "stdio":
            self._connect_stdio()
        elif self.config.transport == "http":
            self._connect_http()
        else:
            raise ValueError(f"Unsupported transport: {self.config.transport}")
        # Discover tools
        self._discover_tools()
        self._connected = True
    def _connect_stdio(self) -> None:
        """Connect to MCP server via STDIO transport using MCP SDK.

        No subprocess is launched here — only server parameters are built and
        stored; each STDIO operation spawns its own session later.
        """
        if not self.config.command:
            raise ValueError("command is required for STDIO transport")
        try:
            # Import MCP SDK
            from mcp import StdioServerParameters
            # Create server parameters
            server_params = StdioServerParameters(
                command=self.config.command,
                args=self.config.args,
                # SDK expects None (inherit environment) rather than {}.
                env=self.config.env or None,
                cwd=self.config.cwd,
            )
            # Store for later use in async context
            self._server_params = server_params
            logger.info(f"Connected to MCP server '{self.config.name}' via STDIO")
        except Exception as e:
            # NOTE(review): consider "raise ... from e" to keep the cause chain.
            raise RuntimeError(f"Failed to connect to MCP server: {e}")
    def _connect_http(self) -> None:
        """Connect to MCP server via HTTP transport."""
        if not self.config.url:
            raise ValueError("url is required for HTTP transport")
        self._http_client = httpx.Client(
            base_url=self.config.url,
            headers=self.config.headers,
            timeout=30.0,
        )
        # Test connection — best-effort: a failed /health probe is only logged.
        try:
            response = self._http_client.get("/health")
            response.raise_for_status()
            logger.info(f"Connected to MCP server '{self.config.name}' via HTTP at {self.config.url}")
        except Exception as e:
            logger.warning(f"Health check failed for MCP server '{self.config.name}': {e}")
            # Continue anyway, server might not have health endpoint
    def _discover_tools(self) -> None:
        """Discover available tools from the MCP server and cache them in self._tools."""
        try:
            if self.config.transport == "stdio":
                tools_list = self._run_async(self._list_tools_stdio_async())
            else:
                tools_list = self._list_tools_http()
            self._tools = {}
            for tool_data in tools_list:
                tool = MCPTool(
                    name=tool_data["name"],
                    description=tool_data.get("description", ""),
                    input_schema=tool_data.get("inputSchema", {}),
                    server_name=self.config.name,
                )
                self._tools[tool.name] = tool
            logger.info(f"Discovered {len(self._tools)} tools from '{self.config.name}': {list(self._tools.keys())}")
        except Exception as e:
            logger.error(f"Failed to discover tools from '{self.config.name}': {e}")
            raise
    async def _list_tools_stdio_async(self) -> list[dict]:
        """List tools via STDIO protocol using MCP SDK.

        Opens a one-shot stdio session from the stored server parameters.
        """
        from mcp import ClientSession
        from mcp.client.stdio import stdio_client
        async with stdio_client(self._server_params) as (read, write):
            async with ClientSession(read, write) as session:
                # Initialize the session
                await session.initialize()
                # List tools
                response = await session.list_tools()
                # Convert tools to dict format
                tools_list = []
                for tool in response.tools:
                    tools_list.append({
                        "name": tool.name,
                        "description": tool.description,
                        "inputSchema": tool.inputSchema,
                    })
                return tools_list
    def _list_tools_http(self) -> list[dict]:
        """List tools via HTTP protocol (JSON-RPC "tools/list" on /mcp/v1)."""
        if not self._http_client:
            raise RuntimeError("HTTP client not initialized")
        try:
            # Use MCP over HTTP protocol
            response = self._http_client.post(
                "/mcp/v1",
                json={
                    "jsonrpc": "2.0",
                    "id": 1,
                    "method": "tools/list",
                    "params": {},
                },
            )
            response.raise_for_status()
            data = response.json()
            if "error" in data:
                raise RuntimeError(f"MCP error: {data['error']}")
            return data.get("result", {}).get("tools", [])
        except Exception as e:
            # NOTE(review): consider "raise ... from e" to keep the cause chain.
            raise RuntimeError(f"Failed to list tools via HTTP: {e}")
    def list_tools(self) -> list[MCPTool]:
        """
        Get list of available tools (connecting on first use).
        Returns:
            List of MCPTool objects
        """
        if not self._connected:
            self.connect()
        return list(self._tools.values())
    def call_tool(self, tool_name: str, arguments: dict[str, Any]) -> Any:
        """
        Invoke a tool on the MCP server.
        Args:
            tool_name: Name of the tool to invoke
            arguments: Tool arguments
        Returns:
            Tool result
        Raises:
            ValueError: If tool_name was not discovered from this server.
        """
        if not self._connected:
            self.connect()
        if tool_name not in self._tools:
            raise ValueError(f"Unknown tool: {tool_name}")
        if self.config.transport == "stdio":
            return self._run_async(self._call_tool_stdio_async(tool_name, arguments))
        else:
            return self._call_tool_http(tool_name, arguments)
    async def _call_tool_stdio_async(self, tool_name: str, arguments: dict[str, Any]) -> Any:
        """Call tool via STDIO protocol using MCP SDK (one-shot session)."""
        from mcp import ClientSession
        from mcp.client.stdio import stdio_client
        async with stdio_client(self._server_params) as (read, write):
            async with ClientSession(read, write) as session:
                # Initialize the session
                await session.initialize()
                # Call tool
                result = await session.call_tool(tool_name, arguments=arguments)
                # Extract content
                if result.content:
                    # MCP returns content as a list of content items
                    if len(result.content) > 0:
                        content_item = result.content[0]
                        # Check if it's a text content item
                        if hasattr(content_item, 'text'):
                            return content_item.text
                        elif hasattr(content_item, 'data'):
                            return content_item.data
                    # Fall back to the raw content list for unknown item types.
                    return result.content
                return None
    def _call_tool_http(self, tool_name: str, arguments: dict[str, Any]) -> Any:
        """Call tool via HTTP protocol (JSON-RPC "tools/call" on /mcp/v1)."""
        if not self._http_client:
            raise RuntimeError("HTTP client not initialized")
        try:
            response = self._http_client.post(
                "/mcp/v1",
                json={
                    "jsonrpc": "2.0",
                    "id": 2,
                    "method": "tools/call",
                    "params": {
                        "name": tool_name,
                        "arguments": arguments,
                    },
                },
            )
            response.raise_for_status()
            data = response.json()
            if "error" in data:
                raise RuntimeError(f"Tool execution error: {data['error']}")
            return data.get("result", {}).get("content", [])
        except Exception as e:
            # NOTE(review): consider "raise ... from e" to keep the cause chain.
            raise RuntimeError(f"Failed to call tool via HTTP: {e}")
    def disconnect(self) -> None:
        """Disconnect from the MCP server.

        Closes the HTTP client when present and clears the connected flag.
        Note: cached tools and stored STDIO server parameters are not cleared,
        so a later connect() re-discovers tools fresh.
        """
        if self._http_client:
            self._http_client.close()
            self._http_client = None
        self._connected = False
        logger.info(f"Disconnected from MCP server '{self.config.name}'")
    def __enter__(self):
        """Context manager entry."""
        self.connect()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit."""
        self.disconnect()
+69
View File
@@ -210,6 +210,11 @@ class AgentRunner:
if tools_path.exists():
self._tool_registry.discover_from_module(tools_path)
# Auto-discover MCP servers from mcp_servers.json
mcp_config_path = agent_path / "mcp_servers.json"
if mcp_config_path.exists():
self._load_mcp_servers_from_config(mcp_config_path)
@classmethod
def load(
cls,
@@ -283,6 +288,67 @@ class AgentRunner:
"""
return self._tool_registry.discover_from_module(module_path)
def register_mcp_server(
    self,
    name: str,
    transport: str,
    **config_kwargs,
) -> int:
    """Register an MCP server as a tool source and discover its tools.

    Args:
        name: Unique server name.
        transport: Either "stdio" (spawn a local process; pass command,
            args, cwd, env) or "http" (pass url and optional headers).
        **config_kwargs: Transport-specific settings forwarded verbatim
            into the server configuration dict.

    Returns:
        Number of tools registered from this server.

    Example:
        # STDIO server
        runner.register_mcp_server(
            name="aden-tools",
            transport="stdio",
            command="python",
            args=["-m", "aden_tools.mcp_server", "--stdio"],
            cwd="/path/to/aden-tools",
        )
        # HTTP server
        runner.register_mcp_server(
            name="aden-tools", transport="http", url="http://localhost:4001"
        )
    """
    # Assemble the config dict the registry expects and delegate to it.
    config = dict(config_kwargs)
    config["name"] = name
    config["transport"] = transport
    return self._tool_registry.register_mcp_server(config)
def _load_mcp_servers_from_config(self, config_path: Path) -> None:
    """Register every MCP server listed in an mcp_servers.json file.

    Best-effort: a server that fails to register is reported and skipped,
    and an unreadable or malformed config file is reported without raising.

    Args:
        config_path: Path to mcp_servers.json file
    """
    try:
        with open(config_path) as f:
            payload = json.load(f)
        # "servers" holds a list of server configuration dicts.
        for server_config in payload.get("servers", []):
            try:
                self._tool_registry.register_mcp_server(server_config)
            except Exception as e:
                print(f"Warning: Failed to register MCP server '{server_config.get('name', 'unknown')}': {e}")
    except Exception as e:
        print(f"Warning: Failed to load MCP servers config from {config_path}: {e}")
def set_approval_callback(self, callback: Callable) -> None:
"""
Set a callback for human-in-the-loop approval during execution.
@@ -631,6 +697,9 @@ Respond with JSON only:
def cleanup(self) -> None:
    """Clean up resources owned by this runner."""
    # Close any MCP client connections held by the tool registry first.
    self._tool_registry.cleanup()
    # Then drop the temporary directory, if one was created.
    if self._temp_dir:
        self._temp_dir.cleanup()
        self._temp_dir = None
+130 -2
View File
@@ -3,12 +3,15 @@
import importlib.util
import inspect
import json
from dataclasses import dataclass, field
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable
from framework.llm.provider import Tool, ToolUse, ToolResult
logger = logging.getLogger(__name__)
@dataclass
class RegisteredTool:
@@ -25,11 +28,13 @@ class ToolRegistry:
Tool Discovery Order:
1. Built-in tools (if any)
2. tools.py in agent folder
3. Manually registered tools
3. MCP servers
4. Manually registered tools
"""
def __init__(self):
    # Registered tools keyed by tool name.
    self._tools: dict[str, RegisteredTool] = {}
    # List of MCPClient instances, kept so cleanup() can disconnect them.
    self._mcp_clients: list[Any] = []
def register(
self,
@@ -222,6 +227,129 @@ class ToolRegistry:
"""Check if a tool is registered."""
return name in self._tools
def register_mcp_server(
    self,
    server_config: dict[str, Any],
) -> int:
    """
    Register an MCP server and discover its tools.

    Connects to the server, converts each discovered MCP tool into a
    framework Tool, and registers it with an executor that proxies calls
    back to the server. The client is retained for later cleanup().

    Args:
        server_config: MCP server configuration dict with keys:
            - name: Server name (required)
            - transport: "stdio" or "http" (required)
            - command: Command to run (for stdio)
            - args: Command arguments (for stdio)
            - env: Environment variables (for stdio)
            - cwd: Working directory (for stdio)
            - url: Server URL (for http)
            - headers: HTTP headers (for http)
            - description: Server description (optional)

    Returns:
        Number of tools registered from this server
        (0 if connection or discovery failed; the error is logged).
    """
    try:
        from framework.runner.mcp_client import MCPClient, MCPServerConfig
        # Build config object
        config = MCPServerConfig(
            name=server_config["name"],
            transport=server_config["transport"],
            command=server_config.get("command"),
            args=server_config.get("args", []),
            env=server_config.get("env", {}),
            cwd=server_config.get("cwd"),
            url=server_config.get("url"),
            headers=server_config.get("headers", {}),
            description=server_config.get("description", ""),
        )
        # Create and connect client
        client = MCPClient(config)
        client.connect()
        # Store client for cleanup
        self._mcp_clients.append(client)
        # Register each tool
        count = 0
        for mcp_tool in client.list_tools():
            # Convert MCP tool to framework Tool
            tool = self._convert_mcp_tool_to_framework_tool(mcp_tool)
            # Create executor that calls the MCP server. The factory binds
            # client and tool name at definition time, avoiding the
            # late-binding-closure pitfall inside this loop.
            def make_mcp_executor(client_ref: MCPClient, tool_name: str):
                def executor(inputs: dict) -> Any:
                    try:
                        result = client_ref.call_tool(tool_name, inputs)
                        # MCP tools return content array, extract the result
                        if isinstance(result, list) and len(result) > 0:
                            if isinstance(result[0], dict) and "text" in result[0]:
                                return result[0]["text"]
                            return result[0]
                        return result
                    except Exception as e:
                        logger.error(f"MCP tool '{tool_name}' execution failed: {e}")
                        # Failures surface as a value so one bad tool call
                        # cannot crash the surrounding run.
                        return {"error": str(e)}
                return executor
            self.register(
                mcp_tool.name,
                tool,
                make_mcp_executor(client, mcp_tool.name),
            )
            count += 1
        logger.info(f"Registered {count} tools from MCP server '{config.name}'")
        return count
    except Exception as e:
        # Registration is best-effort: log and report zero tools.
        logger.error(f"Failed to register MCP server: {e}")
        return 0
def _convert_mcp_tool_to_framework_tool(self, mcp_tool: Any) -> Tool:
    """Translate an MCPTool into the framework's Tool representation.

    The MCP input schema's properties/required entries are carried over
    into a JSON-schema-style ``parameters`` object.

    Args:
        mcp_tool: MCPTool object

    Returns:
        Framework Tool object
    """
    schema = mcp_tool.input_schema
    return Tool(
        name=mcp_tool.name,
        description=mcp_tool.description,
        parameters={
            "type": "object",
            "properties": schema.get("properties", {}),
            "required": schema.get("required", []),
        },
    )
def cleanup(self) -> None:
    """Disconnect and forget every MCP client this registry opened."""
    # Disconnect in registration order; a failure on one client must not
    # prevent the others from being disconnected.
    for client in list(self._mcp_clients):
        try:
            client.disconnect()
        except Exception as e:
            logger.warning(f"Error disconnecting MCP client: {e}")
    self._mcp_clients.clear()
def __del__(self):
    """Destructor to ensure cleanup.

    Best-effort safety net only — finalizer timing is not guaranteed, so
    callers should still invoke cleanup() explicitly.
    """
    self.cleanup()
def tool(
description: str | None = None,
+1
View File
@@ -1,6 +1,7 @@
# Core dependencies
pydantic>=2.0
anthropic>=0.40.0
httpx>=0.27.0
# MCP server dependencies
mcp
+2
View File
@@ -144,6 +144,7 @@ services:
- BRAVE_SEARCH_API_KEY=${BRAVE_SEARCH_API_KEY:-}
volumes:
- .:/workspace:rw # Mount project root for file access
- aden_tools_workspaces:/app/workdir/workspaces # Persist file system tool workspaces
working_dir: /workspace # Set working directory so relative paths work
command: ["python", "/app/mcp_server.py"] # Use absolute path since working_dir changed
healthcheck:
@@ -164,3 +165,4 @@ volumes:
timescaledb_data:
mongodb_data:
redis_data:
aden_tools_workspaces: