Merge branch 'adenhq:main' into feature/twitter-x-mcp-tool
This commit is contained in:
+16
-10
@@ -25,16 +25,22 @@
|
||||
"Bash(xargs cat:*)",
|
||||
"mcp__agent-builder__list_mcp_tools",
|
||||
"mcp__agent-builder__add_mcp_server",
|
||||
"mcp__agent-builder__check_missing_credentials",
|
||||
"mcp__agent-builder__store_credential",
|
||||
"mcp__agent-builder__list_stored_credentials",
|
||||
"mcp__agent-builder__delete_stored_credential",
|
||||
"mcp__agent-builder__verify_credentials",
|
||||
"Bash(PYTHONPATH=/home/timothy/oss/hive/core:/home/timothy/oss/hive/exports python:*)",
|
||||
"Bash(PYTHONPATH=core:exports:tools/src python -m hubspot_input:*)",
|
||||
"mcp__agent-builder__export_graph"
|
||||
"Bash(gh issue list:*)",
|
||||
"WebFetch(domain:github.com)",
|
||||
"Bash(pip install:*)",
|
||||
"Bash(python -m pytest:*)",
|
||||
"Bash(git checkout:*)",
|
||||
"Bash(git add:*)",
|
||||
"Bash(git commit -m \"$\\(cat <<''EOF''\nfeat\\(tools\\): Add Excel tool for spreadsheet operations\n\nAdds a new Excel tool for reading and manipulating .xlsx/.xlsm files:\n- excel_read: Read Excel files with pagination and sheet selection\n- excel_write: Create new Excel files with data\n- excel_append: Append rows to existing files\n- excel_info: Get metadata about Excel files \\(sheets, columns, row counts\\)\n- excel_sheet_list: List all sheets in a workbook\n\nIncludes comprehensive test coverage \\(37 tests\\) and documentation.\n\nReferences #2805\n\nCo-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>\nEOF\n\\)\")",
|
||||
"Bash(git push:*)",
|
||||
"Bash(git pull:*)",
|
||||
"Bash(git stash:*)",
|
||||
"Bash(git merge:*)"
|
||||
]
|
||||
},
|
||||
"enabledMcpjsonServers": ["agent-builder", "tools"],
|
||||
"enableAllProjectMcpServers": true
|
||||
"enableAllProjectMcpServers": true,
|
||||
"enabledMcpjsonServers": [
|
||||
"agent-builder",
|
||||
"tools"
|
||||
]
|
||||
}
|
||||
|
||||
+41
-19
@@ -21,23 +21,22 @@ jobs:
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v4
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
cd core
|
||||
pip install -e .
|
||||
pip install -r requirements-dev.txt
|
||||
run: uv sync --project core --group dev
|
||||
|
||||
- name: Ruff lint
|
||||
run: |
|
||||
ruff check core/
|
||||
ruff check tools/
|
||||
uv run --project core ruff check core/
|
||||
uv run --project core ruff check tools/
|
||||
|
||||
- name: Ruff format
|
||||
run: |
|
||||
ruff format --check core/
|
||||
ruff format --check tools/
|
||||
uv run --project core ruff format --check core/
|
||||
uv run --project core ruff format --check tools/
|
||||
|
||||
test:
|
||||
name: Test Python Framework
|
||||
@@ -52,23 +51,23 @@ jobs:
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v4
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
cd core
|
||||
pip install -e .
|
||||
pip install -r requirements-dev.txt
|
||||
uv sync
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
cd core
|
||||
pytest tests/ -v
|
||||
uv run pytest tests/ -v
|
||||
|
||||
validate:
|
||||
name: Validate Agent Exports
|
||||
test-tools:
|
||||
name: Test Tools
|
||||
runs-on: ubuntu-latest
|
||||
needs: [lint, test]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
@@ -76,13 +75,36 @@ jobs:
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v4
|
||||
|
||||
- name: Install dependencies and run tests
|
||||
run: |
|
||||
cd tools
|
||||
uv sync --extra dev
|
||||
uv pip install --python .venv/bin/python -e ../core
|
||||
uv run --extra dev pytest tests/ -v
|
||||
|
||||
validate:
|
||||
name: Validate Agent Exports
|
||||
runs-on: ubuntu-latest
|
||||
needs: [lint, test, test-tools]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v4
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
cd core
|
||||
pip install -e .
|
||||
pip install -r requirements-dev.txt
|
||||
uv sync
|
||||
|
||||
- name: Validate exported agents
|
||||
run: |
|
||||
|
||||
@@ -80,7 +80,13 @@ jobs:
|
||||
- help wanted: Extra attention is needed (if issue needs community input)
|
||||
- backlog: Tracked for the future, but not currently planned or prioritized
|
||||
|
||||
You may apply multiple labels if appropriate (e.g., "bug" and "help wanted").
|
||||
### 6. Estimate size (if NOT a duplicate, spam, or invalid)
|
||||
Apply exactly ONE size label to help contributors match their capacity to the task:
|
||||
- "size: small": Docs, typos, single-file fixes, config changes
|
||||
- "size: medium": Bug fixes with tests, adding a single tool, changes within one package
|
||||
- "size: large": Cross-package changes (core + tools), new modules, complex logic, architectural refactors
|
||||
|
||||
You may apply multiple labels if appropriate (e.g., "bug", "size: small", and "good first issue").
|
||||
|
||||
## Tools Available:
|
||||
- mcp__github__get_issue: Get issue details
|
||||
|
||||
@@ -21,18 +21,19 @@ jobs:
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v4
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
cd core
|
||||
pip install -e .
|
||||
pip install -r requirements-dev.txt
|
||||
uv sync
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
cd core
|
||||
pytest tests/ -v
|
||||
uv run pytest tests/ -v
|
||||
|
||||
- name: Generate changelog
|
||||
id: changelog
|
||||
|
||||
+20
-22
@@ -44,7 +44,7 @@ Aden Agent Framework is a Python-based system for building goal-driven, self-imp
|
||||
Ensure you have installed:
|
||||
|
||||
- **Python 3.11+** - [Download](https://www.python.org/downloads/) (3.12 or 3.13 recommended)
|
||||
- **pip** - Package installer for Python (comes with Python)
|
||||
- **uv** - Python package manager ([Install](https://docs.astral.sh/uv/getting-started/installation/))
|
||||
- **git** - Version control
|
||||
- **Claude Code** - [Install](https://docs.anthropic.com/claude/docs/claude-code) (optional, for using building skills)
|
||||
|
||||
@@ -52,7 +52,7 @@ Verify installation:
|
||||
|
||||
```bash
|
||||
python --version # Should be 3.11+
|
||||
pip --version # Should be latest
|
||||
uv --version # Should be latest
|
||||
git --version # Any recent version
|
||||
```
|
||||
|
||||
@@ -128,8 +128,12 @@ hive/ # Repository root
|
||||
│
|
||||
├── .github/ # GitHub configuration
|
||||
│ ├── workflows/
|
||||
│ │ ├── ci.yml # Runs on every PR
|
||||
│ │ └── release.yml # Runs on tags
|
||||
│ │ ├── ci.yml # Lint, test, validate on every PR
|
||||
│ │ ├── release.yml # Runs on tags
|
||||
│ │ ├── pr-requirements.yml # PR requirement checks
|
||||
│ │ ├── pr-check-command.yml # PR check commands
|
||||
│ │ ├── claude-issue-triage.yml # Automated issue triage
|
||||
│ │ └── auto-close-duplicates.yml # Close duplicate issues
|
||||
│ ├── ISSUE_TEMPLATE/ # Bug report & feature request templates
|
||||
│ ├── PULL_REQUEST_TEMPLATE.md # PR description template
|
||||
│ └── CODEOWNERS # Auto-assign reviewers
|
||||
@@ -166,7 +170,6 @@ hive/ # Repository root
|
||||
│ │ ├── testing/ # Testing utilities
|
||||
│ │ └── __init__.py
|
||||
│ ├── pyproject.toml # Package metadata and dependencies
|
||||
│ ├── requirements.txt # Python dependencies
|
||||
│ ├── README.md # Framework documentation
|
||||
│ ├── MCP_INTEGRATION_GUIDE.md # MCP server integration guide
|
||||
│ └── docs/ # Protocol documentation
|
||||
@@ -182,7 +185,6 @@ hive/ # Repository root
|
||||
│ │ ├── mcp_server.py # HTTP MCP server
|
||||
│ │ └── __init__.py
|
||||
│ ├── pyproject.toml # Package metadata
|
||||
│ ├── requirements.txt # Python dependencies
|
||||
│ └── README.md # Tools documentation
|
||||
│
|
||||
├── exports/ # AGENT PACKAGES (user-created, gitignored)
|
||||
@@ -191,14 +193,16 @@ hive/ # Repository root
|
||||
├── docs/ # Documentation
|
||||
│ ├── getting-started.md # Quick start guide
|
||||
│ ├── configuration.md # Configuration reference
|
||||
│ ├── architecture.md # System architecture
|
||||
│ └── articles/ # Technical articles
|
||||
│ ├── architecture/ # System architecture
|
||||
│ ├── articles/ # Technical articles
|
||||
│ ├── quizzes/ # Developer quizzes
|
||||
│ └── i18n/ # Translations
|
||||
│
|
||||
├── scripts/ # Build & utility scripts
|
||||
│ ├── setup-python.sh # Python environment setup
|
||||
│ └── setup.sh # Legacy setup script
|
||||
│
|
||||
├── quickstart.sh # Install Claude Code skills
|
||||
├── quickstart.sh # Interactive setup wizard
|
||||
├── ENVIRONMENT_SETUP.md # Complete Python setup guide
|
||||
├── README.md # Project overview
|
||||
├── DEVELOPER.md # This file
|
||||
@@ -375,7 +379,7 @@ def test_ticket_categorization():
|
||||
- **PEP 8** - Follow Python style guide
|
||||
- **Type hints** - Use for function signatures and class attributes
|
||||
- **Docstrings** - Document classes and public functions
|
||||
- **Black** - Code formatter (run with `black .`)
|
||||
- **Ruff** - Linter and formatter (run with `make check`)
|
||||
|
||||
```python
|
||||
# Good
|
||||
@@ -509,8 +513,8 @@ chore(deps): update React to 18.2.0
|
||||
|
||||
1. Create a feature branch from `main`
|
||||
2. Make your changes with clear commits
|
||||
3. Run tests locally: `PYTHONPATH=core:exports python -m pytest`
|
||||
4. Run linting: `black --check .`
|
||||
3. Run tests locally: `make test`
|
||||
4. Run linting: `make check`
|
||||
5. Push and create a PR
|
||||
6. Fill out the PR template
|
||||
7. Request review from CODEOWNERS
|
||||
@@ -528,16 +532,11 @@ chore(deps): update React to 18.2.0
|
||||
```bash
|
||||
# Add to core framework
|
||||
cd core
|
||||
pip install <package>
|
||||
# Then add to requirements.txt or pyproject.toml
|
||||
uv add <package>
|
||||
|
||||
# Add to tools package
|
||||
cd tools
|
||||
pip install <package>
|
||||
# Then add to requirements.txt or pyproject.toml
|
||||
|
||||
# Reinstall in editable mode
|
||||
pip install -e .
|
||||
uv add <package>
|
||||
```
|
||||
|
||||
### Creating a New Agent
|
||||
@@ -670,9 +669,8 @@ cat .env
|
||||
# Or check shell environment
|
||||
echo $ANTHROPIC_API_KEY
|
||||
|
||||
# Copy from .env.example if needed
|
||||
cp .env.example .env
|
||||
# Then edit .env with your API keys
|
||||
# Create .env if needed
|
||||
# Then add your API keys
|
||||
```
|
||||
|
||||
|
||||
|
||||
+86
-3
@@ -21,6 +21,43 @@ This will:
|
||||
- Fix package compatibility issues (openai + litellm)
|
||||
- Verify all installations
|
||||
|
||||
## Quick Setup (Windows – PowerShell)
|
||||
|
||||
Windows users can use the native PowerShell setup script.
|
||||
|
||||
Before running the script, allow script execution for the current session:
|
||||
|
||||
```powershell
|
||||
Set-ExecutionPolicy -Scope Process -ExecutionPolicy Bypass
|
||||
```
|
||||
|
||||
Run setup from the project root:
|
||||
|
||||
```powershell
|
||||
./scripts/setup-python.ps1
|
||||
```
|
||||
|
||||
This will:
|
||||
|
||||
- Check Python version (requires 3.11+)
|
||||
- Create a local `.venv` virtual environment
|
||||
- Install the core framework package (`framework`)
|
||||
- Install the tools package (`aden_tools`)
|
||||
- Fix package compatibility issues (openai + litellm)
|
||||
- Verify all installations
|
||||
|
||||
After setup, activate the virtual environment:
|
||||
|
||||
```powershell
|
||||
.\.venv\Scripts\Activate.ps1
|
||||
```
|
||||
|
||||
Set `PYTHONPATH` (required in every new PowerShell session):
|
||||
|
||||
```powershell
|
||||
$env:PYTHONPATH="core;exports"
|
||||
```
|
||||
|
||||
## Alpine Linux Setup
|
||||
|
||||
If you are using Alpine Linux (e.g., inside a Docker container), you must install system dependencies and use a virtual environment before running the setup script:
|
||||
@@ -100,6 +137,12 @@ For running agents with real LLMs:
|
||||
export ANTHROPIC_API_KEY="your-key-here"
|
||||
```
|
||||
|
||||
Windows (PowerShell):
|
||||
|
||||
```powershell
|
||||
$env:ANTHROPIC_API_KEY="your-key-here"
|
||||
```
|
||||
|
||||
## Running Agents
|
||||
|
||||
All agent commands must be run from the project root with `PYTHONPATH` set:
|
||||
@@ -109,9 +152,14 @@ All agent commands must be run from the project root with `PYTHONPATH` set:
|
||||
PYTHONPATH=core:exports python -m agent_name COMMAND
|
||||
```
|
||||
|
||||
### Example Commands
|
||||
Windows (PowerShell):
|
||||
|
||||
After building an agent via `/building-agents-construction`, use these commands:
|
||||
```powershell
|
||||
$env:PYTHONPATH="core;exports"
|
||||
python -m agent_name COMMAND
|
||||
```
|
||||
|
||||
### Example: Support Ticket Agent
|
||||
|
||||
```bash
|
||||
# Validate agent structure
|
||||
@@ -248,6 +296,14 @@ source .venv/bin/activate
|
||||
PYTHONPATH=core:exports python -m your_agent_name demo
|
||||
```
|
||||
|
||||
### PowerShell: “running scripts is disabled on this system”
|
||||
|
||||
Run once per session:
|
||||
|
||||
```powershell
|
||||
Set-ExecutionPolicy -Scope Process -ExecutionPolicy Bypass
|
||||
```
|
||||
|
||||
### "ModuleNotFoundError: No module named 'framework'"
|
||||
|
||||
**Solution:** Install the core package:
|
||||
@@ -270,6 +326,12 @@ Or run the setup script:
|
||||
./quickstart.sh
|
||||
```
|
||||
|
||||
Windows:
|
||||
|
||||
```powershell
|
||||
./scripts/setup-python.ps1
|
||||
```
|
||||
|
||||
### "ModuleNotFoundError: No module named 'openai.\_models'"
|
||||
|
||||
**Cause:** Outdated `openai` package (0.27.x) incompatible with `litellm`
|
||||
@@ -284,12 +346,21 @@ pip install --upgrade "openai>=1.0.0"
|
||||
|
||||
**Cause:** Not running from project root, missing PYTHONPATH, or agent not yet created
|
||||
|
||||
**Solution:** Ensure you're in the project root directory, have built an agent, and use:
|
||||
**Solution:** Ensure you're in `/hive/` and use:
|
||||
|
||||
Linux/macOS:
|
||||
|
||||
```bash
|
||||
PYTHONPATH=core:exports python -m your_agent_name validate
|
||||
```
|
||||
|
||||
Windows:
|
||||
|
||||
```powershell
|
||||
$env:PYTHONPATH="core;exports"
|
||||
python -m support_ticket_agent validate
|
||||
```
|
||||
|
||||
### Agent imports fail with "broken installation"
|
||||
|
||||
**Symptom:** `pip list` shows packages pointing to non-existent directories
|
||||
@@ -304,6 +375,12 @@ pip uninstall -y framework tools
|
||||
./quickstart.sh
|
||||
```
|
||||
|
||||
Windows:
|
||||
|
||||
```powershell
|
||||
./scripts/setup-python.ps1
|
||||
```
|
||||
|
||||
## Package Structure
|
||||
|
||||
The Hive framework consists of three Python packages:
|
||||
@@ -402,6 +479,12 @@ This design allows agents in `exports/` to be:
|
||||
./quickstart.sh
|
||||
```
|
||||
|
||||
Windows:
|
||||
|
||||
```powershell
|
||||
./scripts/setup-python.ps1
|
||||
```
|
||||
|
||||
### 2. Build Agent (Claude Code)
|
||||
|
||||
```
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
|
||||
[](https://github.com/adenhq/hive/blob/main/LICENSE)
|
||||
[](https://www.ycombinator.com/companies/aden)
|
||||
[](https://hub.docker.com/u/adenhq)
|
||||
[](https://discord.com/invite/MXE49hrKDk)
|
||||
[](https://x.com/aden_hq)
|
||||
[](https://www.linkedin.com/company/teamaden/)
|
||||
@@ -40,6 +39,31 @@ Build reliable, self-improving AI agents without hardcoding workflows. Define yo
|
||||
|
||||
Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and guides.
|
||||
|
||||
## Who Is Hive For?
|
||||
|
||||
Hive is designed for developers and teams who want to build **production-grade AI agents** without manually wiring complex workflows.
|
||||
|
||||
Hive is a good fit if you:
|
||||
|
||||
- Want AI agents that **execute real business processes**, not demos
|
||||
- Prefer **goal-driven development** over hardcoded workflows
|
||||
- Need **self-healing and adaptive agents** that improve over time
|
||||
- Require **human-in-the-loop control**, observability, and cost limits
|
||||
- Plan to run agents in **production environments**
|
||||
|
||||
Hive may not be the best fit if you’re only experimenting with simple agent chains or one-off scripts.
|
||||
|
||||
## When Should You Use Hive?
|
||||
|
||||
Use Hive when you need:
|
||||
|
||||
- Long-running, autonomous agents
|
||||
- Multi-agent coordination
|
||||
- Continuous improvement based on failures
|
||||
- Strong monitoring, safety, and budget controls
|
||||
- A framework that evolves with your goals
|
||||
|
||||
|
||||
## What is Aden
|
||||
|
||||
<p align="center">
|
||||
|
||||
+1
-1
@@ -268,7 +268,7 @@ classDef done fill:#9e9e9e,color:#fff,stroke:#757575
|
||||
- [ ] Wake-up Tool (resume agent tasks)
|
||||
|
||||
### Deployment (Self-Hosted)
|
||||
- [ ] Docker container standardization
|
||||
- [ ] Workder agent docker container standardization
|
||||
- [ ] Headless backend execution
|
||||
- [ ] Exposed API for frontend attachment
|
||||
- [ ] Local monitoring & observability
|
||||
|
||||
@@ -11,7 +11,6 @@ our edges can be created dynamically by a Builder agent based on the goal.
|
||||
|
||||
Edge Types:
|
||||
- always: Always traverse after source completes
|
||||
- always: Always traverse after source completes
|
||||
- on_success: Traverse only if source succeeds
|
||||
- on_failure: Traverse only if source fails
|
||||
- conditional: Traverse based on expression evaluation (SAFE SUBSET ONLY)
|
||||
|
||||
@@ -20,6 +20,7 @@ import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from collections.abc import Callable
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import UTC
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
@@ -1348,7 +1349,9 @@ Expected output keys: {output_keys}
|
||||
LLM Response:
|
||||
{raw_response}
|
||||
|
||||
Output ONLY the JSON object, nothing else."""
|
||||
Output ONLY the JSON object, nothing else.
|
||||
If no valid JSON object exists in the response, output exactly: {{"error": "NO_JSON_FOUND"}}
|
||||
Do NOT fabricate data or return empty objects."""
|
||||
|
||||
try:
|
||||
result = cleaner_llm.complete(
|
||||
@@ -1395,6 +1398,14 @@ Output ONLY the JSON object, nothing else."""
|
||||
parsed = json.loads(cleaned)
|
||||
except json.JSONDecodeError:
|
||||
parsed = json.loads(_fix_unescaped_newlines_in_json(cleaned))
|
||||
|
||||
# Validate LLM didn't return empty or fabricated data
|
||||
if parsed.get("error") == "NO_JSON_FOUND":
|
||||
raise ValueError("Cannot parse JSON from response")
|
||||
if not parsed or parsed == {}:
|
||||
raise ValueError("Cannot parse JSON from response")
|
||||
if all(v is None for v in parsed.values()):
|
||||
raise ValueError("Cannot parse JSON from response")
|
||||
logger.info(" ✓ LLM cleaned JSON output")
|
||||
return parsed
|
||||
|
||||
@@ -1504,6 +1515,8 @@ Output ONLY the JSON object, nothing else."""
|
||||
|
||||
def _build_system_prompt(self, ctx: NodeContext) -> str:
|
||||
"""Build the system prompt."""
|
||||
from datetime import datetime
|
||||
|
||||
parts = []
|
||||
|
||||
if ctx.node_spec.system_prompt:
|
||||
@@ -1526,6 +1539,15 @@ Output ONLY the JSON object, nothing else."""
|
||||
|
||||
parts.append(prompt)
|
||||
|
||||
# Inject current datetime so LLM knows "now"
|
||||
utc_dt = datetime.now(UTC)
|
||||
local_dt = datetime.now().astimezone()
|
||||
local_tz_name = local_dt.tzname() or "Unknown"
|
||||
parts.append("\n## Runtime Context")
|
||||
parts.append(f"- Current Date/Time (UTC): {utc_dt.isoformat()}")
|
||||
parts.append(f"- Local Timezone: {local_tz_name}")
|
||||
parts.append(f"- Current Date/Time (Local): {local_dt.isoformat()}")
|
||||
|
||||
if ctx.goal_context:
|
||||
parts.append("\n# Goal Context")
|
||||
parts.append(ctx.goal_context)
|
||||
|
||||
@@ -75,16 +75,6 @@ class SafeEvalVisitor(ast.NodeVisitor):
|
||||
def visit_Constant(self, node: ast.Constant) -> Any:
|
||||
return node.value
|
||||
|
||||
# --- Number/String/Bytes/NameConstant (Python < 3.8 compat if needed) ---
|
||||
def visit_Num(self, node: ast.Num) -> Any:
|
||||
return node.n
|
||||
|
||||
def visit_Str(self, node: ast.Str) -> Any:
|
||||
return node.s
|
||||
|
||||
def visit_NameConstant(self, node: ast.NameConstant) -> Any:
|
||||
return node.value
|
||||
|
||||
# --- Data Structures ---
|
||||
def visit_List(self, node: ast.List) -> list:
|
||||
return [self.visit(elt) for elt in node.elts]
|
||||
|
||||
@@ -378,11 +378,18 @@ class LiteLLMProvider(LLMProvider):
|
||||
|
||||
# Execute tools and add results.
|
||||
for tool_call in message.tool_calls:
|
||||
# Parse arguments
|
||||
try:
|
||||
args = json.loads(tool_call.function.arguments)
|
||||
except json.JSONDecodeError:
|
||||
args = {}
|
||||
# Surface error to LLM and skip tool execution
|
||||
current_messages.append(
|
||||
{
|
||||
"role": "tool",
|
||||
"tool_call_id": tool_call.id,
|
||||
"content": "Invalid JSON arguments provided to tool.",
|
||||
}
|
||||
)
|
||||
continue
|
||||
|
||||
tool_use = ToolUse(
|
||||
id=tool_call.id,
|
||||
|
||||
@@ -516,6 +516,36 @@ def _validate_tool_credentials(tools_list: list[str]) -> dict | None:
|
||||
return None
|
||||
|
||||
|
||||
def _validate_agent_path(agent_path: str) -> tuple[Path | None, str | None]:
|
||||
"""
|
||||
Validate and normalize agent_path.
|
||||
|
||||
Returns:
|
||||
(Path, None) if valid
|
||||
(None, error_json) if invalid
|
||||
"""
|
||||
if not agent_path:
|
||||
return None, json.dumps(
|
||||
{
|
||||
"success": False,
|
||||
"error": "agent_path is required (e.g., 'exports/my_agent')",
|
||||
}
|
||||
)
|
||||
|
||||
path = Path(agent_path)
|
||||
|
||||
if not path.exists():
|
||||
return None, json.dumps(
|
||||
{
|
||||
"success": False,
|
||||
"error": f"Agent path not found: {path}",
|
||||
"hint": "Run export_graph to create an agent in exports/ first",
|
||||
}
|
||||
)
|
||||
|
||||
return path, None
|
||||
|
||||
|
||||
@mcp.tool()
|
||||
def add_node(
|
||||
node_id: Annotated[str, "Unique identifier for the node"],
|
||||
@@ -2597,10 +2627,11 @@ def generate_constraint_tests(
|
||||
if not agent_path and _session:
|
||||
agent_path = f"exports/{_session.name}"
|
||||
|
||||
if not agent_path:
|
||||
return json.dumps({"error": "agent_path required (e.g., 'exports/my_agent')"})
|
||||
path, err = _validate_agent_path(agent_path)
|
||||
if err:
|
||||
return err
|
||||
|
||||
agent_module = _get_agent_module_from_path(agent_path)
|
||||
agent_module = _get_agent_module_from_path(path)
|
||||
|
||||
# Format constraints for display
|
||||
constraints_formatted = (
|
||||
@@ -2619,9 +2650,9 @@ def generate_constraint_tests(
|
||||
return json.dumps(
|
||||
{
|
||||
"goal_id": goal_id,
|
||||
"agent_path": agent_path,
|
||||
"agent_path": str(path),
|
||||
"agent_module": agent_module,
|
||||
"output_file": f"{agent_path}/tests/test_constraints.py",
|
||||
"output_file": f"{str(path)}/tests/test_constraints.py",
|
||||
"constraints": [c.model_dump() for c in goal.constraints] if goal.constraints else [],
|
||||
"constraints_formatted": constraints_formatted,
|
||||
"test_guidelines": {
|
||||
@@ -2677,10 +2708,11 @@ def generate_success_tests(
|
||||
if not agent_path and _session:
|
||||
agent_path = f"exports/{_session.name}"
|
||||
|
||||
if not agent_path:
|
||||
return json.dumps({"error": "agent_path required (e.g., 'exports/my_agent')"})
|
||||
path, err = _validate_agent_path(agent_path)
|
||||
if err:
|
||||
return err
|
||||
|
||||
agent_module = _get_agent_module_from_path(agent_path)
|
||||
agent_module = _get_agent_module_from_path(path)
|
||||
|
||||
# Parse node/tool names for context
|
||||
nodes = [n.strip() for n in node_names.split(",") if n.strip()]
|
||||
@@ -2705,9 +2737,9 @@ def generate_success_tests(
|
||||
return json.dumps(
|
||||
{
|
||||
"goal_id": goal_id,
|
||||
"agent_path": agent_path,
|
||||
"agent_path": str(path),
|
||||
"agent_module": agent_module,
|
||||
"output_file": f"{agent_path}/tests/test_success_criteria.py",
|
||||
"output_file": f"{str(path)}/tests/test_success_criteria.py",
|
||||
"success_criteria": [c.model_dump() for c in goal.success_criteria]
|
||||
if goal.success_criteria
|
||||
else [],
|
||||
@@ -2766,7 +2798,11 @@ def run_tests(
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
tests_dir = Path(agent_path) / "tests"
|
||||
path, err = _validate_agent_path(agent_path)
|
||||
if err:
|
||||
return err
|
||||
|
||||
tests_dir = path / "tests"
|
||||
|
||||
if not tests_dir.exists():
|
||||
return json.dumps(
|
||||
@@ -2957,10 +2993,11 @@ def debug_test(
|
||||
if not agent_path and _session:
|
||||
agent_path = f"exports/{_session.name}"
|
||||
|
||||
if not agent_path:
|
||||
return json.dumps({"error": "agent_path required (e.g., 'exports/my_agent')"})
|
||||
path, err = _validate_agent_path(agent_path)
|
||||
if err:
|
||||
return err
|
||||
|
||||
tests_dir = Path(agent_path) / "tests"
|
||||
tests_dir = path / "tests"
|
||||
|
||||
if not tests_dir.exists():
|
||||
return json.dumps(
|
||||
@@ -3101,10 +3138,11 @@ def list_tests(
|
||||
if not agent_path and _session:
|
||||
agent_path = f"exports/{_session.name}"
|
||||
|
||||
if not agent_path:
|
||||
return json.dumps({"error": "agent_path required (e.g., 'exports/my_agent')"})
|
||||
path, err = _validate_agent_path(agent_path)
|
||||
if err:
|
||||
return err
|
||||
|
||||
tests_dir = Path(agent_path) / "tests"
|
||||
tests_dir = path / "tests"
|
||||
|
||||
if not tests_dir.exists():
|
||||
return json.dumps(
|
||||
|
||||
@@ -167,14 +167,18 @@ class ConcurrentStorage:
|
||||
run: Run to save
|
||||
immediate: If True, save immediately (bypasses batching)
|
||||
"""
|
||||
# Invalidate summary cache since the run data is changing
|
||||
# This ensures load_summary() fetches fresh data after the save
|
||||
self._cache.pop(f"summary:{run.id}", None)
|
||||
|
||||
if immediate or not self._running:
|
||||
await self._save_run_locked(run)
|
||||
# Update cache only after successful immediate write
|
||||
self._cache[f"run:{run.id}"] = CacheEntry(run, time.time())
|
||||
else:
|
||||
# For batched writes, cache will be updated in _flush_batch after successful write
|
||||
await self._write_queue.put(("run", run))
|
||||
|
||||
# Update cache
|
||||
self._cache[f"run:{run.id}"] = CacheEntry(run, time.time())
|
||||
|
||||
async def _save_run_locked(self, run: Run) -> None:
|
||||
"""Save a run with file locking, including index locks."""
|
||||
lock_key = f"run:{run.id}"
|
||||
@@ -363,8 +367,12 @@ class ConcurrentStorage:
|
||||
try:
|
||||
if item_type == "run":
|
||||
await self._save_run_locked(item)
|
||||
# Update cache only after successful batched write
|
||||
# This fixes the race condition where cache was updated before write completed
|
||||
self._cache[f"run:{item.id}"] = CacheEntry(item, time.time())
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to save {item_type}: {e}")
|
||||
# Cache is NOT updated on failure - prevents stale/inconsistent cache state
|
||||
|
||||
async def _flush_pending(self) -> None:
|
||||
"""Flush all pending writes."""
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
# Development dependencies
|
||||
-r requirements.txt
|
||||
|
||||
# Testing
|
||||
pytest>=8.0
|
||||
pytest-asyncio>=0.23
|
||||
|
||||
# Linting & type checking
|
||||
ruff>=0.1.0
|
||||
mypy>=1.0
|
||||
@@ -1,14 +0,0 @@
|
||||
# Core dependencies
|
||||
pydantic>=2.0
|
||||
anthropic>=0.40.0
|
||||
httpx>=0.27.0
|
||||
litellm>=1.81.0
|
||||
|
||||
# MCP server dependencies
|
||||
mcp
|
||||
fastmcp
|
||||
|
||||
# Testing (required for test framework)
|
||||
pytest>=8.0
|
||||
pytest-asyncio>=0.23
|
||||
pytest-xdist>=3.0
|
||||
@@ -0,0 +1,162 @@
|
||||
"""Tests for ConcurrentStorage race condition and cache invalidation fixes."""
|
||||
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from framework.schemas.run import Run, RunMetrics, RunStatus
|
||||
from framework.storage.concurrent import ConcurrentStorage
|
||||
|
||||
|
||||
def create_test_run(
|
||||
run_id: str, goal_id: str = "test-goal", status: RunStatus = RunStatus.RUNNING
|
||||
) -> Run:
|
||||
"""Create a minimal test Run object."""
|
||||
return Run(
|
||||
id=run_id,
|
||||
goal_id=goal_id,
|
||||
status=status,
|
||||
narrative="Test run",
|
||||
metrics=RunMetrics(
|
||||
nodes_executed=[],
|
||||
),
|
||||
decisions=[],
|
||||
problems=[],
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cache_invalidation_on_save(tmp_path: Path):
|
||||
"""Test that summary cache is invalidated when a run is saved.
|
||||
|
||||
This tests the fix for the cache invalidation bug where load_summary()
|
||||
would return stale data after a run was updated.
|
||||
"""
|
||||
storage = ConcurrentStorage(tmp_path)
|
||||
await storage.start()
|
||||
|
||||
try:
|
||||
run_id = "test-run-1"
|
||||
|
||||
# Create and save initial run
|
||||
run = create_test_run(run_id, status=RunStatus.RUNNING)
|
||||
await storage.save_run(run, immediate=True)
|
||||
|
||||
# Load summary to populate the cache
|
||||
summary = await storage.load_summary(run_id)
|
||||
assert summary is not None
|
||||
assert summary.status == RunStatus.RUNNING
|
||||
|
||||
# Update run with new status
|
||||
run.status = RunStatus.COMPLETED
|
||||
await storage.save_run(run, immediate=True)
|
||||
|
||||
# Load summary again - should get fresh data, not cached stale data
|
||||
summary = await storage.load_summary(run_id)
|
||||
assert summary is not None
|
||||
assert summary.status == RunStatus.COMPLETED, (
|
||||
"Summary cache should be invalidated on save - got stale data"
|
||||
)
|
||||
finally:
|
||||
await storage.stop()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_batched_write_cache_consistency(tmp_path: Path):
|
||||
"""Test that cache is only updated after successful batched write.
|
||||
|
||||
This tests the fix for the race condition where cache was updated
|
||||
before the batched write completed.
|
||||
"""
|
||||
storage = ConcurrentStorage(tmp_path, batch_interval=0.05)
|
||||
await storage.start()
|
||||
|
||||
try:
|
||||
run_id = "test-run-2"
|
||||
|
||||
# Save via batching (immediate=False)
|
||||
run = create_test_run(run_id, status=RunStatus.RUNNING)
|
||||
await storage.save_run(run, immediate=False)
|
||||
|
||||
# Before batch flush, cache should NOT contain the run
|
||||
# (This is the fix - previously cache was updated immediately)
|
||||
cache_key = f"run:{run_id}"
|
||||
assert cache_key not in storage._cache, (
|
||||
"Cache should not be updated before batch is flushed"
|
||||
)
|
||||
|
||||
# Wait for batch to flush
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
# After batch flush, cache should contain the run
|
||||
assert cache_key in storage._cache, "Cache should be updated after batch flush"
|
||||
|
||||
# Verify data on disk matches cache
|
||||
loaded_run = await storage.load_run(run_id, use_cache=False)
|
||||
assert loaded_run is not None
|
||||
assert loaded_run.id == run_id
|
||||
assert loaded_run.status == RunStatus.RUNNING
|
||||
finally:
|
||||
await storage.stop()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_immediate_write_updates_cache(tmp_path: Path):
|
||||
"""Test that immediate writes still update cache correctly."""
|
||||
storage = ConcurrentStorage(tmp_path)
|
||||
await storage.start()
|
||||
|
||||
try:
|
||||
run_id = "test-run-3"
|
||||
|
||||
# Save with immediate=True
|
||||
run = create_test_run(run_id, status=RunStatus.COMPLETED)
|
||||
await storage.save_run(run, immediate=True)
|
||||
|
||||
# Cache should be updated immediately for immediate writes
|
||||
cache_key = f"run:{run_id}"
|
||||
assert cache_key in storage._cache, "Cache should be updated after immediate write"
|
||||
|
||||
# Verify cached value is correct
|
||||
cached_run = storage._cache[cache_key].value
|
||||
assert cached_run.id == run_id
|
||||
assert cached_run.status == RunStatus.COMPLETED
|
||||
finally:
|
||||
await storage.stop()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_summary_cache_invalidated_on_multiple_saves(tmp_path: Path):
    """Test that summary cache is invalidated on each save, not just the first."""
    storage = ConcurrentStorage(tmp_path)
    await storage.start()

    try:
        run_id = "test-run-4"

        # First save: seed the run in RUNNING state.
        run = create_test_run(run_id, status=RunStatus.RUNNING)
        await storage.save_run(run, immediate=True)

        # Load summary to cache it.
        summary1 = await storage.load_summary(run_id)
        assert summary1.status == RunStatus.RUNNING

        # Second save. NOTE(review): the status is re-assigned to the SAME
        # value (RUNNING), so this step only checks that a same-status
        # re-save keeps the summary consistent; a distinct intermediate
        # status would catch stale-cache bugs more strongly — confirm intent.
        run.status = RunStatus.RUNNING
        await storage.save_run(run, immediate=True)

        # Load summary - should be fresh (not a stale cached copy).
        summary2 = await storage.load_summary(run_id)
        assert summary2.status == RunStatus.RUNNING

        # Third save with final status — this save actually changes state.
        run.status = RunStatus.COMPLETED
        await storage.save_run(run, immediate=True)

        # Load summary - should reflect the new status, proving the
        # summary cache was invalidated on the latest save as well.
        summary3 = await storage.load_summary(run_id)
        assert summary3.status == RunStatus.COMPLETED
    finally:
        await storage.stop()
|
||||
@@ -209,6 +209,62 @@ class TestLiteLLMProviderToolUse:
|
||||
assert result.output_tokens == 25 # 15 + 10
|
||||
assert mock_completion.call_count == 2
|
||||
|
||||
    @patch("litellm.completion")
    def test_complete_with_tools_invalid_json_arguments_are_handled(self, mock_completion):
        """Test that invalid JSON tool arguments do not execute the tool.

        Two mocked completions are queued: the first requests a tool call
        whose ``function.arguments`` string is malformed JSON; the second is
        a plain text reply, simulating the LLM recovering after being told
        about the argument-parsing error.
        """
        # Mock response with invalid JSON arguments
        tool_call_response = MagicMock()
        tool_call_response.choices = [MagicMock()]
        tool_call_response.choices[0].message.content = None
        tool_call_response.choices[0].message.tool_calls = [MagicMock()]
        tool_call_response.choices[0].message.tool_calls[0].id = "call_123"
        tool_call_response.choices[0].message.tool_calls[0].function.name = "test_tool"
        # Deliberately unparseable arguments payload.
        tool_call_response.choices[0].message.tool_calls[0].function.arguments = "{invalid json"
        tool_call_response.choices[0].finish_reason = "tool_calls"
        tool_call_response.model = "gpt-4o-mini"
        tool_call_response.usage.prompt_tokens = 10
        tool_call_response.usage.completion_tokens = 5

        # Final response (LLM continues after tool error)
        final_response = MagicMock()
        final_response.choices = [MagicMock()]
        final_response.choices[0].message.content = "Handled error"
        final_response.choices[0].message.tool_calls = None
        final_response.choices[0].finish_reason = "stop"
        final_response.model = "gpt-4o-mini"
        final_response.usage.prompt_tokens = 5
        final_response.usage.completion_tokens = 5

        # side_effect yields the responses in order across the two calls.
        mock_completion.side_effect = [tool_call_response, final_response]

        provider = LiteLLMProvider(model="gpt-4o-mini", api_key="test-key")

        tools = [
            Tool(
                name="test_tool",
                description="Test tool",
                parameters={"properties": {}, "required": []},
            )
        ]

        # Mutable flag records whether the executor was (incorrectly) invoked.
        called = {"value": False}

        def tool_executor(tool_use: ToolUse) -> ToolResult:
            # Should never run: arguments failed to parse upstream.
            called["value"] = True
            return ToolResult(
                tool_use_id=tool_use.id, content="should not be called", is_error=False
            )

        result = provider.complete_with_tools(
            messages=[{"role": "user", "content": "Run tool"}],
            system="You are a test assistant.",
            tools=tools,
            tool_executor=tool_executor,
        )

        # The executor never ran, and the provider surfaced the recovery text.
        assert called["value"] is False
        assert result.content == "Handled error"
|
||||
|
||||
|
||||
class TestToolConversion:
|
||||
"""Test tool format conversion."""
|
||||
|
||||
@@ -362,7 +362,6 @@ class AgentRequest(BaseModel):
|
||||
raise ValueError('max_tokens too high')
|
||||
return v
|
||||
```
|
||||
|
||||
### Output Sanitization
|
||||
> **Note:** The following snippet is illustrative and shows a simplified example
|
||||
> of output sanitization logic. Actual implementations may differ.
|
||||
|
||||
+1
-3
@@ -14,7 +14,6 @@
|
||||
|
||||
[](https://github.com/adenhq/hive/blob/main/LICENSE)
|
||||
[](https://www.ycombinator.com/companies/aden)
|
||||
[](https://hub.docker.com/u/adenhq)
|
||||
[](https://discord.com/invite/MXE49hrKDk)
|
||||
[](https://x.com/aden_hq)
|
||||
[](https://www.linkedin.com/company/teamaden/)
|
||||
@@ -42,7 +41,7 @@ Visita [adenhq.com](https://adenhq.com) para documentación completa, ejemplos y
|
||||
## ¿Qué es Aden?
|
||||
|
||||
<p align="center">
|
||||
<img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
|
||||
<img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
|
||||
</p>
|
||||
|
||||
Aden es una plataforma para construir, desplegar, operar y adaptar agentes de IA:
|
||||
@@ -66,7 +65,6 @@ Aden es una plataforma para construir, desplegar, operar y adaptar agentes de IA
|
||||
### Prerrequisitos
|
||||
|
||||
- [Python 3.11+](https://www.python.org/downloads/) - Para desarrollo de agentes
|
||||
- [Docker](https://docs.docker.com/get-docker/) (v20.10+) - Opcional, para herramientas en contenedores
|
||||
|
||||
### Instalación
|
||||
|
||||
|
||||
+1
-1
@@ -44,7 +44,7 @@
|
||||
# Aden क्या है?
|
||||
|
||||
<p align="center">
|
||||
<img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
|
||||
<img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
|
||||
</p>
|
||||
|
||||
Aden एक ऐसा प्लेटफ़ॉर्म है जो AI एजेंट्स को बनाने, डिप्लॉय करने, ऑपरेट करने और अनुकूलित करने के लिए उपयोग होता है:
|
||||
|
||||
+1
-1
@@ -42,7 +42,7 @@
|
||||
## Adenとは
|
||||
|
||||
<p align="center">
|
||||
<img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
|
||||
<img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
|
||||
</p>
|
||||
|
||||
Adenは、AIエージェントの構築、デプロイ、運用、適応のためのプラットフォームです:
|
||||
|
||||
+1
-1
@@ -42,7 +42,7 @@
|
||||
## Aden이란 무엇인가
|
||||
|
||||
<p align="center">
|
||||
<img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
|
||||
<img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
|
||||
</p>
|
||||
|
||||
Aden은 AI 에이전트를 구축, 배포, 운영, 적응시키기 위한 플랫폼입니다:
|
||||
|
||||
+1
-1
@@ -42,7 +42,7 @@ Visite [adenhq.com](https://adenhq.com) para documentação completa, exemplos e
|
||||
## O que é Aden
|
||||
|
||||
<p align="center">
|
||||
<img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
|
||||
<img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
|
||||
</p>
|
||||
|
||||
Aden é uma plataforma para construir, implantar, operar e adaptar agentes de IA:
|
||||
|
||||
+1
-1
@@ -42,7 +42,7 @@
|
||||
## Что такое Aden
|
||||
|
||||
<p align="center">
|
||||
<img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
|
||||
<img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
|
||||
</p>
|
||||
|
||||
Aden — это платформа для создания, развёртывания, эксплуатации и адаптации ИИ-агентов:
|
||||
|
||||
+1
-1
@@ -42,7 +42,7 @@
|
||||
## 什么是 Aden
|
||||
|
||||
<p align="center">
|
||||
<img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
|
||||
<img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
|
||||
</p>
|
||||
|
||||
Aden 是一个用于构建、部署、运营和适应 AI 智能体的平台:
|
||||
|
||||
@@ -0,0 +1,41 @@
|
||||
# Examples
|
||||
|
||||
This directory contains two types of examples to help you build agents with the Hive framework.
|
||||
|
||||
## Recipes vs Templates
|
||||
|
||||
### [recipes/](recipes/) — "How to make it"
|
||||
|
||||
A recipe is a **prompt-only** description of an agent. It tells you the goal, the nodes, the prompts, the edge routing logic, and what tools to wire in — but it's not runnable code. You read the recipe, then build the agent yourself.
|
||||
|
||||
Use recipes when you want to:
|
||||
- Understand a pattern before committing to an implementation
|
||||
- Adapt an idea to your own codebase or tooling
|
||||
- Learn how to think about agent design (goals, nodes, edges, prompts)
|
||||
|
||||
### [templates/](templates/) — "Ready to eat"
|
||||
|
||||
A template is a **working agent scaffold** that follows the standard Hive export structure. Copy the folder, rename it, swap in your own prompts and tools, and run it.
|
||||
|
||||
Use templates when you want to:
|
||||
- Get a new agent running quickly
|
||||
- Start from a known-good structure instead of from scratch
|
||||
- See how all the pieces (goal, nodes, edges, config, CLI) fit together in real code
|
||||
|
||||
## How to use a template
|
||||
|
||||
```bash
|
||||
# 1. Copy the template
|
||||
cp -r examples/templates/marketing_agent exports/my_agent
|
||||
|
||||
# 2. Edit the goal, nodes, and edges in agent.py and nodes/__init__.py
|
||||
|
||||
# 3. Run it
|
||||
PYTHONPATH=core python -m exports.my_agent --help
|
||||
```
|
||||
|
||||
## How to use a recipe
|
||||
|
||||
1. Read the recipe markdown file
|
||||
2. Use the patterns described to build your own agent — either manually or with the builder agent (`/agent-workflow`)
|
||||
3. Refer to the [core README](../core/README.md) for framework API details
|
||||
@@ -0,0 +1,27 @@
|
||||
# Recipes
|
||||
|
||||
A recipe describes an agent's design — the goal, nodes, prompts, edge logic, and tools — without providing runnable code. Think of it as a blueprint: it tells you *how* to build the agent, but you do the building.
|
||||
|
||||
## What's in a recipe
|
||||
|
||||
Each recipe is a markdown file (or folder with a markdown file) containing:
|
||||
|
||||
- **Goal**: What the agent accomplishes, including success criteria and constraints
|
||||
- **Nodes**: Each step in the workflow, with the system prompt, node type, and input/output keys
|
||||
- **Edges**: How nodes connect, including conditions and routing logic
|
||||
- **Tools**: What external tools or MCP servers the agent needs
|
||||
- **Usage notes**: Tips, gotchas, and suggested variations
|
||||
|
||||
## How to use a recipe
|
||||
|
||||
1. Read through the recipe to understand the design
|
||||
2. Create a new agent using the standard export structure (see [templates/](../templates/) for a scaffold)
|
||||
3. Translate the recipe's goal, nodes, and edges into code
|
||||
4. Wire in the tools described
|
||||
5. Test and iterate
|
||||
|
||||
## Available recipes
|
||||
|
||||
| Recipe | Description |
|
||||
|--------|-------------|
|
||||
| [marketing_agent](marketing_agent/) | Multi-channel marketing content generator with audience analysis and A/B copy variants |
|
||||
@@ -0,0 +1,156 @@
|
||||
# Recipe: Marketing Content Agent
|
||||
|
||||
A multi-channel marketing content generator. Given a product description and target audience, this agent analyzes the audience, generates tailored copy for multiple channels, and produces A/B variants.
|
||||
|
||||
## Goal
|
||||
|
||||
```
|
||||
Name: Marketing Content Generator
|
||||
Description: Generate targeted marketing content across multiple channels
|
||||
for a given product and audience.
|
||||
|
||||
Success criteria:
|
||||
- Audience analysis is produced with demographics and pain points
|
||||
- At least 2 channel-specific content pieces are generated
|
||||
- A/B variants are provided for each piece
|
||||
- All content aligns with the specified brand voice
|
||||
|
||||
Constraints:
|
||||
- (hard) No competitor brand names in generated content
|
||||
- (soft) Content should be under 280 characters for social media channels
|
||||
```
|
||||
|
||||
## Input / Output
|
||||
|
||||
**Input:**
|
||||
- `product_description` (str) — What the product is and does
|
||||
- `target_audience` (str) — Who the content is for
|
||||
- `brand_voice` (str) — Tone and style guidelines (e.g., "professional but approachable")
|
||||
- `channels` (list[str]) — Target channels, e.g. `["email", "twitter", "linkedin"]`
|
||||
|
||||
**Output:**
|
||||
- `audience_analysis` (dict) — Demographics, pain points, motivations
|
||||
- `content` (list[dict]) — Per-channel content with A/B variants
|
||||
|
||||
## Workflow
|
||||
|
||||
```
|
||||
[analyze_audience] → [generate_content] → [review_and_refine]
|
||||
|
|
||||
(conditional)
|
||||
|
|
||||
needs_revision == True → [generate_content]
|
||||
needs_revision == False → (done)
|
||||
```
|
||||
|
||||
## Nodes
|
||||
|
||||
### 1. analyze_audience
|
||||
|
||||
| Field | Value |
|
||||
|-------|-------|
|
||||
| Type | `llm_generate` |
|
||||
| Input keys | `product_description`, `target_audience` |
|
||||
| Output keys | `audience_analysis` |
|
||||
| Tools | None |
|
||||
|
||||
**System prompt:**
|
||||
```
|
||||
You are a marketing strategist. Analyze the target audience for a product.
|
||||
|
||||
Product: {product_description}
|
||||
Target audience: {target_audience}
|
||||
|
||||
Produce a structured analysis in JSON:
|
||||
{{
|
||||
"audience_analysis": {{
|
||||
"demographics": "...",
|
||||
"pain_points": ["..."],
|
||||
"motivations": ["..."],
|
||||
"preferred_channels": ["..."],
|
||||
"messaging_angle": "..."
|
||||
}}
|
||||
}}
|
||||
```
|
||||
|
||||
### 2. generate_content
|
||||
|
||||
| Field | Value |
|
||||
|-------|-------|
|
||||
| Type | `llm_generate` |
|
||||
| Input keys | `product_description`, `audience_analysis`, `brand_voice`, `channels` |
|
||||
| Output keys | `content` |
|
||||
| Tools | None |
|
||||
|
||||
**System prompt:**
|
||||
```
|
||||
You are a marketing copywriter. Generate content for each channel.
|
||||
|
||||
Product: {product_description}
|
||||
Audience analysis: {audience_analysis}
|
||||
Brand voice: {brand_voice}
|
||||
Channels: {channels}
|
||||
|
||||
For each channel, produce two variants (A and B).
|
||||
|
||||
Output as JSON:
|
||||
{{
|
||||
"content": [
|
||||
{{
|
||||
"channel": "twitter",
|
||||
"variant_a": "...",
|
||||
"variant_b": "..."
|
||||
}}
|
||||
]
|
||||
}}
|
||||
```
|
||||
|
||||
### 3. review_and_refine
|
||||
|
||||
| Field | Value |
|
||||
|-------|-------|
|
||||
| Type | `llm_generate` |
|
||||
| Input keys | `content`, `brand_voice` |
|
||||
| Output keys | `content`, `needs_revision` |
|
||||
| Tools | None |
|
||||
|
||||
**System prompt:**
|
||||
```
|
||||
You are a senior marketing editor. Review the following content for brand
|
||||
voice alignment, clarity, and channel appropriateness.
|
||||
|
||||
Content: {content}
|
||||
Brand voice: {brand_voice}
|
||||
|
||||
If any piece needs revision, fix it and set needs_revision to true.
|
||||
If everything looks good, return the content unchanged with needs_revision false.
|
||||
|
||||
Output as JSON:
|
||||
{{
|
||||
"content": [...],
|
||||
"needs_revision": false
|
||||
}}
|
||||
```
|
||||
|
||||
## Edges
|
||||
|
||||
| Source | Target | Condition | Priority |
|
||||
|--------|--------|-----------|----------|
|
||||
| analyze_audience | generate_content | `on_success` | 0 |
|
||||
| generate_content | review_and_refine | `on_success` | 0 |
|
||||
| review_and_refine | generate_content | `conditional: needs_revision == True` | 10 |
|
||||
|
||||
The `review_and_refine → generate_content` loop has higher priority so it's checked first. If `needs_revision` is false, execution ends at `review_and_refine` (terminal node).
|
||||
|
||||
## Tools
|
||||
|
||||
This recipe uses no external tools — all nodes are `llm_generate`. To extend it, consider adding:
|
||||
- A web search tool for competitive analysis in the `analyze_audience` node
|
||||
- A URL shortener tool for social media content
|
||||
- An image generation tool for visual content variants
|
||||
|
||||
## Variations
|
||||
|
||||
- **Single-channel mode**: Remove the `channels` input and hardcode to one channel for simpler output
|
||||
- **With approval gate**: Add a `human_input` node between `review_and_refine` and the terminal to require human sign-off
|
||||
- **With analytics**: Add a `function` node that logs generated content to a tracking system
|
||||
@@ -0,0 +1,38 @@
|
||||
# Templates
|
||||
|
||||
A template is a working agent scaffold that follows the standard Hive export structure. Copy it, rename it, customize the goal/nodes/edges, and run it.
|
||||
|
||||
## What's in a template
|
||||
|
||||
Each template is a complete agent package:
|
||||
|
||||
```
|
||||
template_name/
|
||||
├── __init__.py # Package exports
|
||||
├── __main__.py # CLI entry point
|
||||
├── agent.py # Goal, edges, graph spec, agent class
|
||||
├── config.py # Runtime configuration
|
||||
├── nodes/
|
||||
│ └── __init__.py # Node definitions (NodeSpec instances)
|
||||
└── README.md # What this template demonstrates
|
||||
```
|
||||
|
||||
## How to use a template
|
||||
|
||||
```bash
|
||||
# 1. Copy to your exports directory
|
||||
cp -r examples/templates/marketing_agent exports/my_marketing_agent
|
||||
|
||||
# 2. Update the module references in __main__.py and __init__.py
|
||||
|
||||
# 3. Customize goal, nodes, edges, and prompts
|
||||
|
||||
# 4. Run it
|
||||
PYTHONPATH=core python -m exports.my_marketing_agent --input '{"product_description": "..."}'
|
||||
```
|
||||
|
||||
## Available templates
|
||||
|
||||
| Template | Description |
|
||||
|----------|-------------|
|
||||
| [marketing_agent](marketing_agent/) | Multi-channel marketing content generator with audience analysis, content generation, and editorial review nodes |
|
||||
@@ -0,0 +1,57 @@
|
||||
# Template: Marketing Content Agent
|
||||
|
||||
A multi-channel marketing content generator. Given a product and audience, this agent analyzes the audience, generates tailored copy for multiple channels with A/B variants, and reviews the output for quality.
|
||||
|
||||
## Workflow
|
||||
|
||||
```
|
||||
[analyze-audience] → [generate-content] → [review-and-refine]
|
||||
|
|
||||
(conditional)
|
||||
|
|
||||
needs_revision == True → [generate-content]
|
||||
needs_revision == False → (done)
|
||||
```
|
||||
|
||||
## Nodes
|
||||
|
||||
| Node | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `analyze-audience` | `llm_generate` | Produces structured audience analysis |
|
||||
| `generate-content` | `llm_generate` | Creates per-channel copy with A/B variants |
|
||||
| `review-and-refine` | `llm_generate` | Reviews and optionally revises content |
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# From the repo root
|
||||
PYTHONPATH=core python -m examples.templates.marketing_agent
|
||||
|
||||
# With custom input
|
||||
PYTHONPATH=core python -m examples.templates.marketing_agent --input '{
|
||||
"product_description": "A fitness tracking app",
|
||||
"target_audience": "Health-conscious millennials",
|
||||
"brand_voice": "Energetic and motivational",
|
||||
"channels": ["instagram", "email"]
|
||||
}'
|
||||
```
|
||||
|
||||
## Customization ideas
|
||||
|
||||
- Add a `function` node to call an analytics API and inform audience analysis with real data
|
||||
- Add a `human_input` pause node before final output for editorial approval
|
||||
- Swap `llm_generate` nodes to `llm_tool_use` and add web search tools for competitive research
|
||||
- Add an image generation tool to produce visual assets alongside copy
|
||||
|
||||
## File structure
|
||||
|
||||
```
|
||||
marketing_agent/
|
||||
├── __init__.py # Package exports
|
||||
├── __main__.py # CLI entry point
|
||||
├── agent.py # Goal, edges, graph spec, MarketingAgent class
|
||||
├── config.py # RuntimeConfig and AgentMetadata
|
||||
├── nodes/
|
||||
│ └── __init__.py # NodeSpec definitions
|
||||
└── README.md # This file
|
||||
```
|
||||
@@ -0,0 +1,6 @@
|
||||
"""Marketing Content Agent — template example."""
|
||||
|
||||
from .agent import MarketingAgent, goal, edges, nodes
|
||||
from .config import default_config
|
||||
|
||||
__all__ = ["MarketingAgent", "goal", "edges", "nodes", "default_config"]
|
||||
@@ -0,0 +1,31 @@
|
||||
"""CLI entry point for Marketing Content Agent."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import sys
|
||||
|
||||
|
||||
def main():
    """Run the marketing agent from the command line.

    Accepts an optional ``--input '<json>'`` pair overriding the built-in
    demo input, executes the agent, and prints the result as pretty JSON.
    Exits with status 2 on CLI misuse or malformed JSON.
    """
    from .agent import MarketingAgent
    from .config import default_config

    # Simple CLI — replace with Click for production use
    input_data = {
        "product_description": "An AI-powered project management tool for remote teams",
        "target_audience": "Engineering managers at mid-size tech companies",
        "brand_voice": "Professional but approachable, concise, data-driven",
        "channels": ["email", "twitter", "linkedin"],
    }

    # Accept JSON input from command line
    if len(sys.argv) > 1 and sys.argv[1] == "--input":
        # Fix: original indexed sys.argv[2] unconditionally, raising
        # IndexError when --input was given without a value, and let
        # malformed JSON escape as a raw traceback.
        if len(sys.argv) < 3:
            print("error: --input requires a JSON argument", file=sys.stderr)
            sys.exit(2)
        try:
            input_data = json.loads(sys.argv[2])
        except json.JSONDecodeError as err:
            print(f"error: invalid JSON for --input: {err}", file=sys.stderr)
            sys.exit(2)

    agent = MarketingAgent(config=default_config)
    result = asyncio.run(agent.run(input_data))

    print(json.dumps(result, indent=2))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,161 @@
|
||||
"""Marketing Content Agent — goal, edges, graph spec, and agent class."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from framework.graph import EdgeCondition, EdgeSpec, Goal, SuccessCriterion, Constraint
|
||||
from framework.graph.edge import GraphSpec
|
||||
from framework.graph.executor import GraphExecutor
|
||||
from framework.runtime.core import Runtime
|
||||
from framework.llm.anthropic import AnthropicProvider
|
||||
|
||||
from .config import default_config, RuntimeConfig
|
||||
from .nodes import all_nodes
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Goal
|
||||
# ---------------------------------------------------------------------------
|
||||
# Declarative goal consumed by the graph executor; IDs here are referenced
# by the GraphSpec (goal_id) in agent.py.
goal = Goal(
    id="marketing-content",
    name="Marketing Content Generator",
    description=(
        "Generate targeted marketing content across multiple channels "
        "for a given product and audience."
    ),
    # How the framework evaluates "custom" metric targets is not visible
    # here — presumably free-text checked by an evaluator; confirm.
    success_criteria=[
        SuccessCriterion(
            id="audience-analyzed",
            description="Audience analysis is produced with demographics and pain points",
            metric="output_contains",
            target="audience_analysis",
        ),
        SuccessCriterion(
            id="content-generated",
            description="At least 2 channel-specific content pieces are generated",
            metric="custom",
            target="len(content) >= 2",
        ),
        SuccessCriterion(
            id="variants-provided",
            description="A/B variants are provided for each content piece",
            metric="custom",
            target="all variants present",
        ),
    ],
    # "hard" constraints must never be violated; "soft" are best-effort.
    constraints=[
        Constraint(
            id="no-competitor-names",
            description="No competitor brand names in generated content",
            constraint_type="hard",
            category="safety",
        ),
        Constraint(
            id="social-length",
            description="Social media content should be under 280 characters",
            constraint_type="soft",
            category="quality",
        ),
    ],
    # JSON-schema-like shapes for the agent's run() context and output.
    input_schema={
        "product_description": {"type": "string"},
        "target_audience": {"type": "string"},
        "brand_voice": {"type": "string"},
        "channels": {"type": "array", "items": {"type": "string"}},
    },
    output_schema={
        "audience_analysis": {"type": "object"},
        "content": {"type": "array"},
    },
)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Edges
|
||||
# ---------------------------------------------------------------------------
|
||||
# Edge wiring: a linear happy path plus one conditional revision loop.
edges = [
    EdgeSpec(
        id="analyze-to-generate",
        source="analyze-audience",
        target="generate-content",
        condition=EdgeCondition.ON_SUCCESS,
        description="After audience analysis, generate content",
    ),
    EdgeSpec(
        id="generate-to-review",
        source="generate-content",
        target="review-and-refine",
        condition=EdgeCondition.ON_SUCCESS,
        description="After content generation, review and refine",
    ),
    # Revision loop carries priority=10 so it is evaluated before the
    # default terminal outcome when needs_revision is truthy.
    EdgeSpec(
        id="review-to-regenerate",
        source="review-and-refine",
        target="generate-content",
        condition=EdgeCondition.CONDITIONAL,
        condition_expr="needs_revision == True",
        priority=10,
        description="If revision needed, loop back to content generation",
    ),
]
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Graph structure
|
||||
# ---------------------------------------------------------------------------
|
||||
# Single entry point; the review node is the only terminal and there are
# no human-input pause nodes in this template.
entry_node = "analyze-audience"
entry_points = {"start": "analyze-audience"}
terminal_nodes = ["review-and-refine"]
pause_nodes = []
# Re-export the NodeSpec list defined in nodes/__init__.py.
nodes = all_nodes
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Agent class
|
||||
# ---------------------------------------------------------------------------
|
||||
class MarketingAgent:
    """Agent that produces multi-channel marketing content with A/B variants.

    Thin wrapper that assembles the module-level goal/node/edge specs into a
    GraphSpec and drives it through a GraphExecutor.
    """

    def __init__(self, config: RuntimeConfig | None = None):
        # Fall back to the module-level defaults when no config is supplied.
        self.config = default_config if config is None else config
        self.goal = goal
        self.nodes = nodes
        self.edges = edges
        self.entry_node = entry_node
        self.terminal_nodes = terminal_nodes
        # Lazily created on each run by _create_executor().
        self.executor = None

    def _build_graph(self) -> GraphSpec:
        """Assemble the static workflow graph from module-level specs."""
        return GraphSpec(
            id="marketing-content-graph",
            goal_id=self.goal.id,
            entry_node=self.entry_node,
            entry_points=entry_points,
            terminal_nodes=self.terminal_nodes,
            pause_nodes=pause_nodes,
            nodes=self.nodes,
            edges=self.edges,
            default_model=self.config.model,
            max_tokens=self.config.max_tokens,
            description="Marketing content generation workflow",
        )

    def _create_executor(self):
        """Build (and remember) a GraphExecutor wired to storage and the LLM."""
        storage = Path(self.config.storage_path).expanduser()
        provider = AnthropicProvider(model=self.config.model)
        self.executor = GraphExecutor(runtime=Runtime(storage_path=storage), llm=provider)
        return self.executor

    async def run(self, context: dict, mock_mode: bool = False) -> dict:
        """Execute the workflow on *context* and return a plain-dict summary."""
        outcome = await self._create_executor().execute(
            graph=self._build_graph(),
            goal=self.goal,
            input_data=context,
        )
        return {
            "success": outcome.success,
            "output": outcome.output,
            "steps": outcome.steps_executed,
            "path": outcome.path,
        }
|
||||
|
||||
|
||||
# Module-level convenience instance (uses default_config) for quick imports.
default_agent = MarketingAgent()
|
||||
@@ -0,0 +1,24 @@
|
||||
"""Runtime configuration for Marketing Content Agent."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
@dataclass
class RuntimeConfig:
    """Runtime knobs for the marketing agent."""

    model: str = "claude-haiku-4-5-20251001"  # LLM model identifier
    max_tokens: int = 2048  # per-completion token cap passed to the graph
    storage_path: str = "~/.hive/storage"  # expanded via Path.expanduser() at runtime
    mock_mode: bool = False  # presumably lets callers skip real LLM calls — confirm usage
|
||||
|
||||
|
||||
@dataclass
class AgentMetadata:
    """Descriptive metadata about this agent package (not used at runtime here)."""

    name: str = "marketing_agent"
    version: str = "0.1.0"
    description: str = "Multi-channel marketing content generator"
    author: str = ""
    # default_factory avoids the shared-mutable-default pitfall for the list.
    tags: list[str] = field(default_factory=lambda: ["marketing", "content", "template"])


# Shared module-level instances imported by agent.py / __main__.py.
default_config = RuntimeConfig()
metadata = AgentMetadata()
|
||||
@@ -0,0 +1,106 @@
|
||||
"""Node definitions for Marketing Content Agent."""
|
||||
|
||||
from framework.graph import NodeSpec
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Node 1: Analyze the target audience
|
||||
# ---------------------------------------------------------------------------
|
||||
# NOTE on prompt templates below: single braces are context placeholders
# filled in by the framework; doubled braces escape literal JSON braces.
analyze_audience_node = NodeSpec(
    id="analyze-audience",
    name="Analyze Audience",
    description="Produce a structured audience analysis from the product and target audience description.",
    node_type="llm_generate",
    input_keys=["product_description", "target_audience"],
    output_keys=["audience_analysis"],
    system_prompt="""\
You are a marketing strategist. Analyze the target audience for a product.

Product: {product_description}
Target audience: {target_audience}

Produce a structured analysis as raw JSON (no markdown):
{{
  "audience_analysis": {{
    "demographics": "...",
    "pain_points": ["..."],
    "motivations": ["..."],
    "preferred_channels": ["..."],
    "messaging_angle": "..."
  }}
}}
""",
    tools=[],
    max_retries=2,
)

# ---------------------------------------------------------------------------
# Node 2: Generate channel-specific content with A/B variants
# ---------------------------------------------------------------------------
generate_content_node = NodeSpec(
    id="generate-content",
    name="Generate Content",
    description="Create marketing copy for each requested channel with two variants per channel.",
    node_type="llm_generate",
    input_keys=["product_description", "audience_analysis", "brand_voice", "channels"],
    output_keys=["content"],
    system_prompt="""\
You are a marketing copywriter. Generate content for each channel.

Product: {product_description}
Audience analysis: {audience_analysis}
Brand voice: {brand_voice}
Channels: {channels}

For each channel, produce two variants (A and B).

Output as raw JSON (no markdown):
{{
  "content": [
    {{
      "channel": "twitter",
      "variant_a": "...",
      "variant_b": "..."
    }}
  ]
}}
""",
    tools=[],
    max_retries=2,
)

# ---------------------------------------------------------------------------
# Node 3: Review and refine content
# ---------------------------------------------------------------------------
# Emits both keys consumed by the conditional edge in agent.py:
# needs_revision == True routes back to generate-content.
review_and_refine_node = NodeSpec(
    id="review-and-refine",
    name="Review and Refine",
    description="Review generated content for brand voice alignment and channel fit. Revise if needed.",
    node_type="llm_generate",
    input_keys=["content", "brand_voice"],
    output_keys=["content", "needs_revision"],
    system_prompt="""\
You are a senior marketing editor. Review the following content for brand
voice alignment, clarity, and channel appropriateness.

Content: {content}
Brand voice: {brand_voice}

If any piece needs revision, fix it and set needs_revision to true.
If everything looks good, return the content unchanged with needs_revision false.

Output as raw JSON (no markdown):
{{
  "content": [...],
  "needs_revision": false
}}
""",
    tools=[],
    max_retries=2,
)

# All nodes for easy import
all_nodes = [
    analyze_audience_node,
    generate_content_node,
    review_and_refine_node,
]
|
||||
+135
-83
@@ -11,6 +11,14 @@
|
||||
|
||||
set -e
|
||||
|
||||
# Detect Bash version for compatibility
|
||||
BASH_MAJOR_VERSION="${BASH_VERSINFO[0]}"
|
||||
USE_ASSOC_ARRAYS=false
|
||||
if [ "$BASH_MAJOR_VERSION" -ge 4 ]; then
|
||||
USE_ASSOC_ARRAYS=true
|
||||
fi
|
||||
echo "[debug] Bash version: ${BASH_VERSION}"
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
@@ -52,7 +60,7 @@ prompt_choice() {
|
||||
echo -e "${BOLD}$prompt${NC}"
|
||||
for opt in "${options[@]}"; do
|
||||
echo -e " ${CYAN}$i)${NC} $opt"
|
||||
((i++))
|
||||
i=$((i + 1))
|
||||
done
|
||||
echo ""
|
||||
|
||||
@@ -60,7 +68,8 @@ prompt_choice() {
|
||||
while true; do
|
||||
read -r -p "Enter choice (1-${#options[@]}): " choice
|
||||
if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le "${#options[@]}" ]; then
|
||||
return $((choice - 1))
|
||||
PROMPT_CHOICE=$((choice - 1))
|
||||
return 0
|
||||
fi
|
||||
echo -e "${RED}Invalid choice. Please enter 1-${#options[@]}${NC}"
|
||||
done
|
||||
@@ -174,18 +183,12 @@ echo ""
|
||||
echo -e "${DIM}This may take a minute...${NC}"
|
||||
echo ""
|
||||
|
||||
# Upgrade pip, setuptools, and wheel
|
||||
echo -n " Upgrading pip... "
|
||||
$PYTHON_CMD -m pip install --upgrade pip setuptools wheel > /dev/null 2>&1
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
|
||||
# Install framework package from core/
|
||||
echo -n " Installing framework... "
|
||||
cd "$SCRIPT_DIR/core"
|
||||
|
||||
if [ -f "pyproject.toml" ]; then
|
||||
uv sync > /dev/null 2>&1
|
||||
if [ $? -eq 0 ]; then
|
||||
if uv sync > /dev/null 2>&1; then
|
||||
echo -e "${GREEN} ✓ framework package installed${NC}"
|
||||
else
|
||||
echo -e "${YELLOW} ⚠ framework installation had issues (may be OK)${NC}"
|
||||
@@ -200,8 +203,7 @@ echo -n " Installing tools... "
|
||||
cd "$SCRIPT_DIR/tools"
|
||||
|
||||
if [ -f "pyproject.toml" ]; then
|
||||
uv sync > /dev/null 2>&1
|
||||
if [ $? -eq 0 ]; then
|
||||
if uv sync > /dev/null 2>&1; then
|
||||
echo -e "${GREEN} ✓ aden_tools package installed${NC}"
|
||||
else
|
||||
echo -e "${RED} ✗ aden_tools installation failed${NC}"
|
||||
@@ -212,21 +214,6 @@ else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Install MCP dependencies
|
||||
echo -n " Installing MCP... "
|
||||
$PYTHON_CMD -m pip install mcp fastmcp > /dev/null 2>&1
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
|
||||
# Fix openai version compatibility
|
||||
echo -n " Checking openai... "
|
||||
$PYTHON_CMD -m pip install "openai>=1.0.0" > /dev/null 2>&1
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
|
||||
# Install click for CLI
|
||||
echo -n " Installing CLI tools... "
|
||||
$PYTHON_CMD -m pip install click > /dev/null 2>&1
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
|
||||
# Install Playwright browser
|
||||
echo -n " Installing Playwright browser... "
|
||||
if $PYTHON_CMD -c "import playwright" > /dev/null 2>&1; then
|
||||
@@ -344,53 +331,105 @@ echo ""
|
||||
echo -e "${BLUE}Step 4: Verifying Claude Code skills...${NC}"
|
||||
echo ""
|
||||
|
||||
# Provider data as parallel indexed arrays (Bash 3.2 compatible — no declare -A)
|
||||
PROVIDER_ENV_VARS=(ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY GOOGLE_API_KEY GROQ_API_KEY CEREBRAS_API_KEY MISTRAL_API_KEY TOGETHER_API_KEY DEEPSEEK_API_KEY)
|
||||
PROVIDER_DISPLAY_NAMES=("Anthropic (Claude)" "OpenAI (GPT)" "Google Gemini" "Google AI" "Groq" "Cerebras" "Mistral" "Together AI" "DeepSeek")
|
||||
PROVIDER_ID_LIST=(anthropic openai gemini google groq cerebras mistral together deepseek)
|
||||
# Provider configuration - use associative arrays (Bash 4+) or indexed arrays (Bash 3.2)
|
||||
if [ "$USE_ASSOC_ARRAYS" = true ]; then
|
||||
# Bash 4+ - use associative arrays (cleaner and more efficient)
|
||||
declare -A PROVIDER_NAMES=(
|
||||
["ANTHROPIC_API_KEY"]="Anthropic (Claude)"
|
||||
["OPENAI_API_KEY"]="OpenAI (GPT)"
|
||||
["GEMINI_API_KEY"]="Google Gemini"
|
||||
["GOOGLE_API_KEY"]="Google AI"
|
||||
["GROQ_API_KEY"]="Groq"
|
||||
["CEREBRAS_API_KEY"]="Cerebras"
|
||||
["MISTRAL_API_KEY"]="Mistral"
|
||||
["TOGETHER_API_KEY"]="Together AI"
|
||||
["DEEPSEEK_API_KEY"]="DeepSeek"
|
||||
)
|
||||
|
||||
# Default models by provider id (parallel arrays)
|
||||
MODEL_PROVIDER_IDS=(anthropic openai gemini groq cerebras mistral together_ai deepseek)
|
||||
MODEL_DEFAULTS=("claude-sonnet-4-5-20250929" "gpt-4o" "gemini-3.0-flash-preview" "moonshotai/kimi-k2-instruct-0905" "zai-glm-4.7" "mistral-large-latest" "meta-llama/Llama-3.3-70B-Instruct-Turbo" "deepseek-chat")
|
||||
declare -A PROVIDER_IDS=(
|
||||
["ANTHROPIC_API_KEY"]="anthropic"
|
||||
["OPENAI_API_KEY"]="openai"
|
||||
["GEMINI_API_KEY"]="gemini"
|
||||
["GOOGLE_API_KEY"]="google"
|
||||
["GROQ_API_KEY"]="groq"
|
||||
["CEREBRAS_API_KEY"]="cerebras"
|
||||
["MISTRAL_API_KEY"]="mistral"
|
||||
["TOGETHER_API_KEY"]="together"
|
||||
["DEEPSEEK_API_KEY"]="deepseek"
|
||||
)
|
||||
|
||||
# Helper: get provider display name for an env var
|
||||
get_provider_name() {
|
||||
local env_var="$1"
|
||||
local i=0
|
||||
while [ $i -lt ${#PROVIDER_ENV_VARS[@]} ]; do
|
||||
if [ "${PROVIDER_ENV_VARS[$i]}" = "$env_var" ]; then
|
||||
echo "${PROVIDER_DISPLAY_NAMES[$i]}"
|
||||
return
|
||||
fi
|
||||
i=$((i + 1))
|
||||
done
|
||||
}
|
||||
declare -A DEFAULT_MODELS=(
|
||||
["anthropic"]="claude-sonnet-4-5-20250929"
|
||||
["openai"]="gpt-4o"
|
||||
["gemini"]="gemini-3.0-flash-preview"
|
||||
["groq"]="moonshotai/kimi-k2-instruct-0905"
|
||||
["cerebras"]="zai-glm-4.7"
|
||||
["mistral"]="mistral-large-latest"
|
||||
["together_ai"]="meta-llama/Llama-3.3-70B-Instruct-Turbo"
|
||||
["deepseek"]="deepseek-chat"
|
||||
)
|
||||
|
||||
# Helper: get provider id for an env var
|
||||
get_provider_id() {
|
||||
local env_var="$1"
|
||||
local i=0
|
||||
while [ $i -lt ${#PROVIDER_ENV_VARS[@]} ]; do
|
||||
if [ "${PROVIDER_ENV_VARS[$i]}" = "$env_var" ]; then
|
||||
echo "${PROVIDER_ID_LIST[$i]}"
|
||||
return
|
||||
fi
|
||||
i=$((i + 1))
|
||||
done
|
||||
}
|
||||
# Helper functions for Bash 4+
|
||||
get_provider_name() {
|
||||
echo "${PROVIDER_NAMES[$1]}"
|
||||
}
|
||||
|
||||
# Helper: get default model for a provider id
|
||||
get_default_model() {
|
||||
local provider_id="$1"
|
||||
local i=0
|
||||
while [ $i -lt ${#MODEL_PROVIDER_IDS[@]} ]; do
|
||||
if [ "${MODEL_PROVIDER_IDS[$i]}" = "$provider_id" ]; then
|
||||
echo "${MODEL_DEFAULTS[$i]}"
|
||||
return
|
||||
fi
|
||||
i=$((i + 1))
|
||||
done
|
||||
}
|
||||
get_provider_id() {
|
||||
echo "${PROVIDER_IDS[$1]}"
|
||||
}
|
||||
|
||||
get_default_model() {
|
||||
echo "${DEFAULT_MODELS[$1]}"
|
||||
}
|
||||
else
|
||||
# Bash 3.2 - use parallel indexed arrays
|
||||
PROVIDER_ENV_VARS=(ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY GOOGLE_API_KEY GROQ_API_KEY CEREBRAS_API_KEY MISTRAL_API_KEY TOGETHER_API_KEY DEEPSEEK_API_KEY)
|
||||
PROVIDER_DISPLAY_NAMES=("Anthropic (Claude)" "OpenAI (GPT)" "Google Gemini" "Google AI" "Groq" "Cerebras" "Mistral" "Together AI" "DeepSeek")
|
||||
PROVIDER_ID_LIST=(anthropic openai gemini google groq cerebras mistral together deepseek)
|
||||
|
||||
# Default models by provider id (parallel arrays)
|
||||
MODEL_PROVIDER_IDS=(anthropic openai gemini groq cerebras mistral together_ai deepseek)
|
||||
MODEL_DEFAULTS=("claude-sonnet-4-5-20250929" "gpt-4o" "gemini-3.0-flash-preview" "moonshotai/kimi-k2-instruct-0905" "zai-glm-4.7" "mistral-large-latest" "meta-llama/Llama-3.3-70B-Instruct-Turbo" "deepseek-chat")
|
||||
|
||||
# Helper: get provider display name for an env var
|
||||
get_provider_name() {
|
||||
local env_var="$1"
|
||||
local i=0
|
||||
while [ $i -lt ${#PROVIDER_ENV_VARS[@]} ]; do
|
||||
if [ "${PROVIDER_ENV_VARS[$i]}" = "$env_var" ]; then
|
||||
echo "${PROVIDER_DISPLAY_NAMES[$i]}"
|
||||
return
|
||||
fi
|
||||
i=$((i + 1))
|
||||
done
|
||||
}
|
||||
|
||||
# Helper: get provider id for an env var
|
||||
get_provider_id() {
|
||||
local env_var="$1"
|
||||
local i=0
|
||||
while [ $i -lt ${#PROVIDER_ENV_VARS[@]} ]; do
|
||||
if [ "${PROVIDER_ENV_VARS[$i]}" = "$env_var" ]; then
|
||||
echo "${PROVIDER_ID_LIST[$i]}"
|
||||
return
|
||||
fi
|
||||
i=$((i + 1))
|
||||
done
|
||||
}
|
||||
|
||||
# Helper: get default model for a provider id
|
||||
get_default_model() {
|
||||
local provider_id="$1"
|
||||
local i=0
|
||||
while [ $i -lt ${#MODEL_PROVIDER_IDS[@]} ]; do
|
||||
if [ "${MODEL_PROVIDER_IDS[$i]}" = "$provider_id" ]; then
|
||||
echo "${MODEL_DEFAULTS[$i]}"
|
||||
return
|
||||
fi
|
||||
i=$((i + 1))
|
||||
done
|
||||
}
|
||||
fi
|
||||
|
||||
# Configuration directory
|
||||
HIVE_CONFIG_DIR="$HOME/.hive"
|
||||
@@ -413,7 +452,7 @@ config = {
|
||||
'model': '$model',
|
||||
'api_key_env_var': '$env_var'
|
||||
},
|
||||
'created_at': '$(date -Iseconds)'
|
||||
'created_at': '$(date -u +"%Y-%m-%dT%H:%M:%S+00:00")'
|
||||
}
|
||||
with open('$HIVE_CONFIG_FILE', 'w') as f:
|
||||
json.dump(config, f, indent=2)
|
||||
@@ -442,13 +481,25 @@ FOUND_ENV_VARS=() # Corresponding env var names
|
||||
SELECTED_PROVIDER_ID="" # Will hold the chosen provider ID
|
||||
SELECTED_ENV_VAR="" # Will hold the chosen env var
|
||||
|
||||
for env_var in "${PROVIDER_ENV_VARS[@]}"; do
|
||||
value="${!env_var}"
|
||||
if [ -n "$value" ]; then
|
||||
FOUND_PROVIDERS+=("$(get_provider_name "$env_var")")
|
||||
FOUND_ENV_VARS+=("$env_var")
|
||||
fi
|
||||
done
|
||||
if [ "$USE_ASSOC_ARRAYS" = true ]; then
|
||||
# Bash 4+ - iterate over associative array keys
|
||||
for env_var in "${!PROVIDER_NAMES[@]}"; do
|
||||
value="${!env_var}"
|
||||
if [ -n "$value" ]; then
|
||||
FOUND_PROVIDERS+=("$(get_provider_name "$env_var")")
|
||||
FOUND_ENV_VARS+=("$env_var")
|
||||
fi
|
||||
done
|
||||
else
|
||||
# Bash 3.2 - iterate over indexed array
|
||||
for env_var in "${PROVIDER_ENV_VARS[@]}"; do
|
||||
value="${!env_var}"
|
||||
if [ -n "$value" ]; then
|
||||
FOUND_PROVIDERS+=("$(get_provider_name "$env_var")")
|
||||
FOUND_ENV_VARS+=("$env_var")
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [ ${#FOUND_PROVIDERS[@]} -gt 0 ]; then
|
||||
echo "Found API keys:"
|
||||
@@ -476,7 +527,7 @@ if [ ${#FOUND_PROVIDERS[@]} -gt 0 ]; then
|
||||
i=1
|
||||
for provider in "${FOUND_PROVIDERS[@]}"; do
|
||||
echo -e " ${CYAN}$i)${NC} $provider"
|
||||
((i++))
|
||||
i=$((i + 1))
|
||||
done
|
||||
echo ""
|
||||
|
||||
@@ -507,7 +558,7 @@ if [ -z "$SELECTED_PROVIDER_ID" ]; then
|
||||
"Groq - Fast, free tier" \
|
||||
"Cerebras - Fast, free tier" \
|
||||
"Skip for now"
|
||||
choice=$?
|
||||
choice=$PROMPT_CHOICE
|
||||
|
||||
case $choice in
|
||||
0)
|
||||
@@ -542,7 +593,8 @@ if [ -z "$SELECTED_PROVIDER_ID" ]; then
|
||||
;;
|
||||
5)
|
||||
echo ""
|
||||
echo -e "${YELLOW}Skipped.${NC} Add your API key later:"
|
||||
echo -e "${YELLOW}Skipped.${NC} An LLM API key is required to test and use worker agents."
|
||||
echo -e "Add your API key later by running:"
|
||||
echo ""
|
||||
echo -e " ${CYAN}echo 'ANTHROPIC_API_KEY=your-key' >> .env${NC}"
|
||||
echo ""
|
||||
@@ -595,7 +647,7 @@ ERRORS=0
|
||||
|
||||
# Test imports
|
||||
echo -n " ⬡ framework... "
|
||||
if $PYTHON_CMD -c "import framework" > /dev/null 2>&1; then
|
||||
if $CORE_PYTHON -c "import framework" > /dev/null 2>&1; then
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
else
|
||||
echo -e "${RED}failed${NC}"
|
||||
@@ -603,7 +655,7 @@ else
|
||||
fi
|
||||
|
||||
echo -n " ⬡ aden_tools... "
|
||||
if $PYTHON_CMD -c "import aden_tools" > /dev/null 2>&1; then
|
||||
if $TOOLS_PYTHON -c "import aden_tools" > /dev/null 2>&1; then
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
else
|
||||
echo -e "${RED}failed${NC}"
|
||||
@@ -611,7 +663,7 @@ else
|
||||
fi
|
||||
|
||||
echo -n " ⬡ litellm... "
|
||||
if $PYTHON_CMD -c "import litellm" > /dev/null 2>&1; then
|
||||
if $CORE_PYTHON -c "import litellm" > /dev/null 2>&1 || $TOOLS_PYTHON -c "import litellm" > /dev/null 2>&1; then
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}--${NC}"
|
||||
|
||||
@@ -0,0 +1,251 @@
|
||||
<#
|
||||
|
||||
setup-python.ps1 - Python Environment Setup for Aden Agent Framework
|
||||
|
||||
This script sets up the Python environment with all required packages
|
||||
for building and running goal-driven agents.
|
||||
#>
|
||||
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
# Colors for output
|
||||
$RED = "Red"
|
||||
$GREEN = "Green"
|
||||
$YELLOW = "Yellow"
|
||||
$BLUE = "Cyan"
|
||||
|
||||
# Get the directory where this script is located
|
||||
$SCRIPT_DIR = Split-Path -Parent $MyInvocation.MyCommand.Path
|
||||
$PROJECT_ROOT = Split-Path -Parent $SCRIPT_DIR
|
||||
|
||||
Write-Host ""
|
||||
Write-Host "=================================================="
|
||||
Write-Host " Aden Agent Framework - Python Setup"
|
||||
Write-Host "=================================================="
|
||||
Write-Host ""
|
||||
|
||||
# Check for Python
|
||||
$pythonCmd = $null
|
||||
if (Get-Command python -ErrorAction SilentlyContinue) {
|
||||
$pythonCmd = "python"
|
||||
}
|
||||
|
||||
if (-not $pythonCmd) {
|
||||
Write-Host "Error: Python is not installed." -ForegroundColor $RED
|
||||
Write-Host "Please install Python 3.11+ from https://python.org"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Check Python version
|
||||
$versionInfo = & $pythonCmd -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"
|
||||
$major = & $pythonCmd -c "import sys; print(sys.version_info.major)"
|
||||
$minor = & $pythonCmd -c "import sys; print(sys.version_info.minor)"
|
||||
|
||||
Write-Host "Detected Python: $versionInfo" -ForegroundColor $BLUE
|
||||
|
||||
if ($major -lt 3 -or ($major -eq 3 -and $minor -lt 11)) {
|
||||
Write-Host "Error: Python 3.11+ is required (found $versionInfo)" -ForegroundColor $RED
|
||||
Write-Host "Please upgrade your Python installation"
|
||||
exit 1
|
||||
}
|
||||
|
||||
if ($minor -lt 11) {
|
||||
Write-Host "Warning: Python 3.11+ is recommended for best compatibility" -ForegroundColor $YELLOW
|
||||
Write-Host "You have Python $versionInfo which may work but is not officially supported" -ForegroundColor $YELLOW
|
||||
Write-Host ""
|
||||
}
|
||||
|
||||
Write-Host "[OK] Python version check passed" -ForegroundColor $GREEN
|
||||
Write-Host ""
|
||||
|
||||
# Create and activate virtual environment
|
||||
Write-Host "=================================================="
|
||||
Write-Host "Setting up Python Virtual Environment"
|
||||
Write-Host "=================================================="
|
||||
Write-Host ""
|
||||
|
||||
$VENV_PATH = Join-Path $PROJECT_ROOT ".venv"
|
||||
$VENV_PYTHON = Join-Path $VENV_PATH "Scripts\python.exe"
|
||||
$VENV_ACTIVATE = Join-Path $VENV_PATH "Scripts\Activate.ps1"
|
||||
|
||||
if (-not (Test-Path $VENV_PYTHON)) {
|
||||
Write-Host "Creating virtual environment at .venv..."
|
||||
& $pythonCmd -m venv $VENV_PATH
|
||||
Write-Host "[OK] Virtual environment created" -ForegroundColor $GREEN
|
||||
}
|
||||
else {
|
||||
Write-Host "[OK] Virtual environment already exists" -ForegroundColor $GREEN
|
||||
}
|
||||
|
||||
# Activate venv
|
||||
Write-Host "Activating virtual environment..."
|
||||
& $VENV_ACTIVATE
|
||||
Write-Host "[OK] Virtual environment activated" -ForegroundColor $GREEN
|
||||
|
||||
# From here on, always use venv python
|
||||
$pythonCmd = $VENV_PYTHON
|
||||
|
||||
Write-Host ""
|
||||
|
||||
# Check for pip
|
||||
try {
|
||||
& $pythonCmd -m pip --version | Out-Null
|
||||
}
|
||||
catch {
|
||||
Write-Host "Error: pip is not installed" -ForegroundColor $RED
|
||||
Write-Host "Please install pip for Python $versionInfo"
|
||||
exit 1
|
||||
}
|
||||
|
||||
Write-Host "[OK] pip detected" -ForegroundColor $GREEN
|
||||
Write-Host ""
|
||||
|
||||
# Upgrade pip, setuptools, and wheel
|
||||
Write-Host "Upgrading pip, setuptools, and wheel..."
|
||||
& $pythonCmd -m pip install --upgrade pip setuptools wheel
|
||||
Write-Host "[OK] Core packages upgraded" -ForegroundColor $GREEN
|
||||
Write-Host ""
|
||||
|
||||
# Install core framework package
|
||||
Write-Host "=================================================="
|
||||
Write-Host "Installing Core Framework Package"
|
||||
Write-Host "=================================================="
|
||||
Write-Host ""
|
||||
|
||||
Set-Location "$PROJECT_ROOT\core"
|
||||
|
||||
if (Test-Path "pyproject.toml") {
|
||||
Write-Host "Installing framework from core/ (editable mode)..."
|
||||
& $pythonCmd -m pip install -e . | Out-Null
|
||||
Write-Host "[OK] Framework package installed" -ForegroundColor $GREEN
|
||||
}
|
||||
else {
|
||||
Write-Host "[WARN] No pyproject.toml found in core/, skipping framework installation" -ForegroundColor $YELLOW
|
||||
}
|
||||
|
||||
Write-Host ""
|
||||
|
||||
# Install tools package
|
||||
Write-Host "=================================================="
|
||||
Write-Host "Installing Tools Package (aden_tools)"
|
||||
Write-Host "=================================================="
|
||||
Write-Host ""
|
||||
|
||||
Set-Location "$PROJECT_ROOT\tools"
|
||||
|
||||
if (Test-Path "pyproject.toml") {
|
||||
Write-Host "Installing aden_tools from tools/ (editable mode)..."
|
||||
& $pythonCmd -m pip install -e . | Out-Null
|
||||
Write-Host "[OK] Tools package installed" -ForegroundColor $GREEN
|
||||
}
|
||||
else {
|
||||
Write-Host "Error: No pyproject.toml found in tools/" -ForegroundColor $RED
|
||||
exit 1
|
||||
}
|
||||
|
||||
Write-Host ""
|
||||
|
||||
# Fix openai version compatibility with litellm
|
||||
Write-Host "=================================================="
|
||||
Write-Host "Fixing Package Compatibility"
|
||||
Write-Host "=================================================="
|
||||
Write-Host ""
|
||||
|
||||
try {
|
||||
$openaiVersion = & $pythonCmd -c "import openai; print(openai.__version__)"
|
||||
}
|
||||
catch {
|
||||
$openaiVersion = "not_installed"
|
||||
}
|
||||
|
||||
if ($openaiVersion -eq "not_installed") {
|
||||
Write-Host "Installing openai package..."
|
||||
& $pythonCmd -m pip install "openai>=1.0.0" | Out-Null
|
||||
Write-Host "[OK] openai package installed" -ForegroundColor $GREEN
|
||||
}
|
||||
elseif ($openaiVersion.StartsWith("0.")) {
|
||||
Write-Host "Found old openai version: $openaiVersion" -ForegroundColor $YELLOW
|
||||
Write-Host "Upgrading to openai 1.x+ for litellm compatibility..."
|
||||
& $pythonCmd -m pip install --upgrade "openai>=1.0.0" | Out-Null
|
||||
$openaiVersion = & $pythonCmd -c "import openai; print(openai.__version__)"
|
||||
Write-Host "[OK] openai upgraded to $openaiVersion" -ForegroundColor $GREEN
|
||||
}
|
||||
else {
|
||||
Write-Host "[OK] openai $openaiVersion is compatible" -ForegroundColor $GREEN
|
||||
}
|
||||
|
||||
Write-Host ""
|
||||
|
||||
# Verify installations
|
||||
Write-Host "=================================================="
|
||||
Write-Host "Verifying Installation"
|
||||
Write-Host "=================================================="
|
||||
Write-Host ""
|
||||
|
||||
Set-Location $PROJECT_ROOT
|
||||
|
||||
# Test framework import
|
||||
& $pythonCmd -c "import framework" 2>$null
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
Write-Host "[OK] framework package imports successfully" -ForegroundColor Green
|
||||
}
|
||||
else {
|
||||
Write-Host "[FAIL] framework package import failed" -ForegroundColor Red
|
||||
}
|
||||
|
||||
# Test aden_tools import
|
||||
& $pythonCmd -c "import aden_tools" 2>$null
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
Write-Host "[OK] aden_tools package imports successfully" -ForegroundColor Green
|
||||
}
|
||||
else {
|
||||
Write-Host "[FAIL] aden_tools package import failed" -ForegroundColor Red
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Test litellm
|
||||
& $pythonCmd -c "import litellm" 2>$null
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
Write-Host "[OK] litellm package imports successfully" -ForegroundColor $GREEN
|
||||
}
|
||||
else {
|
||||
Write-Host "[WARN] litellm import had issues (may be OK if not using LLM features)" -ForegroundColor $YELLOW
|
||||
}
|
||||
|
||||
Write-Host ""
|
||||
|
||||
# Print agent commands
|
||||
Write-Host "=================================================="
|
||||
Write-Host " Setup Complete!"
|
||||
Write-Host "=================================================="
|
||||
Write-Host ""
|
||||
Write-Host "Python packages installed:"
|
||||
Write-Host " - framework (core agent runtime)"
|
||||
Write-Host " - aden_tools (tools and MCP servers)"
|
||||
Write-Host " - All dependencies and compatibility fixes applied"
|
||||
Write-Host ""
|
||||
Write-Host "To run agents on Windows (PowerShell):"
|
||||
Write-Host ""
|
||||
Write-Host "1. From the project root, set PYTHONPATH:"
|
||||
Write-Host " `$env:PYTHONPATH=`"core;exports`""
|
||||
Write-Host ""
|
||||
Write-Host "2. Run an agent command:"
|
||||
Write-Host " python -m agent_name validate"
|
||||
Write-Host " python -m agent_name info"
|
||||
Write-Host " python -m agent_name run --input '{...}'"
|
||||
Write-Host ""
|
||||
Write-Host "Example (support_ticket_agent):"
|
||||
Write-Host " python -m support_ticket_agent validate"
|
||||
Write-Host " python -m support_ticket_agent info"
|
||||
Write-Host " python -m support_ticket_agent run --input '{""ticket_content"":""..."",""customer_id"":""..."",""ticket_id"":""...""}'"
|
||||
Write-Host ""
|
||||
Write-Host "Notes:"
|
||||
Write-Host " - Ensure the virtual environment is activated (.venv)"
|
||||
Write-Host " - PYTHONPATH must be set in each new PowerShell session"
|
||||
Write-Host ""
|
||||
Write-Host "Documentation:"
|
||||
Write-Host " $PROJECT_ROOT\README.md"
|
||||
Write-Host ""
|
||||
Write-Host "Agent Examples:"
|
||||
Write-Host " $PROJECT_ROOT\exports\"
|
||||
Write-Host ""
|
||||
@@ -1,13 +0,0 @@
|
||||
# MCP Server
|
||||
fastmcp
|
||||
|
||||
# Tool dependencies
|
||||
diff-match-patch
|
||||
pypdf
|
||||
beautifulsoup4
|
||||
lxml
|
||||
playwright
|
||||
playwright-stealth
|
||||
requests
|
||||
|
||||
# Note: After installing, run `playwright install` to download browser binaries
|
||||
@@ -9,7 +9,7 @@ Philosophy: Google Strictness + Apple UX
|
||||
|
||||
Usage:
|
||||
from aden_tools.credentials import CredentialStoreAdapter
|
||||
from core.framework.credentials import CredentialStore
|
||||
from framework.credentials import CredentialStore
|
||||
|
||||
# With encrypted storage (production)
|
||||
store = CredentialStore.with_encrypted_storage() # defaults to ~/.hive/credentials
|
||||
|
||||
@@ -7,6 +7,47 @@ Contains credentials for third-party service integrations (HubSpot, etc.).
|
||||
from .base import CredentialSpec
|
||||
|
||||
INTEGRATION_CREDENTIALS = {
|
||||
"github": CredentialSpec(
|
||||
env_var="GITHUB_TOKEN",
|
||||
tools=[
|
||||
"github_list_repos",
|
||||
"github_get_repo",
|
||||
"github_search_repos",
|
||||
"github_list_issues",
|
||||
"github_get_issue",
|
||||
"github_create_issue",
|
||||
"github_update_issue",
|
||||
"github_list_pull_requests",
|
||||
"github_get_pull_request",
|
||||
"github_create_pull_request",
|
||||
"github_search_code",
|
||||
"github_list_branches",
|
||||
"github_get_branch",
|
||||
],
|
||||
required=True,
|
||||
startup_required=False,
|
||||
help_url="https://github.com/settings/tokens",
|
||||
description="GitHub Personal Access Token (classic)",
|
||||
# Auth method support
|
||||
aden_supported=False,
|
||||
direct_api_key_supported=True,
|
||||
api_key_instructions="""To get a GitHub Personal Access Token:
|
||||
1. Go to GitHub Settings > Developer settings > Personal access tokens
|
||||
2. Click "Generate new token" > "Generate new token (classic)"
|
||||
3. Give your token a descriptive name (e.g., "Hive Agent")
|
||||
4. Select the following scopes:
|
||||
- repo (Full control of private repositories)
|
||||
- read:org (Read org and team membership - optional)
|
||||
- user (Read user profile data - optional)
|
||||
5. Click "Generate token" and copy the token (starts with ghp_)
|
||||
6. Store it securely - you won't be able to see it again!""",
|
||||
# Health check configuration
|
||||
health_check_endpoint="https://api.github.com/user",
|
||||
health_check_method="GET",
|
||||
# Credential store mapping
|
||||
credential_id="github",
|
||||
credential_key="access_token",
|
||||
),
|
||||
"hubspot": CredentialSpec(
|
||||
env_var="HUBSPOT_ACCESS_TOKEN",
|
||||
tools=[
|
||||
|
||||
@@ -5,7 +5,7 @@ This provides backward compatibility, allowing existing tools to work unchanged
|
||||
while enabling new features (template resolution, multi-key credentials, etc.).
|
||||
|
||||
Usage:
|
||||
from core.framework.credentials import CredentialStore
|
||||
from framework.credentials import CredentialStore
|
||||
from aden_tools.credentials.store_adapter import CredentialStoreAdapter
|
||||
|
||||
# Create new credential store
|
||||
@@ -31,7 +31,7 @@ from typing import TYPE_CHECKING
|
||||
from .base import CredentialError, CredentialSpec
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.framework.credentials import CredentialStore
|
||||
from framework.credentials import CredentialStore
|
||||
|
||||
|
||||
class CredentialStoreAdapter:
|
||||
@@ -368,7 +368,7 @@ class CredentialStoreAdapter:
|
||||
credentials = CredentialStoreAdapter.for_testing({"brave_search": "test-key"})
|
||||
assert credentials.get("brave_search") == "test-key"
|
||||
"""
|
||||
from core.framework.credentials import CredentialStore
|
||||
from framework.credentials import CredentialStore
|
||||
|
||||
# Convert to CredentialStore.for_testing format
|
||||
# Simple credentials get a single "api_key" key
|
||||
@@ -395,13 +395,14 @@ class CredentialStoreAdapter:
|
||||
Returns:
|
||||
CredentialStoreAdapter using env vars for storage
|
||||
"""
|
||||
from core.framework.credentials import CredentialStore
|
||||
from framework.credentials import CredentialStore
|
||||
|
||||
# Build env mapping from specs if not provided
|
||||
if env_mapping is None and specs is None:
|
||||
from . import CREDENTIAL_SPECS
|
||||
if env_mapping is None:
|
||||
if specs is None:
|
||||
from . import CREDENTIAL_SPECS
|
||||
|
||||
specs = CREDENTIAL_SPECS
|
||||
specs = CREDENTIAL_SPECS
|
||||
env_mapping = {name: spec.env_var for name, spec in specs.items()}
|
||||
|
||||
store = CredentialStore.with_env_storage(env_mapping)
|
||||
|
||||
@@ -38,6 +38,7 @@ from .file_system_toolkits.replace_file_content import (
|
||||
# Import file system toolkits
|
||||
from .file_system_toolkits.view_file import register_tools as register_view_file
|
||||
from .file_system_toolkits.write_to_file import register_tools as register_write_to_file
|
||||
from .github_tool import register_tools as register_github
|
||||
from .hubspot_tool import register_tools as register_hubspot
|
||||
from .pdf_read_tool import register_tools as register_pdf_read
|
||||
from .web_scrape_tool import register_tools as register_web_scrape
|
||||
@@ -68,6 +69,7 @@ def register_all_tools(
|
||||
# Tools that need credentials (pass credentials if provided)
|
||||
# web_search supports multiple providers (Google, Brave) with auto-detection
|
||||
register_web_search(mcp, credentials=credentials)
|
||||
register_github(mcp, credentials=credentials)
|
||||
# email supports multiple providers (Resend) with auto-detection
|
||||
register_email(mcp, credentials=credentials)
|
||||
register_hubspot(mcp, credentials=credentials)
|
||||
@@ -102,6 +104,19 @@ def register_all_tools(
|
||||
"csv_append",
|
||||
"csv_info",
|
||||
"csv_sql",
|
||||
"github_list_repos",
|
||||
"github_get_repo",
|
||||
"github_search_repos",
|
||||
"github_list_issues",
|
||||
"github_get_issue",
|
||||
"github_create_issue",
|
||||
"github_update_issue",
|
||||
"github_list_pull_requests",
|
||||
"github_get_pull_request",
|
||||
"github_create_pull_request",
|
||||
"github_search_code",
|
||||
"github_list_branches",
|
||||
"github_get_branch",
|
||||
"send_email",
|
||||
"send_budget_alert_email",
|
||||
"hubspot_search_contacts",
|
||||
|
||||
@@ -0,0 +1,646 @@
|
||||
# GitHub Tool
|
||||
|
||||
Interact with GitHub repositories, issues, and pull requests within the Aden agent framework.
|
||||
|
||||
## Installation
|
||||
|
||||
The GitHub tool uses `httpx` which is already included in the base dependencies. No additional installation required.
|
||||
|
||||
## Setup
|
||||
|
||||
You need a GitHub Personal Access Token (PAT) to use this tool.
|
||||
|
||||
### Getting a GitHub Token
|
||||
|
||||
1. Go to https://github.com/settings/tokens
|
||||
2. Click "Generate new token" → "Generate new token (classic)"
|
||||
3. Give your token a descriptive name (e.g., "Aden Agent Framework")
|
||||
4. Select the following scopes:
|
||||
- `repo` - Full control of private repositories (includes all repo scopes)
|
||||
- `read:org` - Read org and team membership (optional, for org access)
|
||||
- `user` - Read user profile data (optional)
|
||||
5. Click "Generate token"
|
||||
6. Copy the token (starts with `ghp_`)
|
||||
|
||||
**Note:** Keep your token secure! It provides access to your GitHub account.
|
||||
|
||||
### Configuration
|
||||
|
||||
Set the token as an environment variable:
|
||||
|
||||
```bash
|
||||
export GITHUB_TOKEN=ghp_your_token_here
|
||||
```
|
||||
|
||||
Or configure via the credential store (recommended for production).
|
||||
|
||||
## Available Functions
|
||||
|
||||
### Repository Management
|
||||
|
||||
#### `github_list_repos`
|
||||
|
||||
List repositories for a user or the authenticated user.
|
||||
|
||||
**Parameters:**
|
||||
- `username` (str, optional): GitHub username (if None, lists authenticated user's repos)
|
||||
- `visibility` (str, optional): Repository visibility ("all", "public", "private", default "all")
|
||||
- `sort` (str, optional): Sort order ("created", "updated", "pushed", "full_name", default "updated")
|
||||
- `limit` (int, optional): Maximum number of repositories (1-100, default 30)
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": [
|
||||
{
|
||||
"id": 123456,
|
||||
"name": "my-repo",
|
||||
"full_name": "username/my-repo",
|
||||
"description": "A cool project",
|
||||
"private": False,
|
||||
"html_url": "https://github.com/username/my-repo",
|
||||
"stargazers_count": 42,
|
||||
"forks_count": 7
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
# List your repositories
|
||||
result = github_list_repos()
|
||||
|
||||
# List another user's public repositories
|
||||
result = github_list_repos(username="octocat", limit=10)
|
||||
```
|
||||
|
||||
#### `github_get_repo`
|
||||
|
||||
Get detailed information about a specific repository.
|
||||
|
||||
**Parameters:**
|
||||
- `owner` (str): Repository owner (username or organization)
|
||||
- `repo` (str): Repository name
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": {
|
||||
"id": 123456,
|
||||
"name": "my-repo",
|
||||
"full_name": "owner/my-repo",
|
||||
"description": "Project description",
|
||||
"private": False,
|
||||
"default_branch": "main",
|
||||
"stargazers_count": 100,
|
||||
"forks_count": 25,
|
||||
"language": "Python",
|
||||
"created_at": "2024-01-01T00:00:00Z",
|
||||
"updated_at": "2024-01-31T12:00:00Z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
result = github_get_repo(owner="adenhq", repo="hive")
|
||||
print(f"Stars: {result['data']['stargazers_count']}")
|
||||
```
|
||||
|
||||
#### `github_search_repos`
|
||||
|
||||
Search for repositories on GitHub.
|
||||
|
||||
**Parameters:**
|
||||
- `query` (str): Search query (supports GitHub search syntax)
|
||||
- `sort` (str, optional): Sort field ("stars", "forks", "updated")
|
||||
- `limit` (int, optional): Maximum results (1-100, default 30)
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": {
|
||||
"total_count": 1000,
|
||||
"items": [
|
||||
{
|
||||
"id": 123,
|
||||
"name": "awesome-python",
|
||||
"full_name": "user/awesome-python",
|
||||
"description": "A curated list",
|
||||
"stargazers_count": 5000
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
# Search for Python repos with many stars
|
||||
result = github_search_repos(
|
||||
query="language:python stars:>1000",
|
||||
sort="stars",
|
||||
limit=10
|
||||
)
|
||||
|
||||
# Search in a specific organization
|
||||
result = github_search_repos(query="org:adenhq agent")
|
||||
```
|
||||
|
||||
### Issue Management
|
||||
|
||||
#### `github_list_issues`
|
||||
|
||||
List issues for a repository.
|
||||
|
||||
**Parameters:**
|
||||
- `owner` (str): Repository owner
|
||||
- `repo` (str): Repository name
|
||||
- `state` (str, optional): Issue state ("open", "closed", "all", default "open")
|
||||
- `limit` (int, optional): Maximum issues (1-100, default 30)
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": [
|
||||
{
|
||||
"number": 42,
|
||||
"title": "Bug in feature X",
|
||||
"state": "open",
|
||||
"user": {"login": "username"},
|
||||
"labels": [{"name": "bug"}],
|
||||
"created_at": "2024-01-30T10:00:00Z",
|
||||
"html_url": "https://github.com/owner/repo/issues/42"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
# List open issues
|
||||
issues = github_list_issues(owner="adenhq", repo="hive", state="open")
|
||||
for issue in issues["data"]:
|
||||
print(f"#{issue['number']}: {issue['title']}")
|
||||
```
|
||||
|
||||
#### `github_get_issue`
|
||||
|
||||
Get a specific issue by number.
|
||||
|
||||
**Parameters:**
|
||||
- `owner` (str): Repository owner
|
||||
- `repo` (str): Repository name
|
||||
- `issue_number` (int): Issue number
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": {
|
||||
"number": 42,
|
||||
"title": "Issue title",
|
||||
"body": "Detailed description...",
|
||||
"state": "open",
|
||||
"user": {"login": "username"},
|
||||
"assignees": [],
|
||||
"labels": [{"name": "enhancement"}],
|
||||
"comments": 5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
issue = github_get_issue(owner="adenhq", repo="hive", issue_number=2805)
|
||||
print(issue["data"]["body"])
|
||||
```
|
||||
|
||||
#### `github_create_issue`
|
||||
|
||||
Create a new issue in a repository.
|
||||
|
||||
**Parameters:**
|
||||
- `owner` (str): Repository owner
|
||||
- `repo` (str): Repository name
|
||||
- `title` (str): Issue title
|
||||
- `body` (str, optional): Issue description (supports Markdown)
|
||||
- `labels` (list[str], optional): List of label names
|
||||
- `assignees` (list[str], optional): List of usernames to assign
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": {
|
||||
"number": 43,
|
||||
"title": "New issue",
|
||||
"html_url": "https://github.com/owner/repo/issues/43"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
result = github_create_issue(
|
||||
owner="myorg",
|
||||
repo="myrepo",
|
||||
title="Add new feature",
|
||||
body="## Description\n\nWe need to add...",
|
||||
labels=["enhancement", "help wanted"],
|
||||
assignees=["developer1"]
|
||||
)
|
||||
print(f"Created issue #{result['data']['number']}")
|
||||
```
|
||||
|
||||
#### `github_update_issue`
|
||||
|
||||
Update an existing issue.
|
||||
|
||||
**Parameters:**
|
||||
- `owner` (str): Repository owner
|
||||
- `repo` (str): Repository name
|
||||
- `issue_number` (int): Issue number
|
||||
- `title` (str, optional): New title
|
||||
- `body` (str, optional): New body
|
||||
- `state` (str, optional): New state ("open" or "closed")
|
||||
- `labels` (list[str], optional): New list of label names
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": {
|
||||
"number": 43,
|
||||
"title": "Updated title",
|
||||
"state": "closed"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
# Close an issue
|
||||
result = github_update_issue(
|
||||
owner="myorg",
|
||||
repo="myrepo",
|
||||
issue_number=43,
|
||||
state="closed",
|
||||
body="Fixed in PR #44"
|
||||
)
|
||||
```
|
||||
|
||||
### Pull Request Management
|
||||
|
||||
#### `github_list_pull_requests`
|
||||
|
||||
List pull requests for a repository.
|
||||
|
||||
**Parameters:**
|
||||
- `owner` (str): Repository owner
|
||||
- `repo` (str): Repository name
|
||||
- `state` (str, optional): PR state ("open", "closed", "all", default "open")
|
||||
- `limit` (int, optional): Maximum PRs (1-100, default 30)
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": [
|
||||
{
|
||||
"number": 10,
|
||||
"title": "Add new feature",
|
||||
"state": "open",
|
||||
"user": {"login": "contributor"},
|
||||
"head": {"ref": "feature-branch"},
|
||||
"base": {"ref": "main"},
|
||||
"html_url": "https://github.com/owner/repo/pull/10"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
prs = github_list_pull_requests(owner="adenhq", repo="hive", state="open")
|
||||
for pr in prs["data"]:
|
||||
print(f"PR #{pr['number']}: {pr['title']}")
|
||||
```
|
||||
|
||||
#### `github_get_pull_request`
|
||||
|
||||
Get a specific pull request.
|
||||
|
||||
**Parameters:**
|
||||
- `owner` (str): Repository owner
|
||||
- `repo` (str): Repository name
|
||||
- `pull_number` (int): Pull request number
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": {
|
||||
"number": 10,
|
||||
"title": "PR title",
|
||||
"body": "Description...",
|
||||
"state": "open",
|
||||
"merged": False,
|
||||
"draft": False,
|
||||
"head": {"ref": "feature"},
|
||||
"base": {"ref": "main"}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
pr = github_get_pull_request(owner="adenhq", repo="hive", pull_number=2814)
|
||||
print(f"PR by {pr['data']['user']['login']}")
|
||||
```
|
||||
|
||||
#### `github_create_pull_request`
|
||||
|
||||
Create a new pull request.
|
||||
|
||||
**Parameters:**
|
||||
- `owner` (str): Repository owner
|
||||
- `repo` (str): Repository name
|
||||
- `title` (str): Pull request title
|
||||
- `head` (str): Branch with your changes (e.g., "my-feature")
|
||||
- `base` (str): Branch to merge into (e.g., "main")
|
||||
- `body` (str, optional): Pull request description (supports Markdown)
|
||||
- `draft` (bool, optional): Create as draft PR (default False)
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": {
|
||||
"number": 11,
|
||||
"title": "New PR",
|
||||
"html_url": "https://github.com/owner/repo/pull/11"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
result = github_create_pull_request(
|
||||
owner="myorg",
|
||||
repo="myrepo",
|
||||
title="feat: Add GitHub integration tool",
|
||||
head="feature/github-tool",
|
||||
base="main",
|
||||
body="## Summary\n\n- Implements GitHub API integration\n- Adds 30+ tests",
|
||||
draft=False
|
||||
)
|
||||
print(f"Created PR: {result['data']['html_url']}")
|
||||
```
|
||||
|
||||
### Search
|
||||
|
||||
#### `github_search_code`
|
||||
|
||||
Search code across GitHub.
|
||||
|
||||
**Parameters:**
|
||||
- `query` (str): Search query (supports GitHub code search syntax)
|
||||
- `limit` (int, optional): Maximum results (1-100, default 30)
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": {
|
||||
"total_count": 50,
|
||||
"items": [
|
||||
{
|
||||
"name": "example.py",
|
||||
"path": "src/example.py",
|
||||
"repository": {
|
||||
"full_name": "owner/repo"
|
||||
},
|
||||
"html_url": "https://github.com/owner/repo/blob/main/src/example.py"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
# Search for function usage
|
||||
result = github_search_code(
|
||||
query="register_tools language:python repo:adenhq/hive"
|
||||
)
|
||||
|
||||
# Search for specific code pattern
|
||||
result = github_search_code(query="FastMCP extension:py")
|
||||
```
|
||||
|
||||
### Branch Management
|
||||
|
||||
#### `github_list_branches`
|
||||
|
||||
List branches for a repository.
|
||||
|
||||
**Parameters:**
|
||||
- `owner` (str): Repository owner
|
||||
- `repo` (str): Repository name
|
||||
- `limit` (int, optional): Maximum branches (1-100, default 30)
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": [
|
||||
{
|
||||
"name": "main",
|
||||
"protected": True,
|
||||
"commit": {"sha": "abc123..."}
|
||||
},
|
||||
{
|
||||
"name": "develop",
|
||||
"protected": False
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
branches = github_list_branches(owner="adenhq", repo="hive")
|
||||
for branch in branches["data"]:
|
||||
print(f"Branch: {branch['name']}")
|
||||
```
|
||||
|
||||
#### `github_get_branch`
|
||||
|
||||
Get information about a specific branch.
|
||||
|
||||
**Parameters:**
|
||||
- `owner` (str): Repository owner
|
||||
- `repo` (str): Repository name
|
||||
- `branch` (str): Branch name
|
||||
|
||||
**Returns:**
|
||||
```python
|
||||
{
|
||||
"success": True,
|
||||
"data": {
|
||||
"name": "main",
|
||||
"protected": True,
|
||||
"commit": {
|
||||
"sha": "abc123...",
|
||||
"commit": {
|
||||
"message": "Latest commit message"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
main_branch = github_get_branch(owner="adenhq", repo="hive", branch="main")
|
||||
print(f"Latest commit: {main_branch['data']['commit']['sha']}")
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
All functions return a dict with an `error` key if something goes wrong:
|
||||
|
||||
```python
|
||||
{
|
||||
"error": "GitHub API error (HTTP 404): Not Found"
|
||||
}
|
||||
```
|
||||
|
||||
Common errors:
|
||||
- `GitHub credentials not configured` - No GitHub token was provided via the environment or credential store
|
||||
- `Invalid or expired GitHub token` - Token authentication failed (401)
|
||||
- `Forbidden` - Insufficient permissions or rate limit exceeded (403)
|
||||
- `Resource not found` - Repository, issue, or PR doesn't exist (404)
|
||||
- `Validation error` - Invalid request parameters (422)
|
||||
- `Request timed out` - Network timeout
|
||||
- `Network error` - Connection issues
|
||||
|
||||
## Security
|
||||
|
||||
- Personal Access Tokens are never logged or exposed
|
||||
- All API calls use HTTPS
|
||||
- Tokens are retrieved from secure credential store or environment variables
|
||||
- Fine-grained permissions can be configured via GitHub token scopes
|
||||
|
||||
## Use Cases
|
||||
|
||||
### Automated Issue Management
|
||||
```python
|
||||
# Create issues from bug reports
|
||||
github_create_issue(
|
||||
owner="myorg",
|
||||
repo="myapp",
|
||||
title="Bug: Login fails on mobile",
|
||||
body="## Steps to reproduce\n1. Open app on mobile...",
|
||||
labels=["bug", "mobile"]
|
||||
)
|
||||
```
|
||||
|
||||
### CI/CD Integration
|
||||
```python
|
||||
# Create PR after automated changes
|
||||
github_create_pull_request(
|
||||
owner="myorg",
|
||||
repo="myrepo",
|
||||
title="chore: Update dependencies",
|
||||
head="bot/update-deps",
|
||||
base="main",
|
||||
body="Automated dependency updates"
|
||||
)
|
||||
```
|
||||
|
||||
### Repository Analytics
|
||||
```python
|
||||
# Analyze repository activity
|
||||
repo = github_get_repo(owner="adenhq", repo="hive")
|
||||
issues = github_list_issues(owner="adenhq", repo="hive", state="open")
|
||||
prs = github_list_pull_requests(owner="adenhq", repo="hive", state="open")
|
||||
|
||||
print(f"Stars: {repo['data']['stargazers_count']}")
|
||||
print(f"Open Issues: {len(issues['data'])}")
|
||||
print(f"Open PRs: {len(prs['data'])}")
|
||||
```
|
||||
|
||||
### Code Discovery
|
||||
```python
|
||||
# Find examples of API usage
|
||||
results = github_search_code(
|
||||
query="register_tools language:python",
|
||||
limit=50
|
||||
)
|
||||
for item in results["data"]["items"]:
|
||||
print(f"Found in: {item['repository']['full_name']}")
|
||||
```
|
||||
|
||||
### Project Automation
|
||||
```python
|
||||
# Auto-close stale issues
|
||||
issues = github_list_issues(owner="myorg", repo="myrepo", state="open")
|
||||
for issue in issues["data"]:
|
||||
# Check if stale (custom logic)
|
||||
if is_stale(issue):
|
||||
github_update_issue(
|
||||
owner="myorg",
|
||||
repo="myrepo",
|
||||
issue_number=issue["number"],
|
||||
state="closed",
|
||||
body="Closing due to inactivity"
|
||||
)
|
||||
```
|
||||
|
||||
## Rate Limits
|
||||
|
||||
GitHub enforces rate limits on API calls:
|
||||
- **Authenticated requests**: 5,000 requests per hour
|
||||
- **Search API**: 30 requests per minute
|
||||
- **Unauthenticated requests**: 60 requests per hour (not applicable with token)
|
||||
|
||||
The tool handles rate limit errors gracefully with appropriate error messages. Monitor your usage at: https://api.github.com/rate_limit
|
||||
|
||||
## GitHub Search Syntax
|
||||
|
||||
For `github_search_repos` and `github_search_code`, you can use advanced search qualifiers:
|
||||
|
||||
### Repository Search
|
||||
- `language:python` - Filter by language
|
||||
- `stars:>1000` - Repositories with more than 1000 stars
|
||||
- `forks:>100` - Repositories with more than 100 forks
|
||||
- `org:adenhq` - Search within an organization
|
||||
- `topic:machine-learning` - Filter by topic
|
||||
- `created:>2024-01-01` - Created after date
|
||||
|
||||
### Code Search
|
||||
- `repo:owner/repo` - Search in specific repository
|
||||
- `extension:py` - Filter by file extension
|
||||
- `path:src/` - Search in specific path
|
||||
- `language:python` - Filter by language
|
||||
|
||||
Examples:
|
||||
```python
|
||||
# Find popular Python ML projects
|
||||
github_search_repos(
|
||||
query="language:python topic:machine-learning stars:>5000",
|
||||
sort="stars"
|
||||
)
|
||||
|
||||
# Find FastMCP usage examples
|
||||
github_search_code(
|
||||
query="FastMCP extension:py"
|
||||
)
|
||||
```
|
||||
@@ -0,0 +1,5 @@
|
||||
"""GitHub Tool package."""
|
||||
|
||||
from .github_tool import register_tools
|
||||
|
||||
__all__ = ["register_tools"]
|
||||
@@ -0,0 +1,818 @@
|
||||
"""
|
||||
GitHub Tool - Interact with GitHub repositories, issues, and pull requests.
|
||||
|
||||
Supports:
|
||||
- Personal Access Tokens (GITHUB_TOKEN / ghp_...)
|
||||
- OAuth tokens via the credential store
|
||||
|
||||
API Reference: https://docs.github.com/en/rest
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import httpx
|
||||
from fastmcp import FastMCP
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from aden_tools.credentials import CredentialStoreAdapter
|
||||
|
||||
GITHUB_API_BASE = "https://api.github.com"
|
||||
|
||||
|
||||
def _sanitize_path_param(param: str, param_name: str = "parameter") -> str:
|
||||
"""
|
||||
Sanitize URL path parameters to prevent path traversal.
|
||||
|
||||
Args:
|
||||
param: The parameter value to sanitize
|
||||
param_name: Name of the parameter (for error messages)
|
||||
|
||||
Returns:
|
||||
The sanitized parameter
|
||||
|
||||
Raises:
|
||||
ValueError: If parameter contains invalid characters
|
||||
"""
|
||||
if "/" in param or ".." in param:
|
||||
raise ValueError(f"Invalid {param_name}: cannot contain '/' or '..'")
|
||||
return param
|
||||
|
||||
|
||||
def _sanitize_error_message(error: Exception) -> str:
|
||||
"""
|
||||
Sanitize error messages to prevent token leaks.
|
||||
|
||||
httpx.RequestError can include headers in the exception message,
|
||||
which may expose the Bearer token.
|
||||
|
||||
Args:
|
||||
error: The exception to sanitize
|
||||
|
||||
Returns:
|
||||
A safe error message without sensitive information
|
||||
"""
|
||||
error_str = str(error)
|
||||
# Remove any Authorization headers or Bearer tokens
|
||||
if "Authorization" in error_str or "Bearer" in error_str:
|
||||
return "Network error occurred"
|
||||
return f"Network error: {error_str}"
|
||||
|
||||
|
||||
class _GitHubClient:
    """Internal client wrapping GitHub REST API v3 calls.

    Each public method performs one synchronous HTTPS request against
    ``GITHUB_API_BASE`` and returns either ``{"success": True, "data": ...}``
    or ``{"error": "..."}`` (never raises for HTTP-level failures; network
    exceptions propagate to the caller).
    """

    # Seconds before an HTTP request is abandoned; shared by every call.
    _TIMEOUT = 30.0

    def __init__(self, token: str):
        # PAT or OAuth token; never logged (see _sanitize_error_message).
        self._token = token

    @property
    def _headers(self) -> dict[str, str]:
        """Standard GitHub REST v3 headers, including authentication."""
        return {
            "Authorization": f"Bearer {self._token}",
            "Accept": "application/vnd.github+json",
            "X-GitHub-Api-Version": "2022-11-28",
        }

    @staticmethod
    def _per_page(limit: int) -> int:
        """Clamp *limit* to GitHub's documented 1-100 ``per_page`` range.

        The tool's public contract documents limits of 1-100; without the
        lower clamp a zero or negative limit would be forwarded verbatim to
        the API and yield an empty or invalid request.
        """
        return max(1, min(limit, 100))

    def _handle_response(self, response: httpx.Response) -> dict[str, Any]:
        """Translate a GitHub API response into the tool's result dict."""
        if response.status_code == 401:
            return {"error": "Invalid or expired GitHub token"}
        if response.status_code == 403:
            # GitHub uses 403 both for missing scopes and for rate limiting.
            return {"error": "Forbidden - check token permissions or rate limit"}
        if response.status_code == 404:
            return {"error": "Resource not found"}
        if response.status_code == 422:
            try:
                detail = response.json().get("message", "Validation failed")
            except Exception:
                detail = "Validation failed"
            return {"error": f"Validation error: {detail}"}
        if response.status_code >= 400:
            try:
                detail = response.json().get("message", response.text)
            except Exception:
                detail = response.text
            return {"error": f"GitHub API error (HTTP {response.status_code}): {detail}"}

        try:
            return {"success": True, "data": response.json()}
        except Exception:
            # Some success responses (e.g. 204 No Content) carry no JSON body.
            return {"success": True, "data": {}}

    # --- Repositories ---

    def list_repos(
        self,
        username: str | None = None,
        visibility: str = "all",
        sort: str = "updated",
        limit: int = 30,
    ) -> dict[str, Any]:
        """List repositories for a user or the authenticated user."""
        if username:
            username = _sanitize_path_param(username, "username")
            url = f"{GITHUB_API_BASE}/users/{username}/repos"
        else:
            # No username: list the token owner's own repositories.
            url = f"{GITHUB_API_BASE}/user/repos"

        params = {
            "visibility": visibility,
            "sort": sort,
            "per_page": self._per_page(limit),
        }

        response = httpx.get(
            url,
            headers=self._headers,
            params=params,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    def get_repo(
        self,
        owner: str,
        repo: str,
    ) -> dict[str, Any]:
        """Get repository information."""
        owner = _sanitize_path_param(owner, "owner")
        repo = _sanitize_path_param(repo, "repo")
        response = httpx.get(
            f"{GITHUB_API_BASE}/repos/{owner}/{repo}",
            headers=self._headers,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    def search_repos(
        self,
        query: str,
        sort: str | None = None,
        limit: int = 30,
    ) -> dict[str, Any]:
        """Search for repositories using GitHub search syntax."""
        params: dict[str, Any] = {
            "q": query,
            "per_page": self._per_page(limit),
        }
        if sort:
            params["sort"] = sort

        response = httpx.get(
            f"{GITHUB_API_BASE}/search/repositories",
            headers=self._headers,
            params=params,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    # --- Issues ---

    def list_issues(
        self,
        owner: str,
        repo: str,
        state: str = "open",
        limit: int = 30,
    ) -> dict[str, Any]:
        """List issues for a repository."""
        owner = _sanitize_path_param(owner, "owner")
        repo = _sanitize_path_param(repo, "repo")
        params = {
            "state": state,
            "per_page": self._per_page(limit),
        }

        response = httpx.get(
            f"{GITHUB_API_BASE}/repos/{owner}/{repo}/issues",
            headers=self._headers,
            params=params,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    def get_issue(
        self,
        owner: str,
        repo: str,
        issue_number: int,
    ) -> dict[str, Any]:
        """Get a specific issue by number."""
        owner = _sanitize_path_param(owner, "owner")
        repo = _sanitize_path_param(repo, "repo")
        response = httpx.get(
            f"{GITHUB_API_BASE}/repos/{owner}/{repo}/issues/{issue_number}",
            headers=self._headers,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    def create_issue(
        self,
        owner: str,
        repo: str,
        title: str,
        body: str | None = None,
        labels: list[str] | None = None,
        assignees: list[str] | None = None,
    ) -> dict[str, Any]:
        """Create a new issue; only provided optional fields are sent."""
        owner = _sanitize_path_param(owner, "owner")
        repo = _sanitize_path_param(repo, "repo")
        payload: dict[str, Any] = {"title": title}
        if body:
            payload["body"] = body
        if labels:
            payload["labels"] = labels
        if assignees:
            payload["assignees"] = assignees

        response = httpx.post(
            f"{GITHUB_API_BASE}/repos/{owner}/{repo}/issues",
            headers=self._headers,
            json=payload,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    def update_issue(
        self,
        owner: str,
        repo: str,
        issue_number: int,
        title: str | None = None,
        body: str | None = None,
        state: str | None = None,
        labels: list[str] | None = None,
    ) -> dict[str, Any]:
        """Update an existing issue (PATCH; only supplied fields change)."""
        owner = _sanitize_path_param(owner, "owner")
        repo = _sanitize_path_param(repo, "repo")
        payload: dict[str, Any] = {}
        if title:
            payload["title"] = title
        # "is not None" so an explicit empty string clears the body,
        # and an explicit empty list clears the labels.
        if body is not None:
            payload["body"] = body
        if state:
            payload["state"] = state
        if labels is not None:
            payload["labels"] = labels

        response = httpx.patch(
            f"{GITHUB_API_BASE}/repos/{owner}/{repo}/issues/{issue_number}",
            headers=self._headers,
            json=payload,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    # --- Pull Requests ---

    def list_pull_requests(
        self,
        owner: str,
        repo: str,
        state: str = "open",
        limit: int = 30,
    ) -> dict[str, Any]:
        """List pull requests for a repository."""
        owner = _sanitize_path_param(owner, "owner")
        repo = _sanitize_path_param(repo, "repo")
        params = {
            "state": state,
            "per_page": self._per_page(limit),
        }

        response = httpx.get(
            f"{GITHUB_API_BASE}/repos/{owner}/{repo}/pulls",
            headers=self._headers,
            params=params,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    def get_pull_request(
        self,
        owner: str,
        repo: str,
        pull_number: int,
    ) -> dict[str, Any]:
        """Get a specific pull request by number."""
        owner = _sanitize_path_param(owner, "owner")
        repo = _sanitize_path_param(repo, "repo")
        response = httpx.get(
            f"{GITHUB_API_BASE}/repos/{owner}/{repo}/pulls/{pull_number}",
            headers=self._headers,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    def create_pull_request(
        self,
        owner: str,
        repo: str,
        title: str,
        head: str,
        base: str,
        body: str | None = None,
        draft: bool = False,
    ) -> dict[str, Any]:
        """Create a new pull request from *head* into *base*."""
        owner = _sanitize_path_param(owner, "owner")
        repo = _sanitize_path_param(repo, "repo")
        payload: dict[str, Any] = {
            "title": title,
            "head": head,
            "base": base,
            "draft": draft,
        }
        if body:
            payload["body"] = body

        response = httpx.post(
            f"{GITHUB_API_BASE}/repos/{owner}/{repo}/pulls",
            headers=self._headers,
            json=payload,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    # --- Search ---

    def search_code(
        self,
        query: str,
        limit: int = 30,
    ) -> dict[str, Any]:
        """Search code across GitHub using code-search syntax."""
        params = {
            "q": query,
            "per_page": self._per_page(limit),
        }

        response = httpx.get(
            f"{GITHUB_API_BASE}/search/code",
            headers=self._headers,
            params=params,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    # --- Branches ---

    def list_branches(
        self,
        owner: str,
        repo: str,
        limit: int = 30,
    ) -> dict[str, Any]:
        """List branches for a repository."""
        owner = _sanitize_path_param(owner, "owner")
        repo = _sanitize_path_param(repo, "repo")
        params = {
            "per_page": self._per_page(limit),
        }

        response = httpx.get(
            f"{GITHUB_API_BASE}/repos/{owner}/{repo}/branches",
            headers=self._headers,
            params=params,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    def get_branch(
        self,
        owner: str,
        repo: str,
        branch: str,
    ) -> dict[str, Any]:
        """Get information about a specific branch."""
        owner = _sanitize_path_param(owner, "owner")
        repo = _sanitize_path_param(repo, "repo")
        branch = _sanitize_path_param(branch, "branch")
        response = httpx.get(
            f"{GITHUB_API_BASE}/repos/{owner}/{repo}/branches/{branch}",
            headers=self._headers,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)
|
||||
|
||||
|
||||
def register_tools(
|
||||
mcp: FastMCP,
|
||||
credentials: CredentialStoreAdapter | None = None,
|
||||
) -> None:
|
||||
"""Register GitHub tools with the MCP server."""
|
||||
|
||||
def _get_token() -> str | None:
|
||||
"""Get GitHub token from credential manager or environment."""
|
||||
if credentials is not None:
|
||||
token = credentials.get("github")
|
||||
if token is not None and not isinstance(token, str):
|
||||
raise TypeError(
|
||||
f"Expected string from credentials.get('github'), got {type(token).__name__}"
|
||||
)
|
||||
return token
|
||||
return os.getenv("GITHUB_TOKEN")
|
||||
|
||||
def _get_client() -> _GitHubClient | dict[str, str]:
|
||||
"""Get a GitHub client, or return an error dict if no credentials."""
|
||||
token = _get_token()
|
||||
if not token:
|
||||
return {
|
||||
"error": "GitHub credentials not configured",
|
||||
"help": (
|
||||
"Set GITHUB_TOKEN environment variable "
|
||||
"or configure via credential store. "
|
||||
"Get a token at https://github.com/settings/tokens"
|
||||
),
|
||||
}
|
||||
return _GitHubClient(token)
|
||||
|
||||
# --- Repositories ---
|
||||
|
||||
@mcp.tool()
|
||||
def github_list_repos(
|
||||
username: str | None = None,
|
||||
visibility: str = "all",
|
||||
sort: str = "updated",
|
||||
limit: int = 30,
|
||||
) -> dict:
|
||||
"""
|
||||
List repositories for a user or the authenticated user.
|
||||
|
||||
Args:
|
||||
username: GitHub username (if None, lists authenticated user's repos)
|
||||
visibility: Repository visibility filter ("all", "public", "private")
|
||||
sort: Sort order ("created", "updated", "pushed", "full_name")
|
||||
limit: Maximum number of repositories to return (1-100, default 30)
|
||||
|
||||
Returns:
|
||||
Dict with list of repositories or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.list_repos(username, visibility, sort, limit)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
@mcp.tool()
|
||||
def github_get_repo(
|
||||
owner: str,
|
||||
repo: str,
|
||||
) -> dict:
|
||||
"""
|
||||
Get information about a specific repository.
|
||||
|
||||
Args:
|
||||
owner: Repository owner (username or organization)
|
||||
repo: Repository name
|
||||
|
||||
Returns:
|
||||
Dict with repository information or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.get_repo(owner, repo)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
@mcp.tool()
|
||||
def github_search_repos(
|
||||
query: str,
|
||||
sort: str | None = None,
|
||||
limit: int = 30,
|
||||
) -> dict:
|
||||
"""
|
||||
Search for repositories on GitHub.
|
||||
|
||||
Args:
|
||||
query: Search query (e.g., "language:python stars:>1000")
|
||||
sort: Sort field ("stars", "forks", "updated")
|
||||
limit: Maximum number of results (1-100, default 30)
|
||||
|
||||
Returns:
|
||||
Dict with search results or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.search_repos(query, sort, limit)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
# --- Issues ---
|
||||
|
||||
@mcp.tool()
|
||||
def github_list_issues(
|
||||
owner: str,
|
||||
repo: str,
|
||||
state: str = "open",
|
||||
limit: int = 30,
|
||||
) -> dict:
|
||||
"""
|
||||
List issues for a repository.
|
||||
|
||||
Args:
|
||||
owner: Repository owner
|
||||
repo: Repository name
|
||||
state: Issue state ("open", "closed", "all")
|
||||
limit: Maximum number of issues to return (1-100, default 30)
|
||||
|
||||
Returns:
|
||||
Dict with list of issues or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.list_issues(owner, repo, state, limit)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
@mcp.tool()
|
||||
def github_get_issue(
|
||||
owner: str,
|
||||
repo: str,
|
||||
issue_number: int,
|
||||
) -> dict:
|
||||
"""
|
||||
Get a specific issue.
|
||||
|
||||
Args:
|
||||
owner: Repository owner
|
||||
repo: Repository name
|
||||
issue_number: Issue number
|
||||
|
||||
Returns:
|
||||
Dict with issue information or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.get_issue(owner, repo, issue_number)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
@mcp.tool()
|
||||
def github_create_issue(
|
||||
owner: str,
|
||||
repo: str,
|
||||
title: str,
|
||||
body: str | None = None,
|
||||
labels: list[str] | None = None,
|
||||
assignees: list[str] | None = None,
|
||||
) -> dict:
|
||||
"""
|
||||
Create a new issue in a repository.
|
||||
|
||||
Args:
|
||||
owner: Repository owner
|
||||
repo: Repository name
|
||||
title: Issue title
|
||||
body: Issue body/description (supports Markdown)
|
||||
labels: List of label names to apply
|
||||
assignees: List of usernames to assign
|
||||
|
||||
Returns:
|
||||
Dict with created issue information or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.create_issue(owner, repo, title, body, labels, assignees)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
@mcp.tool()
|
||||
def github_update_issue(
|
||||
owner: str,
|
||||
repo: str,
|
||||
issue_number: int,
|
||||
title: str | None = None,
|
||||
body: str | None = None,
|
||||
state: str | None = None,
|
||||
labels: list[str] | None = None,
|
||||
) -> dict:
|
||||
"""
|
||||
Update an existing issue.
|
||||
|
||||
Args:
|
||||
owner: Repository owner
|
||||
repo: Repository name
|
||||
issue_number: Issue number
|
||||
title: New issue title
|
||||
body: New issue body
|
||||
state: New state ("open" or "closed")
|
||||
labels: New list of label names
|
||||
|
||||
Returns:
|
||||
Dict with updated issue information or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.update_issue(owner, repo, issue_number, title, body, state, labels)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
# --- Pull Requests ---
|
||||
|
||||
@mcp.tool()
|
||||
def github_list_pull_requests(
|
||||
owner: str,
|
||||
repo: str,
|
||||
state: str = "open",
|
||||
limit: int = 30,
|
||||
) -> dict:
|
||||
"""
|
||||
List pull requests for a repository.
|
||||
|
||||
Args:
|
||||
owner: Repository owner
|
||||
repo: Repository name
|
||||
state: PR state ("open", "closed", "all")
|
||||
limit: Maximum number of PRs to return (1-100, default 30)
|
||||
|
||||
Returns:
|
||||
Dict with list of pull requests or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.list_pull_requests(owner, repo, state, limit)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
@mcp.tool()
|
||||
def github_get_pull_request(
|
||||
owner: str,
|
||||
repo: str,
|
||||
pull_number: int,
|
||||
) -> dict:
|
||||
"""
|
||||
Get a specific pull request.
|
||||
|
||||
Args:
|
||||
owner: Repository owner
|
||||
repo: Repository name
|
||||
pull_number: Pull request number
|
||||
|
||||
Returns:
|
||||
Dict with pull request information or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.get_pull_request(owner, repo, pull_number)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
@mcp.tool()
|
||||
def github_create_pull_request(
|
||||
owner: str,
|
||||
repo: str,
|
||||
title: str,
|
||||
head: str,
|
||||
base: str,
|
||||
body: str | None = None,
|
||||
draft: bool = False,
|
||||
) -> dict:
|
||||
"""
|
||||
Create a new pull request.
|
||||
|
||||
Args:
|
||||
owner: Repository owner
|
||||
repo: Repository name
|
||||
title: Pull request title
|
||||
head: The name of the branch where your changes are (e.g., "my-feature")
|
||||
base: The name of the branch you want to merge into (e.g., "main")
|
||||
body: Pull request description (supports Markdown)
|
||||
draft: Whether to create as a draft PR
|
||||
|
||||
Returns:
|
||||
Dict with created pull request information or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.create_pull_request(owner, repo, title, head, base, body, draft)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
# --- Search ---
|
||||
|
||||
@mcp.tool()
|
||||
def github_search_code(
|
||||
query: str,
|
||||
limit: int = 30,
|
||||
) -> dict:
|
||||
"""
|
||||
Search code across GitHub.
|
||||
|
||||
Args:
|
||||
query: Search query (e.g., "addClass repo:jquery/jquery")
|
||||
limit: Maximum number of results (1-100, default 30)
|
||||
|
||||
Returns:
|
||||
Dict with search results or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.search_code(query, limit)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
# --- Branches ---
|
||||
|
||||
@mcp.tool()
|
||||
def github_list_branches(
|
||||
owner: str,
|
||||
repo: str,
|
||||
limit: int = 30,
|
||||
) -> dict:
|
||||
"""
|
||||
List branches for a repository.
|
||||
|
||||
Args:
|
||||
owner: Repository owner
|
||||
repo: Repository name
|
||||
limit: Maximum number of branches to return (1-100, default 30)
|
||||
|
||||
Returns:
|
||||
Dict with list of branches or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.list_branches(owner, repo, limit)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
|
||||
@mcp.tool()
|
||||
def github_get_branch(
|
||||
owner: str,
|
||||
repo: str,
|
||||
branch: str,
|
||||
) -> dict:
|
||||
"""
|
||||
Get information about a specific branch.
|
||||
|
||||
Args:
|
||||
owner: Repository owner
|
||||
repo: Repository name
|
||||
branch: Branch name
|
||||
|
||||
Returns:
|
||||
Dict with branch information or error
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
try:
|
||||
return client.get_branch(owner, repo, branch)
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": _sanitize_error_message(e)}
|
||||
@@ -415,13 +415,13 @@ class TestDealTools:
|
||||
|
||||
class TestHubSpotOAuth2Provider:
|
||||
def test_provider_id(self):
|
||||
from core.framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider
|
||||
from framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider
|
||||
|
||||
provider = HubSpotOAuth2Provider(client_id="cid", client_secret="csecret")
|
||||
assert provider.provider_id == "hubspot_oauth2"
|
||||
|
||||
def test_default_scopes(self):
|
||||
from core.framework.credentials.oauth2.hubspot_provider import (
|
||||
from framework.credentials.oauth2.hubspot_provider import (
|
||||
HUBSPOT_DEFAULT_SCOPES,
|
||||
HubSpotOAuth2Provider,
|
||||
)
|
||||
@@ -430,7 +430,7 @@ class TestHubSpotOAuth2Provider:
|
||||
assert provider.config.default_scopes == HUBSPOT_DEFAULT_SCOPES
|
||||
|
||||
def test_custom_scopes(self):
|
||||
from core.framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider
|
||||
from framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider
|
||||
|
||||
provider = HubSpotOAuth2Provider(
|
||||
client_id="cid",
|
||||
@@ -440,7 +440,7 @@ class TestHubSpotOAuth2Provider:
|
||||
assert provider.config.default_scopes == ["crm.objects.contacts.read"]
|
||||
|
||||
def test_endpoints(self):
|
||||
from core.framework.credentials.oauth2.hubspot_provider import (
|
||||
from framework.credentials.oauth2.hubspot_provider import (
|
||||
HUBSPOT_AUTHORIZATION_URL,
|
||||
HUBSPOT_TOKEN_URL,
|
||||
HubSpotOAuth2Provider,
|
||||
@@ -451,15 +451,15 @@ class TestHubSpotOAuth2Provider:
|
||||
assert provider.config.authorization_url == HUBSPOT_AUTHORIZATION_URL
|
||||
|
||||
def test_supported_types(self):
|
||||
from core.framework.credentials.models import CredentialType
|
||||
from core.framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider
|
||||
from framework.credentials.models import CredentialType
|
||||
from framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider
|
||||
|
||||
provider = HubSpotOAuth2Provider(client_id="cid", client_secret="csecret")
|
||||
assert CredentialType.OAUTH2 in provider.supported_types
|
||||
|
||||
def test_validate_no_access_token(self):
|
||||
from core.framework.credentials.models import CredentialObject
|
||||
from core.framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider
|
||||
from framework.credentials.models import CredentialObject
|
||||
from framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider
|
||||
|
||||
provider = HubSpotOAuth2Provider(client_id="cid", client_secret="csecret")
|
||||
cred = CredentialObject(id="test")
|
||||
|
||||
@@ -25,8 +25,6 @@ pip install playwright playwright-stealth
|
||||
playwright install chromium
|
||||
```
|
||||
|
||||
In Docker, add `RUN playwright install chromium --with-deps` to the Dockerfile.
|
||||
|
||||
## Environment Variables
|
||||
|
||||
This tool does not require any environment variables.
|
||||
|
||||
@@ -0,0 +1,624 @@
|
||||
"""
|
||||
Tests for GitHub tool.
|
||||
|
||||
Covers:
|
||||
- _GitHubClient methods (repositories, issues, PRs, search, branches)
|
||||
- Error handling (API errors, timeout, network errors)
|
||||
- Credential retrieval (CredentialStoreAdapter vs env var)
|
||||
- All 15 MCP tool functions
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import httpx
|
||||
import pytest
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from aden_tools.tools.github_tool.github_tool import (
|
||||
_GitHubClient,
|
||||
register_tools,
|
||||
)
|
||||
|
||||
# --- _GitHubClient tests ---
|
||||
|
||||
|
||||
class TestGitHubClient:
|
||||
def setup_method(self):
|
||||
self.client = _GitHubClient("ghp_test_token")
|
||||
|
||||
def test_headers(self):
|
||||
headers = self.client._headers
|
||||
assert headers["Authorization"] == "Bearer ghp_test_token"
|
||||
assert "application/vnd.github+json" in headers["Accept"]
|
||||
|
||||
def test_handle_response_success(self):
|
||||
response = MagicMock()
|
||||
response.status_code = 200
|
||||
response.json.return_value = {"id": 123, "name": "test-repo"}
|
||||
result = self.client._handle_response(response)
|
||||
assert result["success"] is True
|
||||
assert result["data"]["name"] == "test-repo"
|
||||
|
||||
def test_handle_response_401(self):
|
||||
response = MagicMock()
|
||||
response.status_code = 401
|
||||
result = self.client._handle_response(response)
|
||||
assert "error" in result
|
||||
assert "Invalid or expired" in result["error"]
|
||||
|
||||
def test_handle_response_403(self):
|
||||
response = MagicMock()
|
||||
response.status_code = 403
|
||||
result = self.client._handle_response(response)
|
||||
assert "error" in result
|
||||
assert "Forbidden" in result["error"]
|
||||
|
||||
def test_handle_response_404(self):
|
||||
response = MagicMock()
|
||||
response.status_code = 404
|
||||
result = self.client._handle_response(response)
|
||||
assert "error" in result
|
||||
assert "not found" in result["error"]
|
||||
|
||||
def test_handle_response_422(self):
|
||||
response = MagicMock()
|
||||
response.status_code = 422
|
||||
response.json.return_value = {"message": "Validation failed"}
|
||||
result = self.client._handle_response(response)
|
||||
assert "error" in result
|
||||
assert "Validation" in result["error"]
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_repos(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = [
|
||||
{"id": 1, "name": "repo1", "full_name": "user/repo1"},
|
||||
{"id": 2, "name": "repo2", "full_name": "user/repo2"},
|
||||
]
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
result = self.client.list_repos(username="testuser")
|
||||
|
||||
mock_get.assert_called_once()
|
||||
assert result["success"] is True
|
||||
assert len(result["data"]) == 2
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_repos_authenticated_user(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = []
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
self.client.list_repos(username=None)
|
||||
|
||||
call_url = mock_get.call_args.args[0]
|
||||
assert "/user/repos" in call_url
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_get_repo(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"id": 123,
|
||||
"name": "test-repo",
|
||||
"full_name": "owner/test-repo",
|
||||
"description": "A test repository",
|
||||
}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
result = self.client.get_repo("owner", "test-repo")
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["data"]["name"] == "test-repo"
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_search_repos(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"total_count": 1,
|
||||
"items": [{"id": 123, "name": "test-repo"}],
|
||||
}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
result = self.client.search_repos("language:python")
|
||||
|
||||
assert result["success"] is True
|
||||
assert "items" in result["data"]
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_issues(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = [
|
||||
{"number": 1, "title": "Issue 1", "state": "open"},
|
||||
{"number": 2, "title": "Issue 2", "state": "open"},
|
||||
]
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
result = self.client.list_issues("owner", "repo", state="open")
|
||||
|
||||
assert result["success"] is True
|
||||
assert len(result["data"]) == 2
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_get_issue(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"number": 1,
|
||||
"title": "Test Issue",
|
||||
"body": "This is a test",
|
||||
}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
result = self.client.get_issue("owner", "repo", 1)
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["data"]["title"] == "Test Issue"
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.post")
|
||||
def test_create_issue(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 201
|
||||
mock_response.json.return_value = {
|
||||
"number": 42,
|
||||
"title": "New Issue",
|
||||
"body": "Description",
|
||||
}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
result = self.client.create_issue(
|
||||
"owner", "repo", "New Issue", body="Description", labels=["bug"]
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["data"]["number"] == 42
|
||||
call_json = mock_post.call_args.kwargs["json"]
|
||||
assert call_json["labels"] == ["bug"]
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.patch")
|
||||
def test_update_issue(self, mock_patch):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"number": 1,
|
||||
"title": "Updated Title",
|
||||
"state": "closed",
|
||||
}
|
||||
mock_patch.return_value = mock_response
|
||||
|
||||
result = self.client.update_issue("owner", "repo", 1, title="Updated Title", state="closed")
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["data"]["state"] == "closed"
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_pull_requests(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = [
|
||||
{"number": 1, "title": "PR 1", "state": "open"},
|
||||
]
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
result = self.client.list_pull_requests("owner", "repo")
|
||||
|
||||
assert result["success"] is True
|
||||
assert len(result["data"]) == 1
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_get_pull_request(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"number": 1,
|
||||
"title": "Test PR",
|
||||
"head": {"ref": "feature"},
|
||||
"base": {"ref": "main"},
|
||||
}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
result = self.client.get_pull_request("owner", "repo", 1)
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["data"]["title"] == "Test PR"
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.post")
|
||||
def test_create_pull_request(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 201
|
||||
mock_response.json.return_value = {
|
||||
"number": 10,
|
||||
"title": "New PR",
|
||||
"draft": False,
|
||||
}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
result = self.client.create_pull_request(
|
||||
"owner", "repo", "New PR", "feature-branch", "main", body="PR description"
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["data"]["number"] == 10
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_search_code(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"total_count": 5,
|
||||
"items": [{"name": "file.py", "path": "src/file.py"}],
|
||||
}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
result = self.client.search_code("addClass repo:jquery/jquery")
|
||||
|
||||
assert result["success"] is True
|
||||
assert "items" in result["data"]
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_branches(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = [
|
||||
{"name": "main", "protected": True},
|
||||
{"name": "develop", "protected": False},
|
||||
]
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
result = self.client.list_branches("owner", "repo")
|
||||
|
||||
assert result["success"] is True
|
||||
assert len(result["data"]) == 2
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_get_branch(self, mock_get):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"name": "main",
|
||||
"protected": True,
|
||||
"commit": {"sha": "abc123"},
|
||||
}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
result = self.client.get_branch("owner", "repo", "main")
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["data"]["name"] == "main"
|
||||
|
||||
|
||||
# --- Credential retrieval tests ---
|
||||
|
||||
|
||||
class TestCredentialRetrieval:
|
||||
@pytest.fixture
|
||||
def mcp(self):
|
||||
return FastMCP("test-server")
|
||||
|
||||
def test_no_credentials_returns_error(self, mcp):
|
||||
"""When no credentials are configured, tools return helpful error."""
|
||||
with patch.dict("os.environ", {}, clear=True):
|
||||
with patch("os.getenv", return_value=None):
|
||||
register_tools(mcp, credentials=None)
|
||||
list_repos = mcp._tool_manager._tools["github_list_repos"].fn
|
||||
|
||||
result = list_repos()
|
||||
|
||||
assert "error" in result
|
||||
assert "not configured" in result["error"]
|
||||
assert "help" in result
|
||||
|
||||
def test_env_var_token(self, mcp):
|
||||
"""Token from GITHUB_TOKEN env var is used."""
|
||||
with patch("os.getenv", return_value="ghp_env_token"):
|
||||
with patch("aden_tools.tools.github_tool.github_tool.httpx.get") as mock_get:
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = []
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
register_tools(mcp, credentials=None)
|
||||
list_repos = mcp._tool_manager._tools["github_list_repos"].fn
|
||||
|
||||
list_repos()
|
||||
|
||||
call_headers = mock_get.call_args.kwargs["headers"]
|
||||
assert call_headers["Authorization"] == "Bearer ghp_env_token"
|
||||
|
||||
def test_credential_store_token(self, mcp):
|
||||
"""Token from CredentialStoreAdapter is preferred."""
|
||||
mock_credentials = MagicMock()
|
||||
mock_credentials.get.return_value = "ghp_store_token"
|
||||
|
||||
with patch("aden_tools.tools.github_tool.github_tool.httpx.get") as mock_get:
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = []
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
register_tools(mcp, credentials=mock_credentials)
|
||||
list_repos = mcp._tool_manager._tools["github_list_repos"].fn
|
||||
|
||||
list_repos()
|
||||
|
||||
mock_credentials.get.assert_called_with("github")
|
||||
call_headers = mock_get.call_args.kwargs["headers"]
|
||||
assert call_headers["Authorization"] == "Bearer ghp_store_token"
|
||||
|
||||
|
||||
# --- MCP Tool function tests ---
|
||||
|
||||
|
||||
class TestGitHubListRepos:
|
||||
@pytest.fixture
|
||||
def mcp(self):
|
||||
return FastMCP("test-server")
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_repos_success(self, mock_get, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = [{"id": 1, "name": "test-repo"}]
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
list_repos = mcp._tool_manager._tools["github_list_repos"].fn
|
||||
|
||||
result = list_repos(username="testuser")
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_repos_timeout(self, mock_get, mcp):
|
||||
mock_get.side_effect = httpx.TimeoutException("Timeout")
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
list_repos = mcp._tool_manager._tools["github_list_repos"].fn
|
||||
|
||||
result = list_repos()
|
||||
|
||||
assert "error" in result
|
||||
assert "timed out" in result["error"]
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_repos_network_error(self, mock_get, mcp):
|
||||
mock_get.side_effect = httpx.RequestError("Network error")
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
list_repos = mcp._tool_manager._tools["github_list_repos"].fn
|
||||
|
||||
result = list_repos()
|
||||
|
||||
assert "error" in result
|
||||
assert "Network error" in result["error"]
|
||||
|
||||
|
||||
class TestGitHubGetRepo:
|
||||
@pytest.fixture
|
||||
def mcp(self):
|
||||
return FastMCP("test-server")
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_get_repo_success(self, mock_get, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"id": 1, "name": "test-repo"}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
get_repo = mcp._tool_manager._tools["github_get_repo"].fn
|
||||
|
||||
result = get_repo(owner="owner", repo="test-repo")
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
class TestGitHubSearchRepos:
|
||||
@pytest.fixture
|
||||
def mcp(self):
|
||||
return FastMCP("test-server")
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_search_repos_success(self, mock_get, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"total_count": 1, "items": []}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
search_repos = mcp._tool_manager._tools["github_search_repos"].fn
|
||||
|
||||
result = search_repos(query="python")
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
class TestGitHubIssues:
|
||||
@pytest.fixture
|
||||
def mcp(self):
|
||||
return FastMCP("test-server")
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_issues_success(self, mock_get, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = [{"number": 1, "title": "Test Issue"}]
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
list_issues = mcp._tool_manager._tools["github_list_issues"].fn
|
||||
|
||||
result = list_issues(owner="owner", repo="repo")
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_get_issue_success(self, mock_get, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"number": 1, "title": "Test"}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
get_issue = mcp._tool_manager._tools["github_get_issue"].fn
|
||||
|
||||
result = get_issue(owner="owner", repo="repo", issue_number=1)
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.post")
|
||||
def test_create_issue_success(self, mock_post, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 201
|
||||
mock_response.json.return_value = {"number": 1, "title": "New Issue"}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
create_issue = mcp._tool_manager._tools["github_create_issue"].fn
|
||||
|
||||
result = create_issue(owner="owner", repo="repo", title="New Issue")
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.patch")
|
||||
def test_update_issue_success(self, mock_patch, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"number": 1, "state": "closed"}
|
||||
mock_patch.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
update_issue = mcp._tool_manager._tools["github_update_issue"].fn
|
||||
|
||||
result = update_issue(owner="owner", repo="repo", issue_number=1, state="closed")
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
class TestGitHubPullRequests:
|
||||
@pytest.fixture
|
||||
def mcp(self):
|
||||
return FastMCP("test-server")
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_pull_requests_success(self, mock_get, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = [{"number": 1, "title": "Test PR"}]
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
list_prs = mcp._tool_manager._tools["github_list_pull_requests"].fn
|
||||
|
||||
result = list_prs(owner="owner", repo="repo")
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_get_pull_request_success(self, mock_get, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"number": 1, "title": "PR"}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
get_pr = mcp._tool_manager._tools["github_get_pull_request"].fn
|
||||
|
||||
result = get_pr(owner="owner", repo="repo", pull_number=1)
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.post")
|
||||
def test_create_pull_request_success(self, mock_post, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 201
|
||||
mock_response.json.return_value = {"number": 1, "title": "New PR"}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
create_pr = mcp._tool_manager._tools["github_create_pull_request"].fn
|
||||
|
||||
result = create_pr(
|
||||
owner="owner",
|
||||
repo="repo",
|
||||
title="New PR",
|
||||
head="feature",
|
||||
base="main",
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
class TestGitHubSearch:
|
||||
@pytest.fixture
|
||||
def mcp(self):
|
||||
return FastMCP("test-server")
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_search_code_success(self, mock_get, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"total_count": 1, "items": []}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
search_code = mcp._tool_manager._tools["github_search_code"].fn
|
||||
|
||||
result = search_code(query="addClass")
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
class TestGitHubBranches:
|
||||
@pytest.fixture
|
||||
def mcp(self):
|
||||
return FastMCP("test-server")
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_list_branches_success(self, mock_get, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = [{"name": "main"}]
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
list_branches = mcp._tool_manager._tools["github_list_branches"].fn
|
||||
|
||||
result = list_branches(owner="owner", repo="repo")
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
@patch("aden_tools.tools.github_tool.github_tool.httpx.get")
|
||||
def test_get_branch_success(self, mock_get, mcp):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"name": "main", "protected": True}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
with patch("os.getenv", return_value="ghp_test"):
|
||||
register_tools(mcp, credentials=None)
|
||||
get_branch = mcp._tool_manager._tools["github_get_branch"].fn
|
||||
|
||||
result = get_branch(owner="owner", repo="repo", branch="main")
|
||||
|
||||
assert result["success"] is True
|
||||
@@ -95,6 +95,7 @@ class TestPdfReadTool:
|
||||
def __init__(self, path: Path) -> None: # noqa: ARG002
|
||||
self.pages = [FakePage(f"Page {i + 1}") for i in range(50)]
|
||||
self.is_encrypted = False
|
||||
self.metadata = None
|
||||
|
||||
# Patch PdfReader used inside the tool so we don't need a real PDF
|
||||
from aden_tools.tools.pdf_read_tool import pdf_read_tool
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
"""Tests for web_scrape tool (FastMCP)."""
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from fastmcp import FastMCP
|
||||
@@ -15,60 +15,135 @@ def web_scrape_fn(mcp: FastMCP):
|
||||
return mcp._tool_manager._tools["web_scrape"].fn
|
||||
|
||||
|
||||
def _make_playwright_mocks(html, status=200, final_url="https://example.com/page"):
|
||||
"""Build a full playwright mock chain and return (context_manager, response, page)."""
|
||||
mock_response = MagicMock(
|
||||
status=status,
|
||||
url=final_url,
|
||||
headers={"content-type": "text/html; charset=utf-8"},
|
||||
)
|
||||
|
||||
mock_page = AsyncMock()
|
||||
mock_page.goto.return_value = mock_response
|
||||
mock_page.content.return_value = html
|
||||
mock_page.wait_for_timeout.return_value = None
|
||||
|
||||
mock_context = AsyncMock()
|
||||
mock_context.new_page.return_value = mock_page
|
||||
|
||||
mock_browser = AsyncMock()
|
||||
mock_browser.new_context.return_value = mock_context
|
||||
|
||||
mock_pw = MagicMock()
|
||||
mock_pw.chromium.launch = AsyncMock(return_value=mock_browser)
|
||||
|
||||
# async context manager for async_playwright()
|
||||
mock_cm = MagicMock()
|
||||
mock_cm.__aenter__ = AsyncMock(return_value=mock_pw)
|
||||
mock_cm.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
return mock_cm, mock_response, mock_page
|
||||
|
||||
|
||||
_PW_PATH = "aden_tools.tools.web_scrape_tool.web_scrape_tool.async_playwright"
|
||||
_STEALTH_PATH = "aden_tools.tools.web_scrape_tool.web_scrape_tool.Stealth"
|
||||
|
||||
|
||||
class TestWebScrapeTool:
|
||||
"""Tests for web_scrape tool."""
|
||||
|
||||
def test_url_auto_prefixed_with_https(self, web_scrape_fn):
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_url_auto_prefixed_with_https(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""URLs without scheme get https:// prefix."""
|
||||
# This will fail to connect, but we can verify the behavior
|
||||
result = web_scrape_fn(url="example.com")
|
||||
# Should either succeed or have a network error (not a validation error)
|
||||
assert isinstance(result, dict)
|
||||
html = "<html><body>Hello</body></html>"
|
||||
mock_cm, _, _ = _make_playwright_mocks(html, final_url="https://example.com")
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
def test_max_length_clamped_low(self, web_scrape_fn):
|
||||
result = await web_scrape_fn(url="example.com")
|
||||
assert isinstance(result, dict)
|
||||
assert "error" not in result
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_max_length_clamped_low(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""max_length below 1000 is clamped to 1000."""
|
||||
# Test with a very low max_length - implementation clamps to 1000
|
||||
result = web_scrape_fn(url="https://example.com", max_length=500)
|
||||
# Should not error due to invalid max_length
|
||||
assert isinstance(result, dict)
|
||||
html = "<html><body>Hello</body></html>"
|
||||
mock_cm, _, _ = _make_playwright_mocks(html, final_url="https://example.com")
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
def test_max_length_clamped_high(self, web_scrape_fn):
|
||||
result = await web_scrape_fn(url="https://example.com", max_length=500)
|
||||
assert isinstance(result, dict)
|
||||
assert "error" not in result
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_max_length_clamped_high(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""max_length above 500000 is clamped to 500000."""
|
||||
# Test with a very high max_length - implementation clamps to 500000
|
||||
result = web_scrape_fn(url="https://example.com", max_length=600000)
|
||||
# Should not error due to invalid max_length
|
||||
assert isinstance(result, dict)
|
||||
html = "<html><body>Hello</body></html>"
|
||||
mock_cm, _, _ = _make_playwright_mocks(html, final_url="https://example.com")
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
def test_valid_max_length_accepted(self, web_scrape_fn):
|
||||
result = await web_scrape_fn(url="https://example.com", max_length=600000)
|
||||
assert isinstance(result, dict)
|
||||
assert "error" not in result
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_valid_max_length_accepted(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""Valid max_length values are accepted."""
|
||||
result = web_scrape_fn(url="https://example.com", max_length=10000)
|
||||
assert isinstance(result, dict)
|
||||
html = "<html><body>Hello</body></html>"
|
||||
mock_cm, _, _ = _make_playwright_mocks(html, final_url="https://example.com")
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
def test_include_links_option(self, web_scrape_fn):
|
||||
result = await web_scrape_fn(url="https://example.com", max_length=10000)
|
||||
assert isinstance(result, dict)
|
||||
assert "error" not in result
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_include_links_option(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""include_links parameter is accepted."""
|
||||
result = web_scrape_fn(url="https://example.com", include_links=True)
|
||||
assert isinstance(result, dict)
|
||||
html = '<html><body><a href="/link">Link</a></body></html>'
|
||||
mock_cm, _, _ = _make_playwright_mocks(html, final_url="https://example.com")
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
def test_selector_option(self, web_scrape_fn):
|
||||
"""selector parameter is accepted."""
|
||||
result = web_scrape_fn(url="https://example.com", selector=".content")
|
||||
result = await web_scrape_fn(url="https://example.com", include_links=True)
|
||||
assert isinstance(result, dict)
|
||||
assert "error" not in result
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_selector_option(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""selector parameter is accepted."""
|
||||
html = '<html><body><div class="content">Content here</div></body></html>'
|
||||
mock_cm, _, _ = _make_playwright_mocks(html, final_url="https://example.com")
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
result = await web_scrape_fn(url="https://example.com", selector=".content")
|
||||
assert isinstance(result, dict)
|
||||
assert "error" not in result
|
||||
|
||||
|
||||
class TestWebScrapeToolLinkConversion:
|
||||
"""Tests for link URL conversion (relative to absolute)."""
|
||||
|
||||
def _mock_response(self, html_content, final_url="https://example.com/page"):
|
||||
"""Create a mock httpx response object."""
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.text = html_content
|
||||
mock_response.url = final_url
|
||||
mock_response.headers = {"content-type": "text/html; charset=utf-8"}
|
||||
return mock_response
|
||||
|
||||
@patch("aden_tools.tools.web_scrape_tool.web_scrape_tool.httpx.get")
|
||||
def test_relative_links_converted_to_absolute(self, mock_get, web_scrape_fn):
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_relative_links_converted_to_absolute(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""Relative URLs like ../page are converted to absolute URLs."""
|
||||
html = """
|
||||
<html>
|
||||
@@ -78,9 +153,11 @@ class TestWebScrapeToolLinkConversion:
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
mock_get.return_value = self._mock_response(html, "https://example.com/blog/post")
|
||||
mock_cm, _, _ = _make_playwright_mocks(html, final_url="https://example.com/blog/post")
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
result = web_scrape_fn(url="https://example.com/blog/post", include_links=True)
|
||||
result = await web_scrape_fn(url="https://example.com/blog/post", include_links=True)
|
||||
|
||||
assert "error" not in result
|
||||
assert "links" in result
|
||||
@@ -95,8 +172,10 @@ class TestWebScrapeToolLinkConversion:
|
||||
expected = "https://example.com/blog/page.html"
|
||||
assert hrefs["Next Page"] == expected, f"Got {hrefs['Next Page']}"
|
||||
|
||||
@patch("aden_tools.tools.web_scrape_tool.web_scrape_tool.httpx.get")
|
||||
def test_root_relative_links_converted(self, mock_get, web_scrape_fn):
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_root_relative_links_converted(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""Root-relative URLs like /about are converted to absolute URLs."""
|
||||
html = """
|
||||
<html>
|
||||
@@ -106,9 +185,11 @@ class TestWebScrapeToolLinkConversion:
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
mock_get.return_value = self._mock_response(html, "https://example.com/blog/post")
|
||||
mock_cm, _, _ = _make_playwright_mocks(html, final_url="https://example.com/blog/post")
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
result = web_scrape_fn(url="https://example.com/blog/post", include_links=True)
|
||||
result = await web_scrape_fn(url="https://example.com/blog/post", include_links=True)
|
||||
|
||||
assert "error" not in result
|
||||
assert "links" in result
|
||||
@@ -119,8 +200,10 @@ class TestWebScrapeToolLinkConversion:
|
||||
assert hrefs["About"] == "https://example.com/about"
|
||||
assert hrefs["Contact"] == "https://example.com/contact"
|
||||
|
||||
@patch("aden_tools.tools.web_scrape_tool.web_scrape_tool.httpx.get")
|
||||
def test_absolute_links_unchanged(self, mock_get, web_scrape_fn):
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_absolute_links_unchanged(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""Absolute URLs remain unchanged."""
|
||||
html = """
|
||||
<html>
|
||||
@@ -130,9 +213,11 @@ class TestWebScrapeToolLinkConversion:
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
mock_get.return_value = self._mock_response(html)
|
||||
mock_cm, _, _ = _make_playwright_mocks(html)
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
result = web_scrape_fn(url="https://example.com", include_links=True)
|
||||
result = await web_scrape_fn(url="https://example.com", include_links=True)
|
||||
|
||||
assert "error" not in result
|
||||
assert "links" in result
|
||||
@@ -143,8 +228,10 @@ class TestWebScrapeToolLinkConversion:
|
||||
assert hrefs["Other Site"] == "https://other.com"
|
||||
assert hrefs["Internal"] == "https://example.com/page"
|
||||
|
||||
@patch("aden_tools.tools.web_scrape_tool.web_scrape_tool.httpx.get")
|
||||
def test_links_after_redirects(self, mock_get, web_scrape_fn):
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_links_after_redirects(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""Links are resolved relative to final URL after redirects."""
|
||||
html = """
|
||||
<html>
|
||||
@@ -155,12 +242,14 @@ class TestWebScrapeToolLinkConversion:
|
||||
</html>
|
||||
"""
|
||||
# Mock redirect: request to /old/url redirects to /new/location
|
||||
mock_get.return_value = self._mock_response(
|
||||
mock_cm, _, _ = _make_playwright_mocks(
|
||||
html,
|
||||
final_url="https://example.com/new/location", # Final URL after redirect
|
||||
)
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
result = web_scrape_fn(url="https://example.com/old/url", include_links=True)
|
||||
result = await web_scrape_fn(url="https://example.com/old/url", include_links=True)
|
||||
|
||||
assert "error" not in result
|
||||
assert "links" in result
|
||||
@@ -173,8 +262,10 @@ class TestWebScrapeToolLinkConversion:
|
||||
)
|
||||
assert hrefs["Next"] == "https://example.com/new/next"
|
||||
|
||||
@patch("aden_tools.tools.web_scrape_tool.web_scrape_tool.httpx.get")
|
||||
def test_fragment_links_preserved(self, mock_get, web_scrape_fn):
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_fragment_links_preserved(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""Fragment links (anchors) are preserved."""
|
||||
html = """
|
||||
<html>
|
||||
@@ -184,9 +275,11 @@ class TestWebScrapeToolLinkConversion:
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
mock_get.return_value = self._mock_response(html, "https://example.com/page")
|
||||
mock_cm, _, _ = _make_playwright_mocks(html, final_url="https://example.com/page")
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
result = web_scrape_fn(url="https://example.com/page", include_links=True)
|
||||
result = await web_scrape_fn(url="https://example.com/page", include_links=True)
|
||||
|
||||
assert "error" not in result
|
||||
assert "links" in result
|
||||
@@ -197,8 +290,10 @@ class TestWebScrapeToolLinkConversion:
|
||||
assert hrefs["Section 1"] == "https://example.com/page#section1"
|
||||
assert hrefs["Page Section 2"] == "https://example.com/page#section2"
|
||||
|
||||
@patch("aden_tools.tools.web_scrape_tool.web_scrape_tool.httpx.get")
|
||||
def test_query_parameters_preserved(self, mock_get, web_scrape_fn):
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_query_parameters_preserved(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""Query parameters in URLs are preserved."""
|
||||
html = """
|
||||
<html>
|
||||
@@ -208,9 +303,11 @@ class TestWebScrapeToolLinkConversion:
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
mock_get.return_value = self._mock_response(html, "https://example.com/blog/post")
|
||||
mock_cm, _, _ = _make_playwright_mocks(html, final_url="https://example.com/blog/post")
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
result = web_scrape_fn(url="https://example.com/blog/post", include_links=True)
|
||||
result = await web_scrape_fn(url="https://example.com/blog/post", include_links=True)
|
||||
|
||||
assert "error" not in result
|
||||
assert "links" in result
|
||||
@@ -222,8 +319,10 @@ class TestWebScrapeToolLinkConversion:
|
||||
assert "q=test" in hrefs["Search"]
|
||||
assert "sort=date" in hrefs["Search"]
|
||||
|
||||
@patch("aden_tools.tools.web_scrape_tool.web_scrape_tool.httpx.get")
|
||||
def test_empty_href_skipped(self, mock_get, web_scrape_fn):
|
||||
@pytest.mark.asyncio
|
||||
@patch(_STEALTH_PATH)
|
||||
@patch(_PW_PATH)
|
||||
async def test_empty_href_skipped(self, mock_pw, mock_stealth, web_scrape_fn):
|
||||
"""Links with empty or whitespace text are skipped."""
|
||||
html = """
|
||||
<html>
|
||||
@@ -234,9 +333,11 @@ class TestWebScrapeToolLinkConversion:
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
mock_get.return_value = self._mock_response(html)
|
||||
mock_cm, _, _ = _make_playwright_mocks(html)
|
||||
mock_pw.return_value = mock_cm
|
||||
mock_stealth.return_value.apply_stealth_async = AsyncMock()
|
||||
|
||||
result = web_scrape_fn(url="https://example.com", include_links=True)
|
||||
result = await web_scrape_fn(url="https://example.com", include_links=True)
|
||||
|
||||
assert "error" not in result
|
||||
assert "links" in result
|
||||
|
||||
Reference in New Issue
Block a user