Merge pull request #2808 from TimothyZhang7/feature/credential-manager-aden-provider

Feature/credential manager aden provider
This commit is contained in:
Timothy @aden
2026-01-30 13:32:45 -08:00
committed by GitHub
53 changed files with 4343 additions and 2215 deletions
+10 -5
View File
@@ -24,12 +24,17 @@
"Bash(done)",
"Bash(xargs cat:*)",
"mcp__agent-builder__list_mcp_tools",
"mcp__agent-builder__add_mcp_server"
"mcp__agent-builder__add_mcp_server",
"mcp__agent-builder__check_missing_credentials",
"mcp__agent-builder__store_credential",
"mcp__agent-builder__list_stored_credentials",
"mcp__agent-builder__delete_stored_credential",
"mcp__agent-builder__verify_credentials",
"Bash(PYTHONPATH=/home/timothy/oss/hive/core:/home/timothy/oss/hive/exports python:*)",
"Bash(PYTHONPATH=core:exports:tools/src python -m hubspot_input:*)",
"mcp__agent-builder__export_graph"
]
},
"enabledMcpjsonServers": [
"agent-builder",
"tools"
],
"enabledMcpjsonServers": ["agent-builder", "tools"],
"enableAllProjectMcpServers": true
}
+7 -4
View File
@@ -11,6 +11,7 @@ metadata:
- building-agents-construction
- building-agents-patterns
- testing-agent
- setup-credentials
---
# Agent Development Workflow
@@ -21,10 +22,11 @@ Complete Standard Operating Procedure (SOP) for building production-ready goal-d
This workflow orchestrates specialized skills to take you from initial concept to production-ready agent:
1. **Understand Concepts** (5-10 min) → `/building-agents-core` (optional)
2. **Build Structure** (15-30 min) → `/building-agents-construction`
3. **Optimize Design** (10-15 min) → `/building-agents-patterns` (optional)
4. **Test & Validate** (20-40 min) → `/testing-agent`
1. **Understand Concepts** → `/building-agents-core` (optional)
2. **Build Structure** → `/building-agents-construction`
3. **Optimize Design** → `/building-agents-patterns` (optional)
4. **Setup Credentials** → `/setup-credentials` (if agent uses tools requiring API keys)
5. **Test & Validate** → `/testing-agent`
## When to Use This Workflow
@@ -44,6 +46,7 @@ Use this meta-skill when:
"Need to understand agent concepts" → building-agents-core
"Build a new agent" → building-agents-construction
"Optimize my agent design" → building-agents-patterns
"Set up API keys for my agent" → setup-credentials
"Test my agent" → testing-agent
"Not sure what I need" → Read phases below, then decide
"Agent has structure but needs implementation" → See agent directory STATUS.md
File diff suppressed because it is too large Load Diff
@@ -1,13 +1,30 @@
"""Runtime configuration."""
from dataclasses import dataclass
import json
from dataclasses import dataclass, field
from pathlib import Path
def _load_preferred_model() -> str:
"""Load preferred model from ~/.hive/configuration.json."""
config_path = Path.home() / ".hive" / "configuration.json"
if config_path.exists():
try:
with open(config_path) as f:
config = json.load(f)
llm = config.get("llm", {})
if llm.get("provider") and llm.get("model"):
return f"{llm['provider']}/{llm['model']}"
except Exception:
pass
return "anthropic/claude-sonnet-4-20250514"
@dataclass
class RuntimeConfig:
    """Runtime configuration for LLM calls (model selection and sampling)."""

    # NOTE(review): the duplicated `model` and `max_tokens` lines below look
    # like merged diff residue (old value immediately followed by the new
    # one). Python keeps the LAST assignment for each field, so the
    # effective defaults are `field(default_factory=_load_preferred_model)`
    # and 8192 — confirm and delete the stale lines.
    model: str = "groq/moonshotai/kimi-k2-instruct-0905"
    # Default model is resolved lazily from ~/.hive/configuration.json.
    model: str = field(default_factory=_load_preferred_model)
    temperature: float = 0.7
    max_tokens: int = 16384
    max_tokens: int = 8192
    # Optional explicit credentials/endpoint; None means "use environment".
    api_key: str | None = None
    api_base: str | None = None
+572
View File
@@ -0,0 +1,572 @@
---
name: setup-credentials
description: Set up and install credentials for an agent. Detects missing credentials from agent config, collects them from the user, and stores them securely in the encrypted credential store at ~/.hive/credentials.
license: Apache-2.0
metadata:
author: hive
version: "2.1"
type: utility
---
# Setup Credentials
Interactive credential setup for agents with multiple authentication options. Detects what's missing, offers auth method choices, validates with health checks, and stores credentials securely.
## When to Use
- Before running or testing an agent for the first time
- When `AgentRunner.run()` fails with "missing required credentials"
- When a user asks to configure credentials for an agent
- After building a new agent that uses tools requiring API keys
## Workflow
### Step 1: Identify the Agent
Determine which agent needs credentials. The user will either:
- Name the agent directly (e.g., "set up credentials for hubspot-agent")
- Have an agent directory open (check `exports/` for agent dirs)
- Be working on an agent in the current session
Locate the agent's directory under `exports/{agent_name}/`.
### Step 2: Detect Required Credentials
Read the agent's configuration to determine which tools and node types it uses:
```python
from core.framework.runner import AgentRunner
runner = AgentRunner.load("exports/{agent_name}")
validation = runner.validate()
# validation.missing_credentials contains env var names
# validation.warnings contains detailed messages with help URLs
```
Alternatively, check the credential store directly:
```python
from core.framework.credentials import CredentialStore
# Use encrypted storage (default: ~/.hive/credentials)
store = CredentialStore.with_encrypted_storage()
# Check what's available
available = store.list_credentials()
print(f"Available credentials: {available}")
# Check if specific credential exists
if store.is_available("hubspot"):
print("HubSpot credential found")
else:
print("HubSpot credential missing")
```
To see all known credential specs (for help URLs and setup instructions):
```python
from aden_tools.credentials import CREDENTIAL_SPECS
for name, spec in CREDENTIAL_SPECS.items():
print(f"{name}: env_var={spec.env_var}, aden={spec.aden_supported}")
```
### Step 3: Present Auth Options for Each Missing Credential
For each missing credential, check what authentication methods are available:
```python
from aden_tools.credentials import CREDENTIAL_SPECS
spec = CREDENTIAL_SPECS.get("hubspot")
if spec:
# Determine available auth options
auth_options = []
if spec.aden_supported:
auth_options.append("aden")
if spec.direct_api_key_supported:
auth_options.append("direct")
auth_options.append("custom") # Always available
# Get setup info
setup_info = {
"env_var": spec.env_var,
"description": spec.description,
"help_url": spec.help_url,
"api_key_instructions": spec.api_key_instructions,
}
```
Present the available options using AskUserQuestion:
```
Choose how to configure HUBSPOT_ACCESS_TOKEN:
1) Aden Authorization Server (Recommended)
Secure OAuth2 flow via integration.adenhq.com
- Quick setup with automatic token refresh
- No need to manage API keys manually
2) Direct API Key
Enter your own API key manually
- Requires creating a HubSpot Private App
- Full control over scopes and permissions
3) Custom Credential Store (Advanced)
Programmatic configuration for CI/CD
- For automated deployments
- Requires manual API calls
```
### Step 4: Execute Auth Flow Based on User Choice
#### Option 1: Aden Authorization Server
This is the recommended flow for supported integrations (HubSpot, etc.).
**How Aden OAuth Works:**
The ADEN_API_KEY represents a user who has already completed OAuth authorization on Aden's platform. When users sign up and connect integrations on Aden, those OAuth tokens are stored server-side. Having an ADEN_API_KEY means:
1. User has an Aden account
2. User has already authorized integrations (HubSpot, etc.) via OAuth on Aden
3. We just need to sync those credentials down to the local credential store
**4.1a. Check for ADEN_API_KEY**
```python
import os
aden_key = os.environ.get("ADEN_API_KEY")
```
If not set, guide user to get one from Aden (this is where they do OAuth):
```python
from aden_tools.credentials import open_browser, get_aden_setup_url
# Open browser to Aden - user will sign up and connect integrations there
url = get_aden_setup_url() # https://integration.adenhq.com/setup
success, msg = open_browser(url)
print("Please sign in to Aden and connect your integrations (HubSpot, etc.).")
print("Once done, copy your API key and return here.")
```
Ask user to provide the ADEN_API_KEY they received.
**4.1b. Save ADEN_API_KEY to Shell Config**
With user approval, persist ADEN_API_KEY to their shell config:
```python
from aden_tools.credentials import (
detect_shell,
add_env_var_to_shell_config,
get_shell_source_command,
)
shell_type = detect_shell() # 'bash', 'zsh', or 'unknown'
# Ask user for approval before modifying shell config
# If approved:
success, config_path = add_env_var_to_shell_config(
"ADEN_API_KEY",
user_provided_key,
comment="Aden authorization server API key"
)
if success:
source_cmd = get_shell_source_command()
print(f"Saved to {config_path}")
print(f"Run: {source_cmd}")
```
Also save to `~/.hive/configuration.json` for the framework:
```python
import json
from pathlib import Path
config_path = Path.home() / ".hive" / "configuration.json"
config = json.loads(config_path.read_text()) if config_path.exists() else {}
config["aden"] = {
"api_key_configured": True,
"api_url": "https://api.adenhq.com"
}
config_path.parent.mkdir(parents=True, exist_ok=True)
config_path.write_text(json.dumps(config, indent=2))
```
**4.1c. Sync Credentials from Aden Server**
Since the user has already authorized integrations on Aden, use the one-liner factory method:
```python
from core.framework.credentials import CredentialStore
# This single call handles everything:
# - Creates encrypted local storage at ~/.hive/credentials
# - Configures Aden client from ADEN_API_KEY env var
# - Syncs all credentials from Aden server automatically
store = CredentialStore.with_aden_sync(
base_url="https://api.adenhq.com",
auto_sync=True, # Syncs on creation
)
# Check what was synced
synced = store.list_credentials()
print(f"Synced credentials: {synced}")
# If the required credential wasn't synced, the user hasn't authorized it on Aden yet
if "hubspot" not in synced:
print("HubSpot not found in your Aden account.")
print("Please visit https://integration.adenhq.com to connect HubSpot, then try again.")
```
For more control over the sync process:
```python
from core.framework.credentials import CredentialStore
from core.framework.credentials.aden import (
AdenCredentialClient,
AdenClientConfig,
AdenSyncProvider,
)
# Create client (API key loaded from ADEN_API_KEY env var)
client = AdenCredentialClient(AdenClientConfig(
base_url="https://api.adenhq.com",
))
# Create provider and store
provider = AdenSyncProvider(client=client)
store = CredentialStore.with_encrypted_storage()
# Manual sync
synced_count = provider.sync_all(store)
print(f"Synced {synced_count} credentials from Aden")
```
**4.1d. Run Health Check**
```python
from aden_tools.credentials import check_credential_health
# Get the token from the store
cred = store.get_credential("hubspot")
token = cred.keys["access_token"].value.get_secret_value()
result = check_credential_health("hubspot", token)
if result.valid:
print("HubSpot credentials validated successfully!")
else:
print(f"Validation failed: {result.message}")
# Offer to retry the OAuth flow
```
#### Option 2: Direct API Key
For users who prefer manual API key management.
**4.2a. Show Setup Instructions**
```python
from aden_tools.credentials import CREDENTIAL_SPECS
spec = CREDENTIAL_SPECS.get("hubspot")
if spec and spec.api_key_instructions:
print(spec.api_key_instructions)
# Output:
# To get a HubSpot Private App token:
# 1. Go to HubSpot Settings > Integrations > Private Apps
# 2. Click "Create a private app"
# 3. Name your app (e.g., "Hive Agent")
# ...
if spec and spec.help_url:
print(f"More info: {spec.help_url}")
```
**4.2b. Collect API Key from User**
Use AskUserQuestion to securely collect the API key:
```
Please provide your HubSpot access token:
(This will be stored securely in ~/.hive/credentials)
```
**4.2c. Run Health Check Before Storing**
```python
from aden_tools.credentials import check_credential_health
result = check_credential_health("hubspot", user_provided_token)
if not result.valid:
print(f"Warning: {result.message}")
# Ask user if they want to:
# 1. Try a different token
# 2. Continue anyway (not recommended)
```
**4.2d. Store in Encrypted Credential Store**
```python
from core.framework.credentials import CredentialStore, CredentialObject, CredentialKey
from pydantic import SecretStr
store = CredentialStore.with_encrypted_storage()
cred = CredentialObject(
id="hubspot",
name="HubSpot Access Token",
keys={
"access_token": CredentialKey(
name="access_token",
value=SecretStr(user_provided_token),
)
},
)
store.save_credential(cred)
```
**4.2e. Export to Current Session**
```bash
export HUBSPOT_ACCESS_TOKEN="the-value"
```
#### Option 3: Custom Credential Store (Advanced)
For programmatic/CI/CD setups.
**4.3a. Show Documentation**
```
For advanced credential management, you can use the CredentialStore API directly:
from core.framework.credentials import CredentialStore, CredentialObject, CredentialKey
from pydantic import SecretStr
store = CredentialStore.with_encrypted_storage()
cred = CredentialObject(
id="hubspot",
name="HubSpot Access Token",
keys={"access_token": CredentialKey(name="access_token", value=SecretStr("..."))}
)
store.save_credential(cred)
For CI/CD environments:
- Set HIVE_CREDENTIAL_KEY for encryption
- Pre-populate ~/.hive/credentials programmatically
- Or use environment variables directly (HUBSPOT_ACCESS_TOKEN)
Documentation: See core/framework/credentials/README.md
```
### Step 5: Record Configuration Method
Track which auth method was used for each credential in `~/.hive/configuration.json`:
```python
import json
from pathlib import Path
from datetime import datetime
config_path = Path.home() / ".hive" / "configuration.json"
config = json.loads(config_path.read_text()) if config_path.exists() else {}
if "credential_methods" not in config:
config["credential_methods"] = {}
config["credential_methods"]["hubspot"] = {
"method": "aden", # or "direct" or "custom"
"configured_at": datetime.now().isoformat(),
}
config_path.write_text(json.dumps(config, indent=2))
```
### Step 6: Verify All Credentials
Run validation again to confirm everything is set:
```python
runner = AgentRunner.load("exports/{agent_name}")
validation = runner.validate()
assert not validation.missing_credentials, "Still missing credentials!"
```
Report the result to the user.
## Health Check Reference
Health checks validate credentials by making lightweight API calls:
| Credential | Endpoint | What It Checks |
| -------------- | --------------------------------------- | --------------------------------- |
| `hubspot` | `GET /crm/v3/objects/contacts?limit=1` | Bearer token validity, CRM scopes |
| `brave_search` | `GET /res/v1/web/search?q=test&count=1` | API key validity |
```python
from aden_tools.credentials import check_credential_health, HealthCheckResult
result: HealthCheckResult = check_credential_health("hubspot", token_value)
# result.valid: bool
# result.message: str
# result.details: dict (status_code, rate_limited, etc.)
```
## Encryption Key (HIVE_CREDENTIAL_KEY)
The encrypted credential store requires `HIVE_CREDENTIAL_KEY` to encrypt/decrypt credentials.
- If the user doesn't have one, `EncryptedFileStorage` will auto-generate one and log it
- The user MUST persist this key (e.g., in `~/.bashrc` or a secrets manager)
- Without this key, stored credentials cannot be decrypted
- This is the ONLY secret that should live in `~/.bashrc` or environment config
If `HIVE_CREDENTIAL_KEY` is not set:
1. Let the store generate one
2. Tell the user to save it: `export HIVE_CREDENTIAL_KEY="{generated_key}"`
3. Recommend adding it to `~/.bashrc` or their shell profile
## Security Rules
- **NEVER** log, print, or echo credential values in tool output
- **NEVER** store credentials in plaintext files, git-tracked files, or agent configs
- **NEVER** hardcode credentials in source code
- **ALWAYS** use `SecretStr` from Pydantic when handling credential values in Python
- **ALWAYS** use the encrypted credential store (`~/.hive/credentials`) for persistence
- **ALWAYS** run health checks before storing credentials (when possible)
- **ALWAYS** verify credentials were stored by re-running validation, not by reading them back
- When modifying `~/.bashrc` or `~/.zshrc`, confirm with the user first
## Credential Sources Reference
All credential specs are defined in `tools/src/aden_tools/credentials/`:
| File | Category | Credentials | Aden Supported |
| ----------------- | ------------- | --------------------------------------------- | -------------- |
| `llm.py` | LLM Providers | `anthropic` | No |
| `search.py` | Search Tools | `brave_search`, `google_search`, `google_cse` | No |
| `integrations.py` | Integrations | `hubspot` | Yes |
**Note:** Additional LLM providers (Cerebras, Groq, OpenAI) are handled by LiteLLM via environment
variables (`CEREBRAS_API_KEY`, `GROQ_API_KEY`, `OPENAI_API_KEY`) but are not yet in CREDENTIAL_SPECS.
Add them to `llm.py` as needed.
To check what's registered:
```python
from aden_tools.credentials import CREDENTIAL_SPECS
for name, spec in CREDENTIAL_SPECS.items():
print(f"{name}: aden={spec.aden_supported}, direct={spec.direct_api_key_supported}")
```
## Migration: CredentialManager → CredentialStore
**CredentialManager is deprecated.** Use CredentialStore instead.
| Old (Deprecated) | New (Recommended) |
| ----------------------------------------- | -------------------------------------------------------------------- |
| `CredentialManager()` | `CredentialStore.with_encrypted_storage()` |
| `creds.get("hubspot")` | `store.get("hubspot")` or `store.get_key("hubspot", "access_token")` |
| `creds.validate_for_tools(tools)` | Use `store.is_available(cred_id)` per credential |
| `creds.get_auth_options("hubspot")` | Check `CREDENTIAL_SPECS["hubspot"].aden_supported` |
| `creds.get_setup_instructions("hubspot")` | Access `CREDENTIAL_SPECS["hubspot"]` directly |
**Why migrate?**
- **CredentialStore** supports encrypted storage, multi-key credentials, template resolution, and automatic token refresh
- **CredentialManager** only reads from environment variables and .env files (no encryption, no refresh)
- **CredentialStoreAdapter** exists for backward compatibility during migration
```python
# Old way (deprecated)
from aden_tools.credentials import CredentialManager
creds = CredentialManager()
token = creds.get("hubspot")
# New way (recommended)
from core.framework.credentials import CredentialStore
store = CredentialStore.with_encrypted_storage()
token = store.get("hubspot")
# With Aden sync (recommended for OAuth integrations)
store = CredentialStore.with_aden_sync()
token = store.get_key("hubspot", "access_token")
```
## Example Session
```
User: /setup-credentials for my hubspot-agent
Agent: Let me check what credentials your hubspot-agent needs.
[Runs validation, finds ANTHROPIC_API_KEY and HUBSPOT_ACCESS_TOKEN missing]
Agent: Your hubspot-agent requires 2 credentials:
1. ANTHROPIC_API_KEY - Anthropic API key for LLM calls
2. HUBSPOT_ACCESS_TOKEN - HubSpot access token
Let's set these up one at a time.
--- Setting up ANTHROPIC_API_KEY ---
This credential only supports direct API key entry.
[Shows instructions for getting Anthropic API key]
[AskUserQuestion: "Please provide your Anthropic API key:"]
[User provides key]
[Stores in credential store, exports to env]
--- Setting up HUBSPOT_ACCESS_TOKEN ---
[AskUserQuestion with 3 options]
Choose how to configure HUBSPOT_ACCESS_TOKEN:
1) Aden Authorization Server (Recommended)
2) Direct API Key
3) Custom Credential Store (Advanced)
User: 1 (Aden)
Agent: Great! Let me check if you have an Aden API key configured...
[Checks for ADEN_API_KEY, not found]
[Opens browser to integration.adenhq.com/setup]
Agent: I've opened Aden's setup page. Please:
1. Sign in or create an account
2. Connect your HubSpot account (OAuth happens on Aden's site)
3. Copy the API key shown after setup
[AskUserQuestion: "Please provide your Aden API key:"]
[User provides key]
Agent: Would you like me to save this to your shell config (~/.zshrc) for future sessions?
[User approves]
[Saves to ~/.zshrc and ~/.hive/configuration.json]
Agent: Let me sync your credentials from Aden...
[Syncs credentials from Aden server - OAuth already done on Aden's side]
[Runs health check]
Agent: HubSpot credentials validated successfully!
All credentials are now configured:
- ANTHROPIC_API_KEY: Stored in encrypted credential store
- HUBSPOT_ACCESS_TOKEN: Synced from Aden (OAuth completed on Aden)
- Validation passed - your agent is ready to run!
```
+91 -33
View File
@@ -117,22 +117,47 @@ async def test_happy_path(mock_mode):
5. **Debug failures** - `debug_test(goal_id, test_name, agent_path)`
6. **Iterate** - Repeat steps 4-5 until all pass
## ⚠️ API Key Requirement for Real Testing
## ⚠️ Credential Requirements for Testing
**CRITICAL: Real LLM testing requires an API key.** Mock mode only validates structure and does NOT test actual agent behavior.
**CRITICAL: Testing requires ALL credentials the agent depends on.** This includes both the LLM API key AND any tool-specific credentials (HubSpot, Brave Search, etc.).
### Prerequisites
Before running agent tests, you MUST set your API key:
Before running agent tests, you MUST collect ALL required credentials from the user.
**Step 1: LLM API Key (always required)**
```bash
export ANTHROPIC_API_KEY="your-key-here"
```
**Why API keys are required:**
**Step 2: Tool-specific credentials (depends on agent's tools)**
Inspect the agent's `mcp_servers.json` and tool configuration to determine which tools the agent uses, then check for all required credentials:
```python
from aden_tools.credentials import CredentialManager, CREDENTIAL_SPECS
creds = CredentialManager()
# Determine which tools the agent uses (from agent.json or mcp_servers.json)
agent_tools = [...] # e.g., ["hubspot_search_contacts", "web_search", ...]
# Find all missing credentials for those tools
missing = creds.get_missing_for_tools(agent_tools)
```
Common tool credentials:
| Tool | Env Var | Help URL |
|------|---------|----------|
| HubSpot CRM | `HUBSPOT_ACCESS_TOKEN` | https://developers.hubspot.com/docs/api/private-apps |
| Brave Search | `BRAVE_SEARCH_API_KEY` | https://brave.com/search/api/ |
| Google Search | `GOOGLE_SEARCH_API_KEY` + `GOOGLE_SEARCH_CX` | https://developers.google.com/custom-search |
**Why ALL credentials are required:**
- Tests need to execute the agent's LLM nodes to validate behavior
- Mock mode bypasses LLM calls, providing no confidence in real-world performance
- Success criteria (personalization, reasoning quality, constraint adherence) can only be tested with real LLM calls
- Tools with missing credentials will return error dicts instead of real data
- Mock mode bypasses everything, providing no confidence in real-world performance
- The `AgentRunner.run()` method validates credentials at startup and will fail fast if any are missing
### Mock Mode Limitations
@@ -146,11 +171,11 @@ Mock mode (`--mock` flag or `mock_mode=True`) is **ONLY for structure validation
✗ Does NOT test real API integrations or tool use
✗ Does NOT test personalization or content quality
**Bottom line:** If you're testing whether an agent achieves its goal, you MUST use a real API key.
**Bottom line:** If you're testing whether an agent achieves its goal, you MUST use real credentials for ALL services.
### Enforcing API Key in Tests
### Enforcing Credentials in Tests
When generating tests, **ALWAYS include API key checks**:
When generating tests, **ALWAYS include credential checks for ALL required services**:
```python
import os
@@ -165,11 +190,14 @@ pytestmark = pytest.mark.skipif(
@pytest.fixture(scope="session", autouse=True)
def check_api_key():
"""Ensure API key is set for real testing."""
def check_credentials():
"""Ensure ALL required credentials are set for real testing."""
creds = CredentialManager()
mock_mode = os.environ.get("MOCK_MODE")
# Always check LLM key
if not creds.is_available("anthropic"):
if os.environ.get("MOCK_MODE"):
if mock_mode:
print("\n⚠️ Running in MOCK MODE - structure validation only")
print(" This does NOT test LLM behavior or agent quality")
print(" Set ANTHROPIC_API_KEY for real testing\n")
@@ -183,39 +211,69 @@ def check_api_key():
" MOCK_MODE=1 pytest exports/{agent}/tests/\n\n"
"Note: Mock mode does NOT validate agent behavior or quality."
)
# Check tool-specific credentials (skip in mock mode)
if not mock_mode:
# List the tools this agent uses - update per agent
agent_tools = [] # e.g., ["hubspot_search_contacts", "hubspot_get_contact"]
missing = creds.get_missing_for_tools(agent_tools)
if missing:
lines = ["\n❌ Missing tool credentials!\n"]
for name in missing:
spec = creds.specs.get(name)
if spec:
lines.append(f" {spec.env_var} - {spec.description}")
if spec.help_url:
lines.append(f" Setup: {spec.help_url}")
lines.append("\nSet the required environment variables and re-run.")
pytest.fail("\n".join(lines))
```
### User Communication
When the user asks to test an agent, **ALWAYS check for the API key first**:
When the user asks to test an agent, **ALWAYS check for ALL credentials first** — not just the LLM key:
1. **Identify the agent's tools** from `agent.json` or `mcp_servers.json`
2. **Check ALL required credentials** using `CredentialManager`
3. **Ask the user to provide any missing credentials** before proceeding
```python
from aden_tools.credentials import CredentialManager
from aden_tools.credentials import CredentialManager, CREDENTIAL_SPECS
# Before running any tests
creds = CredentialManager()
if not creds.is_available("anthropic"):
print("⚠️ No ANTHROPIC_API_KEY found!")
print()
print("Testing requires a real API key to validate agent behavior.")
print()
print("Options:")
print("1. Set your API key (RECOMMENDED):")
print(" export ANTHROPIC_API_KEY='your-key-here'")
print()
print("2. Run in mock mode (structure validation only):")
print(" MOCK_MODE=1 pytest exports/{agent}/tests/")
print()
print("Mock mode does NOT test:")
print(" - LLM message generation")
print(" - Reasoning or decision quality")
print(" - Constraint validation")
print(" - Real API integrations")
# Ask user what to do
# 1. Check LLM key
missing_creds = []
if not creds.is_available("anthropic"):
missing_creds.append(("ANTHROPIC_API_KEY", "Anthropic API key for LLM calls"))
# 2. Check tool-specific credentials
agent_tools = [...] # Determined from agent config
missing_tools = creds.get_missing_for_tools(agent_tools)
for name in missing_tools:
spec = CREDENTIAL_SPECS.get(name)
if spec:
missing_creds.append((spec.env_var, spec.description))
# 3. Present ALL missing credentials to the user at once
if missing_creds:
print("⚠️ Missing credentials required by this agent:\n")
for env_var, description in missing_creds:
print(f"{env_var} — {description}")
print()
print("Please set the missing environment variables:")
for env_var, _ in missing_creds:
print(f" export {env_var}='your-value-here'")
print()
print("Or run in mock mode (structure validation only):")
print(" MOCK_MODE=1 pytest exports/{agent}/tests/")
# Ask user to provide credentials or choose mock mode
AskUserQuestion(...)
```
**IMPORTANT:** Do NOT skip credential collection. If an agent uses HubSpot tools, the user MUST provide `HUBSPOT_ACCESS_TOKEN`. If it uses web search, the user MUST provide the appropriate search API key. Collect ALL missing credentials in a single prompt rather than discovering them one at a time during test failures.
## The Three-Stage Flow
```
+12 -6
View File
@@ -1,14 +1,20 @@
{
"mcpServers": {
"agent-builder": {
"command": "core/.venv/bin/python",
"command": ".venv/bin/python",
"args": ["-m", "framework.mcp.agent_builder_server"],
"cwd": "."
"cwd": "core",
"env": {
"PYTHONPATH": "../tools/src"
}
},
"tools": {
"command": "tools/.venv/bin/python",
"args": ["-m", "aden_tools.mcp_server", "--stdio"],
"cwd": "."
"command": ".venv/bin/python",
"args": ["mcp_server.py", "--stdio"],
"cwd": "tools",
"env": {
"PYTHONPATH": "src:../core"
}
}
}
}
}
+51
View File
@@ -0,0 +1,51 @@
## Summary
- **Added HubSpot integration** — new HubSpot MCP tool with search, get, create, and update operations for contacts, companies, and deals. Includes OAuth2 provider for HubSpot credentials and credential store adapter for the tools layer.
- **Replaced web_scrape tool with Playwright + stealth** — swapped httpx/BeautifulSoup for a headless Chromium browser using `playwright` (async API) and `playwright-stealth`, enabling JS-rendered page scraping and bot detection evasion
- **Added empty response retry logic** — LLM provider now detects empty responses (e.g. Gemini returning 200 with no content on rate limit) and retries with exponential backoff, preventing hallucinated output from the cleanup LLM
- **Added context-aware input compaction** — LLM nodes now estimate input token count before calling the model and progressively truncate the largest values if they exceed the context window budget
- **Increased rate limit retries to 10** with verbose `[retry]` and `[compaction]` logging that includes model name, finish reason, and attempt count
- **Updated setup scripts** — `scripts/setup-python.sh` now installs Playwright Chromium browser automatically for web scraping support
- **Interactive quickstart onboarding** — `quickstart.sh` rewritten as bee-themed interactive wizard that detects existing API keys (including Claude Code subscription), lets user pick ONE default LLM provider, and saves configuration to `~/.hive/configuration.json`
- **Fixed lint errors** across `hubspot_tool.py` (line length) and `agent_builder_server.py` (unused variable)
## Changed files
### HubSpot Integration
- `tools/src/aden_tools/tools/hubspot_tool/` — New MCP tool: contacts, companies, and deals CRUD
- `tools/src/aden_tools/tools/__init__.py` — Registered HubSpot tools
- `tools/src/aden_tools/credentials/integrations.py` — HubSpot credential integration
- `tools/src/aden_tools/credentials/__init__.py` — Updated credential exports
- `core/framework/credentials/oauth2/hubspot_provider.py` — HubSpot OAuth2 provider
- `core/framework/credentials/oauth2/__init__.py` — Registered HubSpot OAuth2 provider
- `core/framework/runner/runner.py` — Updated runner for credential support
### Web Scrape Rewrite
- `tools/src/aden_tools/tools/web_scrape_tool/web_scrape_tool.py` — Playwright async rewrite
- `tools/src/aden_tools/tools/web_scrape_tool/README.md` — Updated docs
- `tools/pyproject.toml` — Added `playwright`, `playwright-stealth` deps
- `tools/Dockerfile` — Added `playwright install chromium --with-deps`
- `scripts/setup-python.sh` — Added Playwright Chromium browser install step
### LLM Reliability
- `core/framework/llm/litellm.py` — Empty response retry + max retries 10 + verbose logging
- `core/framework/graph/node.py` — Input compaction via `_compact_inputs()`, `_estimate_tokens()`, `_get_context_limit()`
### Quickstart & Setup
- `quickstart.sh` — Interactive bee-themed onboarding wizard with single provider selection
- `~/.hive/configuration.json` — New user config file for default LLM provider/model
### Fixes
- `core/framework/mcp/agent_builder_server.py` — Removed unused variable
- `tools/src/aden_tools/tools/hubspot_tool/hubspot_tool.py` — Fixed E501 line length violations
## Test plan
- [ ] Run `make lint` — passes clean
- [ ] Run `./quickstart.sh` and verify interactive flow works, config saved to `~/.hive/configuration.json`
- [ ] Run `./scripts/setup-python.sh` and verify Playwright Chromium installs
- [ ] Run `pytest tests/tools/test_web_scrape_tool.py -v`
- [ ] Run agent against a JS-heavy site and verify `web_scrape` returns rendered content
- [ ] Set `HUBSPOT_ACCESS_TOKEN` and verify HubSpot tool CRUD operations work
- [ ] Trigger rate limit and verify `[retry]` logs appear with correct attempt counts
- [ ] Run agent with large inputs and verify `[compaction]` logs show truncation
🤖 Generated with [Claude Code](https://claude.com/claude-code)
+1 -1
View File
@@ -12,7 +12,7 @@ Quick Start:
from core.framework.credentials import CredentialStore, CredentialObject
# Create store with encrypted storage
store = CredentialStore.with_encrypted_storage("/var/hive/credentials")
store = CredentialStore.with_encrypted_storage() # defaults to ~/.hive/credentials
# Get a credential
api_key = store.get("brave_search")
+12 -10
View File
@@ -8,12 +8,12 @@ This client fetches tokens and delegates refresh operations to Aden.
Usage:
# API key loaded from ADEN_API_KEY environment variable by default
client = AdenCredentialClient(AdenClientConfig(
base_url="https://hive.adenhq.com",
base_url="https://api.adenhq.com",
))
# Or explicitly provide the API key
client = AdenCredentialClient(AdenClientConfig(
base_url="https://hive.adenhq.com",
base_url="https://api.adenhq.com",
api_key="your-api-key",
))
@@ -85,7 +85,7 @@ class AdenClientConfig:
"""Configuration for Aden API client."""
base_url: str
"""Base URL of the Aden server (e.g., 'https://hive.adenhq.com')."""
"""Base URL of the Aden server (e.g., 'https://api.adenhq.com')."""
api_key: str | None = None
"""Agent's API key for authenticating with Aden.
@@ -140,20 +140,22 @@ class AdenCredentialResponse:
"""Additional integration-specific metadata."""
@classmethod
def from_dict(cls, data: dict[str, Any]) -> AdenCredentialResponse:
def from_dict(
cls, data: dict[str, Any], integration_id: str | None = None
) -> AdenCredentialResponse:
"""Create from API response dictionary."""
expires_at = None
if data.get("expires_at"):
expires_at = datetime.fromisoformat(data["expires_at"].replace("Z", "+00:00"))
return cls(
integration_id=data["integration_id"],
integration_type=data["integration_type"],
integration_id=integration_id or data.get("alias", data.get("provider", "")),
integration_type=data.get("provider", ""),
access_token=data["access_token"],
token_type=data.get("token_type", "Bearer"),
expires_at=expires_at,
scopes=data.get("scopes", []),
metadata=data.get("metadata", {}),
metadata={"email": data.get("email")} if data.get("email") else {},
)
@@ -197,7 +199,7 @@ class AdenCredentialClient:
Usage:
# API key loaded from ADEN_API_KEY environment variable
config = AdenClientConfig(
base_url="https://hive.adenhq.com",
base_url="https://api.adenhq.com",
)
client = AdenCredentialClient(config)
@@ -332,7 +334,7 @@ class AdenCredentialClient:
try:
response = self._request_with_retry("GET", f"/v1/credentials/{integration_id}")
data = response.json()
return AdenCredentialResponse.from_dict(data)
return AdenCredentialResponse.from_dict(data, integration_id=integration_id)
except AdenNotFoundError:
return None
@@ -357,7 +359,7 @@ class AdenCredentialClient:
"""
response = self._request_with_retry("POST", f"/v1/credentials/{integration_id}/refresh")
data = response.json()
return AdenCredentialResponse.from_dict(data)
return AdenCredentialResponse.from_dict(data, integration_id=integration_id)
def list_integrations(self) -> list[AdenIntegrationInfo]:
"""
+1 -1
View File
@@ -79,7 +79,7 @@ class AdenSyncProvider(CredentialProvider):
Usage:
client = AdenCredentialClient(AdenClientConfig(
base_url="https://hive.adenhq.com",
base_url="https://api.adenhq.com",
api_key=os.environ["ADEN_API_KEY"],
))
@@ -20,8 +20,7 @@ Quick Start:
# Create store with OAuth2 provider
store = CredentialStore.with_encrypted_storage(
"/var/hive/credentials",
providers=[provider]
providers=[provider] # defaults to ~/.hive/credentials
)
# Get token using client credentials
@@ -64,6 +63,7 @@ For advanced lifecycle management:
"""
from .base_provider import BaseOAuth2Provider
from .hubspot_provider import HubSpotOAuth2Provider
from .lifecycle import TokenLifecycleManager, TokenRefreshResult
from .provider import (
OAuth2Config,
@@ -79,8 +79,9 @@ __all__ = [
"OAuth2Token",
"OAuth2Config",
"TokenPlacement",
# Provider
# Providers
"BaseOAuth2Provider",
"HubSpotOAuth2Provider",
# Lifecycle
"TokenLifecycleManager",
"TokenRefreshResult",
@@ -0,0 +1,112 @@
"""
HubSpot-specific OAuth2 provider.
Pre-configured for HubSpot's OAuth2 endpoints and CRM scopes.
Extends BaseOAuth2Provider for HubSpot-specific behavior.
Usage:
provider = HubSpotOAuth2Provider(
client_id="your-client-id",
client_secret="your-client-secret",
)
# Use with credential store
store = CredentialStore(
storage=EncryptedFileStorage(), # defaults to ~/.hive/credentials
providers=[provider],
)
See: https://developers.hubspot.com/docs/api/oauth-quickstart-guide
"""
from __future__ import annotations
import logging
from typing import Any
from ..models import CredentialObject, CredentialType
from .base_provider import BaseOAuth2Provider
from .provider import OAuth2Config
logger = logging.getLogger(__name__)
# HubSpot OAuth2 endpoints
HUBSPOT_TOKEN_URL = "https://api.hubapi.com/oauth/v1/token"
HUBSPOT_AUTHORIZATION_URL = "https://app.hubspot.com/oauth/authorize"
# Default CRM scopes for contacts, companies, and deals
HUBSPOT_DEFAULT_SCOPES = [
"crm.objects.contacts.read",
"crm.objects.contacts.write",
"crm.objects.companies.read",
"crm.objects.companies.write",
"crm.objects.deals.read",
"crm.objects.deals.write",
]
class HubSpotOAuth2Provider(BaseOAuth2Provider):
    """
    HubSpot OAuth2 provider with pre-configured endpoints.

    Handles HubSpot-specific OAuth2 behavior:
    - Pre-configured token and authorization URLs
    - Default CRM scopes for contacts, companies, and deals
    - Token validation via a lightweight HubSpot CRM API call

    Example:
        provider = HubSpotOAuth2Provider(
            client_id="your-hubspot-client-id",
            client_secret="your-hubspot-client-secret",
            scopes=["crm.objects.contacts.read"],  # Override default scopes
        )
    """

    def __init__(
        self,
        client_id: str,
        client_secret: str,
        scopes: list[str] | None = None,
    ):
        # Wire HubSpot's fixed endpoints into the generic OAuth2 config so
        # callers only need to supply their app credentials (and, optionally,
        # a scope list to override the CRM defaults).
        config = OAuth2Config(
            token_url=HUBSPOT_TOKEN_URL,
            authorization_url=HUBSPOT_AUTHORIZATION_URL,
            client_id=client_id,
            client_secret=client_secret,
            default_scopes=scopes or HUBSPOT_DEFAULT_SCOPES,
        )
        super().__init__(config, provider_id="hubspot_oauth2")

    @property
    def supported_types(self) -> list[CredentialType]:
        # This provider only handles OAuth2-style credentials.
        return [CredentialType.OAUTH2]

    def validate(self, credential: CredentialObject) -> bool:
        """
        Validate HubSpot credential by making a lightweight API call.

        Tests the access token against the contacts endpoint with limit=1.
        Returns False when the token is absent, rejected (non-200), or the
        request fails for any reason.
        """
        access_token = credential.get_key("access_token")
        if not access_token:
            return False

        try:
            client = self._get_client()
            response = client.get(
                "https://api.hubapi.com/crm/v3/objects/contacts",
                headers={
                    "Authorization": f"Bearer {access_token}",
                    "Accept": "application/json",
                },
                params={"limit": "1"},
            )
            return response.status_code == 200
        except Exception:
            # Network or client errors mean we cannot prove validity —
            # treat the credential as invalid rather than propagating.
            return False

    def _parse_token_response(self, response_data: dict[str, Any]) -> Any:
        """Parse HubSpot token response into an OAuth2Token."""
        # Local import avoids a circular import with the provider module.
        from .provider import OAuth2Token

        return OAuth2Token.from_token_response(response_data)
+7 -5
View File
@@ -111,14 +111,16 @@ class EncryptedFileStorage(CredentialStorage):
If not set, a new key is generated (and must be persisted for data recovery).
Example:
storage = EncryptedFileStorage("/var/hive/credentials")
storage = EncryptedFileStorage("~/.hive/credentials")
storage.save(credential)
credential = storage.load("brave_search")
"""
DEFAULT_PATH = "~/.hive/credentials"
def __init__(
self,
base_path: str | Path,
base_path: str | Path | None = None,
encryption_key: bytes | None = None,
key_env_var: str = "HIVE_CREDENTIAL_KEY",
):
@@ -126,7 +128,7 @@ class EncryptedFileStorage(CredentialStorage):
Initialize encrypted storage.
Args:
base_path: Directory for credential files
base_path: Directory for credential files. Defaults to ~/.hive/credentials.
encryption_key: 32-byte Fernet key. If None, reads from env var.
key_env_var: Environment variable containing encryption key
"""
@@ -137,7 +139,7 @@ class EncryptedFileStorage(CredentialStorage):
"Encrypted storage requires 'cryptography'. Install with: pip install cryptography"
) from e
self.base_path = Path(base_path)
self.base_path = Path(base_path or self.DEFAULT_PATH).expanduser()
self._ensure_dirs()
self._key_env_var = key_env_var
@@ -459,7 +461,7 @@ class CompositeStorage(CredentialStorage):
Example:
storage = CompositeStorage(
primary=EncryptedFileStorage("/var/hive/credentials"),
primary=EncryptedFileStorage("~/.hive/credentials"),
fallbacks=[EnvVarStorage({"brave_search": "BRAVE_SEARCH_API_KEY"})]
)
"""
+5 -5
View File
@@ -45,7 +45,7 @@ class CredentialStore:
Usage:
# Basic usage
store = CredentialStore(
storage=EncryptedFileStorage("/path/to/creds"),
storage=EncryptedFileStorage("~/.hive/credentials"),
providers=[OAuth2Provider(), StaticProvider()]
)
@@ -566,7 +566,7 @@ class CredentialStore:
@classmethod
def with_encrypted_storage(
cls,
base_path: str,
base_path: str | None = None,
providers: list[CredentialProvider] | None = None,
**kwargs: Any,
) -> CredentialStore:
@@ -574,7 +574,7 @@ class CredentialStore:
Create a credential store with encrypted file storage.
Args:
base_path: Directory for credential files
base_path: Directory for credential files. Defaults to ~/.hive/credentials.
providers: List of credential providers
**kwargs: Additional arguments passed to CredentialStore
@@ -616,7 +616,7 @@ class CredentialStore:
@classmethod
def with_aden_sync(
cls,
base_url: str = "https://hive.adenhq.com",
base_url: str = "https://api.adenhq.com",
cache_ttl_seconds: int = 300,
local_path: str | None = None,
auto_sync: bool = True,
@@ -630,7 +630,7 @@ class CredentialStore:
is unreachable.
Args:
base_url: Aden server URL (default: https://hive.adenhq.com)
base_url: Aden server URL (default: https://api.adenhq.com)
cache_ttl_seconds: How long to cache credentials locally (default: 5 min)
local_path: Path for local credential storage (default: ~/.hive/credentials)
auto_sync: Whether to sync all credentials on startup (default: True)
+7 -3
View File
@@ -49,7 +49,7 @@ class ExecutionResult:
# Execution quality metrics
total_retries: int = 0 # Total number of retries across all nodes
nodes_with_failures: list[str] = field(default_factory=list) # Node IDs that failed but recovered
nodes_with_failures: list[str] = field(default_factory=list) # Failed but recovered
retry_details: dict[str, int] = field(default_factory=dict) # {node_id: retry_count}
had_partial_failures: bool = False # True if any node failed but eventually succeeded
execution_quality: str = "clean" # "clean", "degraded", or "failed"
@@ -572,12 +572,16 @@ class GraphExecutor:
# Update narrative to reflect execution quality
quality_suffix = ""
if exec_quality == "degraded":
quality_suffix = f" (⚠️ {total_retries_count} retries across {len(nodes_failed)} nodes)"
retries = total_retries_count
failed = len(nodes_failed)
quality_suffix = f" ({retries} retries across {failed} nodes)"
self.runtime.end_run(
success=True,
output_data=output,
narrative=f"Executed {steps} steps through path: {' -> '.join(path)}{quality_suffix}",
narrative=(
f"Executed {steps} steps through path: {' -> '.join(path)}{quality_suffix}"
),
)
return ExecutionResult(
+135 -6
View File
@@ -669,6 +669,137 @@ Keep the same JSON structure but with shorter content values.
return match.group(1).strip()
return content
def _estimate_tokens(
self, model: str, system: str, messages: list[dict], tools: list | None
) -> int:
"""Estimate total input tokens for an LLM call."""
import json
try:
import litellm as _litellm
except ImportError:
# Rough estimate: 1 token ≈ 4 chars
total_chars = len(system)
for m in messages:
total_chars += len(str(m.get("content", "")))
if tools:
total_chars += len(
json.dumps(
[
{
"name": t.name,
"description": t.description,
"parameters": t.parameters,
}
for t in tools
],
default=str,
)
)
return total_chars // 4
total = 0
if system:
total += _litellm.token_counter(model=model, text=system)
for m in messages:
content = str(m.get("content", ""))
if content:
total += _litellm.token_counter(model=model, text=content)
if tools:
tools_text = json.dumps(
[
{"name": t.name, "description": t.description, "parameters": t.parameters}
for t in tools
],
default=str,
)
total += _litellm.token_counter(model=model, text=tools_text)
return total
def _get_context_limit(self, model: str) -> int:
"""Get usable input token budget (80% of model's max_input_tokens)."""
try:
import litellm as _litellm
info = _litellm.get_model_info(model)
max_input = info.get("max_input_tokens") or info.get("max_tokens") or 8192
return int(max_input * 0.8)
except Exception:
return 8192
def _compact_inputs(
    self, ctx: NodeContext, system: str, messages: list[dict], tools: list | None
) -> list[dict]:
    """Compact message inputs if they exceed the model's context window.

    Uses a sliding window strategy: iteratively halves the longest input
    value until the total token count fits within the budget. Only the
    first message's content is compacted; the system prompt and tool
    schemas are never truncated.
    """
    # NOTE(review): assumes ctx.llm exposes a `.model` attribute when it is a
    # LiteLLM-style provider; otherwise estimates against gpt-3.5-turbo.
    model = ctx.llm.model if hasattr(ctx.llm, "model") else "gpt-3.5-turbo"
    budget = self._get_context_limit(model)
    estimated = self._estimate_tokens(model, system, messages, tools)

    # Fast path: nothing to do when we already fit.
    if estimated <= budget:
        return messages

    logger.warning(
        f"[compaction] Input tokens (~{estimated}) exceed budget ({budget}) "
        f"for model {model}. Compacting inputs..."
    )

    # Parse user message into key:value pairs for selective truncation
    if not messages or not messages[0].get("content"):
        return messages

    content = messages[0]["content"]
    lines = content.split("\n")
    pairs: list[tuple[str, str]] = []
    for line in lines:
        if ": " in line:
            # partition keeps everything after the FIRST ": " as the value,
            # so values containing ": " themselves survive intact.
            key, _, value = line.partition(": ")
            pairs.append((key, value))
        else:
            # Keyless lines are kept verbatim and never truncated below.
            pairs.append(("", line))

    # Iteratively halve the longest value until we fit
    max_iterations = 20
    for i in range(max_iterations):
        # Find longest value (only keyed pairs are candidates)
        longest_idx = -1
        longest_len = 0
        for idx, (key, value) in enumerate(pairs):
            if key and len(value) > longest_len:
                longest_len = len(value)
                longest_idx = idx

        # Stop once nothing truncatable remains or values are already short;
        # 100 chars is the floor below which we never cut.
        if longest_idx == -1 or longest_len <= 100:
            break

        key, value = pairs[longest_idx]
        new_len = max(longest_len // 2, 100)
        pairs[longest_idx] = (key, value[:new_len] + "...")
        logger.warning(f"[compaction] Truncated '{key}' from {longest_len} to {new_len} chars")

        # Re-estimate
        new_content = "\n".join(f"{k}: {v}" if k else v for k, v in pairs)
        test_messages = [{"role": "user", "content": new_content}]
        estimated = self._estimate_tokens(model, system, test_messages, tools)
        if estimated <= budget:
            logger.warning(
                f"[compaction] Fits within budget after {i + 1} rounds (~{estimated} tokens)"
            )
            return test_messages

    # Final reassembly even if still over budget
    final_content = "\n".join(f"{k}: {v}" if k else v for k, v in pairs)
    final_messages = [{"role": "user", "content": final_content}]
    final_est = self._estimate_tokens(model, system, final_messages, tools)
    logger.warning(
        f"[compaction] Still ~{final_est} tokens after max compaction "
        f"(budget={budget}). Proceeding anyway."
    )
    return final_messages
async def execute(self, ctx: NodeContext) -> NodeResult:
"""Execute the LLM node."""
import time
@@ -711,6 +842,9 @@ Keep the same JSON structure but with shorter content values.
# Build system prompt
system = self._build_system_prompt(ctx)
# Compact inputs if they exceed the model's context window
messages = self._compact_inputs(ctx, system, messages, ctx.available_tools)
# Log the LLM call details
logger.info(" 🤖 LLM Call:")
logger.info(
@@ -1185,10 +1319,7 @@ Keep the same JSON structure but with shorter content values.
# Use configured cleanup model, or fall back to defaults
if cleanup_llm_model:
# Use the configured cleanup model (LiteLLM handles API keys via env vars)
cleaner_llm = LiteLLMProvider(
model=cleanup_llm_model,
temperature=0.0,
)
cleaner_llm = LiteLLMProvider(model=cleanup_llm_model)
logger.info(f" Using configured cleanup LLM: {cleanup_llm_model}")
else:
# Fall back to default logic: Cerebras preferred, then Haiku
@@ -1203,13 +1334,11 @@ Keep the same JSON structure but with shorter content values.
cleaner_llm = LiteLLMProvider(
api_key=os.environ.get("CEREBRAS_API_KEY"),
model="cerebras/llama-3.3-70b",
temperature=0.0,
)
else:
cleaner_llm = LiteLLMProvider(
api_key=api_key,
model="claude-3-5-haiku-20241022",
temperature=0.0,
)
prompt = f"""Extract the JSON object from this LLM response.
+1 -2
View File
@@ -118,7 +118,6 @@ class OutputCleaner:
self.llm = LiteLLMProvider(
api_key=api_key,
model=config.fast_model,
temperature=0.0, # Deterministic cleaning
)
logger.info(f"✓ Initialized OutputCleaner with {config.fast_model}")
else:
@@ -240,7 +239,7 @@ class OutputCleaner:
for key, value in output.items():
if isinstance(value, str):
repaired = _heuristic_repair(value)
if repaired and isinstance(repaired, (dict, list)):
if repaired and isinstance(repaired, dict | list):
# Check if this repaired structure looks like what we want
# e.g. if the key is 'data' and the string contained valid JSON
fixed_output[key] = repaired
+7 -7
View File
@@ -8,17 +8,17 @@ from framework.llm.litellm import LiteLLMProvider
from framework.llm.provider import LLMProvider, LLMResponse, Tool, ToolResult, ToolUse
def _get_api_key_from_credential_manager() -> str | None:
"""Get API key from CredentialManager or environment.
def _get_api_key_from_credential_store() -> str | None:
"""Get API key from CredentialStoreAdapter or environment.
Priority:
1. CredentialManager (supports .env hot-reload)
1. CredentialStoreAdapter (supports encrypted storage + env vars)
2. os.environ fallback
"""
try:
from aden_tools.credentials import CredentialManager
from aden_tools.credentials import CredentialStoreAdapter
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
if creds.is_available("anthropic"):
return creds.get("anthropic")
except ImportError:
@@ -44,12 +44,12 @@ class AnthropicProvider(LLMProvider):
Initialize the Anthropic provider.
Args:
api_key: Anthropic API key. If not provided, uses CredentialManager
api_key: Anthropic API key. If not provided, uses CredentialStoreAdapter
or ANTHROPIC_API_KEY env var.
model: Model to use (default: claude-haiku-4-5-20251001)
"""
# Delegate to LiteLLMProvider internally.
self.api_key = api_key or _get_api_key_from_credential_manager()
self.api_key = api_key or _get_api_key_from_credential_store()
if not self.api_key:
raise ValueError(
"Anthropic API key required. Set ANTHROPIC_API_KEY env var or pass api_key."
+147 -2
View File
@@ -8,16 +8,79 @@ See: https://docs.litellm.ai/docs/providers
"""
import json
import logging
import time
from collections.abc import Callable
from datetime import datetime
from pathlib import Path
from typing import Any
try:
import litellm
from litellm.exceptions import RateLimitError
except ImportError:
litellm = None # type: ignore[assignment]
RateLimitError = Exception # type: ignore[assignment, misc]
from framework.llm.provider import LLMProvider, LLMResponse, Tool, ToolResult, ToolUse
logger = logging.getLogger(__name__)
RATE_LIMIT_MAX_RETRIES = 10
RATE_LIMIT_BACKOFF_BASE = 2 # seconds
# Directory for dumping failed requests
FAILED_REQUESTS_DIR = Path.home() / ".hive" / "failed_requests"
def _estimate_tokens(model: str, messages: list[dict]) -> tuple[int, str]:
    """Estimate token count for messages. Returns (token_count, method).

    ``method`` is ``"litellm"`` when the accurate tokenizer was used, or
    ``"estimate"`` when the character-count heuristic was the fallback.
    """
    # Try litellm's token counter first (litellm may be None if not installed)
    if litellm is not None:
        try:
            count = litellm.token_counter(model=model, messages=messages)
            return count, "litellm"
        except Exception:
            # Tokenizer may not recognize this model — fall through to heuristic.
            pass

    # Fallback: rough estimate based on character count (~4 chars per token)
    total_chars = sum(len(str(m.get("content", ""))) for m in messages)
    return total_chars // 4, "estimate"
def _dump_failed_request(
    model: str,
    kwargs: dict[str, Any],
    error_type: str,
    attempt: int,
) -> str:
    """Dump failed request to a file for debugging. Returns the file path.

    Files are written under ~/.hive/failed_requests, named
    ``{error_type}_{model}_{timestamp}.json``; slashes in the model name
    are replaced so the filename stays filesystem-safe.
    """
    FAILED_REQUESTS_DIR.mkdir(parents=True, exist_ok=True)

    # Microsecond-precision timestamp keeps filenames unique across rapid retries.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
    filename = f"{error_type}_{model.replace('/', '_')}_{timestamp}.json"
    filepath = FAILED_REQUESTS_DIR / filename

    # Build dump data
    messages = kwargs.get("messages", [])
    dump_data = {
        "timestamp": datetime.now().isoformat(),
        "model": model,
        "error_type": error_type,
        "attempt": attempt,
        # (count, method) tuple from _estimate_tokens, serialized as a JSON array.
        "estimated_tokens": _estimate_tokens(model, messages),
        "num_messages": len(messages),
        "messages": messages,
        "tools": kwargs.get("tools"),
        "max_tokens": kwargs.get("max_tokens"),
        "temperature": kwargs.get("temperature"),
    }

    with open(filepath, "w") as f:
        # default=str: messages/tools may contain objects json can't serialize.
        json.dump(dump_data, f, indent=2, default=str)

    return str(filepath)
class LiteLLMProvider(LLMProvider):
"""
@@ -85,6 +148,88 @@ class LiteLLMProvider(LLMProvider):
"LiteLLM is not installed. Please install it with: pip install litellm"
)
def _completion_with_rate_limit_retry(self, **kwargs: Any) -> Any:
    """Call litellm.completion with retry on 429 rate limit errors and empty responses.

    Retries up to RATE_LIMIT_MAX_RETRIES times with exponential backoff
    (RATE_LIMIT_BACKOFF_BASE * 2**attempt seconds). Every failure is dumped
    to ~/.hive/failed_requests for offline debugging. On final exhaustion,
    an empty response is returned as-is while a RateLimitError is re-raised.
    """
    model = kwargs.get("model", self.model)

    for attempt in range(RATE_LIMIT_MAX_RETRIES + 1):
        try:
            response = litellm.completion(**kwargs)  # type: ignore[union-attr]

            # Some providers (e.g. Gemini) return 200 with empty content on
            # rate limit / quota exhaustion instead of a proper 429. Treat
            # empty responses the same as a rate-limit error and retry.
            content = response.choices[0].message.content if response.choices else None
            has_tool_calls = bool(response.choices and response.choices[0].message.tool_calls)
            if not content and not has_tool_calls:
                finish_reason = (
                    response.choices[0].finish_reason if response.choices else "unknown"
                )

                # Dump full request to file for debugging
                messages = kwargs.get("messages", [])
                token_count, token_method = _estimate_tokens(model, messages)
                dump_path = _dump_failed_request(
                    model=model,
                    kwargs=kwargs,
                    error_type="empty_response",
                    attempt=attempt,
                )
                logger.warning(
                    f"[retry] Empty response - {len(messages)} messages, "
                    f"~{token_count} tokens ({token_method}). "
                    f"Full request dumped to: {dump_path}"
                )

                if attempt == RATE_LIMIT_MAX_RETRIES:
                    # Out of retries: hand the empty response back to the
                    # caller rather than raising.
                    logger.error(
                        f"[retry] GAVE UP on {model} after {RATE_LIMIT_MAX_RETRIES + 1} "
                        f"attempts — empty response "
                        f"(finish_reason={finish_reason}, "
                        f"choices={len(response.choices) if response.choices else 0})"
                    )
                    return response

                wait = RATE_LIMIT_BACKOFF_BASE * (2**attempt)
                logger.warning(
                    f"[retry] {model} returned empty response "
                    f"(finish_reason={finish_reason}, "
                    f"choices={len(response.choices) if response.choices else 0}) — "
                    f"likely rate limited or quota exceeded. "
                    f"Retrying in {wait}s "
                    f"(attempt {attempt + 1}/{RATE_LIMIT_MAX_RETRIES})"
                )
                time.sleep(wait)
                continue

            return response
        except RateLimitError as e:
            # Dump full request to file for debugging
            messages = kwargs.get("messages", [])
            token_count, token_method = _estimate_tokens(model, messages)
            dump_path = _dump_failed_request(
                model=model,
                kwargs=kwargs,
                error_type="rate_limit",
                attempt=attempt,
            )
            if attempt == RATE_LIMIT_MAX_RETRIES:
                logger.error(
                    f"[retry] GAVE UP on {model} after {RATE_LIMIT_MAX_RETRIES + 1} "
                    f"attempts — rate limit error: {e!s}. "
                    f"~{token_count} tokens ({token_method}). "
                    f"Full request dumped to: {dump_path}"
                )
                raise
            wait = RATE_LIMIT_BACKOFF_BASE * (2**attempt)
            logger.warning(
                f"[retry] {model} rate limited (429): {e!s}. "
                f"~{token_count} tokens ({token_method}). "
                f"Full request dumped to: {dump_path}. "
                f"Retrying in {wait}s "
                f"(attempt {attempt + 1}/{RATE_LIMIT_MAX_RETRIES})"
            )
            time.sleep(wait)

    # unreachable, but satisfies type checker
    raise RuntimeError("Exhausted rate limit retries")
def complete(
self,
messages: list[dict[str, Any]],
@@ -133,7 +278,7 @@ class LiteLLMProvider(LLMProvider):
kwargs["response_format"] = response_format
# Make the call
response = litellm.completion(**kwargs) # type: ignore[union-attr]
response = self._completion_with_rate_limit_retry(**kwargs)
# Extract content
content = response.choices[0].message.content or ""
@@ -189,7 +334,7 @@ class LiteLLMProvider(LLMProvider):
if self.api_base:
kwargs["api_base"] = self.api_base
response = litellm.completion(**kwargs) # type: ignore[union-attr]
response = self._completion_with_rate_limit_retry(**kwargs)
# Track tokens
usage = response.usage
+2 -3
View File
@@ -1,5 +1,4 @@
"""MCP servers for worker-bee."""
from framework.mcp.agent_builder_server import mcp as agent_builder_server
__all__ = ["agent_builder_server"]
# Don't auto-import servers to avoid double-import issues when running with -m
__all__ = []
+288 -10
View File
@@ -457,14 +457,27 @@ def _validate_tool_credentials(tools_list: list[str]) -> dict | None:
return None
try:
from aden_tools.credentials import CredentialManager
from aden_tools.credentials import CREDENTIAL_SPECS
cred_manager = CredentialManager()
missing_creds = cred_manager.get_missing_for_tools(tools_list)
store = _get_credential_store()
if missing_creds:
cred_errors = []
for cred_name, spec in missing_creds:
# Build tool -> credential mapping
tool_to_cred: dict[str, str] = {}
for cred_name, spec in CREDENTIAL_SPECS.items():
for tool_name in spec.tools:
tool_to_cred[tool_name] = cred_name
# Find missing credentials
cred_errors = []
checked: set[str] = set()
for tool_name in tools_list:
cred_name = tool_to_cred.get(tool_name)
if cred_name is None or cred_name in checked:
continue
checked.add(cred_name)
spec = CREDENTIAL_SPECS[cred_name]
cred_id = spec.credential_id or cred_name
if spec.required and not store.is_available(cred_id):
affected_tools = [t for t in tools_list if t in spec.tools]
cred_errors.append(
{
@@ -476,15 +489,16 @@ def _validate_tool_credentials(tools_list: list[str]) -> dict | None:
}
)
if cred_errors:
return {
"valid": False,
"errors": [f"Missing credentials for tools: {[e['env_var'] for e in cred_errors]}"],
"missing_credentials": cred_errors,
"action_required": "Add the credentials to your .env file and retry",
"action_required": "Store credentials via store_credential and retry",
"example": f"Add to .env:\n{cred_errors[0]['env_var']}=your_key_here",
"message": (
"Cannot add node: missing API credentials. "
"Add them to .env and retry this command."
"Store them via store_credential and retry this command."
),
}
except ImportError as e:
@@ -492,7 +506,7 @@ def _validate_tool_credentials(tools_list: list[str]) -> dict | None:
return {
"valid": True,
"warnings": [
f"⚠️ Credential validation SKIPPED: aden_tools not available ({e}). "
f"Credential validation SKIPPED: aden_tools not available ({e}). "
"Tools may fail at runtime if credentials are missing. "
"Add tools/src to PYTHONPATH to enable validation."
],
@@ -3229,9 +3243,273 @@ def load_exported_plan(
return json.dumps({"success": False, "error": str(e)})
# =============================================================================
# CREDENTIAL STORE TOOLS
# =============================================================================
def _get_credential_store():
    """Get a CredentialStore that checks encrypted files and env vars.

    Uses CompositeStorage: encrypted file storage (primary) with env var fallback.
    This ensures credentials stored via `store_credential` AND env vars are both found.
    """
    from framework.credentials import CredentialStore
    from framework.credentials.storage import CompositeStorage, EncryptedFileStorage, EnvVarStorage

    # Build env var mapping from CREDENTIAL_SPECS for the fallback
    env_mapping: dict[str, str] = {}
    try:
        from aden_tools.credentials import CREDENTIAL_SPECS

        for name, spec in CREDENTIAL_SPECS.items():
            # Specs may declare an explicit credential_id; otherwise the
            # spec's own name doubles as the credential id.
            cred_id = spec.credential_id or name
            env_mapping[cred_id] = spec.env_var
    except ImportError:
        # aden_tools is optional; without its specs the env-var fallback is
        # simply empty and only encrypted file storage is consulted.
        pass

    storage = CompositeStorage(
        primary=EncryptedFileStorage(),  # defaults to ~/.hive/credentials
        fallbacks=[EnvVarStorage(env_mapping=env_mapping)],
    )
    return CredentialStore(storage=storage)
@mcp.tool()
def check_missing_credentials(
    agent_path: Annotated[str, "Path to the exported agent directory (e.g., 'exports/my-agent')"],
) -> str:
    """
    Detect missing credentials for an agent by inspecting its tools and node types.

    Returns a list of missing credentials with env var names, descriptions, and help URLs,
    plus the relevant credentials that are already configured.
    Use this before running or testing an agent to identify what needs to be configured.
    """
    try:
        from aden_tools.credentials import CREDENTIAL_SPECS
        from framework.runner import AgentRunner

        runner = AgentRunner.load(agent_path)
        runner.validate()
        store = _get_credential_store()
        info = runner.info()
        node_types = list({node.node_type for node in runner.graph.nodes})

        # Build reverse mappings: tool/node_type -> credential name
        tool_to_cred: dict[str, str] = {}
        node_type_to_cred: dict[str, str] = {}
        for cred_name, spec in CREDENTIAL_SPECS.items():
            for tool_name in spec.tools:
                tool_to_cred[tool_name] = cred_name
            for nt in spec.node_types:
                node_type_to_cred[nt] = cred_name

        # Gather missing credentials (tools + node types), deduplicated.
        # `seen` ends up holding every credential relevant to this agent.
        seen: set[str] = set()
        all_missing = []
        for name_list, mapping in [
            (info.required_tools, tool_to_cred),
            (node_types, node_type_to_cred),
        ]:
            for item_name in name_list:
                cred_name = mapping.get(item_name)
                if cred_name is None or cred_name in seen:
                    continue
                seen.add(cred_name)
                spec = CREDENTIAL_SPECS[cred_name]
                cred_id = spec.credential_id or cred_name
                if spec.required and not store.is_available(cred_id):
                    all_missing.append(
                        {
                            "credential_name": cred_name,
                            "env_var": spec.env_var,
                            "description": spec.description,
                            "help_url": spec.help_url,
                            "tools": spec.tools,
                        }
                    )

        # Also report relevant credentials that are already configured.
        # BUG FIX: previously this skipped names IN `seen`, which excluded
        # exactly the credentials relevant to the agent — so "available" was
        # effectively always empty. Only credentials the agent needs (i.e.
        # those in `seen`) should be considered here.
        available = []
        for name, spec in CREDENTIAL_SPECS.items():
            if name not in seen:
                continue
            cred_id = spec.credential_id or name
            if store.is_available(cred_id):
                relevant_tools = [t for t in spec.tools if t in info.required_tools]
                relevant_nodes = [n for n in spec.node_types if n in node_types]
                if relevant_tools or relevant_nodes:
                    available.append(
                        {
                            "credential_name": name,
                            "env_var": spec.env_var,
                            "description": spec.description,
                            "status": "available",
                        }
                    )

        return json.dumps(
            {
                "agent": agent_path,
                "missing": all_missing,
                "available": available,
                "total_missing": len(all_missing),
                "ready": len(all_missing) == 0,
            },
            indent=2,
        )
    except Exception as e:
        return json.dumps({"error": str(e)})
@mcp.tool()
def store_credential(
    credential_name: Annotated[
        str, "Logical credential name (e.g., 'hubspot', 'brave_search', 'anthropic')"
    ],
    credential_value: Annotated[str, "The secret value to store (API key, token, etc.)"],
    key_name: Annotated[
        str, "Key name within the credential (e.g., 'api_key', 'access_token')"
    ] = "api_key",
    display_name: Annotated[str, "Human-readable name (e.g., 'HubSpot Access Token')"] = "",
) -> str:
    """
    Store a credential securely in the encrypted credential store at ~/.hive/credentials.

    Uses Fernet encryption (AES-128-CBC + HMAC). The encryption key is read
    from the HIVE_CREDENTIAL_KEY env var when set; otherwise a new key is
    generated by the storage layer.
    """
    try:
        from pydantic import SecretStr

        from framework.credentials import CredentialKey, CredentialObject

        store = _get_credential_store()
        if not display_name:
            # Derive a readable title from the logical name, e.g.
            # "brave_search" -> "Brave Search".
            display_name = credential_name.replace("_", " ").title()
        cred = CredentialObject(
            id=credential_name,
            name=display_name,
            keys={
                key_name: CredentialKey(
                    name=key_name,
                    # SecretStr prevents the value leaking into reprs/logs.
                    value=SecretStr(credential_value),
                )
            },
        )
        store.save_credential(cred)
        return json.dumps(
            {
                "success": True,
                "credential": credential_name,
                "key": key_name,
                "location": "~/.hive/credentials",
                "encrypted": True,
            }
        )
    except Exception as e:
        return json.dumps({"success": False, "error": str(e)})
@mcp.tool()
def list_stored_credentials() -> str:
    """
    List all credentials currently stored in the encrypted credential store.

    Returns credential IDs and metadata (never returns secret values).
    """
    try:
        store = _get_credential_store()
        credential_ids = store.list_credentials()
        credentials = []
        for cred_id in credential_ids:
            try:
                cred = store.get_credential(cred_id)
                credentials.append(
                    {
                        "id": cred.id,
                        "name": cred.name,
                        # Only key NAMES are exposed, never the secret values.
                        "keys": list(cred.keys.keys()),
                        "created_at": cred.created_at.isoformat() if cred.created_at else None,
                    }
                )
            except Exception:
                # A corrupt/undecryptable entry shouldn't hide the rest of
                # the list — report it with a placeholder instead.
                credentials.append({"id": cred_id, "error": "Could not load"})
        return json.dumps(
            {
                "count": len(credentials),
                "credentials": credentials,
                "location": "~/.hive/credentials",
            },
            indent=2,
        )
    except Exception as e:
        return json.dumps({"error": str(e)})
@mcp.tool()
def delete_stored_credential(
    credential_name: Annotated[str, "Logical credential name to delete (e.g., 'hubspot')"],
) -> str:
    """
    Delete a credential from the encrypted credential store.

    Returns a JSON payload indicating whether the credential existed and
    was removed.
    """
    try:
        store = _get_credential_store()
        was_deleted = store.delete_credential(credential_name)
        if was_deleted:
            message = f"Credential '{credential_name}' deleted"
        else:
            message = f"Credential '{credential_name}' not found"
        result = {
            "success": was_deleted,
            "credential": credential_name,
            "message": message,
        }
        return json.dumps(result)
    except Exception as e:
        return json.dumps({"success": False, "error": str(e)})
@mcp.tool()
def verify_credentials(
    agent_path: Annotated[str, "Path to the exported agent directory (e.g., 'exports/my-agent')"],
) -> str:
    """
    Verify that all required credentials are configured for an agent.
    Runs the full validation pipeline and reports pass/fail status.
    Use this after storing credentials to confirm the agent is ready to run.
    """
    try:
        # Imported lazily so the MCP server can start even if the runner
        # package is unavailable; failures surface as a JSON error below.
        from framework.runner import AgentRunner

        validation = AgentRunner.load(agent_path).validate()
        report = {
            "agent": agent_path,
            # Ready only when nothing required is missing.
            "ready": not validation.missing_credentials,
            "missing_credentials": validation.missing_credentials,
            "warnings": validation.warnings,
            "errors": validation.errors,
        }
        return json.dumps(report, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)})
# =============================================================================
# MAIN
# =============================================================================
if __name__ == "__main__":
    # Run the MCP server over stdio (the transport agent clients connect with).
    # A duplicate bare mcp.run() call was removed: mcp.run() blocks until the
    # server shuts down, so a second call would attempt to restart the server
    # after exit instead of being a no-op.
    mcp.run(transport="stdio")
+131 -23
View File
@@ -26,6 +26,41 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
# Configuration paths
HIVE_CONFIG_FILE = Path.home() / ".hive" / "configuration.json"
CLAUDE_CREDENTIALS_FILE = Path.home() / ".claude" / ".credentials.json"
def get_hive_config() -> dict[str, Any]:
"""Load hive configuration from ~/.hive/configuration.json."""
if not HIVE_CONFIG_FILE.exists():
return {}
try:
with open(HIVE_CONFIG_FILE) as f:
return json.load(f)
except (json.JSONDecodeError, OSError):
return {}
def get_claude_code_token() -> str | None:
"""
Get the OAuth token from Claude Code subscription.
Reads from ~/.claude/.credentials.json which is created by the
Claude Code CLI when users authenticate with their subscription.
Returns:
The access token if available, None otherwise.
"""
if not CLAUDE_CREDENTIALS_FILE.exists():
return None
try:
with open(CLAUDE_CREDENTIALS_FILE) as f:
creds = json.load(f)
return creds.get("claudeAiOauth", {}).get("accessToken")
except (json.JSONDecodeError, OSError):
return None
@dataclass
class AgentInfo:
@@ -430,15 +465,32 @@ class AgentRunner:
self._llm = MockLLMProvider(model=self.model)
else:
# Detect required API key from model name
api_key_env = self._get_api_key_env_var(self.model)
if api_key_env and os.environ.get(api_key_env):
from framework.llm.litellm import LiteLLMProvider
from framework.llm.litellm import LiteLLMProvider
self._llm = LiteLLMProvider(model=self.model)
elif api_key_env:
logger.warning(f"{api_key_env} not set. LLM calls will fail.")
logger.warning(f"Set it with: export {api_key_env}=your-api-key")
# Check if Claude Code subscription is configured
config = get_hive_config()
llm_config = config.get("llm", {})
use_claude_code = llm_config.get("use_claude_code_subscription", False)
api_key = None
if use_claude_code:
# Get OAuth token from Claude Code subscription
api_key = get_claude_code_token()
if not api_key:
print("Warning: Claude Code subscription configured but no token found.")
print("Run 'claude' to authenticate, then try again.")
if api_key:
# Use Claude Code subscription token
self._llm = LiteLLMProvider(model=self.model, api_key=api_key)
else:
# Fall back to environment variable
api_key_env = self._get_api_key_env_var(self.model)
if api_key_env and os.environ.get(api_key_env):
self._llm = LiteLLMProvider(model=self.model)
elif api_key_env:
print(f"Warning: {api_key_env} not set. LLM calls will fail.")
print(f"Set it with: export {api_key_env}=your-api-key")
# Get tools for executor/runtime
tools = list(self._tool_registry.get_tools().values())
@@ -534,6 +586,10 @@ class AgentRunner:
"""
Execute the agent with given input data.
Validates credentials before execution. If any required credentials
are missing, returns an error result with instructions on how to
provide them.
For single-entry-point agents, this is the standard execution path.
For multi-entry-point agents, you can optionally specify which entry point to use.
@@ -546,6 +602,20 @@ class AgentRunner:
Returns:
ExecutionResult with output, path, and metrics
"""
# Validate credentials before execution (fail-fast)
validation = self.validate()
if validation.missing_credentials:
error_lines = ["Cannot run agent: missing required credentials\n"]
for warning in validation.warnings:
if "Missing " in warning:
error_lines.append(f" {warning}")
error_lines.append("\nSet the required environment variables and re-run the agent.")
error_msg = "\n".join(error_lines)
return ExecutionResult(
success=False,
error=error_msg,
)
if self._uses_async_entry_points:
# Multi-entry-point mode: use AgentRuntime
return await self._run_with_agent_runtime(
@@ -826,28 +896,66 @@ class AgentRunner:
warnings.append(f"Missing tool implementations: {', '.join(missing_tools)}")
# Check credentials for required tools and node types
# Uses CredentialStore (encrypted files + env var fallback)
missing_credentials = []
try:
from aden_tools.credentials import CredentialManager
from aden_tools.credentials import CREDENTIAL_SPECS
cred_manager = CredentialManager()
from framework.credentials import CredentialStore
from framework.credentials.storage import (
CompositeStorage,
EncryptedFileStorage,
EnvVarStorage,
)
# Check tool credentials (Tier 2)
missing_creds = cred_manager.get_missing_for_tools(info.required_tools)
for _, spec in missing_creds:
missing_credentials.append(spec.env_var)
affected_tools = [t for t in info.required_tools if t in spec.tools]
tools_str = ", ".join(affected_tools)
warning_msg = f"Missing {spec.env_var} for {tools_str}"
if spec.help_url:
warning_msg += f"\n Get it at: {spec.help_url}"
warnings.append(warning_msg)
# Build env mapping for fallback
env_mapping = {
(spec.credential_id or name): spec.env_var
for name, spec in CREDENTIAL_SPECS.items()
}
storage = CompositeStorage(
primary=EncryptedFileStorage(),
fallbacks=[EnvVarStorage(env_mapping=env_mapping)],
)
store = CredentialStore(storage=storage)
# Build reverse mappings
tool_to_cred: dict[str, str] = {}
node_type_to_cred: dict[str, str] = {}
for cred_name, spec in CREDENTIAL_SPECS.items():
for tool_name in spec.tools:
tool_to_cred[tool_name] = cred_name
for nt in spec.node_types:
node_type_to_cred[nt] = cred_name
# Check tool credentials
checked: set[str] = set()
for tool_name in info.required_tools:
cred_name = tool_to_cred.get(tool_name)
if cred_name is None or cred_name in checked:
continue
checked.add(cred_name)
spec = CREDENTIAL_SPECS[cred_name]
cred_id = spec.credential_id or cred_name
if spec.required and not store.is_available(cred_id):
missing_credentials.append(spec.env_var)
affected_tools = [t for t in info.required_tools if t in spec.tools]
tools_str = ", ".join(affected_tools)
warning_msg = f"Missing {spec.env_var} for {tools_str}"
if spec.help_url:
warning_msg += f"\n Get it at: {spec.help_url}"
warnings.append(warning_msg)
# Check node type credentials (e.g., ANTHROPIC_API_KEY for LLM nodes)
node_types = list({node.node_type for node in self.graph.nodes})
missing_node_creds = cred_manager.get_missing_for_node_types(node_types)
for _, spec in missing_node_creds:
if spec.env_var not in missing_credentials: # Avoid duplicates
for nt in node_types:
cred_name = node_type_to_cred.get(nt)
if cred_name is None or cred_name in checked:
continue
checked.add(cred_name)
spec = CREDENTIAL_SPECS[cred_name]
cred_id = spec.credential_id or cred_name
if spec.required and not store.is_available(cred_id):
missing_credentials.append(spec.env_var)
affected_types = [t for t in node_types if t in spec.node_types]
types_str = ", ".join(affected_types)
+7 -7
View File
@@ -20,11 +20,11 @@ from {agent_module} import default_agent
def _get_api_key():
"""Get API key from CredentialManager (Anthropic) or environment (Any)."""
# 1. Try CredentialManager for Anthropic (the only provider it currently supports)
"""Get API key from CredentialStoreAdapter or environment."""
# 1. Try CredentialStoreAdapter for Anthropic
try:
from aden_tools.credentials import CredentialManager
creds = CredentialManager()
from aden_tools.credentials import CredentialStoreAdapter
creds = CredentialStoreAdapter.with_env_storage()
if creds.is_available("anthropic"):
return creds.get("anthropic")
except (ImportError, KeyError):
@@ -54,10 +54,10 @@ import pytest
def _get_api_key():
"""Get API key from CredentialManager (Anthropic) or environment (Any)."""
"""Get API key from CredentialStoreAdapter or environment."""
try:
from aden_tools.credentials import CredentialManager
creds = CredentialManager()
from aden_tools.credentials import CredentialStoreAdapter
creds = CredentialStoreAdapter.with_env_storage()
if creds.is_available("anthropic"):
return creds.get("anthropic")
except (ImportError, KeyError):
+1 -1
View File
@@ -98,4 +98,4 @@ class TestMCPPackageExports:
from framework.mcp import agent_builder_server
assert agent_builder_server is not None
assert isinstance(agent_builder_server, FastMCP)
assert isinstance(agent_builder_server.mcp, FastMCP)
+1 -3
View File
@@ -6,10 +6,9 @@ ToolResult instances. Historically, invalid JSON in ToolResult.content
could cause a json.JSONDecodeError and crash execution.
"""
from pathlib import Path
import textwrap
from pathlib import Path
from framework.llm.provider import Tool, ToolResult
from framework.runner.tool_registry import ToolRegistry
@@ -92,4 +91,3 @@ def test_discover_from_module_handles_empty_content(tmp_path):
result = registered.executor({})
assert isinstance(result, dict)
assert result == {}
+5 -5
View File
@@ -120,7 +120,7 @@ Response 400 Bad Request:
"error": "refresh_failed",
"message": "Refresh token is invalid or revoked. User must re-authorize.",
"requires_reauthorization": true,
"reauthorization_url": "https://hive.adenhq.com/integrations/hubspot/connect"
"reauthorization_url": "https://api.adenhq.com/integrations/hubspot/connect"
}
Response 429 Too Many Requests:
@@ -196,7 +196,7 @@ Response 200 OK (needs reauth):
"valid": false,
"reason": "refresh_token_revoked",
"requires_reauthorization": true,
"reauthorization_url": "https://hive.adenhq.com/integrations/hubspot/connect"
"reauthorization_url": "https://api.adenhq.com/integrations/hubspot/connect"
}
```
@@ -266,7 +266,7 @@ HTTP client for communicating with the Aden server.
@dataclass
class AdenClientConfig:
"""Configuration for Aden API client."""
base_url: str # e.g., "https://hive.adenhq.com"
base_url: str # e.g., "https://api.adenhq.com"
api_key: str | None = None # Loaded from ADEN_API_KEY env var if not provided
tenant_id: str | None = None # For multi-tenant
timeout: float = 30.0
@@ -322,7 +322,7 @@ class AdenSyncProvider(CredentialProvider):
Usage:
# API key loaded from ADEN_API_KEY env var by default
client = AdenCredentialClient(AdenClientConfig(
base_url="https://hive.adenhq.com",
base_url="https://api.adenhq.com",
))
provider = AdenSyncProvider(client=client)
@@ -573,7 +573,7 @@ provider = HubSpotOAuth2Provider(
# After: Delegate to Aden
provider = AdenSyncProvider(
client=AdenCredentialClient(AdenClientConfig(
base_url="https://hive.adenhq.com",
base_url="https://api.adenhq.com",
api_key="...",
))
)
+2 -2
View File
@@ -751,7 +751,7 @@ class CredentialStore:
Usage:
store = CredentialStore(
storage=EncryptedFileStorage("/path/to/creds"),
storage=EncryptedFileStorage("~/.hive/credentials"),
providers=[OAuth2Provider(), StaticProvider()]
)
@@ -1514,7 +1514,7 @@ from framework.credentials.storage import EncryptedFileStorage
# Create store with encrypted storage
store = CredentialStore(
storage=EncryptedFileStorage("/var/hive/credentials")
storage=EncryptedFileStorage("~/.hive/credentials")
)
# Tool specifies how to use credentials (bipartisan model)
+9 -9
View File
@@ -90,7 +90,7 @@ from core.framework.credentials import (
)
# Option 1: Encrypted file storage (recommended for production)
store = CredentialStore.with_encrypted_storage("/var/hive/credentials")
store = CredentialStore.with_encrypted_storage("~/.hive/credentials")
# Option 2: Environment variable storage (backward compatible)
store = CredentialStore.with_env_storage({
@@ -103,7 +103,7 @@ store = CredentialStore(storage=InMemoryStorage())
# Option 4: Custom storage configuration
storage = EncryptedFileStorage(
base_path="/var/hive/credentials",
base_path="~/.hive/credentials",
key_env_var="HIVE_CREDENTIAL_KEY" # Encryption key from env
)
store = CredentialStore(storage=storage)
@@ -253,18 +253,18 @@ Encrypts credentials at rest using Fernet (AES-128-CBC + HMAC).
from core.framework.credentials import EncryptedFileStorage
# The encryption key is read from HIVE_CREDENTIAL_KEY env var
storage = EncryptedFileStorage("/var/hive/credentials")
storage = EncryptedFileStorage("~/.hive/credentials")
# Or provide the key directly (32-byte Fernet key)
storage = EncryptedFileStorage(
base_path="/var/hive/credentials",
base_path="~/.hive/credentials",
encryption_key=b"your-32-byte-fernet-key-here..."
)
```
**Directory structure:**
```
/var/hive/credentials/
~/.hive/credentials/
├── credentials/
│ ├── brave_search.enc # Encrypted credential JSON
│ └── github_oauth.enc
@@ -305,7 +305,7 @@ Combines multiple storage backends with fallback.
from core.framework.credentials import CompositeStorage, EncryptedFileStorage, EnvVarStorage
storage = CompositeStorage(
primary=EncryptedFileStorage("/var/hive/credentials"),
primary=EncryptedFileStorage("~/.hive/credentials"),
fallbacks=[
EnvVarStorage({"brave_search": "BRAVE_SEARCH_API_KEY"})
]
@@ -762,7 +762,7 @@ credentials = CredentialManager()
from aden_tools.credentials import CredentialStoreAdapter
from core.framework.credentials import CredentialStore
store = CredentialStore.with_encrypted_storage("/var/hive/credentials")
store = CredentialStore.with_encrypted_storage("~/.hive/credentials")
credentials = CredentialStoreAdapter(store)
# All existing code works unchanged
@@ -790,7 +790,7 @@ from core.framework.credentials import CredentialStore, CompositeStorage, Encryp
# Use encrypted storage as primary, env vars as fallback
storage = CompositeStorage(
primary=EncryptedFileStorage("/var/hive/credentials"),
primary=EncryptedFileStorage("~/.hive/credentials"),
fallbacks=[EnvVarStorage({"brave_search": "BRAVE_SEARCH_API_KEY"})]
)
@@ -809,7 +809,7 @@ credentials = CredentialStoreAdapter(store)
```python
# Always use EncryptedFileStorage for production
store = CredentialStore.with_encrypted_storage("/var/hive/credentials")
store = CredentialStore.with_encrypted_storage("~/.hive/credentials")
```
### 2. Protect the Encryption Key
+423 -118
View File
@@ -1,11 +1,12 @@
#!/bin/bash
#
# quickstart.sh - Complete setup for Aden Agent Framework skills
# quickstart.sh - Interactive onboarding for Aden Agent Framework
#
# This script:
# 1. Installs Python dependencies (framework, aden_tools, MCP)
# 2. Installs Claude Code skills for building and testing agents
# 3. Verifies the setup is ready to use
# An interactive setup wizard that:
# 1. Installs Python dependencies
# 2. Installs Playwright browser for web scraping
# 3. Helps configure LLM API keys
# 4. Verifies everything works
#
set -e
@@ -15,31 +16,91 @@ RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
BOLD='\033[1m'
DIM='\033[2m'
NC='\033[0m' # No Color
# Get the directory where this script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Claude Code skills directory
CLAUDE_SKILLS_DIR="$HOME/.claude/skills"
# Helper function for prompts
prompt_yes_no() {
local prompt="$1"
local default="${2:-y}"
local response
if [ "$default" = "y" ]; then
prompt="$prompt [Y/n] "
else
prompt="$prompt [y/N] "
fi
read -r -p "$prompt" response
response="${response:-$default}"
[[ "$response" =~ ^[Yy] ]]
}
# Helper function for choice prompts
prompt_choice() {
local prompt="$1"
shift
local options=("$@")
local i=1
echo ""
echo -e "${BOLD}$prompt${NC}"
for opt in "${options[@]}"; do
echo -e " ${CYAN}$i)${NC} $opt"
((i++))
done
echo ""
local choice
while true; do
read -r -p "Enter choice (1-${#options[@]}): " choice
if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le "${#options[@]}" ]; then
return $((choice - 1))
fi
echo -e "${RED}Invalid choice. Please enter 1-${#options[@]}${NC}"
done
}
clear
echo ""
echo "=================================================="
echo " Aden Agent Framework - Complete Setup"
echo "=================================================="
echo -e "${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}"
echo ""
echo -e "${BOLD} A D E N H I V E${NC}"
echo ""
echo -e "${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}${DIM}${NC}${YELLOW}${NC}"
echo ""
echo -e "${DIM} Goal-driven AI agent framework${NC}"
echo ""
echo "This wizard will help you set up everything you need"
echo "to build and run goal-driven AI agents."
echo ""
if ! prompt_yes_no "Ready to begin?"; then
echo ""
echo "No problem! Run this script again when you're ready."
exit 0
fi
echo ""
# ============================================================
# Step 1: Check Python Prerequisites
# Step 1: Check Python
# ============================================================
echo -e "${BLUE}Step 1: Checking Python prerequisites...${NC}"
echo -e "${YELLOW}${NC} ${BLUE}${BOLD}Step 1: Checking Python...${NC}"
echo ""
# Check for Python
if ! command -v python &> /dev/null && ! command -v python3 &> /dev/null; then
echo -e "${RED}Error: Python is not installed.${NC}"
echo -e "${RED}Python is not installed.${NC}"
echo ""
echo "Please install Python 3.11+ from https://python.org"
echo "Then run this script again."
exit 1
fi
@@ -69,25 +130,14 @@ PYTHON_VERSION=$($PYTHON_CMD -c 'import sys; print(f"{sys.version_info.major}.{s
PYTHON_MAJOR=$($PYTHON_CMD -c 'import sys; print(sys.version_info.major)')
PYTHON_MINOR=$($PYTHON_CMD -c 'import sys; print(sys.version_info.minor)')
echo -e " Detected Python: ${GREEN}$PYTHON_VERSION${NC} (${PYTHON_CMD})"
if [ "$PYTHON_MAJOR" -lt 3 ] || ([ "$PYTHON_MAJOR" -eq 3 ] && [ "$PYTHON_MINOR" -lt 11 ]); then
echo -e "${RED}Error: Python 3.11+ is required (found $PYTHON_VERSION via ${PYTHON_CMD})${NC}"
echo "Please upgrade your Python installation or ensure python3.11+ is on your PATH"
echo -e "${RED}Python 3.11+ is required (found $PYTHON_VERSION)${NC}"
echo ""
echo "Please upgrade your Python installation and run this script again."
exit 1
fi
echo -e "${GREEN} ✓ Python version OK${NC}"
echo ""
# Check for pip
if ! $PYTHON_CMD -m pip --version &> /dev/null; then
echo -e "${RED}Error: pip is not installed${NC}"
echo "Please install pip for Python $PYTHON_VERSION"
exit 1
fi
echo -e "${GREEN} ✓ pip detected${NC}"
echo -e "${GREEN}${NC} Python $PYTHON_VERSION"
echo ""
# Check for uv (install automatically if missing)
@@ -118,11 +168,19 @@ echo ""
# Step 2: Install Python Packages
# ============================================================
echo -e "${BLUE}Step 2: Installing Python packages...${NC}"
echo -e "${YELLOW}${NC} ${BLUE}${BOLD}Step 2: Installing packages...${NC}"
echo ""
echo -e "${DIM}This may take a minute...${NC}"
echo ""
# Upgrade pip, setuptools, and wheel
echo -n " Upgrading pip... "
$PYTHON_CMD -m pip install --upgrade pip setuptools wheel > /dev/null 2>&1
echo -e "${GREEN}ok${NC}"
# Install framework package from core/
echo " Installing framework package from core/..."
echo -n " Installing framework... "
cd "$SCRIPT_DIR/core"
if [ -f "pyproject.toml" ]; then
@@ -133,12 +191,12 @@ if [ -f "pyproject.toml" ]; then
echo -e "${YELLOW} ⚠ framework installation had issues (may be OK)${NC}"
fi
else
echo -e "${RED} ✗ No pyproject.toml in core/${NC}"
echo -e "${RED}failed (no pyproject.toml)${NC}"
exit 1
fi
# Install aden_tools package from tools/
echo " Installing aden_tools package from tools/..."
echo -n " Installing tools... "
cd "$SCRIPT_DIR/tools"
if [ -f "pyproject.toml" ]; then
@@ -150,10 +208,47 @@ if [ -f "pyproject.toml" ]; then
exit 1
fi
else
echo -e "${RED} ✗ No pyproject.toml in tools/${NC}"
echo -e "${RED}failed${NC}"
exit 1
fi
# Install MCP dependencies
echo -n " Installing MCP... "
$PYTHON_CMD -m pip install mcp fastmcp > /dev/null 2>&1
echo -e "${GREEN}ok${NC}"
# Fix openai version compatibility
echo -n " Checking openai... "
$PYTHON_CMD -m pip install "openai>=1.0.0" > /dev/null 2>&1
echo -e "${GREEN}ok${NC}"
# Install click for CLI
echo -n " Installing CLI tools... "
$PYTHON_CMD -m pip install click > /dev/null 2>&1
echo -e "${GREEN}ok${NC}"
# Install Playwright browser
echo -n " Installing Playwright browser... "
if $PYTHON_CMD -c "import playwright" > /dev/null 2>&1; then
if $PYTHON_CMD -m playwright install chromium > /dev/null 2>&1; then
echo -e "${GREEN}ok${NC}"
else
echo -e "${YELLOW}${NC}"
fi
else
echo -e "${YELLOW}${NC}"
fi
cd "$SCRIPT_DIR"
echo ""
echo -e "${GREEN}${NC} All packages installed"
echo ""
# ============================================================
# Step 3: Configure LLM API Key
# ============================================================
echo -e "${YELLOW}${NC} ${BLUE}${BOLD}Step 3: Configuring LLM provider...${NC}"
# Install MCP dependencies (in tools venv)
echo " Installing MCP dependencies..."
TOOLS_PYTHON="$SCRIPT_DIR/tools/.venv/bin/python"
@@ -249,120 +344,330 @@ echo ""
echo -e "${BLUE}Step 4: Verifying Claude Code skills...${NC}"
echo ""
# Check if .claude/skills exists in this repo
if [ ! -d "$SCRIPT_DIR/.claude/skills" ]; then
echo -e "${RED}Error: Skills directory not found at $SCRIPT_DIR/.claude/skills${NC}"
exit 1
# Define supported providers (env_var -> display_name, litellm_provider, default_model)
declare -A PROVIDER_NAMES=(
["ANTHROPIC_API_KEY"]="Anthropic (Claude)"
["OPENAI_API_KEY"]="OpenAI (GPT)"
["GEMINI_API_KEY"]="Google Gemini"
["GOOGLE_API_KEY"]="Google AI"
["GROQ_API_KEY"]="Groq"
["CEREBRAS_API_KEY"]="Cerebras"
["MISTRAL_API_KEY"]="Mistral"
["TOGETHER_API_KEY"]="Together AI"
["DEEPSEEK_API_KEY"]="DeepSeek"
)
declare -A PROVIDER_IDS=(
["ANTHROPIC_API_KEY"]="anthropic"
["OPENAI_API_KEY"]="openai"
["GEMINI_API_KEY"]="gemini"
["GOOGLE_API_KEY"]="google"
["GROQ_API_KEY"]="groq"
["CEREBRAS_API_KEY"]="cerebras"
["MISTRAL_API_KEY"]="mistral"
["TOGETHER_API_KEY"]="together"
["DEEPSEEK_API_KEY"]="deepseek"
)
declare -A DEFAULT_MODELS=(
["anthropic"]="claude-sonnet-4-5-20250929"
["openai"]="gpt-4o"
["gemini"]="gemini-3.0-flash-preview"
["groq"]="moonshotai/kimi-k2-instruct-0905"
["cerebras"]="zai-glm-4.7"
["mistral"]="mistral-large-latest"
["together_ai"]="meta-llama/Llama-3.3-70B-Instruct-Turbo"
["deepseek"]="deepseek-chat"
)
# Configuration directory
HIVE_CONFIG_DIR="$HOME/.hive"
HIVE_CONFIG_FILE="$HIVE_CONFIG_DIR/configuration.json"
# Function to save configuration
save_configuration() {
local provider_id="$1"
local env_var="$2"
local model="${DEFAULT_MODELS[$provider_id]}"
mkdir -p "$HIVE_CONFIG_DIR"
$PYTHON_CMD -c "
import json
config = {
'llm': {
'provider': '$provider_id',
'model': '$model',
'api_key_env_var': '$env_var'
},
'created_at': '$(date -Iseconds)'
}
with open('$HIVE_CONFIG_FILE', 'w') as f:
json.dump(config, f, indent=2)
print(json.dumps(config, indent=2))
" 2>/dev/null
}
# Check for .env files
if [ -f "$SCRIPT_DIR/.env" ]; then
set -a
source "$SCRIPT_DIR/.env" 2>/dev/null || true
set +a
fi
# Verify all 5 agent-related skills exist locally
SKILLS=("building-agents-core" "building-agents-construction" "building-agents-patterns" "testing-agent" "agent-workflow")
for skill in "${SKILLS[@]}"; do
if [ -d "$SCRIPT_DIR/.claude/skills/$skill" ]; then
echo -e "${GREEN} ✓ Found: $skill${NC}"
else
echo -e "${RED} ✗ Not found: $skill${NC}"
exit 1
if [ -f "$HOME/.env" ]; then
set -a
source "$HOME/.env" 2>/dev/null || true
set +a
fi
# Find all available API keys
FOUND_PROVIDERS=() # Display names for UI
FOUND_ENV_VARS=() # Corresponding env var names
SELECTED_PROVIDER_ID="" # Will hold the chosen provider ID
SELECTED_ENV_VAR="" # Will hold the chosen env var
for env_var in "${!PROVIDER_NAMES[@]}"; do
value="${!env_var}"
if [ -n "$value" ]; then
FOUND_PROVIDERS+=("${PROVIDER_NAMES[$env_var]}")
FOUND_ENV_VARS+=("$env_var")
fi
done
if [ ${#FOUND_PROVIDERS[@]} -gt 0 ]; then
echo "Found API keys:"
echo ""
for provider in "${FOUND_PROVIDERS[@]}"; do
echo -e " ${GREEN}${NC} $provider"
done
echo ""
if [ ${#FOUND_PROVIDERS[@]} -eq 1 ]; then
# Only one provider found, use it automatically
if prompt_yes_no "Use this key?"; then
SELECTED_ENV_VAR="${FOUND_ENV_VARS[0]}"
SELECTED_PROVIDER_ID="${PROVIDER_IDS[$SELECTED_ENV_VAR]}"
echo ""
echo -e "${GREEN}${NC} Using ${FOUND_PROVIDERS[0]}"
fi
else
# Multiple providers found, let user pick one
echo -e "${BOLD}Select your default LLM provider:${NC}"
echo ""
# Build choice menu from found providers
i=1
for provider in "${FOUND_PROVIDERS[@]}"; do
echo -e " ${CYAN}$i)${NC} $provider"
((i++))
done
echo ""
while true; do
read -r -p "Enter choice (1-${#FOUND_PROVIDERS[@]}): " choice
if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le "${#FOUND_PROVIDERS[@]}" ]; then
idx=$((choice - 1))
SELECTED_ENV_VAR="${FOUND_ENV_VARS[$idx]}"
SELECTED_PROVIDER_ID="${PROVIDER_IDS[$SELECTED_ENV_VAR]}"
echo ""
echo -e "${GREEN}${NC} Selected: ${FOUND_PROVIDERS[$idx]}"
break
fi
echo -e "${RED}Invalid choice. Please enter 1-${#FOUND_PROVIDERS[@]}${NC}"
done
fi
fi
if [ -z "$SELECTED_PROVIDER_ID" ]; then
echo "No API keys found. Let's configure one."
echo ""
prompt_choice "Select your LLM provider:" \
"Anthropic (Claude) - Recommended" \
"OpenAI (GPT)" \
"Google Gemini - Free tier available" \
"Groq - Fast, free tier" \
"Cerebras - Fast, free tier" \
"Skip for now"
choice=$?
case $choice in
0)
SELECTED_ENV_VAR="ANTHROPIC_API_KEY"
SELECTED_PROVIDER_ID="anthropic"
PROVIDER_NAME="Anthropic"
SIGNUP_URL="https://console.anthropic.com/settings/keys"
;;
1)
SELECTED_ENV_VAR="OPENAI_API_KEY"
SELECTED_PROVIDER_ID="openai"
PROVIDER_NAME="OpenAI"
SIGNUP_URL="https://platform.openai.com/api-keys"
;;
2)
SELECTED_ENV_VAR="GEMINI_API_KEY"
SELECTED_PROVIDER_ID="gemini"
PROVIDER_NAME="Google Gemini"
SIGNUP_URL="https://aistudio.google.com/apikey"
;;
3)
SELECTED_ENV_VAR="GROQ_API_KEY"
SELECTED_PROVIDER_ID="groq"
PROVIDER_NAME="Groq"
SIGNUP_URL="https://console.groq.com/keys"
;;
4)
SELECTED_ENV_VAR="CEREBRAS_API_KEY"
SELECTED_PROVIDER_ID="cerebras"
PROVIDER_NAME="Cerebras"
SIGNUP_URL="https://cloud.cerebras.ai/"
;;
5)
echo ""
echo -e "${YELLOW}Skipped.${NC} Add your API key later:"
echo ""
echo -e " ${CYAN}echo 'ANTHROPIC_API_KEY=your-key' >> .env${NC}"
echo ""
SELECTED_ENV_VAR=""
SELECTED_PROVIDER_ID=""
;;
esac
if [ -n "$SELECTED_ENV_VAR" ] && [ -z "${!SELECTED_ENV_VAR}" ]; then
echo ""
echo -e "Get your API key from: ${CYAN}$SIGNUP_URL${NC}"
echo ""
read -r -p "Paste your $PROVIDER_NAME API key (or press Enter to skip): " API_KEY
if [ -n "$API_KEY" ]; then
# Save to .env
echo "" >> "$SCRIPT_DIR/.env"
echo "$SELECTED_ENV_VAR=$API_KEY" >> "$SCRIPT_DIR/.env"
export "$SELECTED_ENV_VAR=$API_KEY"
echo ""
echo -e "${GREEN}${NC} API key saved to .env"
else
echo ""
echo -e "${YELLOW}Skipped.${NC} Add your API key to .env when ready."
SELECTED_ENV_VAR=""
SELECTED_PROVIDER_ID=""
fi
fi
fi
# Save configuration if a provider was selected
if [ -n "$SELECTED_PROVIDER_ID" ]; then
echo ""
echo -n " Saving configuration... "
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" > /dev/null
echo -e "${GREEN}${NC}"
echo -e " ${DIM}~/.hive/configuration.json${NC}"
fi
echo ""
# ============================================================
# Step 5: Verify MCP Configuration
# Step 4: Verify Setup
# ============================================================
echo -e "${BLUE}Step 5: Verifying MCP configuration...${NC}"
echo -e "${YELLOW}${NC} ${BLUE}${BOLD}Step 4: Verifying installation...${NC}"
echo ""
ERRORS=0
# Test imports
echo -n " ⬡ framework... "
if $PYTHON_CMD -c "import framework" > /dev/null 2>&1; then
echo -e "${GREEN}ok${NC}"
else
echo -e "${RED}failed${NC}"
ERRORS=$((ERRORS + 1))
fi
echo -n " ⬡ aden_tools... "
if $PYTHON_CMD -c "import aden_tools" > /dev/null 2>&1; then
echo -e "${GREEN}ok${NC}"
else
echo -e "${RED}failed${NC}"
ERRORS=$((ERRORS + 1))
fi
echo -n " ⬡ litellm... "
if $PYTHON_CMD -c "import litellm" > /dev/null 2>&1; then
echo -e "${GREEN}ok${NC}"
else
echo -e "${YELLOW}--${NC}"
fi
echo -n " ⬡ MCP config... "
if [ -f "$SCRIPT_DIR/.mcp.json" ]; then
echo -e "${GREEN} ✓ .mcp.json found at project root${NC}"
echo ""
echo " MCP servers configured:"
$PYTHON_CMD -c "
import json
with open('$SCRIPT_DIR/.mcp.json') as f:
config = json.load(f)
for name in config.get('mcpServers', {}):
print(f' - {name}')
" 2>/dev/null || echo " (could not parse config)"
echo -e "${GREEN}ok${NC}"
else
echo -e "${YELLOW} ⚠ No .mcp.json found at project root${NC}"
echo " Claude Code will not have access to MCP tools"
echo -e "${YELLOW}--${NC}"
fi
echo -n " ⬡ skills... "
if [ -d "$SCRIPT_DIR/.claude/skills" ]; then
SKILL_COUNT=$(ls -1d "$SCRIPT_DIR/.claude/skills"/*/ 2>/dev/null | wc -l)
echo -e "${GREEN}${SKILL_COUNT} found${NC}"
else
echo -e "${YELLOW}--${NC}"
fi
echo ""
# ============================================================
# Step 6: Check API Key
# ============================================================
echo -e "${BLUE}Step 6: Checking API key...${NC}"
echo ""
# Check using CredentialManager (preferred)
API_KEY_AVAILABLE=$($PYTHON_CMD -c "
from aden_tools.credentials import CredentialManager
creds = CredentialManager()
print('yes' if creds.is_available('anthropic') else 'no')
" 2>/dev/null || echo "no")
if [ "$API_KEY_AVAILABLE" = "yes" ]; then
echo -e "${GREEN} ✓ ANTHROPIC_API_KEY is available${NC}"
elif [ -n "$ANTHROPIC_API_KEY" ]; then
echo -e "${GREEN} ✓ ANTHROPIC_API_KEY is set in environment${NC}"
else
echo -e "${YELLOW} ⚠ ANTHROPIC_API_KEY not found${NC}"
echo ""
echo " For real agent testing, you'll need to set your API key:"
echo " ${BLUE}export ANTHROPIC_API_KEY='your-key-here'${NC}"
echo ""
echo " Or add it to your .env file or credential manager."
if [ $ERRORS -gt 0 ]; then
echo -e "${RED}Setup failed with $ERRORS error(s).${NC}"
echo "Please check the errors above and try again."
exit 1
fi
echo ""
# ============================================================
# Step 7: Success Summary
# Success!
# ============================================================
echo "=================================================="
echo -e "${GREEN} ✓ Setup Complete!${NC}"
echo "=================================================="
clear
echo ""
echo "Installed Python packages:"
echo " • framework (core agent runtime)"
echo " • aden_tools (tools and MCP servers)"
echo " • MCP dependencies (mcp, fastmcp)"
echo -e "${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}"
echo ""
echo "Available Claude Code skills (in project directory):"
echo " • /building-agents-core - Fundamental concepts"
echo " • /building-agents-construction - Step-by-step build guide"
echo " • /building-agents-patterns - Best practices"
echo " • /testing-agent - Test and validate agents"
echo " • /agent-workflow - Complete workflow"
echo -e "${GREEN}${BOLD} ADEN HIVE — READY${NC}"
echo ""
echo "Usage:"
echo " 1. Open Claude Code in this directory:"
echo " cd $SCRIPT_DIR && claude"
echo -e "${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}${DIM}${NC}${GREEN}${NC}"
echo ""
echo " 2. Build a new agent:"
echo " /building-agents-construction"
echo -e "Your environment is configured for building AI agents."
echo ""
echo " 3. Test an existing agent:"
echo " /testing-agent"
# Show configured provider
if [ -n "$SELECTED_PROVIDER_ID" ]; then
SELECTED_MODEL="${DEFAULT_MODELS[$SELECTED_PROVIDER_ID]}"
echo -e "${BOLD}Default LLM:${NC}"
echo -e " ${CYAN}$SELECTED_PROVIDER_ID${NC}${DIM}$SELECTED_MODEL${NC}"
echo ""
fi
echo -e "${BOLD}Quick Start:${NC}"
echo ""
echo " 4. Or use the complete workflow:"
echo " /agent-workflow"
echo -e " 1. Open Claude Code in this directory:"
echo -e " ${CYAN}claude${NC}"
echo ""
echo "MCP Tools available (when running from this directory):"
echo " • mcp__agent-builder__create_session"
echo " • mcp__agent-builder__set_goal"
echo " • mcp__agent-builder__add_node"
echo " • mcp__agent-builder__run_tests"
echo " • ... and more"
echo -e " 2. Build a new agent:"
echo -e " ${CYAN}/agent-workflow${NC}"
echo ""
echo "Documentation:"
echo " • Skills: $SCRIPT_DIR/.claude/skills/"
echo " • Examples: $SCRIPT_DIR/exports/"
echo -e " 3. Test an existing agent:"
echo -e " ${CYAN}/testing-agent${NC}"
echo ""
echo -e "${BOLD}Skills:${NC}"
if [ -d "$SCRIPT_DIR/.claude/skills" ]; then
for skill_dir in "$SCRIPT_DIR/.claude/skills"/*/; do
skill_name=$(basename "$skill_dir")
echo -e "${CYAN}/$skill_name${NC}"
done
fi
echo ""
echo -e "${BOLD}Examples:${NC} ${CYAN}exports/${NC}"
echo ""
echo -e "${DIM}Run ./quickstart.sh again to reconfigure.${NC}"
echo ""
+19
View File
@@ -169,6 +169,25 @@ else
fi
echo ""
# Install Playwright browser for web scraping
echo "=================================================="
echo "Installing Playwright Browser"
echo "=================================================="
echo ""
if $PYTHON_CMD -c "import playwright" > /dev/null 2>&1; then
echo "Installing Chromium browser for web scraping..."
if $PYTHON_CMD -m playwright install chromium > /dev/null 2>&1; then
echo -e "${GREEN}${NC} Playwright Chromium installed"
else
echo -e "${YELLOW}${NC} Playwright browser install failed (web_scrape tool may not work)"
echo " Run manually: python -m playwright install chromium"
fi
else
echo -e "${YELLOW}${NC} Playwright not found, skipping browser install"
fi
echo ""
# Fix openai version compatibility with litellm
echo "=================================================="
echo "Fixing Package Compatibility"
+3
View File
@@ -14,6 +14,9 @@ COPY mcp_server.py ./
# Install package with all dependencies
RUN pip install --no-cache-dir -e .
# Install Playwright Chromium browser and system dependencies
RUN playwright install chromium --with-deps
# Create non-root user for security
RUN useradd -m -u 1001 appuser
+14 -4
View File
@@ -65,11 +65,21 @@ from fastmcp import FastMCP # noqa: E402
from starlette.requests import Request # noqa: E402
from starlette.responses import PlainTextResponse # noqa: E402
from aden_tools.credentials import CredentialError, CredentialManager # noqa: E402
from aden_tools.credentials import CredentialError, CredentialStoreAdapter # noqa: E402
from aden_tools.tools import register_all_tools # noqa: E402
# Create credential manager
credentials = CredentialManager()
# Create credential store with access to both env vars AND encrypted store
# This allows using Aden-synced credentials from ~/.hive/credentials
try:
from framework.credentials import CredentialStore
store = CredentialStore.with_encrypted_storage() # ~/.hive/credentials
credentials = CredentialStoreAdapter(store)
logger.info("Using CredentialStoreAdapter with encrypted storage")
except Exception as e:
# Fall back to env-only adapter if encrypted storage fails
credentials = CredentialStoreAdapter.with_env_storage()
logger.warning(f"Falling back to env-only CredentialStoreAdapter: {e}")
# Tier 1: Validate startup-required credentials (if any)
try:
@@ -81,7 +91,7 @@ except CredentialError as e:
mcp = FastMCP("tools")
# Register all tools with the MCP server, passing credential manager
# Register all tools with the MCP server, passing credential store
tools = register_all_tools(mcp, credentials=credentials)
# Only print to stdout in HTTP mode (STDIO mode requires clean stdout for JSON-RPC)
if "--stdio" not in sys.argv:
+11 -10
View File
@@ -17,16 +17,17 @@ classifiers = [
]
dependencies = [
"pydantic>=2.0.0",
"httpx>=0.27.0",
"beautifulsoup4>=4.12.0",
"pypdf>=4.0.0",
"pandas>=2.0.0",
"jsonpath-ng>=1.6.0",
"fastmcp>=2.0.0",
"diff-match-patch>=20230430",
"python-dotenv>=1.0.0",
"litellm>=1.81.0",
"pydantic>=2.0.0",
"httpx>=0.27.0",
"beautifulsoup4>=4.12.0",
"pypdf>=4.0.0",
"pandas>=2.0.0",
"jsonpath-ng>=1.6.0",
"fastmcp>=2.0.0",
"diff-match-patch>=20230430",
"python-dotenv>=1.0.0",
"playwright>=1.40.0",
"playwright-stealth>=1.0.5",
]
[project.optional-dependencies]
+13
View File
@@ -0,0 +1,13 @@
# MCP Server
fastmcp
# Tool dependencies
diff-match-patch
pypdf
beautifulsoup4
lxml
playwright
playwright-stealth
requests
# Note: After installing, run `playwright install` to download browser binaries
+17 -9
View File
@@ -7,38 +7,46 @@ external systems, process data, and perform actions.
Usage:
from fastmcp import FastMCP
from aden_tools.tools import register_all_tools
from aden_tools.credentials import CredentialManager
from aden_tools.credentials import CredentialStoreAdapter
mcp = FastMCP("my-server")
credentials = CredentialManager()
credentials = CredentialStoreAdapter.with_env_storage()
register_all_tools(mcp, credentials=credentials)
"""
__version__ = "0.1.0"
# Utilities
# Credential management
# Credential management (no external dependencies)
from .credentials import (
CREDENTIAL_SPECS,
CredentialError,
CredentialManager,
CredentialSpec,
CredentialStoreAdapter,
)
# MCP registration
from .tools import register_all_tools
# Utilities (no external dependencies)
from .utils import get_env_var
def __getattr__(name: str):
"""Lazy import for tools that require fastmcp."""
if name == "register_all_tools":
from .tools import register_all_tools
return register_all_tools
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
__all__ = [
# Version
"__version__",
# Utilities
"get_env_var",
# Credentials
"CredentialManager",
"CredentialStoreAdapter",
"CredentialSpec",
"CredentialError",
"CREDENTIAL_SPECS",
# MCP registration
# MCP registration (lazy loaded)
"register_all_tools",
]
+37 -18
View File
@@ -8,9 +8,15 @@ Philosophy: Google Strictness + Apple UX
- Guided error messages with clear next steps
Usage:
# In mcp_server.py (startup validation)
credentials = CredentialManager()
credentials.validate_startup()
from aden_tools.credentials import CredentialStoreAdapter
from core.framework.credentials import CredentialStore
# With encrypted storage (production)
store = CredentialStore.with_encrypted_storage() # defaults to ~/.hive/credentials
credentials = CredentialStoreAdapter(store)
# With env vars only (simple setup)
credentials = CredentialStoreAdapter.with_env_storage()
# In agent runner (validate at agent load time)
credentials.validate_for_tools(["web_search", "file_read"])
@@ -19,19 +25,9 @@ Usage:
api_key = credentials.get("brave_search")
# In tests
creds = CredentialManager.for_testing({"brave_search": "test-key"})
creds = CredentialStoreAdapter.for_testing({"brave_search": "test-key"})
For advanced usage with the new credential store:
from aden_tools.credentials import CredentialStoreAdapter
from core.framework.credentials import CredentialStore
store = CredentialStore.with_encrypted_storage("/var/hive/credentials")
credentials = CredentialStoreAdapter(store)
# Existing API works unchanged
api_key = credentials.get("brave_search")
# New features available
# Template resolution
headers = credentials.resolve_headers({
"Authorization": "Bearer {{github_oauth.access_token}}"
})
@@ -39,6 +35,7 @@ For advanced usage with the new credential store:
Credential categories:
- llm.py: LLM provider credentials (anthropic, openai, etc.)
- search.py: Search tool credentials (brave_search, google_search, etc.)
- integrations.py: Third-party integrations (hubspot, etc.)
To add a new credential:
1. Find the appropriate category file (or create a new one)
@@ -46,27 +43,49 @@ To add a new credential:
3. If new category, import and merge it in this __init__.py
"""
from .base import CredentialError, CredentialManager, CredentialSpec
from .base import CredentialError, CredentialSpec
from .browser import get_aden_auth_url, get_aden_setup_url, open_browser
from .health_check import HealthCheckResult, check_credential_health
from .integrations import INTEGRATION_CREDENTIALS
from .llm import LLM_CREDENTIALS
from .search import SEARCH_CREDENTIALS
from .shell_config import (
add_env_var_to_shell_config,
detect_shell,
get_shell_config_path,
get_shell_source_command,
)
from .store_adapter import CredentialStoreAdapter
# Merged registry of all credentials
CREDENTIAL_SPECS = {
**LLM_CREDENTIALS,
**SEARCH_CREDENTIALS,
**INTEGRATION_CREDENTIALS,
}
__all__ = [
# Core classes
"CredentialSpec",
"CredentialManager",
"CredentialError",
# New credential store adapter
# Credential store adapter (replaces deprecated CredentialManager)
"CredentialStoreAdapter",
# Health check utilities
"HealthCheckResult",
"check_credential_health",
# Browser utilities for OAuth2 flows
"open_browser",
"get_aden_auth_url",
"get_aden_setup_url",
# Shell config utilities
"detect_shell",
"get_shell_config_path",
"get_shell_source_command",
"add_env_var_to_shell_config",
# Merged registry
"CREDENTIAL_SPECS",
# Category registries (for direct access if needed)
"LLM_CREDENTIALS",
"SEARCH_CREDENTIALS",
"INTEGRATION_CREDENTIALS",
]
+87
View File
@@ -43,6 +43,33 @@ class CredentialSpec:
description: str = ""
"""Human-readable description of what this credential is for"""
# Auth method support
aden_supported: bool = False
"""Whether this credential can be obtained via Aden OAuth2 flow"""
aden_provider_name: str = ""
"""Provider name on Aden server (e.g., 'hubspot')"""
direct_api_key_supported: bool = True
"""Whether users can directly enter an API key"""
api_key_instructions: str = ""
"""Step-by-step instructions for getting the API key directly"""
# Health check configuration
health_check_endpoint: str = ""
"""API endpoint for validating the credential (lightweight check)"""
health_check_method: str = "GET"
"""HTTP method for health check"""
# Credential store mapping
credential_id: str = ""
"""Credential store ID (e.g., 'hubspot' for the CredentialStore)"""
credential_key: str = "access_token"
"""Key name within the credential (e.g., 'access_token', 'api_key')"""
class CredentialError(Exception):
"""Raised when required credentials are missing."""
@@ -401,3 +428,63 @@ class CredentialManager:
lines.append("Set these environment variables and restart the server.")
return "\n".join(lines)
def get_auth_options(self, credential_name: str) -> list[str]:
    """
    List the authentication methods available for a credential.

    Args:
        credential_name: Name of the credential (e.g., 'hubspot')

    Returns:
        Auth methods drawn from 'aden', 'direct', 'custom'. Credentials
        not present in the spec registry fall back to ['direct', 'custom'].

    Example:
        >>> creds = CredentialManager()
        >>> creds.get_auth_options("hubspot")
        ['aden', 'direct', 'custom']
    """
    spec = self._specs.get(credential_name)
    if spec is None:
        # Unregistered credential: no Aden flow, but manual entry still works.
        return ["direct", "custom"]
    candidates = [
        ("aden", spec.aden_supported),
        ("direct", spec.direct_api_key_supported),
        ("custom", True),  # Always available
    ]
    return [method for method, enabled in candidates if enabled]
def get_setup_instructions(self, credential_name: str) -> dict:
    """
    Return setup metadata for a credential, or {} if it is unknown.

    Args:
        credential_name: Name of the credential (e.g., 'hubspot')

    Returns:
        Dict with env_var, description, help_url, api_key_instructions,
        the Aden / direct-entry support flags, and the credential store
        mapping (credential_id, credential_key). Empty dict when the
        credential is not registered.

    Example:
        >>> creds = CredentialManager()
        >>> info = creds.get_setup_instructions("hubspot")
        >>> print(info['api_key_instructions'])
    """
    spec = self._specs.get(credential_name)
    if spec is None:
        return {}
    # Project the relevant spec attributes into a plain dict.
    exported_fields = (
        "env_var",
        "description",
        "help_url",
        "api_key_instructions",
        "aden_supported",
        "aden_provider_name",
        "direct_api_key_supported",
        "credential_id",
        "credential_key",
    )
    return {name: getattr(spec, name) for name in exported_fields}
+102
View File
@@ -0,0 +1,102 @@
"""
Browser utilities for OAuth2 flows.
Opens URLs in the user's default browser for authorization flows.
Supports macOS, Linux, and Windows.
"""
from __future__ import annotations
import platform
import subprocess
import webbrowser
def open_browser(url: str) -> tuple[bool, str]:
    """
    Open a URL in the user's default browser.

    Platform strategy:
    - macOS: the `open` command
    - Linux: `xdg-open`, falling back to the webbrowser module
    - Windows and anything else: the webbrowser module

    Args:
        url: The URL to open

    Returns:
        Tuple of (success, message)

    Example:
        >>> success, msg = open_browser("https://integration.adenhq.com/connect/hubspot")
        >>> if success:
        ...     print("Browser opened!")
    """
    os_name = platform.system()

    try:
        if os_name == "Darwin":  # macOS
            # Capture output so nothing leaks onto the caller's console.
            subprocess.run(["open", url], check=True, capture_output=True)
            return True, "Opened in browser"

        if os_name == "Linux":
            # xdg-open is present on most distros; webbrowser is the fallback.
            try:
                subprocess.run(["xdg-open", url], check=True, capture_output=True)
                return True, "Opened in browser"
            except FileNotFoundError:
                if webbrowser.open(url):
                    return True, "Opened in browser"
                return False, "Could not open browser (xdg-open not found)"

        if os_name == "Windows":
            if webbrowser.open(url):
                return True, "Opened in browser"
            return False, "Could not open browser"

        # Unrecognized platform: best effort via the stdlib.
        if webbrowser.open(url):
            return True, "Opened in browser"
        return False, f"Could not open browser on {os_name}"

    except subprocess.CalledProcessError as e:
        # `open`/`xdg-open` exited non-zero.
        return False, f"Failed to open browser: {e}"
    except Exception as e:
        return False, f"Failed to open browser: {e}"
def get_aden_auth_url(provider_name: str, base_url: str = "https://integration.adenhq.com") -> str:
    """
    Get the Aden authorization URL for a provider.

    Args:
        provider_name: Provider name (e.g., 'hubspot')
        base_url: Aden server base URL

    Returns:
        Full authorization URL
    """
    return "/".join((base_url, "connect", provider_name))
def get_aden_setup_url(base_url: str = "https://integration.adenhq.com") -> str:
    """
    Get the Aden setup URL for creating an API key.

    Args:
        base_url: Aden server base URL

    Returns:
        Setup URL for getting an Aden API key
    """
    return base_url + "/setup"
@@ -0,0 +1,279 @@
"""
Credential health checks per integration.
Validates that stored credentials are valid before agent execution.
Each integration has a lightweight health check that makes a minimal API call
to verify the credential works.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Protocol
import httpx
@dataclass
class HealthCheckResult:
    """
    Result of a credential health check.

    Attributes:
        valid: Whether the credential is valid.
        message: Human-readable status message.
        details: Additional details (e.g., error codes, rate limit info).
    """

    valid: bool
    message: str
    details: dict[str, Any] = field(default_factory=dict)
class CredentialHealthChecker(Protocol):
    """
    Protocol for credential health checkers.

    Structural interface: any class with a matching ``check`` method
    conforms (no inheritance required) — see HubSpotHealthChecker and
    BraveSearchHealthChecker in this module.
    """

    def check(self, credential_value: str) -> HealthCheckResult:
        """
        Check if the credential is valid.

        Args:
            credential_value: The credential value to validate
                (e.g., an API key or access token)

        Returns:
            HealthCheckResult with validation status
        """
        ...
class HubSpotHealthChecker:
    """Health checker for HubSpot credentials."""

    ENDPOINT = "https://api.hubapi.com/crm/v3/objects/contacts"
    TIMEOUT = 10.0

    def check(self, access_token: str) -> HealthCheckResult:
        """
        Validate HubSpot token by making lightweight API call.

        Makes a GET request for 1 contact to verify the token works.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                resp = client.get(
                    self.ENDPOINT,
                    headers={
                        "Authorization": f"Bearer {access_token}",
                        "Accept": "application/json",
                    },
                    params={"limit": "1"},
                )
        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message="HubSpot API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to HubSpot: {e}",
                details={"error": str(e)},
            )

        status = resp.status_code
        if status == 200:
            return HealthCheckResult(valid=True, message="HubSpot credentials valid")
        if status == 401:
            return HealthCheckResult(
                valid=False,
                message="HubSpot token is invalid or expired",
                details={"status_code": 401},
            )
        if status == 403:
            # Token authenticated but is missing the scope this probe needs.
            return HealthCheckResult(
                valid=False,
                message="HubSpot token lacks required scopes",
                details={"status_code": 403, "required": "crm.objects.contacts.read"},
            )
        return HealthCheckResult(
            valid=False,
            message=f"HubSpot API returned status {status}",
            details={"status_code": status},
        )
class BraveSearchHealthChecker:
    """Health checker for Brave Search API."""

    ENDPOINT = "https://api.search.brave.com/res/v1/web/search"
    TIMEOUT = 10.0

    def check(self, api_key: str) -> HealthCheckResult:
        """
        Validate Brave Search API key.

        Makes a minimal search request to verify the key works.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                resp = client.get(
                    self.ENDPOINT,
                    headers={"X-Subscription-Token": api_key},
                    params={"q": "test", "count": "1"},
                )
        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message="Brave Search API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to Brave Search: {e}",
                details={"error": str(e)},
            )

        status = resp.status_code
        if status == 200:
            return HealthCheckResult(valid=True, message="Brave Search API key valid")
        if status == 401:
            return HealthCheckResult(
                valid=False,
                message="Brave Search API key is invalid",
                details={"status_code": 401},
            )
        if status == 429:
            # Rate limited but key is valid
            return HealthCheckResult(
                valid=True,
                message="Brave Search API key valid (rate limited)",
                details={"status_code": 429, "rate_limited": True},
            )
        return HealthCheckResult(
            valid=False,
            message=f"Brave Search API returned status {status}",
            details={"status_code": status},
        )
class GoogleSearchHealthChecker:
    """Health checker for Google Custom Search API."""

    ENDPOINT = "https://www.googleapis.com/customsearch/v1"
    TIMEOUT = 10.0

    def check(self, api_key: str, cse_id: str | None = None) -> HealthCheckResult:
        """
        Validate Google Custom Search API key.

        Note: Requires both API key and CSE ID for a full check.
        If CSE ID is not provided, we can only do a partial validation.
        """
        if not cse_id:
            # Without a CSE ID no real query can be issued; report an
            # optimistic partial result instead of failing.
            return HealthCheckResult(
                valid=True,
                message="Google API key format valid (CSE ID needed for full check)",
                details={"partial_check": True},
            )

        query_params = {
            "key": api_key,
            "cx": cse_id,
            "q": "test",
            "num": "1",
        }
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                resp = client.get(self.ENDPOINT, params=query_params)
        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message="Google API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to Google API: {e}",
                details={"error": str(e)},
            )

        status = resp.status_code
        if status == 200:
            return HealthCheckResult(
                valid=True,
                message="Google Custom Search credentials valid",
            )
        if status == 400:
            return HealthCheckResult(
                valid=False,
                message="Google Custom Search: Invalid CSE ID",
                details={"status_code": 400},
            )
        if status == 403:
            return HealthCheckResult(
                valid=False,
                message="Google API key is invalid or quota exceeded",
                details={"status_code": 403},
            )
        return HealthCheckResult(
            valid=False,
            message=f"Google API returned status {status}",
            details={"status_code": status},
        )
# Registry of health checkers, keyed by credential name.
HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
    "hubspot": HubSpotHealthChecker(),
    "brave_search": BraveSearchHealthChecker(),
    # google_search must be registered here: check_credential_health() looks
    # the name up in this registry and returns "no checker, assuming valid"
    # for unregistered names, which made its Google-specific cse_id dispatch
    # unreachable. Called without a CSE ID, the checker returns a partial
    # (optimistic) result, which matches the old behavior's valid=True.
    "google_search": GoogleSearchHealthChecker(),
}
def check_credential_health(
    credential_name: str,
    credential_value: str,
    **kwargs: Any,
) -> HealthCheckResult:
    """
    Check if a credential is valid.

    Args:
        credential_name: Name of the credential (e.g., 'hubspot', 'brave_search')
        credential_value: The credential value to validate
        **kwargs: Additional arguments passed to the checker (e.g., cse_id for Google)

    Returns:
        HealthCheckResult with validation status

    Example:
        >>> result = check_credential_health("hubspot", "pat-xxx-yyy")
        >>> if result.valid:
        ...     print("Credential is valid!")
        ... else:
        ...     print(f"Invalid: {result.message}")
    """
    # Dispatch Google explicitly BEFORE the registry lookup: its checker
    # takes the optional CSE ID as a second argument, while registry entries
    # are called with the credential value only. (The previous version did
    # this check after the `checker is None` early return, which made it
    # unreachable when google_search was absent from HEALTH_CHECKERS.)
    if credential_name == "google_search":
        return GoogleSearchHealthChecker().check(credential_value, kwargs.get("cse_id"))

    checker = HEALTH_CHECKERS.get(credential_name)
    if checker is None:
        # No health checker registered - assume valid
        return HealthCheckResult(
            valid=True,
            message=f"No health checker for '{credential_name}', assuming valid",
            details={"no_checker": True},
        )

    return checker.check(credential_value)
@@ -0,0 +1,53 @@
"""
Integration credentials.
Contains credentials for third-party service integrations (HubSpot, etc.).
"""
from .base import CredentialSpec
INTEGRATION_CREDENTIALS = {
"hubspot": CredentialSpec(
env_var="HUBSPOT_ACCESS_TOKEN",
tools=[
"hubspot_search_contacts",
"hubspot_get_contact",
"hubspot_create_contact",
"hubspot_update_contact",
"hubspot_search_companies",
"hubspot_get_company",
"hubspot_create_company",
"hubspot_update_company",
"hubspot_search_deals",
"hubspot_get_deal",
"hubspot_create_deal",
"hubspot_update_deal",
],
required=True,
startup_required=False,
help_url="https://developers.hubspot.com/docs/api/private-apps",
description="HubSpot access token (Private App or OAuth2)",
# Auth method support
aden_supported=True,
aden_provider_name="hubspot",
direct_api_key_supported=True,
api_key_instructions="""To get a HubSpot Private App token:
1. Go to HubSpot Settings > Integrations > Private Apps
2. Click "Create a private app"
3. Name your app (e.g., "Hive Agent")
4. Go to the "Scopes" tab and enable:
- crm.objects.contacts.read
- crm.objects.contacts.write
- crm.objects.companies.read
- crm.objects.companies.write
- crm.objects.deals.read
- crm.objects.deals.write
5. Click "Create app" and copy the access token""",
# Health check configuration
health_check_endpoint="https://api.hubapi.com/crm/v3/objects/contacts?limit=1",
health_check_method="GET",
# Credential store mapping
credential_id="hubspot",
credential_key="access_token",
),
}
@@ -0,0 +1,229 @@
"""
Shell configuration utilities for persisting environment variables.
Supports both bash and zsh, detecting the user's default shell.
Used primarily for persisting ADEN_API_KEY across sessions.
"""
from __future__ import annotations
import os
import re
from pathlib import Path
from typing import Literal
ShellType = Literal["bash", "zsh", "unknown"]
def detect_shell() -> ShellType:
    """
    Detect the user's default shell.

    Checks $SHELL environment variable first, then falls back to
    detecting which config files exist.

    Returns:
        ShellType: 'bash', 'zsh', or 'unknown'
    """
    shell_path = os.environ.get("SHELL", "")
    # zsh is checked first in both passes, matching the original priority.
    for name in ("zsh", "bash"):
        if name in shell_path:
            return name

    # $SHELL was unhelpful; infer from which rc file the user has.
    home = Path.home()
    for name, rc_file in (("zsh", ".zshrc"), ("bash", ".bashrc")):
        if (home / rc_file).exists():
            return name
    return "unknown"
def get_shell_config_path(shell_type: ShellType | None = None) -> Path:
    """
    Get the path to the shell configuration file.

    Args:
        shell_type: Override shell detection. If None, auto-detect.

    Returns:
        Path to the shell config file (.bashrc, .zshrc, etc.)
    """
    resolved = detect_shell() if shell_type is None else shell_type
    # bash and unknown shells both get .bashrc, the most common default.
    rc_name = ".zshrc" if resolved == "zsh" else ".bashrc"
    return Path.home() / rc_name
def check_env_var_in_shell_config(
    env_var: str,
    shell_type: ShellType | None = None,
) -> tuple[bool, str | None]:
    """
    Check if an environment variable is already set in shell config.

    Args:
        env_var: Environment variable name to check
        shell_type: Override shell detection

    Returns:
        Tuple of (exists, current_value or None)
    """
    config_path = get_shell_config_path(shell_type)
    if not config_path.exists():
        return False, None

    # Look for export ENV_VAR=value or export ENV_VAR="value"
    match = re.search(
        rf"^export\s+{re.escape(env_var)}=(.+)$",
        config_path.read_text(),
        re.MULTILINE,
    )
    if match is None:
        return False, None

    value = match.group(1).strip()
    # Remove surrounding quotes if present
    if (value.startswith('"') and value.endswith('"')) or (
        value.startswith("'") and value.endswith("'")
    ):
        value = value[1:-1]
    return True, value
def add_env_var_to_shell_config(
    env_var: str,
    value: str,
    shell_type: ShellType | None = None,
    comment: str = "Added by Hive credential setup",
) -> tuple[bool, str]:
    """
    Add an environment variable export to shell config.

    If the variable already exists, it will be updated in place.
    If it doesn't exist, it will be appended to the file (creating the
    file if necessary).

    Args:
        env_var: Environment variable name
        value: Value to set
        shell_type: Override shell detection
        comment: Comment to add above the export line

    Returns:
        Tuple of (success, config_path or error message)
    """
    config_path = get_shell_config_path(shell_type)

    # Quote the value to handle special characters
    # NOTE(review): embedded double quotes or backslashes in `value` are not
    # escaped and would break the generated line — fine for typical API keys,
    # but confirm before persisting arbitrary values.
    export_line = f'export {env_var}="{value}"'

    try:
        if config_path.exists():
            content = config_path.read_text()

            # Check if already exists
            pattern = rf"^export\s+{re.escape(env_var)}=.*$"
            if re.search(pattern, content, re.MULTILINE):
                # Update existing line. Use a callable replacement so that
                # backslashes / group references in the value (e.g. "\1",
                # "\g<name>") are inserted literally instead of being
                # interpreted as re.sub() replacement escapes.
                new_content = re.sub(
                    pattern,
                    lambda _match: export_line,
                    content,
                    flags=re.MULTILINE,
                )
                config_path.write_text(new_content)
                return True, str(config_path)

        # Append to file (open(..., "a") creates it if missing)
        with open(config_path, "a") as f:
            f.write(f"\n# {comment}\n")
            f.write(f"{export_line}\n")
        return True, str(config_path)
    except PermissionError:
        return False, f"Permission denied writing to {config_path}"
    except Exception as e:
        return False, str(e)
def remove_env_var_from_shell_config(
    env_var: str,
    shell_type: ShellType | None = None,
) -> tuple[bool, str]:
    """
    Remove an environment variable from shell config.

    Deletes the ``export ENV_VAR=...`` line and any immediately preceding
    "# Added by Hive..." marker comment written by
    add_env_var_to_shell_config.

    Args:
        env_var: Environment variable name to remove
        shell_type: Override shell detection

    Returns:
        Tuple of (success, config_path or error message)
    """
    config_path = get_shell_config_path(shell_type)

    if not config_path.exists():
        # Nothing to remove counts as success.
        return True, "Config file does not exist"

    try:
        content = config_path.read_text()
        lines = content.split("\n")
        new_lines = []
        # Set when a marker comment is found whose next non-empty line is
        # this variable's export; cleared again when that export is dropped.
        skip_next_comment = False

        for i, line in enumerate(lines):
            stripped = line.strip()

            # Skip comment lines that precede the export
            if stripped.startswith("# Added by Hive"):
                # Check if next non-empty line is the export
                for j in range(i + 1, len(lines)):
                    next_line = lines[j].strip()
                    if next_line:
                        if next_line.startswith(f"export {env_var}="):
                            skip_next_comment = True
                        break
                if skip_next_comment:
                    # Drop the marker comment; the export itself is dropped
                    # when the loop reaches it below.
                    continue

            # Skip the export line itself
            if stripped.startswith(f"export {env_var}="):
                skip_next_comment = False
                continue

            new_lines.append(line)

        # NOTE(review): the blank line that add_env_var_to_shell_config writes
        # before the marker comment is left in place, so repeated add/remove
        # cycles accumulate blank lines — confirm this is acceptable.
        config_path.write_text("\n".join(new_lines))
        return True, str(config_path)
    except PermissionError:
        return False, f"Permission denied writing to {config_path}"
    except Exception as e:
        return False, str(e)
def get_shell_source_command(shell_type: ShellType | None = None) -> str:
    """
    Get the command to source the shell config file.

    Args:
        shell_type: Override shell detection

    Returns:
        Shell command to source the config (e.g., 'source ~/.bashrc')
    """
    return "source " + str(get_shell_config_path(shell_type))
@@ -9,7 +9,7 @@ Usage:
from aden_tools.credentials.store_adapter import CredentialStoreAdapter
# Create new credential store
store = CredentialStore.with_encrypted_storage("/var/hive/credentials")
store = CredentialStore.with_encrypted_storage() # defaults to ~/.hive/credentials
# Wrap with adapter for backward compatibility
credentials = CredentialStoreAdapter(store)
+22 -6
View File
@@ -4,19 +4,21 @@ Aden Tools - Tool implementations for FastMCP.
Usage:
from fastmcp import FastMCP
from aden_tools.tools import register_all_tools
from aden_tools.credentials import CredentialManager
from aden_tools.credentials import CredentialStoreAdapter
mcp = FastMCP("my-server")
credentials = CredentialManager()
credentials = CredentialStoreAdapter.with_env_storage()
register_all_tools(mcp, credentials=credentials)
"""
from typing import TYPE_CHECKING, Optional
from __future__ import annotations
from typing import TYPE_CHECKING
from fastmcp import FastMCP
if TYPE_CHECKING:
from aden_tools.credentials import CredentialManager
from aden_tools.credentials import CredentialStoreAdapter
# Import register_tools from each tool module
from .csv_tool import register_tools as register_csv
@@ -35,6 +37,7 @@ from .file_system_toolkits.replace_file_content import (
# Import file system toolkits
from .file_system_toolkits.view_file import register_tools as register_view_file
from .file_system_toolkits.write_to_file import register_tools as register_write_to_file
from .hubspot_tool import register_tools as register_hubspot
from .pdf_read_tool import register_tools as register_pdf_read
from .web_scrape_tool import register_tools as register_web_scrape
from .web_search_tool import register_tools as register_web_search
@@ -42,14 +45,14 @@ from .web_search_tool import register_tools as register_web_search
def register_all_tools(
mcp: FastMCP,
credentials: Optional["CredentialManager"] = None,
credentials: CredentialStoreAdapter | None = None,
) -> list[str]:
"""
Register all tools with a FastMCP server.
Args:
mcp: FastMCP server instance
credentials: Optional CredentialManager for centralized credential access.
credentials: Optional CredentialStoreAdapter instance.
If not provided, tools fall back to direct os.getenv() calls.
Returns:
@@ -63,6 +66,7 @@ def register_all_tools(
# Tools that need credentials (pass credentials if provided)
# web_search supports multiple providers (Google, Brave) with auto-detection
register_web_search(mcp, credentials=credentials)
register_hubspot(mcp, credentials=credentials)
# Register file system toolkits
register_view_file(mcp)
@@ -93,6 +97,18 @@ def register_all_tools(
"csv_append",
"csv_info",
"csv_sql",
"hubspot_search_contacts",
"hubspot_get_contact",
"hubspot_create_contact",
"hubspot_update_contact",
"hubspot_search_companies",
"hubspot_get_company",
"hubspot_create_company",
"hubspot_update_company",
"hubspot_search_deals",
"hubspot_get_deal",
"hubspot_create_deal",
"hubspot_update_deal",
]
@@ -0,0 +1,9 @@
"""
HubSpot CRM Tool - Manage contacts, companies, and deals via HubSpot API v3.
Supports Private App tokens and OAuth2 authentication.
"""
from .hubspot_tool import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,477 @@
"""
HubSpot CRM Tool - Manage contacts, companies, and deals via HubSpot API v3.
Supports:
- Private App access tokens (HUBSPOT_ACCESS_TOKEN)
- OAuth2 tokens via the credential store
API Reference: https://developers.hubspot.com/docs/api/crm
"""
from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any
import httpx
from fastmcp import FastMCP
if TYPE_CHECKING:
from aden_tools.credentials import CredentialStoreAdapter
HUBSPOT_API_BASE = "https://api.hubapi.com"
class _HubSpotClient:
"""Internal client wrapping HubSpot CRM API v3 calls."""
def __init__(self, access_token: str):
self._token = access_token
@property
def _headers(self) -> dict[str, str]:
return {
"Authorization": f"Bearer {self._token}",
"Content-Type": "application/json",
"Accept": "application/json",
}
def _handle_response(self, response: httpx.Response) -> dict[str, Any]:
"""Handle common HTTP error codes."""
if response.status_code == 401:
return {"error": "Invalid or expired HubSpot access token"}
if response.status_code == 403:
return {"error": "Insufficient permissions. Check your HubSpot app scopes."}
if response.status_code == 404:
return {"error": "Resource not found"}
if response.status_code == 429:
return {"error": "HubSpot rate limit exceeded. Try again later."}
if response.status_code >= 400:
try:
detail = response.json().get("message", response.text)
except Exception:
detail = response.text
return {"error": f"HubSpot API error (HTTP {response.status_code}): {detail}"}
return response.json()
def search_objects(
self,
object_type: str,
query: str = "",
properties: list[str] | None = None,
limit: int = 10,
) -> dict[str, Any]:
"""Search CRM objects."""
body: dict[str, Any] = {"limit": min(limit, 100)}
if query:
body["query"] = query
if properties:
body["properties"] = properties
response = httpx.post(
f"{HUBSPOT_API_BASE}/crm/v3/objects/{object_type}/search",
headers=self._headers,
json=body,
timeout=30.0,
)
return self._handle_response(response)
def get_object(
self,
object_type: str,
object_id: str,
properties: list[str] | None = None,
) -> dict[str, Any]:
"""Get a single CRM object by ID."""
params: dict[str, str] = {}
if properties:
params["properties"] = ",".join(properties)
response = httpx.get(
f"{HUBSPOT_API_BASE}/crm/v3/objects/{object_type}/{object_id}",
headers=self._headers,
params=params,
timeout=30.0,
)
return self._handle_response(response)
def create_object(
self,
object_type: str,
properties: dict[str, str],
) -> dict[str, Any]:
"""Create a CRM object."""
response = httpx.post(
f"{HUBSPOT_API_BASE}/crm/v3/objects/{object_type}",
headers=self._headers,
json={"properties": properties},
timeout=30.0,
)
return self._handle_response(response)
def update_object(
    self,
    object_type: str,
    object_id: str,
    properties: dict[str, str],
) -> dict[str, Any]:
    """Partially update an existing CRM record (HTTP PATCH semantics)."""
    endpoint = f"{HUBSPOT_API_BASE}/crm/v3/objects/{object_type}/{object_id}"
    payload = {"properties": properties}
    response = httpx.patch(endpoint, headers=self._headers, json=payload, timeout=30.0)
    return self._handle_response(response)
def register_tools(
    mcp: FastMCP,
    credentials: CredentialStoreAdapter | None = None,
) -> None:
    """Register HubSpot CRM tools with the MCP server.

    Exposes 12 tools (search/get/create/update for contacts, companies, and
    deals). The access token is resolved lazily on every tool call — never
    at registration time — so the server can start before credentials exist.

    Args:
        mcp: FastMCP server to attach the tools to.
        credentials: Optional credential store adapter. When None, the token
            is read from the HUBSPOT_ACCESS_TOKEN environment variable.
    """

    def _get_token() -> str | None:
        """Get HubSpot access token from credential manager or environment."""
        if credentials is not None:
            token = credentials.get("hubspot")
            # Defensive check: ensure we get a string, not a complex object
            if token is not None and not isinstance(token, str):
                raise TypeError(
                    f"Expected string from credentials.get('hubspot'), got {type(token).__name__}"
                )
            return token
        return os.getenv("HUBSPOT_ACCESS_TOKEN")

    def _get_client() -> _HubSpotClient | dict[str, str]:
        """Get a HubSpot client, or return an error dict if no credentials."""
        token = _get_token()
        if not token:
            return {
                "error": "HubSpot credentials not configured",
                "help": (
                    "Set HUBSPOT_ACCESS_TOKEN environment variable "
                    "or configure via credential store"
                ),
            }
        return _HubSpotClient(token)

    def _run(call):
        """Resolve a client and invoke ``call(client)``, translating
        transport failures into error dicts.

        Factors out the client-resolution and timeout/network handling that
        was previously duplicated verbatim in all 12 tool bodies.
        """
        client = _get_client()
        if isinstance(client, dict):
            # Credentials missing: the error dict is the tool result.
            return client
        try:
            return call(client)
        except httpx.TimeoutException:
            return {"error": "Request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}

    # --- Contacts ---

    @mcp.tool()
    def hubspot_search_contacts(
        query: str = "",
        properties: list[str] | None = None,
        limit: int = 10,
    ) -> dict:
        """
        Search HubSpot contacts.

        Args:
            query: Search query string (searches across name, email, phone, etc.)
            properties: List of properties to return
                (e.g., ["email", "firstname", "lastname", "phone"])
            limit: Maximum number of results (1-100, default 10)

        Returns:
            Dict with search results or error
        """
        return _run(
            lambda client: client.search_objects(
                "contacts", query, properties or ["email", "firstname", "lastname"], limit
            )
        )

    @mcp.tool()
    def hubspot_get_contact(
        contact_id: str,
        properties: list[str] | None = None,
    ) -> dict:
        """
        Get a HubSpot contact by ID.

        Args:
            contact_id: The HubSpot contact ID
            properties: List of properties to return
                (e.g., ["email", "firstname", "lastname", "phone"])

        Returns:
            Dict with contact data or error
        """
        return _run(lambda client: client.get_object("contacts", contact_id, properties))

    @mcp.tool()
    def hubspot_create_contact(
        properties: dict[str, str],
    ) -> dict:
        """
        Create a new HubSpot contact.

        Args:
            properties: Contact properties
                (e.g., {"email": "j@example.com", "firstname": "Jane"})

        Returns:
            Dict with created contact data or error
        """
        return _run(lambda client: client.create_object("contacts", properties))

    @mcp.tool()
    def hubspot_update_contact(
        contact_id: str,
        properties: dict[str, str],
    ) -> dict:
        """
        Update an existing HubSpot contact.

        Args:
            contact_id: The HubSpot contact ID
            properties: Properties to update (e.g., {"phone": "+1234567890"})

        Returns:
            Dict with updated contact data or error
        """
        return _run(lambda client: client.update_object("contacts", contact_id, properties))

    # --- Companies ---

    @mcp.tool()
    def hubspot_search_companies(
        query: str = "",
        properties: list[str] | None = None,
        limit: int = 10,
    ) -> dict:
        """
        Search HubSpot companies.

        Args:
            query: Search query string (searches across name, domain, etc.)
            properties: List of properties to return (e.g., ["name", "domain", "industry"])
            limit: Maximum number of results (1-100, default 10)

        Returns:
            Dict with search results or error
        """
        return _run(
            lambda client: client.search_objects(
                "companies", query, properties or ["name", "domain", "industry"], limit
            )
        )

    @mcp.tool()
    def hubspot_get_company(
        company_id: str,
        properties: list[str] | None = None,
    ) -> dict:
        """
        Get a HubSpot company by ID.

        Args:
            company_id: The HubSpot company ID
            properties: List of properties to return (e.g., ["name", "domain", "industry"])

        Returns:
            Dict with company data or error
        """
        return _run(lambda client: client.get_object("companies", company_id, properties))

    @mcp.tool()
    def hubspot_create_company(
        properties: dict[str, str],
    ) -> dict:
        """
        Create a new HubSpot company.

        Args:
            properties: Company properties
                (e.g., {"name": "Acme Inc", "domain": "acme.com"})

        Returns:
            Dict with created company data or error
        """
        return _run(lambda client: client.create_object("companies", properties))

    @mcp.tool()
    def hubspot_update_company(
        company_id: str,
        properties: dict[str, str],
    ) -> dict:
        """
        Update an existing HubSpot company.

        Args:
            company_id: The HubSpot company ID
            properties: Properties to update (e.g., {"industry": "Finance"})

        Returns:
            Dict with updated company data or error
        """
        return _run(lambda client: client.update_object("companies", company_id, properties))

    # --- Deals ---

    @mcp.tool()
    def hubspot_search_deals(
        query: str = "",
        properties: list[str] | None = None,
        limit: int = 10,
    ) -> dict:
        """
        Search HubSpot deals.

        Args:
            query: Search query string (searches across deal name, etc.)
            properties: List of properties to return
                (e.g., ["dealname", "amount", "dealstage"])
            limit: Maximum number of results (1-100, default 10)

        Returns:
            Dict with search results or error
        """
        return _run(
            lambda client: client.search_objects(
                "deals", query, properties or ["dealname", "amount", "dealstage"], limit
            )
        )

    @mcp.tool()
    def hubspot_get_deal(
        deal_id: str,
        properties: list[str] | None = None,
    ) -> dict:
        """
        Get a HubSpot deal by ID.

        Args:
            deal_id: The HubSpot deal ID
            properties: List of properties to return
                (e.g., ["dealname", "amount", "dealstage"])

        Returns:
            Dict with deal data or error
        """
        return _run(lambda client: client.get_object("deals", deal_id, properties))

    @mcp.tool()
    def hubspot_create_deal(
        properties: dict[str, str],
    ) -> dict:
        """
        Create a new HubSpot deal.

        Args:
            properties: Deal properties
                (e.g., {"dealname": "New Deal", "amount": "10000"})

        Returns:
            Dict with created deal data or error
        """
        return _run(lambda client: client.create_object("deals", properties))

    @mcp.tool()
    def hubspot_update_deal(
        deal_id: str,
        properties: dict[str, str],
    ) -> dict:
        """
        Update an existing HubSpot deal.

        Args:
            deal_id: The HubSpot deal ID
            properties: Properties to update
                (e.g., {"amount": "15000", "dealstage": "qualifiedtobuy"})

        Returns:
            Dict with updated deal data or error
        """
        return _run(lambda client: client.update_object("deals", deal_id, properties))
@@ -0,0 +1,490 @@
"""
Tests for HubSpot CRM tool and OAuth2 provider.
Covers:
- _HubSpotClient methods (search, get, create, update)
- Error handling (401, 403, 404, 429, 500, timeout)
- Credential retrieval (CredentialStoreAdapter vs env var)
- All 12 MCP tool functions
- HubSpotOAuth2Provider configuration
"""
from __future__ import annotations
from unittest.mock import MagicMock, patch
import httpx
import pytest
from aden_tools.tools.hubspot_tool.hubspot_tool import (
HUBSPOT_API_BASE,
_HubSpotClient,
register_tools,
)
# --- _HubSpotClient tests ---
class TestHubSpotClient:
    """Unit tests for the low-level _HubSpotClient HTTP wrapper."""

    def setup_method(self):
        self.client = _HubSpotClient("test-token")

    @staticmethod
    def _response(status_code, payload=None):
        """Build a MagicMock standing in for an httpx.Response."""
        fake = MagicMock()
        fake.status_code = status_code
        if payload is not None:
            fake.json.return_value = payload
        return fake

    def test_headers(self):
        hdrs = self.client._headers
        assert hdrs["Authorization"] == "Bearer test-token"
        assert hdrs["Content-Type"] == "application/json"

    def test_handle_response_success(self):
        ok = self._response(200, {"results": []})
        assert self.client._handle_response(ok) == {"results": []}

    @pytest.mark.parametrize(
        "status_code,expected_substring",
        [
            (401, "Invalid or expired"),
            (403, "Insufficient permissions"),
            (404, "not found"),
            (429, "rate limit"),
        ],
    )
    def test_handle_response_errors(self, status_code, expected_substring):
        result = self.client._handle_response(self._response(status_code))
        assert "error" in result
        assert expected_substring in result["error"]

    def test_handle_response_generic_error(self):
        result = self.client._handle_response(
            self._response(500, {"message": "Internal Server Error"})
        )
        assert "error" in result
        assert "500" in result["error"]

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_search_objects(self, mock_post):
        mock_post.return_value = self._response(
            200,
            {"total": 1, "results": [{"id": "1", "properties": {"email": "test@example.com"}}]},
        )
        result = self.client.search_objects(
            "contacts", query="test", properties=["email"], limit=5
        )
        mock_post.assert_called_once_with(
            f"{HUBSPOT_API_BASE}/crm/v3/objects/contacts/search",
            headers=self.client._headers,
            json={"limit": 5, "query": "test", "properties": ["email"]},
            timeout=30.0,
        )
        assert result["total"] == 1

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_search_objects_no_query(self, mock_post):
        mock_post.return_value = self._response(200, {"total": 0, "results": []})
        self.client.search_objects("contacts", limit=10)
        sent_body = mock_post.call_args.kwargs["json"]
        # An empty query must be omitted from the request body entirely.
        assert "query" not in sent_body
        assert sent_body["limit"] == 10

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_search_objects_limit_capped(self, mock_post):
        mock_post.return_value = self._response(200, {"total": 0, "results": []})
        self.client.search_objects("contacts", limit=200)
        # HubSpot allows at most 100 results per page.
        assert mock_post.call_args.kwargs["json"]["limit"] == 100

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.get")
    def test_get_object(self, mock_get):
        mock_get.return_value = self._response(
            200, {"id": "123", "properties": {"email": "test@example.com"}}
        )
        result = self.client.get_object("contacts", "123", properties=["email"])
        mock_get.assert_called_once_with(
            f"{HUBSPOT_API_BASE}/crm/v3/objects/contacts/123",
            headers=self.client._headers,
            params={"properties": "email"},
            timeout=30.0,
        )
        assert result["id"] == "123"

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.get")
    def test_get_object_no_properties(self, mock_get):
        mock_get.return_value = self._response(200, {"id": "123"})
        self.client.get_object("contacts", "123")
        assert mock_get.call_args.kwargs["params"] == {}

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_create_object(self, mock_post):
        new_props = {"email": "new@example.com", "firstname": "Jane"}
        mock_post.return_value = self._response(201, {"id": "456", "properties": new_props})
        result = self.client.create_object("contacts", new_props)
        mock_post.assert_called_once_with(
            f"{HUBSPOT_API_BASE}/crm/v3/objects/contacts",
            headers=self.client._headers,
            json={"properties": new_props},
            timeout=30.0,
        )
        assert result["id"] == "456"

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.patch")
    def test_update_object(self, mock_patch):
        changes = {"phone": "+1234567890"}
        mock_patch.return_value = self._response(200, {"id": "123", "properties": changes})
        result = self.client.update_object("contacts", "123", changes)
        mock_patch.assert_called_once_with(
            f"{HUBSPOT_API_BASE}/crm/v3/objects/contacts/123",
            headers=self.client._headers,
            json={"properties": changes},
            timeout=30.0,
        )
        assert result["id"] == "123"
# --- MCP tool registration and credential tests ---
class TestToolRegistration:
    """Tests for register_tools wiring and credential resolution."""

    def _get_tool_fn(self, mcp_mock, tool_name):
        """Extract a registered tool function by name from mcp.tool() calls."""
        for recorded in mcp_mock.tool.return_value.call_args_list:
            candidate = recorded[0][0]
            if candidate.__name__ == tool_name:
                return candidate
        raise ValueError(f"Tool '{tool_name}' not found in registered tools")

    @staticmethod
    def _register(credentials=None):
        """Run register_tools against a stub MCP; return (mcp, tool functions)."""
        mcp = MagicMock()
        collected = []
        mcp.tool.return_value = lambda fn: collected.append(fn) or fn
        register_tools(mcp, credentials=credentials)
        return mcp, collected

    def test_register_tools_registers_all_tools(self):
        mcp, _ = self._register()
        assert mcp.tool.call_count == 12

    def test_no_credentials_returns_error(self):
        with patch.dict("os.environ", {}, clear=True):
            _, fns = self._register(credentials=None)
            search_fn = next(f for f in fns if f.__name__ == "hubspot_search_contacts")
            result = search_fn()
        assert "error" in result
        assert "not configured" in result["error"]

    def test_credentials_from_credential_manager(self):
        store = MagicMock()
        store.get.return_value = "test-token"
        _, fns = self._register(credentials=store)
        search_fn = next(f for f in fns if f.__name__ == "hubspot_search_contacts")
        with patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post") as mock_post:
            mock_post.return_value = MagicMock(
                status_code=200, json=MagicMock(return_value={"total": 0, "results": []})
            )
            result = search_fn(query="test")
        store.get.assert_called_with("hubspot")
        assert result["total"] == 0

    def test_credentials_from_env_var(self):
        _, fns = self._register(credentials=None)
        search_fn = next(f for f in fns if f.__name__ == "hubspot_search_contacts")
        with (
            patch.dict("os.environ", {"HUBSPOT_ACCESS_TOKEN": "env-token"}),
            patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post") as mock_post,
        ):
            mock_post.return_value = MagicMock(
                status_code=200, json=MagicMock(return_value={"total": 0, "results": []})
            )
            result = search_fn(query="test")
        assert result["total"] == 0
        # The env token must end up in the Authorization header actually sent.
        sent_headers = mock_post.call_args.kwargs["headers"]
        assert sent_headers["Authorization"] == "Bearer env-token"
# --- Individual tool function tests ---
class TestContactTools:
    """Exercise the four contact tools end-to-end against mocked httpx."""

    def setup_method(self):
        self.mcp = MagicMock()
        self.registered = []
        self.mcp.tool.return_value = lambda fn: self.registered.append(fn) or fn
        store = MagicMock()
        store.get.return_value = "tok"
        register_tools(self.mcp, credentials=store)

    def _fn(self, name):
        """Look up a registered tool function by its name."""
        return next(f for f in self.registered if f.__name__ == name)

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_search_contacts(self, mock_post):
        mock_post.return_value = MagicMock(
            status_code=200,
            json=MagicMock(return_value={"total": 1, "results": [{"id": "1"}]}),
        )
        assert self._fn("hubspot_search_contacts")(query="john")["total"] == 1

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.get")
    def test_get_contact(self, mock_get):
        mock_get.return_value = MagicMock(
            status_code=200, json=MagicMock(return_value={"id": "1"})
        )
        assert self._fn("hubspot_get_contact")(contact_id="1")["id"] == "1"

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_create_contact(self, mock_post):
        mock_post.return_value = MagicMock(
            status_code=201, json=MagicMock(return_value={"id": "2"})
        )
        created = self._fn("hubspot_create_contact")(properties={"email": "a@b.com"})
        assert created["id"] == "2"

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.patch")
    def test_update_contact(self, mock_patch):
        mock_patch.return_value = MagicMock(
            status_code=200, json=MagicMock(return_value={"id": "1"})
        )
        updated = self._fn("hubspot_update_contact")(
            contact_id="1", properties={"phone": "123"}
        )
        assert updated["id"] == "1"

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_search_contacts_timeout(self, mock_post):
        mock_post.side_effect = httpx.TimeoutException("timed out")
        result = self._fn("hubspot_search_contacts")(query="test")
        assert "error" in result
        assert "timed out" in result["error"]

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.get")
    def test_get_contact_network_error(self, mock_get):
        mock_get.side_effect = httpx.RequestError("connection failed")
        result = self._fn("hubspot_get_contact")(contact_id="1")
        assert "error" in result
        assert "Network error" in result["error"]
class TestCompanyTools:
    """Exercise the four company tools against mocked httpx."""

    def setup_method(self):
        self.mcp = MagicMock()
        self.registered = []
        self.mcp.tool.return_value = lambda fn: self.registered.append(fn) or fn
        store = MagicMock()
        store.get.return_value = "tok"
        register_tools(self.mcp, credentials=store)

    def _fn(self, name):
        """Look up a registered tool function by its name."""
        return next(f for f in self.registered if f.__name__ == name)

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_search_companies(self, mock_post):
        mock_post.return_value = MagicMock(
            status_code=200, json=MagicMock(return_value={"total": 2, "results": []})
        )
        assert self._fn("hubspot_search_companies")(query="acme")["total"] == 2

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.get")
    def test_get_company(self, mock_get):
        mock_get.return_value = MagicMock(
            status_code=200, json=MagicMock(return_value={"id": "10"})
        )
        assert self._fn("hubspot_get_company")(company_id="10")["id"] == "10"

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_create_company(self, mock_post):
        mock_post.return_value = MagicMock(
            status_code=201, json=MagicMock(return_value={"id": "11"})
        )
        created = self._fn("hubspot_create_company")(properties={"name": "Acme"})
        assert created["id"] == "11"

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.patch")
    def test_update_company(self, mock_patch):
        mock_patch.return_value = MagicMock(
            status_code=200, json=MagicMock(return_value={"id": "10"})
        )
        updated = self._fn("hubspot_update_company")(
            company_id="10", properties={"industry": "Tech"}
        )
        assert updated["id"] == "10"
class TestDealTools:
    """Exercise the four deal tools against mocked httpx."""

    def setup_method(self):
        self.mcp = MagicMock()
        self.registered = []
        self.mcp.tool.return_value = lambda fn: self.registered.append(fn) or fn
        store = MagicMock()
        store.get.return_value = "tok"
        register_tools(self.mcp, credentials=store)

    def _fn(self, name):
        """Look up a registered tool function by its name."""
        return next(f for f in self.registered if f.__name__ == name)

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_search_deals(self, mock_post):
        mock_post.return_value = MagicMock(
            status_code=200, json=MagicMock(return_value={"total": 3, "results": []})
        )
        assert self._fn("hubspot_search_deals")(query="big deal")["total"] == 3

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.get")
    def test_get_deal(self, mock_get):
        mock_get.return_value = MagicMock(
            status_code=200, json=MagicMock(return_value={"id": "20"})
        )
        assert self._fn("hubspot_get_deal")(deal_id="20")["id"] == "20"

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.post")
    def test_create_deal(self, mock_post):
        mock_post.return_value = MagicMock(
            status_code=201, json=MagicMock(return_value={"id": "21"})
        )
        created = self._fn("hubspot_create_deal")(properties={"dealname": "New Deal"})
        assert created["id"] == "21"

    @patch("aden_tools.tools.hubspot_tool.hubspot_tool.httpx.patch")
    def test_update_deal(self, mock_patch):
        mock_patch.return_value = MagicMock(
            status_code=200, json=MagicMock(return_value={"id": "20"})
        )
        updated = self._fn("hubspot_update_deal")(deal_id="20", properties={"amount": "5000"})
        assert updated["id"] == "20"
# --- HubSpotOAuth2Provider tests ---
class TestHubSpotOAuth2Provider:
    """Configuration tests for HubSpotOAuth2Provider.

    NOTE(review): imports are local to each test, presumably so the suite
    still collects when the core.framework package is unavailable — confirm.
    """

    def test_provider_id(self):
        # Provider identifier is the stable key used by the credential registry.
        from core.framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider

        provider = HubSpotOAuth2Provider(client_id="cid", client_secret="csecret")
        assert provider.provider_id == "hubspot_oauth2"

    def test_default_scopes(self):
        # Omitting scopes falls back to the module-level default scope list.
        from core.framework.credentials.oauth2.hubspot_provider import (
            HUBSPOT_DEFAULT_SCOPES,
            HubSpotOAuth2Provider,
        )

        provider = HubSpotOAuth2Provider(client_id="cid", client_secret="csecret")
        assert provider.config.default_scopes == HUBSPOT_DEFAULT_SCOPES

    def test_custom_scopes(self):
        # Explicit scopes replace (not extend) the defaults.
        from core.framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider

        provider = HubSpotOAuth2Provider(
            client_id="cid",
            client_secret="csecret",
            scopes=["crm.objects.contacts.read"],
        )
        assert provider.config.default_scopes == ["crm.objects.contacts.read"]

    def test_endpoints(self):
        # Authorization and token URLs come from the module constants.
        from core.framework.credentials.oauth2.hubspot_provider import (
            HUBSPOT_AUTHORIZATION_URL,
            HUBSPOT_TOKEN_URL,
            HubSpotOAuth2Provider,
        )

        provider = HubSpotOAuth2Provider(client_id="cid", client_secret="csecret")
        assert provider.config.token_url == HUBSPOT_TOKEN_URL
        assert provider.config.authorization_url == HUBSPOT_AUTHORIZATION_URL

    def test_supported_types(self):
        # The provider must advertise OAuth2 among its supported credential types.
        from core.framework.credentials.models import CredentialType
        from core.framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider

        provider = HubSpotOAuth2Provider(client_id="cid", client_secret="csecret")
        assert CredentialType.OAUTH2 in provider.supported_types

    def test_validate_no_access_token(self):
        # A credential object without an access token must fail validation.
        from core.framework.credentials.models import CredentialObject
        from core.framework.credentials.oauth2.hubspot_provider import HubSpotOAuth2Provider

        provider = HubSpotOAuth2Provider(client_id="cid", client_secret="csecret")
        cred = CredentialObject(id="test")
        assert provider.validate(cred) is False
# --- Credential spec tests ---
class TestCredentialSpec:
    """Verify the 'hubspot' entry in the shared CREDENTIAL_SPECS registry."""

    def test_hubspot_credential_spec_exists(self):
        from aden_tools.credentials import CREDENTIAL_SPECS

        assert "hubspot" in CREDENTIAL_SPECS

    def test_hubspot_spec_env_var(self):
        from aden_tools.credentials import CREDENTIAL_SPECS

        assert CREDENTIAL_SPECS["hubspot"].env_var == "HUBSPOT_ACCESS_TOKEN"

    def test_hubspot_spec_tools(self):
        from aden_tools.credentials import CREDENTIAL_SPECS

        tool_names = CREDENTIAL_SPECS["hubspot"].tools
        assert "hubspot_search_contacts" in tool_names
        assert "hubspot_create_deal" in tool_names
        # One spec entry per registered MCP tool (4 each for contacts,
        # companies, deals).
        assert len(tool_names) == 12
@@ -1,10 +1,10 @@
# Web Scrape Tool
Scrape and extract text content from webpages.
Scrape and extract text content from webpages using a headless browser.
## Description
Use when you need to read the content of a specific URL, extract data from a website, or read articles/documentation. Automatically removes noise elements (scripts, navigation, footers) and extracts the main content.
Use when you need to read the content of a specific URL, extract data from a website, or read articles/documentation. Uses Playwright with stealth to render JavaScript-heavy pages and evade bot detection. Automatically removes noise elements (scripts, navigation, footers) and extracts the main content.
## Arguments
@@ -14,6 +14,18 @@ Use when you need to read the content of a specific URL, extract data from a web
| `selector` | str | No | `None` | CSS selector to target specific content (e.g., 'article', '.main-content') |
| `include_links` | bool | No | `False` | Include extracted links in the response |
| `max_length` | int | No | `50000` | Maximum length of extracted text (1000-500000) |
| `respect_robots_txt` | bool | No | `True` | Whether to respect robots.txt rules |
## Setup
Requires Chromium browser binaries:
```bash
pip install playwright playwright-stealth
playwright install chromium
```
In Docker, add `RUN playwright install chromium --with-deps` to the Dockerfile.
## Environment Variables
@@ -23,14 +35,18 @@ This tool does not require any environment variables.
Returns error dicts for common issues:
- `HTTP <status>: Failed to fetch URL` - Server returned error status
- `Navigation failed: no response received` - Browser could not navigate to URL
- `No elements found matching selector: <selector>` - CSS selector matched nothing
- `Request timed out` - Request exceeded 30s timeout
- `Network error: <error>` - Connection or DNS issues
- `Request timed out` - Page load exceeded 30s timeout
- `Browser error: <error>` - Playwright/Chromium error
- `Scraping failed: <error>` - HTML parsing or other error
## Notes
- Uses Playwright (Chromium) with playwright-stealth for bot detection evasion
- Renders JavaScript before extracting content (works with SPAs and dynamic pages)
- URLs without protocol are automatically prefixed with `https://`
- Follows redirects automatically
- Waits for `networkidle` before extracting content
- Removes script, style, nav, footer, header, aside, noscript, and iframe elements
- Auto-detects main content using article, main, or common content class selectors
- Respects robots.txt by default (uses httpx for lightweight robots.txt fetching)
@@ -1,113 +1,47 @@
"""
Web Scrape Tool - Extract content from web pages.
Uses httpx for requests and BeautifulSoup for HTML parsing.
Returns clean text content from web pages.
Respect robots.txt by default for ethical scraping.
Uses Playwright with stealth for headless browser scraping,
enabling JavaScript-rendered content and bot detection evasion.
Uses BeautifulSoup for HTML parsing and content extraction.
"""
from __future__ import annotations
from typing import Any
from urllib.parse import urljoin, urlparse
from urllib.robotparser import RobotFileParser
from urllib.parse import urljoin
import httpx
from bs4 import BeautifulSoup
from fastmcp import FastMCP
# Cache for robots.txt parsers (domain -> parser)
_robots_cache: dict[str, RobotFileParser | None] = {}
# User-Agent for the scraper - identifies as a bot for transparency
USER_AGENT = "AdenBot/1.0 (https://adenhq.com; web scraping tool)"
from playwright.async_api import (
Error as PlaywrightError,
TimeoutError as PlaywrightTimeout,
async_playwright,
)
from playwright_stealth import Stealth
# Browser-like User-Agent for actual page requests
BROWSER_USER_AGENT = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/120.0.0.0 Safari/537.36"
"Chrome/131.0.0.0 Safari/537.36"
)
def _get_robots_parser(base_url: str, timeout: float = 10.0) -> RobotFileParser | None:
"""
Fetch and parse robots.txt for a domain.
Args:
base_url: Base URL of the domain (e.g., 'https://example.com')
timeout: Timeout for fetching robots.txt
Returns:
RobotFileParser if robots.txt exists and was parsed, None otherwise
"""
if base_url in _robots_cache:
return _robots_cache[base_url]
robots_url = f"{base_url}/robots.txt"
parser = RobotFileParser()
try:
response = httpx.get(
robots_url,
headers={"User-Agent": USER_AGENT},
follow_redirects=True,
timeout=timeout,
)
if response.status_code == 200:
parser.parse(response.text.splitlines())
_robots_cache[base_url] = parser
return parser
else:
# No robots.txt or error (4xx/5xx) - allow all by convention
_robots_cache[base_url] = None
return None
except (httpx.TimeoutException, httpx.RequestError):
# Can't fetch robots.txt - allow but don't cache (might be temporary)
return None
def _is_allowed_by_robots(url: str) -> tuple[bool, str]:
"""
Check if URL is allowed by robots.txt.
Args:
url: Full URL to check
Returns:
Tuple of (allowed: bool, reason: str)
"""
parsed = urlparse(url)
base_url = f"{parsed.scheme}://{parsed.netloc}"
path = parsed.path or "/"
parser = _get_robots_parser(base_url)
if parser is None:
# No robots.txt found or couldn't fetch - all paths allowed
return True, "No robots.txt found or not accessible"
# Check both our bot user-agent and wildcard
if parser.can_fetch(USER_AGENT, path) and parser.can_fetch("*", path):
return True, "Allowed by robots.txt"
else:
return False, f"Blocked by robots.txt for path: {path}"
def register_tools(mcp: FastMCP) -> None:
"""Register web scrape tools with the MCP server."""
@mcp.tool()
def web_scrape(
async def web_scrape(
url: str,
selector: str | None = None,
include_links: bool = False,
max_length: int = 50000,
respect_robots_txt: bool = True,
) -> dict:
"""
Scrape and extract text content from a webpage.
Uses a headless browser to render JavaScript and bypass bot detection.
Use when you need to read the content of a specific URL,
extract data from a website, or read articles/documentation.
@@ -116,7 +50,6 @@ def register_tools(mcp: FastMCP) -> None:
selector: CSS selector to target specific content (e.g., 'article', '.main-content')
include_links: Include extracted links in the response
max_length: Maximum length of extracted text (1000-500000)
respect_robots_txt: Whether to respect robots.txt rules (default: True)
Returns:
Dict with scraped content (url, title, description, content, length) or error dict
@@ -126,45 +59,60 @@ def register_tools(mcp: FastMCP) -> None:
if not url.startswith(("http://", "https://")):
url = "https://" + url
# Check robots.txt if enabled
if respect_robots_txt:
allowed, reason = _is_allowed_by_robots(url)
if not allowed:
return {
"error": f"Scraping blocked: {reason}",
"blocked_by_robots_txt": True,
"url": url,
}
# Validate max_length
max_length = max(1000, min(max_length, 500000))
# Make request
response = httpx.get(
url,
headers={
"User-Agent": BROWSER_USER_AGENT,
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
},
follow_redirects=True,
timeout=30.0,
)
# Launch headless browser with stealth
async with async_playwright() as p:
browser = await p.chromium.launch(
headless=True,
args=[
"--no-sandbox",
"--disable-setuid-sandbox",
"--disable-dev-shm-usage",
"--disable-blink-features=AutomationControlled",
],
)
try:
context = await browser.new_context(
viewport={"width": 1920, "height": 1080},
user_agent=BROWSER_USER_AGENT,
locale="en-US",
)
page = await context.new_page()
await Stealth().apply_stealth_async(page)
if response.status_code != 200:
return {"error": f"HTTP {response.status_code}: Failed to fetch URL"}
response = await page.goto(
url,
wait_until="domcontentloaded",
timeout=60000,
)
# Check content type
content_type = response.headers.get("content-type", "").lower()
if not any(t in content_type for t in ["text/html", "application/xhtml+xml"]):
return {
"error": f"Skipping non-HTML content (Content-Type: {content_type})",
"url": url,
"skipped": True,
}
# Give JS a moment to render dynamic content
await page.wait_for_timeout(2000)
# Parse HTML
soup = BeautifulSoup(response.text, "html.parser")
if response is None:
return {"error": "Navigation failed: no response received"}
if response.status != 200:
return {"error": f"HTTP {response.status}: Failed to fetch URL"}
# Validate Content-Type
content_type = response.headers.get("content-type", "").lower()
if not any(t in content_type for t in ["text/html", "application/xhtml+xml"]):
return {
"error": (f"Skipping non-HTML content (Content-Type: {content_type})"),
"url": url,
"skipped": True,
}
# Get fully rendered HTML
html_content = await page.content()
finally:
await browser.close()
# Parse rendered HTML with BeautifulSoup
soup = BeautifulSoup(html_content, "html.parser")
# Remove noise elements
for tag in soup(
@@ -205,12 +153,11 @@ def register_tools(mcp: FastMCP) -> None:
text = text[:max_length] + "..."
result: dict[str, Any] = {
"url": str(response.url),
"url": url,
"title": title,
"description": description,
"content": text,
"length": len(text),
"robots_txt_respected": respect_robots_txt,
}
# Extract links if requested
@@ -228,9 +175,9 @@ def register_tools(mcp: FastMCP) -> None:
return result
except httpx.TimeoutException:
except PlaywrightTimeout:
return {"error": "Request timed out"}
except httpx.RequestError as e:
return {"error": f"Network error: {str(e)}"}
except PlaywrightError as e:
return {"error": f"Browser error: {e!s}"}
except Exception as e:
return {"error": f"Scraping failed: {str(e)}"}
return {"error": f"Scraping failed: {e!s}"}
@@ -17,12 +17,12 @@ import httpx
from fastmcp import FastMCP
if TYPE_CHECKING:
from aden_tools.credentials import CredentialManager
from aden_tools.credentials import CredentialStoreAdapter
def register_tools(
mcp: FastMCP,
credentials: CredentialManager | None = None,
credentials: CredentialStoreAdapter | None = None,
) -> None:
"""Register web search tools with the MCP server."""
+4 -4
View File
@@ -5,7 +5,7 @@ from pathlib import Path
import pytest
from fastmcp import FastMCP
from aden_tools.credentials import CredentialManager
from aden_tools.credentials import CredentialStoreAdapter
@pytest.fixture
@@ -15,9 +15,9 @@ def mcp() -> FastMCP:
@pytest.fixture
def mock_credentials() -> CredentialManager:
"""Create a CredentialManager with mock test credentials."""
return CredentialManager.for_testing(
def mock_credentials() -> CredentialStoreAdapter:
"""Create a CredentialStoreAdapter with mock test credentials."""
return CredentialStoreAdapter.for_testing(
{
"anthropic": "test-anthropic-api-key",
"brave_search": "test-brave-api-key",
+49 -195
View File
@@ -1,37 +1,37 @@
"""Tests for CredentialManager."""
"""Tests for CredentialStoreAdapter."""
import pytest
from aden_tools.credentials import (
CREDENTIAL_SPECS,
CredentialError,
CredentialManager,
CredentialSpec,
CredentialStoreAdapter,
)
class TestCredentialManager:
"""Tests for CredentialManager class."""
class TestCredentialStoreAdapter:
"""Tests for CredentialStoreAdapter class."""
def test_get_returns_env_value(self, monkeypatch):
"""get() returns environment variable value."""
monkeypatch.setenv("BRAVE_SEARCH_API_KEY", "test-api-key")
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
assert creds.get("brave_search") == "test-api-key"
def test_get_returns_none_when_not_set(self, monkeypatch, tmp_path):
def test_get_returns_none_when_not_set(self, monkeypatch):
"""get() returns None when env var is not set."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
creds = CredentialManager(dotenv_path=tmp_path / ".env")
creds = CredentialStoreAdapter.with_env_storage()
assert creds.get("brave_search") is None
def test_get_raises_for_unknown_credential(self):
"""get() raises KeyError for unknown credential name."""
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
with pytest.raises(KeyError) as exc_info:
creds.get("unknown_credential")
@@ -39,47 +39,33 @@ class TestCredentialManager:
assert "unknown_credential" in str(exc_info.value)
assert "Available" in str(exc_info.value)
def test_get_reads_fresh_for_hot_reload(self, monkeypatch):
"""get() reads fresh each time to support hot-reload."""
monkeypatch.setenv("BRAVE_SEARCH_API_KEY", "original-key")
creds = CredentialManager()
# First call
assert creds.get("brave_search") == "original-key"
# Change env var
monkeypatch.setenv("BRAVE_SEARCH_API_KEY", "new-key")
# Should return the new value (no caching)
assert creds.get("brave_search") == "new-key"
def test_is_available_true_when_set(self, monkeypatch):
"""is_available() returns True when credential is set."""
monkeypatch.setenv("BRAVE_SEARCH_API_KEY", "test-key")
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
assert creds.is_available("brave_search") is True
def test_is_available_false_when_not_set(self, monkeypatch, tmp_path):
def test_is_available_false_when_not_set(self, monkeypatch):
"""is_available() returns False when credential is not set."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
creds = CredentialManager(dotenv_path=tmp_path / ".env")
creds = CredentialStoreAdapter.with_env_storage()
assert creds.is_available("brave_search") is False
def test_is_available_false_for_empty_string(self, monkeypatch, tmp_path):
def test_is_available_false_for_empty_string(self, monkeypatch):
"""is_available() returns False for empty string."""
monkeypatch.setenv("BRAVE_SEARCH_API_KEY", "")
creds = CredentialManager(dotenv_path=tmp_path / ".env")
creds = CredentialStoreAdapter.with_env_storage()
assert creds.is_available("brave_search") is False
def test_get_spec_returns_spec(self):
"""get_spec() returns the credential spec."""
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
spec = creds.get_spec("brave_search")
@@ -88,33 +74,33 @@ class TestCredentialManager:
def test_get_spec_raises_for_unknown(self):
"""get_spec() raises KeyError for unknown credential."""
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
with pytest.raises(KeyError):
creds.get_spec("unknown")
class TestCredentialManagerToolMapping:
class TestCredentialStoreAdapterToolMapping:
"""Tests for tool-to-credential mapping."""
def test_get_credential_for_tool(self):
"""get_credential_for_tool() returns correct credential name."""
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
assert creds.get_credential_for_tool("web_search") == "brave_search"
def test_get_credential_for_tool_returns_none_for_unknown(self):
"""get_credential_for_tool() returns None for tools without credentials."""
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
assert creds.get_credential_for_tool("file_read") is None
assert creds.get_credential_for_tool("unknown_tool") is None
def test_get_missing_for_tools_returns_missing(self, monkeypatch, tmp_path):
def test_get_missing_for_tools_returns_missing(self, monkeypatch):
"""get_missing_for_tools() returns missing required credentials."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
creds = CredentialManager(dotenv_path=tmp_path / ".env")
creds = CredentialStoreAdapter.with_env_storage()
missing = creds.get_missing_for_tools(["web_search", "file_read"])
assert len(missing) == 1
@@ -126,14 +112,14 @@ class TestCredentialManagerToolMapping:
"""get_missing_for_tools() returns empty list when all credentials present."""
monkeypatch.setenv("BRAVE_SEARCH_API_KEY", "test-key")
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
missing = creds.get_missing_for_tools(["web_search", "file_read"])
assert missing == []
def test_get_missing_for_tools_no_duplicates(self, monkeypatch):
"""get_missing_for_tools() doesn't return duplicates for same credential."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
monkeypatch.delenv("SHARED_KEY", raising=False)
# Create spec where multiple tools share a credential
custom_specs = {
@@ -144,21 +130,21 @@ class TestCredentialManagerToolMapping:
)
}
creds = CredentialManager(specs=custom_specs)
creds = CredentialStoreAdapter.with_env_storage(specs=custom_specs)
missing = creds.get_missing_for_tools(["tool_a", "tool_b"])
# Should only appear once even though two tools need it
assert len(missing) == 1
class TestCredentialManagerValidation:
class TestCredentialStoreAdapterValidation:
"""Tests for validate_for_tools() behavior."""
def test_validate_for_tools_raises_for_missing(self, monkeypatch, tmp_path):
def test_validate_for_tools_raises_for_missing(self, monkeypatch):
"""validate_for_tools() raises CredentialError when required creds missing."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
creds = CredentialManager(dotenv_path=tmp_path / ".env")
creds = CredentialStoreAdapter.with_env_storage()
with pytest.raises(CredentialError) as exc_info:
creds.validate_for_tools(["web_search"])
@@ -172,21 +158,21 @@ class TestCredentialManagerValidation:
"""validate_for_tools() succeeds when all required credentials are set."""
monkeypatch.setenv("BRAVE_SEARCH_API_KEY", "test-key")
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
# Should not raise
creds.validate_for_tools(["web_search", "file_read"])
def test_validate_for_tools_passes_for_tools_without_credentials(self):
"""validate_for_tools() succeeds for tools that don't need credentials."""
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
# Should not raise - file_read doesn't need credentials
creds.validate_for_tools(["file_read"])
def test_validate_for_tools_passes_for_empty_list(self):
"""validate_for_tools() succeeds for empty tool list."""
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
# Should not raise
creds.validate_for_tools([])
@@ -202,18 +188,18 @@ class TestCredentialManagerValidation:
}
monkeypatch.delenv("OPTIONAL_KEY", raising=False)
creds = CredentialManager(specs=custom_specs)
creds = CredentialStoreAdapter.with_env_storage(specs=custom_specs)
# Should not raise because credential is optional
creds.validate_for_tools(["optional_tool"])
class TestCredentialManagerForTesting:
class TestCredentialStoreAdapterForTesting:
"""Tests for test factory method."""
def test_for_testing_uses_overrides(self):
"""for_testing() uses provided override values."""
creds = CredentialManager.for_testing({"brave_search": "mock-key"})
creds = CredentialStoreAdapter.for_testing({"brave_search": "mock-key"})
assert creds.get("brave_search") == "mock-key"
@@ -221,22 +207,22 @@ class TestCredentialManagerForTesting:
"""for_testing() ignores actual environment variables."""
monkeypatch.setenv("BRAVE_SEARCH_API_KEY", "real-key")
creds = CredentialManager.for_testing({"brave_search": "mock-key"})
creds = CredentialStoreAdapter.for_testing({"brave_search": "mock-key"})
assert creds.get("brave_search") == "mock-key"
def test_for_testing_validation_passes_with_overrides(self):
"""for_testing() credentials pass validation."""
creds = CredentialManager.for_testing({"brave_search": "mock-key"})
creds = CredentialStoreAdapter.for_testing({"brave_search": "mock-key"})
# Should not raise
creds.validate_for_tools(["web_search"])
def test_for_testing_validation_fails_without_override(self, monkeypatch, tmp_path):
def test_for_testing_validation_fails_without_override(self, monkeypatch):
"""for_testing() without override still fails validation."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
creds = CredentialManager.for_testing({}, dotenv_path=tmp_path / ".env") # No overrides
creds = CredentialStoreAdapter.for_testing({}) # No overrides
with pytest.raises(CredentialError):
creds.validate_for_tools(["web_search"])
@@ -251,7 +237,7 @@ class TestCredentialManagerForTesting:
)
}
creds = CredentialManager.for_testing(
creds = CredentialStoreAdapter.for_testing(
{"custom_cred": "test-value"},
specs=custom_specs,
)
@@ -326,7 +312,7 @@ class TestCredentialSpecs:
class TestNodeTypeValidation:
"""Tests for node type credential validation."""
def test_get_missing_for_node_types_returns_missing(self, monkeypatch, tmp_path):
def test_get_missing_for_node_types_returns_missing(self, monkeypatch):
"""get_missing_for_node_types() returns missing credentials."""
monkeypatch.delenv("REQUIRED_KEY", raising=False)
@@ -338,7 +324,7 @@ class TestNodeTypeValidation:
)
}
creds = CredentialManager(specs=custom_specs, dotenv_path=tmp_path / ".env")
creds = CredentialStoreAdapter.with_env_storage(specs=custom_specs)
missing = creds.get_missing_for_node_types(["required_node"])
assert len(missing) == 1
@@ -358,7 +344,7 @@ class TestNodeTypeValidation:
)
}
creds = CredentialManager(specs=custom_specs)
creds = CredentialStoreAdapter.with_env_storage(specs=custom_specs)
missing = creds.get_missing_for_node_types(["required_node"])
assert missing == []
@@ -367,12 +353,12 @@ class TestNodeTypeValidation:
"""get_missing_for_node_types() ignores node types without credentials."""
monkeypatch.setenv("ANTHROPIC_API_KEY", "test-key")
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
missing = creds.get_missing_for_node_types(["unknown_type", "another_type"])
assert missing == []
def test_validate_for_node_types_raises_for_missing(self, monkeypatch, tmp_path):
def test_validate_for_node_types_raises_for_missing(self, monkeypatch):
"""validate_for_node_types() raises CredentialError when missing."""
monkeypatch.delenv("REQUIRED_KEY", raising=False)
@@ -384,7 +370,7 @@ class TestNodeTypeValidation:
)
}
creds = CredentialManager(specs=custom_specs, dotenv_path=tmp_path / ".env")
creds = CredentialStoreAdapter.with_env_storage(specs=custom_specs)
with pytest.raises(CredentialError) as exc_info:
creds.validate_for_node_types(["required_node"])
@@ -397,7 +383,7 @@ class TestNodeTypeValidation:
"""validate_for_node_types() passes when credentials present."""
monkeypatch.setenv("ANTHROPIC_API_KEY", "test-key")
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
# Should not raise
creds.validate_for_node_types(["llm_generate", "llm_tool_use"])
@@ -406,7 +392,7 @@ class TestNodeTypeValidation:
class TestStartupValidation:
"""Tests for startup credential validation."""
def test_validate_startup_raises_for_missing(self, monkeypatch, tmp_path):
def test_validate_startup_raises_for_missing(self, monkeypatch):
"""validate_startup() raises CredentialError when startup creds missing."""
monkeypatch.delenv("STARTUP_KEY", raising=False)
@@ -418,7 +404,7 @@ class TestStartupValidation:
)
}
creds = CredentialManager(specs=custom_specs, dotenv_path=tmp_path / ".env")
creds = CredentialStoreAdapter.with_env_storage(specs=custom_specs)
with pytest.raises(CredentialError) as exc_info:
creds.validate_startup()
@@ -431,7 +417,7 @@ class TestStartupValidation:
"""validate_startup() passes when all startup creds are set."""
monkeypatch.setenv("ANTHROPIC_API_KEY", "test-key")
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
# Should not raise
creds.validate_startup()
@@ -441,146 +427,14 @@ class TestStartupValidation:
monkeypatch.setenv("ANTHROPIC_API_KEY", "test-key")
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
creds = CredentialManager()
creds = CredentialStoreAdapter.with_env_storage()
# Should not raise - BRAVE_SEARCH_API_KEY is not startup_required
creds.validate_startup()
def test_validate_startup_with_test_overrides(self):
"""validate_startup() works with for_testing() overrides."""
creds = CredentialManager.for_testing({"anthropic": "test-key"})
creds = CredentialStoreAdapter.for_testing({"anthropic": "test-key"})
# Should not raise
creds.validate_startup()
class TestDotenvReading:
"""Tests for .env file reading (hot-reload support)."""
def test_reads_from_dotenv_file(self, tmp_path, monkeypatch):
"""CredentialManager reads credentials from .env file."""
# Ensure env var is not set
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
# Create a .env file
dotenv_file = tmp_path / ".env"
dotenv_file.write_text("BRAVE_SEARCH_API_KEY=dotenv-key\n")
creds = CredentialManager(dotenv_path=dotenv_file)
assert creds.get("brave_search") == "dotenv-key"
def test_env_var_takes_precedence_over_dotenv(self, tmp_path, monkeypatch):
"""os.environ takes precedence over .env file."""
# Set both env var and .env file
monkeypatch.setenv("BRAVE_SEARCH_API_KEY", "env-key")
dotenv_file = tmp_path / ".env"
dotenv_file.write_text("BRAVE_SEARCH_API_KEY=dotenv-key\n")
creds = CredentialManager(dotenv_path=dotenv_file)
# Should return env var value, not dotenv value
assert creds.get("brave_search") == "env-key"
def test_missing_dotenv_file_returns_none(self, tmp_path, monkeypatch):
"""Missing .env file doesn't crash, returns None."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
# Point to non-existent file
dotenv_file = tmp_path / ".env" # Not created
creds = CredentialManager(dotenv_path=dotenv_file)
assert creds.get("brave_search") is None
def test_hot_reload_from_dotenv(self, tmp_path, monkeypatch):
"""CredentialManager picks up changes to .env file without restart."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
dotenv_file = tmp_path / ".env"
dotenv_file.write_text("BRAVE_SEARCH_API_KEY=original-key\n")
creds = CredentialManager(dotenv_path=dotenv_file)
# First read
assert creds.get("brave_search") == "original-key"
# Update the .env file (simulating user adding credential)
dotenv_file.write_text("BRAVE_SEARCH_API_KEY=updated-key\n")
# Should read the new value (hot-reload)
assert creds.get("brave_search") == "updated-key"
def test_is_available_works_with_dotenv(self, tmp_path, monkeypatch):
"""is_available() works correctly with .env file credentials."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
dotenv_file = tmp_path / ".env"
dotenv_file.write_text("BRAVE_SEARCH_API_KEY=dotenv-key\n")
creds = CredentialManager(dotenv_path=dotenv_file)
assert creds.is_available("brave_search") is True
def test_validation_works_with_dotenv(self, tmp_path, monkeypatch):
"""validate_for_tools() works with .env file credentials."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
dotenv_file = tmp_path / ".env"
dotenv_file.write_text("BRAVE_SEARCH_API_KEY=dotenv-key\n")
creds = CredentialManager(dotenv_path=dotenv_file)
# Should not raise because credential is available in .env
creds.validate_for_tools(["web_search"])
def test_dotenv_with_multiple_credentials(self, tmp_path, monkeypatch):
"""CredentialManager reads multiple credentials from .env file."""
monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
dotenv_file = tmp_path / ".env"
dotenv_file.write_text("ANTHROPIC_API_KEY=anthropic-key\nBRAVE_SEARCH_API_KEY=brave-key\n")
creds = CredentialManager(dotenv_path=dotenv_file)
assert creds.get("anthropic") == "anthropic-key"
assert creds.get("brave_search") == "brave-key"
def test_dotenv_with_quoted_values(self, tmp_path, monkeypatch):
"""CredentialManager handles quoted values in .env file."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
dotenv_file = tmp_path / ".env"
dotenv_file.write_text('BRAVE_SEARCH_API_KEY="quoted-key"\n')
creds = CredentialManager(dotenv_path=dotenv_file)
assert creds.get("brave_search") == "quoted-key"
def test_dotenv_with_comments(self, tmp_path, monkeypatch):
"""CredentialManager ignores comments in .env file."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
dotenv_file = tmp_path / ".env"
dotenv_file.write_text("# This is a comment\nBRAVE_SEARCH_API_KEY=key-after-comment\n")
creds = CredentialManager(dotenv_path=dotenv_file)
assert creds.get("brave_search") == "key-after-comment"
def test_overrides_take_precedence_over_dotenv(self, tmp_path, monkeypatch):
"""Test override values take precedence over .env file."""
monkeypatch.delenv("BRAVE_SEARCH_API_KEY", raising=False)
dotenv_file = tmp_path / ".env"
dotenv_file.write_text("BRAVE_SEARCH_API_KEY=dotenv-key\n")
creds = CredentialManager.for_testing(
{"brave_search": "override-key"},
)
# Note: for_testing doesn't use dotenv_path, but we test the principle
# that _overrides always win
assert creds.get("brave_search") == "override-key"