Merge remote-tracking branch 'upstream/main' into event-loop-arch
Resolve conflict in tools/mcp_server.py: take main's CredentialStoreAdapter.default(), which encapsulates the same CompositeStorage logic our branch had inline.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
+2 -12
@@ -218,19 +218,9 @@ class OnlineResearchAgent:
         tool_registry = ToolRegistry()

         # Load MCP servers (always load, needed for tool validation)
-        agent_dir = Path(__file__).parent
-        mcp_config_path = agent_dir / "mcp_servers.json"
+        mcp_config_path = Path(__file__).parent / "mcp_servers.json"
         if mcp_config_path.exists():
-            with open(mcp_config_path) as f:
-                mcp_servers = json.load(f)
-
-            for server_config in mcp_servers.get("servers", []):
-                # Resolve relative cwd paths
-                cwd = server_config.get("cwd")
-                if cwd and not Path(cwd).is_absolute():
-                    server_config["cwd"] = str(agent_dir / cwd)
-                tool_registry.register_mcp_server(server_config)
+            tool_registry.load_mcp_config(mcp_config_path)

         llm = None
         if not mock_mode:
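The net effect of this hunk: resolution of relative `cwd` entries moves out of the agent and into `ToolRegistry.load_mcp_config` (whose implementation appears later in this diff). A minimal sketch of the new call pattern — the `ToolRegistry` import path is not shown in this hunk and is assumed/hypothetical:

```python
from pathlib import Path

from framework.tools import ToolRegistry  # hypothetical module path

tool_registry = ToolRegistry()

# One call now replaces the inline open/json.load/cwd-resolution loop:
# load_mcp_config() reads mcp_servers.json, resolves relative "cwd"
# entries against the config file's parent directory, and registers
# each server, logging (not raising) on per-server failures.
config_path = Path(__file__).parent / "mcp_servers.json"
if config_path.exists():
    tool_registry.load_mcp_config(config_path)
```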
@@ -1,10 +1,10 @@
 ---
 name: setup-credentials
-description: Set up and install credentials for an agent. Detects missing credentials from agent config, collects them from the user, and stores them securely in the encrypted credential store at ~/.hive/credentials.
+description: Set up and install credentials for an agent. Detects missing credentials from agent config, collects them from the user, and stores them securely in the local encrypted store at ~/.hive/credentials.
 license: Apache-2.0
 metadata:
   author: hive
-  version: "2.1"
+  version: "2.2"
 type: utility
 ---
@@ -31,48 +31,96 @@ Determine which agent needs credentials. The user will either:

 Locate the agent's directory under `exports/{agent_name}/`.

-### Step 2: Detect Required Credentials
+### Step 2: Detect Required Credentials (Bash-First)

-Read the agent's configuration to determine which tools and node types it uses:
+Use bash commands to determine what the agent needs and what's already configured. This avoids Python import issues and works even when `HIVE_CREDENTIAL_KEY` is not set.

-```python
-from core.framework.runner import AgentRunner
+#### Step 2a: Read Agent Requirements

-runner = AgentRunner.load("exports/{agent_name}")
-validation = runner.validate()
+Extract `required_tools` and node types from the agent config:

-# validation.missing_credentials contains env var names
-# validation.warnings contains detailed messages with help URLs
+```bash
+# Get required tools
+jq -r '.required_tools[]?' exports/{agent_name}/agent.json 2>/dev/null
+
+# Get node types from graph nodes
+jq -r '.graph.nodes[]?.node_type' exports/{agent_name}/agent.json 2>/dev/null | sort -u
 ```

-Alternatively, check the credential store directly:
+Map the extracted tools and node types to credentials by reading the spec files directly:

-```python
-from core.framework.credentials import CredentialStore
-
-# Use encrypted storage (default: ~/.hive/credentials)
-store = CredentialStore.with_encrypted_storage()
-
-# Check what's available
-available = store.list_credentials()
-print(f"Available credentials: {available}")
-
-# Check if specific credential exists
-if store.is_available("hubspot"):
-    print("HubSpot credential found")
-else:
-    print("HubSpot credential missing")
+```bash
+# Read all credential specs — each file defines tools, node_types, env_var, and credential_id
+cat tools/src/aden_tools/credentials/llm.py tools/src/aden_tools/credentials/search.py tools/src/aden_tools/credentials/email.py tools/src/aden_tools/credentials/integrations.py
 ```

-To see all known credential specs (for help URLs and setup instructions):
+For each `CredentialSpec`, match its `tools` and `node_types` lists against the agent's required tools and node types. Extract the `env_var`, `credential_id`, and `credential_group` for every match. This is the list of needed credentials.

-```python
-from aden_tools.credentials import CREDENTIAL_SPECS
+#### Step 2b: Check Existing Credential Sources

-for name, spec in CREDENTIAL_SPECS.items():
-    print(f"{name}: env_var={spec.env_var}, aden={spec.aden_supported}")
-```
+For each needed credential, check three sources. A credential is "found" if it exists in ANY of them:
+
+**1. Encrypted store metadata index** (unencrypted JSON — no decryption key needed):
+
+```bash
+cat ~/.hive/credentials/metadata/index.json 2>/dev/null | jq -r '.credentials | keys[]'
+```
+
+If a credential ID appears in this list, it is stored in the encrypted store.
+
+**2. Environment variables:**
+
+```bash
+# Check each needed env var, e.g.:
+printenv ANTHROPIC_API_KEY > /dev/null 2>&1 && echo "ANTHROPIC_API_KEY: set" || echo "ANTHROPIC_API_KEY: not set"
+printenv BRAVE_SEARCH_API_KEY > /dev/null 2>&1 && echo "BRAVE_SEARCH_API_KEY: set" || echo "BRAVE_SEARCH_API_KEY: not set"
+```
+
+**3. Project `.env` file:**
+
+```bash
+# Check each needed env var, e.g.:
+grep -q '^ANTHROPIC_API_KEY=' .env 2>/dev/null && echo "ANTHROPIC_API_KEY: in .env" || echo "ANTHROPIC_API_KEY: not in .env"
+grep -q '^BRAVE_SEARCH_API_KEY=' .env 2>/dev/null && echo "BRAVE_SEARCH_API_KEY: in .env" || echo "BRAVE_SEARCH_API_KEY: not in .env"
+```
+
+#### Step 2c: HIVE_CREDENTIAL_KEY Check
+
+If any credentials were found in the encrypted store metadata index, verify the encryption key is available. The key is typically persisted to shell config by a previous setup-credentials run.
+
+Check both the current session AND shell config files:
+
+```bash
+# Check 1: Current session
+printenv HIVE_CREDENTIAL_KEY > /dev/null 2>&1 && echo "session: set" || echo "session: not set"
+
+# Check 2: Shell config files (where setup-credentials persists it)
+# Note: check each file individually to avoid non-zero exit when one doesn't exist
+for f in ~/.zshrc ~/.bashrc ~/.profile; do [ -f "$f" ] && grep -q 'HIVE_CREDENTIAL_KEY' "$f" && echo "$f"; done
+```
+
+Decision logic:
+- **In current session** — no action needed, credentials in the store are usable
+- **In shell config but NOT in current session** — the key is persisted but this shell hasn't sourced it. Run `source ~/.zshrc` (or `~/.bashrc`), then re-check. Credentials in the store are usable after sourcing.
+- **Not in session AND not in shell config** — the key was never persisted. Warn the user that credentials in the store cannot be decrypted. Help fix the key situation (recover/re-persist), do NOT re-collect credential values that are already stored.
+
+#### Step 2d: Compute Missing & Group
+
+Diff the "needed" credentials against the "found" credentials to get the truly missing list.
+
+Group related credentials by their `credential_group` field from the spec files. Credentials that share the same non-empty `credential_group` value should be presented as a single setup step rather than asking for each one individually.
+
+**If nothing is missing and there's no HIVE_CREDENTIAL_KEY issue:** Report all credentials as configured and skip Steps 3-5. Example:
+
+```
+All required credentials are already configured:
+✓ anthropic (ANTHROPIC_API_KEY) — found in encrypted store
+✓ brave_search (BRAVE_SEARCH_API_KEY) — found in environment
+Your agent is ready to run!
+```
+
+**If credentials are missing:** Continue to Step 3 with only the missing ones.

 ### Step 3: Present Auth Options for Each Missing Credential

 For each missing credential, check what authentication methods are available:
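Steps 2a-2d above describe a pipeline in prose. A compact sketch of that pipeline in Python, for readers who want the matching and diffing logic in one place — the `CredentialSpec` field names (`tools`, `node_types`, `env_var`, `credential_group`) are taken from the text above, and `CREDENTIAL_SPECS` is the registry the old version of this skill imported; everything else is illustrative, not the skill's actual implementation:

```python
import json
import os
from pathlib import Path

from aden_tools.credentials import CREDENTIAL_SPECS  # {name: CredentialSpec}


def detect_missing(agent_dir: Path) -> dict[str, list[str]]:
    agent = json.loads((agent_dir / "agent.json").read_text())
    required_tools = set(agent.get("required_tools", []))
    node_types = {n["node_type"] for n in agent.get("graph", {}).get("nodes", [])}

    # Step 2a: map tools/node types to credential specs.
    needed = {
        name: spec
        for name, spec in CREDENTIAL_SPECS.items()
        if required_tools & set(spec.tools) or node_types & set(spec.node_types)
    }

    # Step 2b: a credential is "found" if it is in the store index,
    # the environment, or the project .env file.
    index_file = Path.home() / ".hive/credentials/metadata/index.json"
    in_store: set[str] = set()
    if index_file.exists():
        in_store = set(json.loads(index_file.read_text()).get("credentials", {}))
    env_file = Path(".env").read_text() if Path(".env").exists() else ""

    def found(name, spec) -> bool:
        return (
            name in in_store
            or spec.env_var in os.environ
            or f"{spec.env_var}=" in env_file
        )

    # Step 2d: diff, then group by credential_group so grouped
    # credentials are presented as one setup step.
    missing = {n: s for n, s in needed.items() if not found(n, s)}
    groups: dict[str, list[str]] = {}
    for name, spec in missing.items():
        key = getattr(spec, "credential_group", None) or name
        groups.setdefault(key, []).append(name)
    return groups
```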
@@ -104,7 +152,7 @@ Present the available options using AskUserQuestion:
 ```
 Choose how to configure HUBSPOT_ACCESS_TOKEN:

-1) Aden Authorization Server (Recommended)
+1) Aden Platform (OAuth) (Recommended)
    Secure OAuth2 flow via integration.adenhq.com
    - Quick setup with automatic token refresh
    - No need to manage API keys manually
@@ -114,7 +162,7 @@ Choose how to configure HUBSPOT_ACCESS_TOKEN:
    - Requires creating a HubSpot Private App
    - Full control over scopes and permissions

-3) Custom Credential Store (Advanced)
+3) Local Credential Setup (Advanced)
    Programmatic configuration for CI/CD
    - For automated deployments
    - Requires manual API calls
@@ -122,7 +170,7 @@ Choose how to configure HUBSPOT_ACCESS_TOKEN:

 ### Step 4: Execute Auth Flow Based on User Choice

-#### Option 1: Aden Authorization Server
+#### Option 1: Aden Platform (OAuth)

 This is the recommended flow for supported integrations (HubSpot, etc.).
@@ -174,7 +222,7 @@ shell_type = detect_shell()  # 'bash', 'zsh', or 'unknown'
 success, config_path = add_env_var_to_shell_config(
     "ADEN_API_KEY",
     user_provided_key,
-    comment="Aden authorization server API key"
+    comment="Aden Platform (OAuth) API key"
 )

 if success:
@@ -313,7 +361,7 @@ if not result.valid:
     # 2. Continue anyway (not recommended)
 ```

-**4.2d. Store in Encrypted Credential Store**
+**4.2d. Store in Local Encrypted Store**

 ```python
 from core.framework.credentials import CredentialStore, CredentialObject, CredentialKey
@@ -340,7 +388,7 @@ store.save_credential(cred)
 export HUBSPOT_ACCESS_TOKEN="the-value"
 ```

-#### Option 3: Custom Credential Store (Advanced)
+#### Option 3: Local Credential Setup (Advanced)

 For programmatic/CI/CD setups.
@@ -408,10 +456,14 @@ Report the result to the user.

 Health checks validate credentials by making lightweight API calls:

-| Credential     | Endpoint                                 | What It Checks                    |
-| -------------- | ---------------------------------------- | --------------------------------- |
-| `hubspot`      | `GET /crm/v3/objects/contacts?limit=1`   | Bearer token validity, CRM scopes |
-| `brave_search` | `GET /res/v1/web/search?q=test&count=1`  | API key validity                  |
+| Credential      | Endpoint                                 | What It Checks                     |
+| --------------- | ---------------------------------------- | ---------------------------------- |
+| `anthropic`     | `POST /v1/messages`                      | API key validity                   |
+| `brave_search`  | `GET /res/v1/web/search?q=test&count=1`  | API key validity                   |
+| `google_search` | `GET /customsearch/v1?q=test&num=1`      | API key + CSE ID validity          |
+| `github`        | `GET /user`                              | Token validity, user identity      |
+| `hubspot`       | `GET /crm/v3/objects/contacts?limit=1`   | Bearer token validity, CRM scopes  |
+| `resend`        | `GET /domains`                           | API key validity                   |

 ```python
 from aden_tools.credentials import check_credential_health, HealthCheckResult
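The table maps each credential to a cheap, read-only probe. A self-contained sketch of what such a probe can look like over plain HTTP, using httpx (listed as a core dependency elsewhere in this commit); the status-code mapping follows the table, but the helper itself is illustrative — it is not the `check_credential_health` implementation:

```python
import httpx


def probe_brave_search(api_key: str) -> tuple[bool, str]:
    """Lightweight validity probe mirroring the table's brave_search row."""
    resp = httpx.get(
        "https://api.search.brave.com/res/v1/web/search",
        params={"q": "test", "count": 1},
        headers={"X-Subscription-Token": api_key},
        timeout=10.0,
    )
    if resp.status_code == 200:
        return True, "API key valid"
    if resp.status_code in (401, 403):
        return False, "invalid or expired key"
    if resp.status_code == 429:
        return True, "rate limited, but key is valid"
    return False, f"unexpected status {resp.status_code}"
```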
@@ -424,7 +476,7 @@ result: HealthCheckResult = check_credential_health("hubspot", token_value)

 ## Encryption Key (HIVE_CREDENTIAL_KEY)

-The encrypted credential store requires `HIVE_CREDENTIAL_KEY` to encrypt/decrypt credentials.
+The local encrypted store requires `HIVE_CREDENTIAL_KEY` to encrypt/decrypt credentials.

 - If the user doesn't have one, `EncryptedFileStorage` will auto-generate one and log it
 - The user MUST persist this key (e.g., in `~/.bashrc` or a secrets manager)
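Since the key must be persisted, the skill can reuse the `add_env_var_to_shell_config` helper shown earlier in the Option 1 flow. A sketch under stated assumptions: the helper's signature is taken from this diff but its import path is not shown (hypothetical), and the key format follows the Fernet encryption named in the `store_credential` docstring later in this commit:

```python
from cryptography.fernet import Fernet

# EncryptedFileStorage auto-generates a key when none exists; a Fernet
# key generated manually has the same shape (urlsafe base64).
key = Fernet.generate_key().decode()

# Persist it the same way the skill persists ADEN_API_KEY above.
# add_env_var_to_shell_config's module is not shown in this diff —
# the call below assumes it is importable in scope (hypothetical).
success, config_path = add_env_var_to_shell_config(
    "HIVE_CREDENTIAL_KEY",
    key,
    comment="Hive credential store encryption key",
)
if success:
    print(f"Persisted HIVE_CREDENTIAL_KEY to {config_path} — source it or restart the shell.")
```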
@@ -443,7 +495,7 @@ If `HIVE_CREDENTIAL_KEY` is not set:
 - **NEVER** store credentials in plaintext files, git-tracked files, or agent configs
 - **NEVER** hardcode credentials in source code
 - **ALWAYS** use `SecretStr` from Pydantic when handling credential values in Python
-- **ALWAYS** use the encrypted credential store (`~/.hive/credentials`) for persistence
+- **ALWAYS** use the local encrypted store (`~/.hive/credentials`) for persistence
 - **ALWAYS** run health checks before storing credentials (when possible)
 - **ALWAYS** verify credentials were stored by re-running validation, not by reading them back
 - When modifying `~/.bashrc` or `~/.zshrc`, confirm with the user first
@@ -456,7 +508,8 @@ All credential specs are defined in `tools/src/aden_tools/credentials/`:
 | ----------------- | ------------- | ---------------------------------------------- | -------------- |
 | `llm.py`          | LLM Providers | `anthropic`                                    | No             |
 | `search.py`       | Search Tools  | `brave_search`, `google_search`, `google_cse`  | No             |
-| `integrations.py` | Integrations  | `hubspot`                                      | Yes            |
+| `email.py`        | Email         | `resend`                                       | No             |
+| `integrations.py` | Integrations  | `github`, `hubspot`                            | No / Yes       |

 **Note:** Additional LLM providers (Cerebras, Groq, OpenAI) are handled by LiteLLM via environment
 variables (`CEREBRAS_API_KEY`, `GROQ_API_KEY`, `OPENAI_API_KEY`) but are not yet in CREDENTIAL_SPECS.
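Because those providers route through LiteLLM, no CredentialSpec is needed for them: LiteLLM resolves the provider from the model prefix and reads the matching key from the environment. A minimal sketch (the model name is illustrative, not prescribed by this repo):

```python
import os

import litellm

# LiteLLM resolves the provider from the "groq/" prefix and reads
# GROQ_API_KEY from the environment — no explicit key plumbing needed.
os.environ.setdefault("GROQ_API_KEY", "gsk-...")  # placeholder value

response = litellm.completion(
    model="groq/llama-3.3-70b-versatile",  # illustrative model id
    messages=[{"role": "user", "content": "ping"}],
)
print(response.choices[0].message.content)
```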
@@ -507,66 +560,90 @@ token = store.get_key("hubspot", "access_token")
 ## Example Session

 ```
-User: /setup-credentials for my hubspot-agent
+User: /setup-credentials for my research-agent

-Agent: Let me check what credentials your hubspot-agent needs.
+Agent: Let me check what credentials your research-agent needs.

-[Runs validation, finds ANTHROPIC_API_KEY and HUBSPOT_ACCESS_TOKEN missing]
+[Reads agent config]
+$ jq -r '.required_tools[]?' exports/research-agent/agent.json
+web_search
+google_search

-Agent: Your hubspot-agent requires 2 credentials:
+$ jq -r '.graph.nodes[]?.node_type' exports/research-agent/agent.json | sort -u
+llm_tool_use

-1. ANTHROPIC_API_KEY - Anthropic API key for LLM calls
-2. HUBSPOT_ACCESS_TOKEN - HubSpot access token
+[Maps tools/nodes to credentials using lookup table]
+Needed: anthropic, brave_search, google_search, google_cse

-Let's set these up one at a time.
+[Checks encrypted store metadata index]
+$ cat ~/.hive/credentials/metadata/index.json | jq -r '.credentials | keys[]'
+anthropic
+brave_search

---- Setting up ANTHROPIC_API_KEY ---
+[Checks environment variables]
+$ printenv ANTHROPIC_API_KEY > /dev/null 2>&1 && echo "set" || echo "not set"
+not set
+$ printenv BRAVE_SEARCH_API_KEY > /dev/null 2>&1 && echo "set" || echo "not set"
+not set
+$ printenv GOOGLE_API_KEY > /dev/null 2>&1 && echo "set" || echo "not set"
+not set
+$ printenv GOOGLE_CSE_ID > /dev/null 2>&1 && echo "set" || echo "not set"
+not set

-This credential only supports direct API key entry.
+[Checks HIVE_CREDENTIAL_KEY since credentials found in store]
+$ printenv HIVE_CREDENTIAL_KEY > /dev/null 2>&1 && echo "session: set" || echo "session: not set"
+session: not set
+$ for f in ~/.zshrc ~/.bashrc ~/.profile; do [ -f "$f" ] && grep -q 'HIVE_CREDENTIAL_KEY' "$f" && echo "$f"; done
+/Users/user/.zshrc

-[Shows instructions for getting Anthropic API key]
-[AskUserQuestion: "Please provide your Anthropic API key:"]
-[User provides key]
-[Stores in credential store, exports to env]
+[Key is in shell config but not current session — sourcing it]
+$ source ~/.zshrc

---- Setting up HUBSPOT_ACCESS_TOKEN ---
+[Computes missing credentials]
+Found:
+✓ anthropic (ANTHROPIC_API_KEY) — in encrypted store
+✓ brave_search (BRAVE_SEARCH_API_KEY) — in encrypted store
+Missing:
+✗ google_search (GOOGLE_API_KEY)
+✗ google_cse (GOOGLE_CSE_ID)

-[AskUserQuestion with 3 options]
+Agent: 2 of 4 required credentials are already configured. Only Google Custom
+Search needs setup (2 values as a single group).

-Choose how to configure HUBSPOT_ACCESS_TOKEN:
-1) Aden Authorization Server (Recommended)
-2) Direct API Key
-3) Custom Credential Store (Advanced)
+--- Setting up Google Custom Search (google_search + google_cse) ---

-User: 1 (Aden)
+This requires two values that work together.

-Agent: Great! Let me check if you have an Aden API key configured...
+First, the Google API Key:
+1. Go to https://console.cloud.google.com/apis/credentials
+2. Create a new project (or select an existing one)
+3. Enable the "Custom Search API" from the API Library
+4. Go to Credentials > Create Credentials > API Key
+5. Copy the generated API key

-[Checks for ADEN_API_KEY, not found]
-[Opens browser to integration.adenhq.com/setup]
-
-Agent: I've opened Aden's setup page. Please:
-1. Sign in or create an account
-2. Connect your HubSpot account (OAuth happens on Aden's site)
-3. Copy the API key shown after setup
-
-[AskUserQuestion: "Please provide your Aden API key:"]
+[AskUserQuestion: "Please provide your Google API key:"]
 [User provides key]

-Agent: Would you like me to save this to your shell config (~/.zshrc) for future sessions?
+Now, the Custom Search Engine ID:
+1. Go to https://programmablesearchengine.google.com/controlpanel/all
+2. Click "Add" to create a new search engine
+3. Under "What to search", select "Search the entire web"
+4. Give your search engine a name
+5. Click "Create"
+6. Copy the Search Engine ID (cx value)

-[User approves]
-[Saves to ~/.zshrc and ~/.hive/configuration.json]
+[AskUserQuestion: "Please provide your Google CSE ID:"]
+[User provides ID]

-Agent: Let me sync your credentials from Aden...
+[Runs health check with both values - GET /customsearch/v1?q=test&num=1 → 200 OK]
+[Stores both in local encrypted store, exports to env]

-[Syncs credentials from Aden server - OAuth already done on Aden's side]
-[Runs health check]
-
-Agent: HubSpot credentials validated successfully!
+✓ Google Custom Search credentials valid

 All credentials are now configured:
-- ANTHROPIC_API_KEY: Stored in encrypted credential store
-- HUBSPOT_ACCESS_TOKEN: Synced from Aden (OAuth completed on Aden)
-- Validation passed - your agent is ready to run!
+✓ anthropic (ANTHROPIC_API_KEY) — already in encrypted store
+✓ brave_search (BRAVE_SEARCH_API_KEY) — already in encrypted store
+✓ google_search (GOOGLE_API_KEY) — stored in encrypted store
+✓ google_cse (GOOGLE_CSE_ID) — stored in encrypted store
+Your agent is ready to run!
 ```
@@ -1,9 +1,10 @@
 ---
 name: Bug Report
 about: Report a bug to help us improve
-title: '[Bug]: '
-labels: bug
+title: "[Bug]: "
+labels: bug, enhancement
 assignees: ''

 ---

 ## Describe the Bug
@@ -1,9 +1,10 @@
 ---
 name: Feature Request
 about: Suggest a new feature or enhancement
-title: '[Feature]: '
+title: "[Feature]: "
 labels: enhancement
 assignees: ''

 ---

 ## Problem Statement
@@ -0,0 +1,71 @@
+---
+name: Integration Request
+about: Suggest a new integration
+title: "[Integration]:"
+labels: ''
+assignees: ''
+
+---
+
+## Service
+
+Name and brief description of the service and what it enables agents to do.
+
+**Description:** [e.g., "API key for Slack Bot" — short one-liner for the credential spec]
+
+## Credential Identity
+
+- **credential_id:** [e.g., `slack`]
+- **env_var:** [e.g., `SLACK_BOT_TOKEN`]
+- **credential_key:** [e.g., `access_token`, `api_key`, `bot_token`]
+
+## Tools
+
+Tool function names that require this credential:
+
+- [e.g., `slack_send_message`]
+- [e.g., `slack_list_channels`]
+
+## Auth Methods
+
+- **Direct API key supported:** Yes / No
+- **Aden OAuth supported:** Yes / No
+
+If Aden OAuth is supported, describe the OAuth scopes/permissions required.
+
+## How to Get the Credential
+
+Link where users obtain the key/token:
+
+[e.g., https://api.slack.com/apps]
+
+Step-by-step instructions:
+
+1. Go to ...
+2. Create a ...
+3. Select scopes/permissions: ...
+4. Copy the key/token
+
+## Health Check
+
+A lightweight API call to validate the credential (no writes, no charges).
+
+- **Endpoint:** [e.g., `https://slack.com/api/auth.test`]
+- **Method:** [e.g., `GET` or `POST`]
+- **Auth header:** [e.g., `Authorization: Bearer {token}` or `X-Api-Key: {key}`]
+- **Parameters (if any):** [e.g., `?limit=1`]
+- **200 means:** [e.g., key is valid]
+- **401 means:** [e.g., invalid or expired]
+- **429 means:** [e.g., rate limited but key is valid]
+
+## Credential Group
+
+Does this require multiple credentials configured together? (e.g., Google Custom Search needs
+both an API key and a CSE ID)
+
+- [ ] No, single credential
+- [ ] Yes — list the other credential IDs in the group:
+
+## Additional Context
+
+Links to API docs, rate limits, free tier availability, or anything else relevant.
+18 -18
@@ -21,23 +21,22 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: '3.11'
-          cache: 'pip'
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4

       - name: Install dependencies
-        run: |
-          cd core
-          pip install -e .
-          pip install -r requirements-dev.txt
+        run: uv sync --project core --group dev

       - name: Ruff lint
         run: |
-          ruff check core/
-          ruff check tools/
+          uv run --project core ruff check core/
+          uv run --project core ruff check tools/

       - name: Ruff format
         run: |
-          ruff format --check core/
-          ruff format --check tools/
+          uv run --project core ruff format --check core/
+          uv run --project core ruff format --check tools/

   test:
     name: Test Python Framework
@@ -52,18 +51,19 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: '3.11'
-          cache: 'pip'
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4

       - name: Install dependencies
         run: |
-          cd core
-          pip install -e .
-          pip install -r requirements-dev.txt
+          uv sync

       - name: Run tests
         run: |
           cd core
-          pytest tests/ -v
+          uv run pytest tests/ -v

   test-tools:
     name: Test Tools
@@ -83,8 +83,7 @@ jobs:
         run: |
           cd tools
           uv sync --extra dev
-          uv pip install --python .venv/bin/python -e ../core
-          uv run --extra dev pytest tests/ -v
+          uv run pytest tests/ -v

   validate:
     name: Validate Agent Exports
@@ -97,13 +96,14 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: '3.11'
-          cache: 'pip'
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4

       - name: Install dependencies
         run: |
-          cd core
-          pip install -e .
-          pip install -r requirements-dev.txt
+          uv sync

       - name: Validate exported agents
         run: |
@@ -21,18 +21,19 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: '3.11'
-          cache: 'pip'
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4

       - name: Install dependencies
         run: |
-          cd core
-          pip install -e .
-          pip install -r requirements-dev.txt
+          uv sync

       - name: Run tests
         run: |
           cd core
-          pytest tests/ -v
+          uv run pytest tests/ -v

       - name: Generate changelog
         id: changelog
@@ -1,20 +1,14 @@
 {
   "mcpServers": {
     "agent-builder": {
-      "command": ".venv/bin/python",
-      "args": ["-m", "framework.mcp.agent_builder_server"],
-      "cwd": "core",
-      "env": {
-        "PYTHONPATH": "../tools/src"
-      }
+      "command": "uv",
+      "args": ["run", "-m", "framework.mcp.agent_builder_server"],
+      "cwd": "core"
     },
     "tools": {
-      "command": ".venv/bin/python",
-      "args": ["mcp_server.py", "--stdio"],
-      "cwd": "tools",
-      "env": {
-        "PYTHONPATH": "src:../core"
-      }
+      "command": "uv",
+      "args": ["run", "mcp_server.py", "--stdio"],
+      "cwd": "tools"
     }
   }
 }
@@ -88,11 +88,13 @@ Aden is a platform for building, deploying, operating, and adapting AI agents:

 ## Quick Start

-### Prerequisites
+## Prerequisites

-- [Python 3.11+](https://www.python.org/downloads/) for agent development
+- Python 3.11+ for agent development
 - Claude Code or Cursor for utilizing agent skills

+> **Note for Windows Users:** It is strongly recommended to use **WSL (Windows Subsystem for Linux)** or **Git Bash** to run this framework. Some core automation scripts may not execute correctly in standard Command Prompt or PowerShell.
+
 ### Installation

 ```bash
@@ -21,6 +21,7 @@ import logging
 from abc import ABC, abstractmethod
 from collections.abc import Callable
 from dataclasses import dataclass, field
+from datetime import UTC
 from typing import Any

 from pydantic import BaseModel, Field
@@ -1533,6 +1534,8 @@ Do NOT fabricate data or return empty objects."""

     def _build_system_prompt(self, ctx: NodeContext) -> str:
         """Build the system prompt."""
+        from datetime import datetime
+
         parts = []

         if ctx.node_spec.system_prompt:
@@ -1555,6 +1558,15 @@ Do NOT fabricate data or return empty objects."""

         parts.append(prompt)

+        # Inject current datetime so LLM knows "now"
+        utc_dt = datetime.now(UTC)
+        local_dt = datetime.now().astimezone()
+        local_tz_name = local_dt.tzname() or "Unknown"
+        parts.append("\n## Runtime Context")
+        parts.append(f"- Current Date/Time (UTC): {utc_dt.isoformat()}")
+        parts.append(f"- Local Timezone: {local_tz_name}")
+        parts.append(f"- Current Date/Time (Local): {local_dt.isoformat()}")
+
         if ctx.goal_context:
             parts.append("\n# Goal Context")
             parts.append(ctx.goal_context)
@@ -18,7 +18,7 @@ def _get_api_key_from_credential_store() -> str | None:
     try:
         from aden_tools.credentials import CredentialStoreAdapter

-        creds = CredentialStoreAdapter.with_env_storage()
+        creds = CredentialStoreAdapter.default()
         if creds.is_available("anthropic"):
             return creds.get("anthropic")
     except ImportError:
@@ -516,6 +516,36 @@ def _validate_tool_credentials(tools_list: list[str]) -> dict | None:
     return None


+def _validate_agent_path(agent_path: str) -> tuple[Path | None, str | None]:
+    """
+    Validate and normalize agent_path.
+
+    Returns:
+        (Path, None) if valid
+        (None, error_json) if invalid
+    """
+    if not agent_path:
+        return None, json.dumps(
+            {
+                "success": False,
+                "error": "agent_path is required (e.g., 'exports/my_agent')",
+            }
+        )
+
+    path = Path(agent_path)
+
+    if not path.exists():
+        return None, json.dumps(
+            {
+                "success": False,
+                "error": f"Agent path not found: {path}",
+                "hint": "Run export_graph to create an agent in exports/ first",
+            }
+        )
+
+    return path, None
+
+
 @mcp.tool()
 def add_node(
     node_id: Annotated[str, "Unique identifier for the node"],
@@ -2597,10 +2627,11 @@ def generate_constraint_tests(
     if not agent_path and _session:
         agent_path = f"exports/{_session.name}"

-    if not agent_path:
-        return json.dumps({"error": "agent_path required (e.g., 'exports/my_agent')"})
+    path, err = _validate_agent_path(agent_path)
+    if err:
+        return err

-    agent_module = _get_agent_module_from_path(agent_path)
+    agent_module = _get_agent_module_from_path(path)

     # Format constraints for display
     constraints_formatted = (
@@ -2619,9 +2650,9 @@
     return json.dumps(
         {
             "goal_id": goal_id,
-            "agent_path": agent_path,
+            "agent_path": str(path),
             "agent_module": agent_module,
-            "output_file": f"{agent_path}/tests/test_constraints.py",
+            "output_file": f"{str(path)}/tests/test_constraints.py",
             "constraints": [c.model_dump() for c in goal.constraints] if goal.constraints else [],
             "constraints_formatted": constraints_formatted,
             "test_guidelines": {
@@ -2677,10 +2708,11 @@ def generate_success_tests(
     if not agent_path and _session:
         agent_path = f"exports/{_session.name}"

-    if not agent_path:
-        return json.dumps({"error": "agent_path required (e.g., 'exports/my_agent')"})
+    path, err = _validate_agent_path(agent_path)
+    if err:
+        return err

-    agent_module = _get_agent_module_from_path(agent_path)
+    agent_module = _get_agent_module_from_path(path)

     # Parse node/tool names for context
     nodes = [n.strip() for n in node_names.split(",") if n.strip()]
@@ -2705,9 +2737,9 @@
     return json.dumps(
         {
             "goal_id": goal_id,
-            "agent_path": agent_path,
+            "agent_path": str(path),
             "agent_module": agent_module,
-            "output_file": f"{agent_path}/tests/test_success_criteria.py",
+            "output_file": f"{str(path)}/tests/test_success_criteria.py",
             "success_criteria": [c.model_dump() for c in goal.success_criteria]
             if goal.success_criteria
             else [],
@@ -2766,7 +2798,11 @@ def run_tests(
     import re
     import subprocess

-    tests_dir = Path(agent_path) / "tests"
+    path, err = _validate_agent_path(agent_path)
+    if err:
+        return err
+
+    tests_dir = path / "tests"

     if not tests_dir.exists():
         return json.dumps(
@@ -2957,10 +2993,11 @@ def debug_test(
     if not agent_path and _session:
         agent_path = f"exports/{_session.name}"

-    if not agent_path:
-        return json.dumps({"error": "agent_path required (e.g., 'exports/my_agent')"})
+    path, err = _validate_agent_path(agent_path)
+    if err:
+        return err

-    tests_dir = Path(agent_path) / "tests"
+    tests_dir = path / "tests"

     if not tests_dir.exists():
         return json.dumps(
@@ -3101,10 +3138,11 @@ def list_tests(
     if not agent_path and _session:
         agent_path = f"exports/{_session.name}"

-    if not agent_path:
-        return json.dumps({"error": "agent_path required (e.g., 'exports/my_agent')"})
+    path, err = _validate_agent_path(agent_path)
+    if err:
+        return err

-    tests_dir = Path(agent_path) / "tests"
+    tests_dir = path / "tests"

     if not tests_dir.exists():
         return json.dumps(
@@ -3379,7 +3417,7 @@ def store_credential(
     display_name: Annotated[str, "Human-readable name (e.g., 'HubSpot Access Token')"] = "",
 ) -> str:
     """
-    Store a credential securely in the encrypted credential store at ~/.hive/credentials.
+    Store a credential securely in the local encrypted store at ~/.hive/credentials.

     Uses Fernet encryption (AES-128-CBC + HMAC). Requires HIVE_CREDENTIAL_KEY env var.
     """
@@ -3421,7 +3459,7 @@
 @mcp.tool()
 def list_stored_credentials() -> str:
     """
-    List all credentials currently stored in the encrypted credential store.
+    List all credentials currently stored in the local encrypted store.

     Returns credential IDs and metadata (never returns secret values).
     """
@@ -3461,7 +3499,7 @@ def delete_stored_credential(
     credential_name: Annotated[str, "Logical credential name to delete (e.g., 'hubspot')"],
 ) -> str:
     """
-    Delete a credential from the encrypted credential store.
+    Delete a credential from the local encrypted store.
     """
     try:
         store = _get_credential_store()
@@ -411,25 +411,8 @@ class AgentRunner:
         return self._tool_registry.register_mcp_server(server_config)

     def _load_mcp_servers_from_config(self, config_path: Path) -> None:
-        """
-        Load and register MCP servers from a configuration file.
-
-        Args:
-            config_path: Path to mcp_servers.json file
-        """
-        try:
-            with open(config_path) as f:
-                config = json.load(f)
-
-            servers = config.get("servers", [])
-            for server_config in servers:
-                try:
-                    self._tool_registry.register_mcp_server(server_config)
-                except Exception as e:
-                    server_name = server_config.get("name", "unknown")
-                    logger.warning(f"Failed to register MCP server '{server_name}': {e}")
-        except Exception as e:
-            logger.warning(f"Failed to load MCP servers config from {config_path}: {e}")
+        """Load and register MCP servers from a configuration file."""
+        self._tool_registry.load_mcp_config(config_path)

     def set_approval_callback(self, callback: Callable) -> None:
         """
@@ -257,6 +257,34 @@ class ToolRegistry:
         """
         self._session_context.update(context)

+    def load_mcp_config(self, config_path: Path) -> None:
+        """
+        Load and register MCP servers from a config file.
+
+        Resolves relative ``cwd`` paths against the config file's parent
+        directory so callers never need to handle path resolution themselves.
+
+        Args:
+            config_path: Path to an ``mcp_servers.json`` file.
+        """
+        try:
+            with open(config_path) as f:
+                config = json.load(f)
+        except Exception as e:
+            logger.warning(f"Failed to load MCP config from {config_path}: {e}")
+            return
+
+        base_dir = config_path.parent
+        for server_config in config.get("servers", []):
+            cwd = server_config.get("cwd")
+            if cwd and not Path(cwd).is_absolute():
+                server_config["cwd"] = str((base_dir / cwd).resolve())
+            try:
+                self.register_mcp_server(server_config)
+            except Exception as e:
+                name = server_config.get("name", "unknown")
+                logger.warning(f"Failed to register MCP server '{name}': {e}")
+
     def register_mcp_server(
         self,
         server_config: dict[str, Any],
@@ -309,11 +337,21 @@ class ToolRegistry:
                 tool = self._convert_mcp_tool_to_framework_tool(mcp_tool)

                 # Create executor that calls the MCP server
-                def make_mcp_executor(client_ref: MCPClient, tool_name: str, registry_ref):
+                def make_mcp_executor(
+                    client_ref: MCPClient,
+                    tool_name: str,
+                    registry_ref,
+                    tool_params: set[str],
+                ):
                     def executor(inputs: dict) -> Any:
                         try:
-                            # Inject session context for tools that need it
-                            merged_inputs = {**registry_ref._session_context, **inputs}
+                            # Only inject session context params the tool accepts
+                            filtered_context = {
+                                k: v
+                                for k, v in registry_ref._session_context.items()
+                                if k in tool_params
+                            }
+                            merged_inputs = {**filtered_context, **inputs}
                             result = client_ref.call_tool(tool_name, merged_inputs)
                             # MCP tools return content array, extract the result
                             if isinstance(result, list) and len(result) > 0:
@@ -327,10 +365,11 @@ class ToolRegistry:

                     return executor

+                tool_params = set(mcp_tool.input_schema.get("properties", {}).keys())
                 self.register(
                     mcp_tool.name,
                     tool,
-                    make_mcp_executor(client, mcp_tool.name, self),
+                    make_mcp_executor(client, mcp_tool.name, self, tool_params),
                 )
                 count += 1
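The filtering change above prevents session-context keys from being passed to MCP tools whose input schemas don't declare them (strict servers can reject unexpected arguments). A toy illustration of the before/after behavior, independent of the framework:

```python
# Hypothetical session context shared by the registry.
session_context = {"session_id": "abc123", "workspace": "/tmp/ws"}

# A tool whose schema declares only these input properties.
tool_params = {"query", "session_id"}

inputs = {"query": "hello"}

# Before: every context key was merged in — a strict MCP server could
# reject "workspace" as an unexpected argument.
before = {**session_context, **inputs}

# After: only declared params are injected.
after = {**{k: v for k, v in session_context.items() if k in tool_params}, **inputs}

assert before == {"session_id": "abc123", "workspace": "/tmp/ws", "query": "hello"}
assert after == {"session_id": "abc123", "query": "hello"}
```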
@@ -24,7 +24,7 @@ def _get_api_key():
     # 1. Try CredentialStoreAdapter for Anthropic
     try:
         from aden_tools.credentials import CredentialStoreAdapter
-        creds = CredentialStoreAdapter.with_env_storage()
+        creds = CredentialStoreAdapter.default()
         if creds.is_available("anthropic"):
             return creds.get("anthropic")
     except (ImportError, KeyError):
@@ -57,7 +57,7 @@ def _get_api_key():
     """Get API key from CredentialStoreAdapter or environment."""
     try:
         from aden_tools.credentials import CredentialStoreAdapter
-        creds = CredentialStoreAdapter.with_env_storage()
+        creds = CredentialStoreAdapter.default()
         if creds.is_available("anthropic"):
             return creds.get("anthropic")
     except (ImportError, KeyError):
@@ -14,6 +14,7 @@ dependencies = [
     "pytest>=8.0",
     "pytest-asyncio>=0.23",
     "pytest-xdist>=3.0",
+    "tools",
 ]

 # [project.optional-dependencies]
@@ -21,6 +22,9 @@ dependencies = [
 [project.scripts]
 hive = "framework.cli:main"

+[tool.uv.sources]
+tools = { workspace = true }
+
 [build-system]
 requires = ["hatchling"]
 build-backend = "hatchling.build"
@@ -1,10 +0,0 @@
-# Development dependencies
--r requirements.txt
-
-# Testing
-pytest>=8.0
-pytest-asyncio>=0.23
-
-# Linting & type checking
-ruff>=0.1.0
-mypy>=1.0
@@ -1,14 +0,0 @@
-# Core dependencies
-pydantic>=2.0
-anthropic>=0.40.0
-httpx>=0.27.0
-litellm>=1.81.0
-
-# MCP server dependencies
-mcp
-fastmcp
-
-# Testing (required for test framework)
-pytest>=8.0
-pytest-asyncio>=0.23
-pytest-xdist>=3.0
@@ -1757,7 +1757,7 @@ tools/src/aden_tools/credentials/

 ### Manual Testing

-- [ ] Create encrypted credential store
+- [ ] Create local encrypted store
 - [ ] Save and load multi-key credentials
 - [ ] Verify template resolution in tool headers
 - [ ] Test OAuth2 token refresh
+1 -1
@@ -41,7 +41,7 @@ Visita [adenhq.com](https://adenhq.com) para documentación completa, ejemplos y
 ## ¿Qué es Aden?

 <p align="center">
-  <img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
+  <img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
 </p>

 Aden es una plataforma para construir, desplegar, operar y adaptar agentes de IA:
+1 -1
@@ -44,7 +44,7 @@
 # Aden क्या है?

 <p align="center">
-  <img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
+  <img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
 </p>

 Aden एक ऐसा प्लेटफ़ॉर्म है जो AI एजेंट्स को बनाने, डिप्लॉय करने, ऑपरेट करने और अनुकूलित करने के लिए उपयोग होता है:
+1 -1
@@ -42,7 +42,7 @@
 ## Adenとは

 <p align="center">
-  <img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
+  <img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
 </p>

 Adenは、AIエージェントの構築、デプロイ、運用、適応のためのプラットフォームです:
+1 -1
@@ -42,7 +42,7 @@
 ## Aden이란 무엇인가

 <p align="center">
-  <img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
+  <img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
 </p>

 Aden은 AI 에이전트를 구축, 배포, 운영, 적응시키기 위한 플랫폼입니다:
+1 -1
@@ -42,7 +42,7 @@ Visite [adenhq.com](https://adenhq.com) para documentação completa, exemplos e
 ## O que é Aden

 <p align="center">
-  <img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
+  <img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
 </p>

 Aden é uma plataforma para construir, implantar, operar e adaptar agentes de IA:
+1 -1
@@ -42,7 +42,7 @@
 ## Что такое Aden

 <p align="center">
-  <img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
+  <img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
 </p>

 Aden — это платформа для создания, развёртывания, эксплуатации и адаптации ИИ-агентов:
+1 -1
@@ -42,7 +42,7 @@
 ## 什么是 Aden

 <p align="center">
-  <img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
+  <img width="100%" alt="Aden Architecture" src="../assets/aden-architecture-diagram.jpg" />
 </p>

 Aden 是一个用于构建、部署、运营和适应 AI 智能体的平台:
@@ -0,0 +1,2 @@
+[tool.uv.workspace]
+members = ["core", "tools"]
+149 -153
@@ -11,6 +11,14 @@
|
||||
set -e
|
||||
|
||||
# Detect Bash version for compatibility
|
||||
BASH_MAJOR_VERSION="${BASH_VERSINFO[0]}"
|
||||
USE_ASSOC_ARRAYS=false
|
||||
if [ "$BASH_MAJOR_VERSION" -ge 4 ]; then
|
||||
USE_ASSOC_ARRAYS=true
|
||||
fi
|
||||
echo "[debug] Bash version: ${BASH_VERSION}"
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
@@ -52,7 +60,7 @@ prompt_choice() {
     echo -e "${BOLD}$prompt${NC}"
     for opt in "${options[@]}"; do
         echo -e "  ${CYAN}$i)${NC} $opt"
-        ((i++))
+        i=$((i + 1))
     done
     echo ""
@@ -60,7 +68,8 @@ prompt_choice() {
     while true; do
         read -r -p "Enter choice (1-${#options[@]}): " choice
         if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le "${#options[@]}" ]; then
-            return $((choice - 1))
+            PROMPT_CHOICE=$((choice - 1))
+            return 0
         fi
         echo -e "${RED}Invalid choice. Please enter 1-${#options[@]}${NC}"
     done
@@ -174,63 +183,26 @@ echo ""
 echo -e "${DIM}This may take a minute...${NC}"
 echo ""

-# Upgrade pip, setuptools, and wheel
-echo -n "  Upgrading pip... "
-$PYTHON_CMD -m pip install --upgrade pip setuptools wheel > /dev/null 2>&1
-echo -e "${GREEN}ok${NC}"
-
-# Install framework package from core/
-echo -n "  Installing framework... "
-cd "$SCRIPT_DIR/core"
+# Install all workspace packages (core + tools) from workspace root
+echo -n "  Installing workspace packages... "
+cd "$SCRIPT_DIR"

 if [ -f "pyproject.toml" ]; then
-    uv sync > /dev/null 2>&1
-    if [ $? -eq 0 ]; then
-        echo -e "${GREEN} ✓ framework package installed${NC}"
+    if uv sync > /dev/null 2>&1; then
+        echo -e "${GREEN} ✓ workspace packages installed${NC}"
     else
-        echo -e "${YELLOW} ⚠ framework installation had issues (may be OK)${NC}"
-    fi
-else
-    echo -e "${RED}failed (no pyproject.toml)${NC}"
-    exit 1
-fi
-
-# Install aden_tools package from tools/
-echo -n "  Installing tools... "
-cd "$SCRIPT_DIR/tools"
-
-if [ -f "pyproject.toml" ]; then
-    uv sync > /dev/null 2>&1
-    if [ $? -eq 0 ]; then
-        echo -e "${GREEN} ✓ aden_tools package installed${NC}"
-    else
-        echo -e "${RED} ✗ aden_tools installation failed${NC}"
+        echo -e "${RED} ✗ workspace installation failed${NC}"
         exit 1
     fi
 else
-    echo -e "${RED}failed${NC}"
+    echo -e "${RED}failed (no root pyproject.toml)${NC}"
     exit 1
 fi

-# Install MCP dependencies
-echo -n "  Installing MCP... "
-$PYTHON_CMD -m pip install mcp fastmcp > /dev/null 2>&1
-echo -e "${GREEN}ok${NC}"
-
-# Fix openai version compatibility
-echo -n "  Checking openai... "
-$PYTHON_CMD -m pip install "openai>=1.0.0" > /dev/null 2>&1
-echo -e "${GREEN}ok${NC}"
-
-# Install click for CLI
-echo -n "  Installing CLI tools... "
-$PYTHON_CMD -m pip install click > /dev/null 2>&1
-echo -e "${GREEN}ok${NC}"
-
 # Install Playwright browser
 echo -n "  Installing Playwright browser... "
-if $PYTHON_CMD -c "import playwright" > /dev/null 2>&1; then
-    if $PYTHON_CMD -m playwright install chromium > /dev/null 2>&1; then
+if uv run python -c "import playwright" > /dev/null 2>&1; then
+    if uv run python -m playwright install chromium > /dev/null 2>&1; then
         echo -e "${GREEN}ok${NC}"
     else
         echo -e "${YELLOW}⏭${NC}"
@@ -249,33 +221,6 @@ echo ""
 # ============================================================

 echo -e "${YELLOW}⬢${NC} ${BLUE}${BOLD}Step 3: Configuring LLM provider...${NC}"
-# Install MCP dependencies (in tools venv)
-echo "  Installing MCP dependencies..."
-TOOLS_PYTHON="$SCRIPT_DIR/tools/.venv/bin/python"
-uv pip install --python "$TOOLS_PYTHON" mcp fastmcp > /dev/null 2>&1
-echo -e "${GREEN} ✓ MCP dependencies installed${NC}"
-
-# Fix openai version compatibility (in tools venv)
-TOOLS_PYTHON="$SCRIPT_DIR/tools/.venv/bin/python"
-OPENAI_VERSION=$($TOOLS_PYTHON -c "import openai; print(openai.__version__)" 2>/dev/null || echo "not_installed")
-if [ "$OPENAI_VERSION" = "not_installed" ]; then
-    echo "  Installing openai package..."
-    uv pip install --python "$TOOLS_PYTHON" "openai>=1.0.0" > /dev/null 2>&1
-    echo -e "${GREEN} ✓ openai installed${NC}"
-elif [[ "$OPENAI_VERSION" =~ ^0\. ]]; then
-    echo "  Upgrading openai to 1.x+ for litellm compatibility..."
-    uv pip install --python "$TOOLS_PYTHON" --upgrade "openai>=1.0.0" > /dev/null 2>&1
-    echo -e "${GREEN} ✓ openai upgraded${NC}"
-else
-    echo -e "${GREEN} ✓ openai $OPENAI_VERSION is compatible${NC}"
-fi
-
-# Install click for CLI (in tools venv)
-TOOLS_PYTHON="$SCRIPT_DIR/tools/.venv/bin/python"
-uv pip install --python "$TOOLS_PYTHON" click > /dev/null 2>&1
-echo -e "${GREEN} ✓ click installed${NC}"
-
 cd "$SCRIPT_DIR"
 echo ""

 # ============================================================
@@ -287,42 +232,28 @@ echo ""

 IMPORT_ERRORS=0

-# Test imports using their respective venvs
-CORE_PYTHON="$SCRIPT_DIR/core/.venv/bin/python"
-TOOLS_PYTHON="$SCRIPT_DIR/tools/.venv/bin/python"
-
-# Test framework import (from core venv)
-if [ -f "$CORE_PYTHON" ] && $CORE_PYTHON -c "import framework" > /dev/null 2>&1; then
+# Test imports using workspace venv via uv run
+if uv run python -c "import framework" > /dev/null 2>&1; then
     echo -e "${GREEN} ✓ framework imports OK${NC}"
 else
     echo -e "${RED} ✗ framework import failed${NC}"
     IMPORT_ERRORS=$((IMPORT_ERRORS + 1))
 fi

-# Test aden_tools import (from tools venv)
-if [ -f "$TOOLS_PYTHON" ] && $TOOLS_PYTHON -c "import aden_tools" > /dev/null 2>&1; then
+if uv run python -c "import aden_tools" > /dev/null 2>&1; then
     echo -e "${GREEN} ✓ aden_tools imports OK${NC}"
 else
     echo -e "${RED} ✗ aden_tools import failed${NC}"
     IMPORT_ERRORS=$((IMPORT_ERRORS + 1))
 fi

-# Test litellm import (from core venv)
-if [ -f "$CORE_PYTHON" ] && $CORE_PYTHON -c "import litellm" > /dev/null 2>&1; then
-    echo -e "${GREEN} ✓ litellm imports OK (core)${NC}"
+if uv run python -c "import litellm" > /dev/null 2>&1; then
+    echo -e "${GREEN} ✓ litellm imports OK${NC}"
 else
-    echo -e "${YELLOW} ⚠ litellm import issues in core (may be OK)${NC}"
+    echo -e "${YELLOW} ⚠ litellm import issues (may be OK)${NC}"
 fi

-# Test litellm import (from tools venv)
-if [ -f "$TOOLS_PYTHON" ] && $TOOLS_PYTHON -c "import litellm" > /dev/null 2>&1; then
-    echo -e "${GREEN} ✓ litellm imports OK (tools)${NC}"
-else
-    echo -e "${YELLOW} ⚠ litellm import issues in tools (may be OK)${NC}"
-fi
-
-# Test MCP server module (from core venv)
-if [ -f "$CORE_PYTHON" ] && $CORE_PYTHON -c "from framework.mcp import agent_builder_server" > /dev/null 2>&1; then
+if uv run python -c "from framework.mcp import agent_builder_server" > /dev/null 2>&1; then
     echo -e "${GREEN} ✓ MCP server module OK${NC}"
 else
     echo -e "${RED} ✗ MCP server module failed${NC}"
@@ -344,53 +275,105 @@ echo ""
 echo -e "${BLUE}Step 4: Verifying Claude Code skills...${NC}"
 echo ""

-# Provider data as parallel indexed arrays (Bash 3.2 compatible — no declare -A)
-PROVIDER_ENV_VARS=(ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY GOOGLE_API_KEY GROQ_API_KEY CEREBRAS_API_KEY MISTRAL_API_KEY TOGETHER_API_KEY DEEPSEEK_API_KEY)
-PROVIDER_DISPLAY_NAMES=("Anthropic (Claude)" "OpenAI (GPT)" "Google Gemini" "Google AI" "Groq" "Cerebras" "Mistral" "Together AI" "DeepSeek")
-PROVIDER_ID_LIST=(anthropic openai gemini google groq cerebras mistral together deepseek)
+# Provider configuration - use associative arrays (Bash 4+) or indexed arrays (Bash 3.2)
+if [ "$USE_ASSOC_ARRAYS" = true ]; then
+    # Bash 4+ - use associative arrays (cleaner and more efficient)
+    declare -A PROVIDER_NAMES=(
+        ["ANTHROPIC_API_KEY"]="Anthropic (Claude)"
+        ["OPENAI_API_KEY"]="OpenAI (GPT)"
+        ["GEMINI_API_KEY"]="Google Gemini"
+        ["GOOGLE_API_KEY"]="Google AI"
+        ["GROQ_API_KEY"]="Groq"
+        ["CEREBRAS_API_KEY"]="Cerebras"
+        ["MISTRAL_API_KEY"]="Mistral"
+        ["TOGETHER_API_KEY"]="Together AI"
+        ["DEEPSEEK_API_KEY"]="DeepSeek"
+    )

-# Default models by provider id (parallel arrays)
-MODEL_PROVIDER_IDS=(anthropic openai gemini groq cerebras mistral together_ai deepseek)
-MODEL_DEFAULTS=("claude-sonnet-4-5-20250929" "gpt-4o" "gemini-3.0-flash-preview" "moonshotai/kimi-k2-instruct-0905" "zai-glm-4.7" "mistral-large-latest" "meta-llama/Llama-3.3-70B-Instruct-Turbo" "deepseek-chat")
+    declare -A PROVIDER_IDS=(
+        ["ANTHROPIC_API_KEY"]="anthropic"
+        ["OPENAI_API_KEY"]="openai"
+        ["GEMINI_API_KEY"]="gemini"
+        ["GOOGLE_API_KEY"]="google"
+        ["GROQ_API_KEY"]="groq"
+        ["CEREBRAS_API_KEY"]="cerebras"
+        ["MISTRAL_API_KEY"]="mistral"
+        ["TOGETHER_API_KEY"]="together"
+        ["DEEPSEEK_API_KEY"]="deepseek"
+    )

-# Helper: get provider display name for an env var
-get_provider_name() {
-    local env_var="$1"
-    local i=0
-    while [ $i -lt ${#PROVIDER_ENV_VARS[@]} ]; do
-        if [ "${PROVIDER_ENV_VARS[$i]}" = "$env_var" ]; then
-            echo "${PROVIDER_DISPLAY_NAMES[$i]}"
-            return
-        fi
-        i=$((i + 1))
-    done
-}
+    declare -A DEFAULT_MODELS=(
+        ["anthropic"]="claude-sonnet-4-5-20250929"
+        ["openai"]="gpt-4o"
+        ["gemini"]="gemini-3.0-flash-preview"
+        ["groq"]="moonshotai/kimi-k2-instruct-0905"
+        ["cerebras"]="zai-glm-4.7"
+        ["mistral"]="mistral-large-latest"
+        ["together_ai"]="meta-llama/Llama-3.3-70B-Instruct-Turbo"
+        ["deepseek"]="deepseek-chat"
+    )

-# Helper: get provider id for an env var
-get_provider_id() {
-    local env_var="$1"
-    local i=0
-    while [ $i -lt ${#PROVIDER_ENV_VARS[@]} ]; do
-        if [ "${PROVIDER_ENV_VARS[$i]}" = "$env_var" ]; then
-            echo "${PROVIDER_ID_LIST[$i]}"
-            return
-        fi
-        i=$((i + 1))
-    done
-}
+    # Helper functions for Bash 4+
+    get_provider_name() {
+        echo "${PROVIDER_NAMES[$1]}"
+    }

-# Helper: get default model for a provider id
-get_default_model() {
-    local provider_id="$1"
-    local i=0
-    while [ $i -lt ${#MODEL_PROVIDER_IDS[@]} ]; do
-        if [ "${MODEL_PROVIDER_IDS[$i]}" = "$provider_id" ]; then
-            echo "${MODEL_DEFAULTS[$i]}"
-            return
-        fi
-        i=$((i + 1))
-    done
-}
+    get_provider_id() {
+        echo "${PROVIDER_IDS[$1]}"
+    }
+
+    get_default_model() {
+        echo "${DEFAULT_MODELS[$1]}"
+    }
+else
+    # Bash 3.2 - use parallel indexed arrays
+    PROVIDER_ENV_VARS=(ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY GOOGLE_API_KEY GROQ_API_KEY CEREBRAS_API_KEY MISTRAL_API_KEY TOGETHER_API_KEY DEEPSEEK_API_KEY)
+    PROVIDER_DISPLAY_NAMES=("Anthropic (Claude)" "OpenAI (GPT)" "Google Gemini" "Google AI" "Groq" "Cerebras" "Mistral" "Together AI" "DeepSeek")
+    PROVIDER_ID_LIST=(anthropic openai gemini google groq cerebras mistral together deepseek)
+
+    # Default models by provider id (parallel arrays)
+    MODEL_PROVIDER_IDS=(anthropic openai gemini groq cerebras mistral together_ai deepseek)
+    MODEL_DEFAULTS=("claude-sonnet-4-5-20250929" "gpt-4o" "gemini-3.0-flash-preview" "moonshotai/kimi-k2-instruct-0905" "zai-glm-4.7" "mistral-large-latest" "meta-llama/Llama-3.3-70B-Instruct-Turbo" "deepseek-chat")
+
+    # Helper: get provider display name for an env var
+    get_provider_name() {
+        local env_var="$1"
+        local i=0
+        while [ $i -lt ${#PROVIDER_ENV_VARS[@]} ]; do
+            if [ "${PROVIDER_ENV_VARS[$i]}" = "$env_var" ]; then
+                echo "${PROVIDER_DISPLAY_NAMES[$i]}"
+                return
+            fi
+            i=$((i + 1))
+        done
+    }
+
+    # Helper: get provider id for an env var
+    get_provider_id() {
+        local env_var="$1"
+        local i=0
+        while [ $i -lt ${#PROVIDER_ENV_VARS[@]} ]; do
+            if [ "${PROVIDER_ENV_VARS[$i]}" = "$env_var" ]; then
+                echo "${PROVIDER_ID_LIST[$i]}"
+                return
+            fi
+            i=$((i + 1))
+        done
+    }
+
+    # Helper: get default model for a provider id
+    get_default_model() {
+        local provider_id="$1"
+        local i=0
+        while [ $i -lt ${#MODEL_PROVIDER_IDS[@]} ]; do
+            if [ "${MODEL_PROVIDER_IDS[$i]}" = "$provider_id" ]; then
+                echo "${MODEL_DEFAULTS[$i]}"
+                return
+            fi
+            i=$((i + 1))
+        done
+    }
+fi

 # Configuration directory
 HIVE_CONFIG_DIR="$HOME/.hive"
@@ -413,7 +396,7 @@ config = {
     'model': '$model',
     'api_key_env_var': '$env_var'
   },
-  'created_at': '$(date -Iseconds)'
+  'created_at': '$(date -u +"%Y-%m-%dT%H:%M:%S+00:00")'
 }
 with open('$HIVE_CONFIG_FILE', 'w') as f:
     json.dump(config, f, indent=2)
@@ -442,13 +425,25 @@ FOUND_ENV_VARS=()   # Corresponding env var names
 SELECTED_PROVIDER_ID=""   # Will hold the chosen provider ID
 SELECTED_ENV_VAR=""       # Will hold the chosen env var

-for env_var in "${PROVIDER_ENV_VARS[@]}"; do
-    value="${!env_var}"
-    if [ -n "$value" ]; then
-        FOUND_PROVIDERS+=("$(get_provider_name "$env_var")")
-        FOUND_ENV_VARS+=("$env_var")
-    fi
-done
+if [ "$USE_ASSOC_ARRAYS" = true ]; then
+    # Bash 4+ - iterate over associative array keys
+    for env_var in "${!PROVIDER_NAMES[@]}"; do
+        value="${!env_var}"
+        if [ -n "$value" ]; then
+            FOUND_PROVIDERS+=("$(get_provider_name "$env_var")")
+            FOUND_ENV_VARS+=("$env_var")
+        fi
+    done
+else
+    # Bash 3.2 - iterate over indexed array
+    for env_var in "${PROVIDER_ENV_VARS[@]}"; do
+        value="${!env_var}"
+        if [ -n "$value" ]; then
+            FOUND_PROVIDERS+=("$(get_provider_name "$env_var")")
+            FOUND_ENV_VARS+=("$env_var")
+        fi
+    done
+fi

 if [ ${#FOUND_PROVIDERS[@]} -gt 0 ]; then
     echo "Found API keys:"
@@ -476,7 +471,7 @@ if [ ${#FOUND_PROVIDERS[@]} -gt 0 ]; then
|
||||
i=1
|
||||
for provider in "${FOUND_PROVIDERS[@]}"; do
|
||||
echo -e " ${CYAN}$i)${NC} $provider"
|
||||
((i++))
|
||||
i=$((i + 1))
|
||||
done
|
||||
echo ""
|
||||
|
||||
@@ -507,7 +502,7 @@ if [ -z "$SELECTED_PROVIDER_ID" ]; then
|
||||
"Groq - Fast, free tier" \
|
||||
"Cerebras - Fast, free tier" \
|
||||
"Skip for now"
|
||||
choice=$?
|
||||
choice=$PROMPT_CHOICE
|
||||
|
||||
case $choice in
|
||||
0)
|
||||
@@ -542,7 +537,8 @@ if [ -z "$SELECTED_PROVIDER_ID" ]; then
|
||||
;;
|
||||
5)
|
||||
echo ""
|
||||
echo -e "${YELLOW}Skipped.${NC} Add your API key later:"
|
||||
echo -e "${YELLOW}Skipped.${NC} An LLM API key is required to test and use worker agents."
|
||||
echo -e "Add your API key later by running:"
|
||||
echo ""
|
||||
echo -e " ${CYAN}echo 'ANTHROPIC_API_KEY=your-key' >> .env${NC}"
|
||||
echo ""
|
||||
@@ -595,7 +591,7 @@ ERRORS=0
|
||||
|
||||
# Test imports
|
||||
echo -n " ⬡ framework... "
|
||||
if $PYTHON_CMD -c "import framework" > /dev/null 2>&1; then
|
||||
if uv run python -c "import framework" > /dev/null 2>&1; then
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
else
|
||||
echo -e "${RED}failed${NC}"
|
||||
@@ -603,7 +599,7 @@ else
|
||||
fi
|
||||
|
||||
echo -n " ⬡ aden_tools... "
|
||||
if $PYTHON_CMD -c "import aden_tools" > /dev/null 2>&1; then
|
||||
if uv run python -c "import aden_tools" > /dev/null 2>&1; then
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
else
|
||||
echo -e "${RED}failed${NC}"
|
||||
@@ -611,7 +607,7 @@ else
|
||||
fi
|
||||
|
||||
echo -n " ⬡ litellm... "
|
||||
if $PYTHON_CMD -c "import litellm" > /dev/null 2>&1; then
|
||||
if uv run python -c "import litellm" > /dev/null 2>&1; then
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}--${NC}"
|
||||
|
||||
+1
-26
@@ -68,32 +68,7 @@ from starlette.responses import PlainTextResponse  # noqa: E402
from aden_tools.credentials import CredentialError, CredentialStoreAdapter  # noqa: E402
from aden_tools.tools import register_all_tools  # noqa: E402

# Create credential store with access to both env vars AND encrypted store.
# CompositeStorage tries encrypted store first, then falls back to env vars.
# This allows Aden-synced credentials from ~/.hive/credentials while still
# supporting plain GITHUB_TOKEN / BRAVE_SEARCH_API_KEY env vars.
try:
    from framework.credentials import CredentialStore
    from framework.credentials.storage import (
        CompositeStorage,
        EncryptedFileStorage,
        EnvVarStorage,
    )

    from aden_tools.credentials import CREDENTIAL_SPECS

    _env_mapping = {name: spec.env_var for name, spec in CREDENTIAL_SPECS.items()}
    _storage = CompositeStorage(
        primary=EncryptedFileStorage(),
        fallbacks=[EnvVarStorage(env_mapping=_env_mapping)],
    )
    store = CredentialStore(storage=_storage)
    credentials = CredentialStoreAdapter(store)
    logger.info("Using CompositeStorage (encrypted + env fallback)")
except Exception as e:
    # Fall back to env-only adapter if composite setup fails
    credentials = CredentialStoreAdapter.with_env_storage()
    logger.warning(f"Falling back to env-only CredentialStoreAdapter: {e}")
credentials = CredentialStoreAdapter.default()

# Tier 1: Validate startup-required credentials (if any)
try:
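
# A minimal sketch (assumed names, not the framework's actual API) of the
# lookup order the comments above describe: a composite store resolves a
# credential from its primary storage first, then from each fallback in order.
def composite_get(key, primary, fallbacks):
    """Return the first non-None value found across storages."""
    for storage in (primary, *fallbacks):
        value = storage.get(key)
        if value is not None:
            return value
    return None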

@@ -30,6 +30,7 @@ dependencies = [
    "playwright-stealth>=1.0.5",
    "litellm>=1.81.0",
    "resend>=2.0.0",
    "framework",
]

[project.optional-dependencies]

@@ -54,6 +55,9 @@ all = [
    "duckdb>=1.0.0",
]

[tool.uv.sources]
framework = { workspace = true }

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

@@ -10,7 +10,7 @@ Usage:
    from aden_tools.credentials import CredentialStoreAdapter

    mcp = FastMCP("my-server")
    credentials = CredentialStoreAdapter.with_env_storage()
    credentials = CredentialStoreAdapter.default()
    register_all_tools(mcp, credentials=credentials)
"""

@@ -15,8 +15,8 @@ Usage:
    store = CredentialStore.with_encrypted_storage()  # defaults to ~/.hive/credentials
    credentials = CredentialStoreAdapter(store)

    # With env vars only (simple setup)
    credentials = CredentialStoreAdapter.with_env_storage()
    # With composite storage (encrypted primary + env fallback)
    credentials = CredentialStoreAdapter.default()

    # In agent runner (validate at agent load time)
    credentials.validate_for_tools(["web_search", "file_read"])

@@ -70,6 +70,9 @@ class CredentialSpec:
    credential_key: str = "access_token"
    """Key name within the credential (e.g., 'access_token', 'api_key')"""

    credential_group: str = ""
    """Group name for credentials that must be configured together (e.g., 'google_custom_search')"""


class CredentialError(Exception):
    """Raised when required credentials are missing."""
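
# A minimal sketch of consuming credential_group, assuming CREDENTIAL_SPECS
# maps names to CredentialSpec instances; group_specs itself is a hypothetical
# helper, not part of the diff.
from collections import defaultdict

def group_specs(specs):
    """Collect spec names by credential_group, skipping ungrouped specs."""
    groups = defaultdict(list)
    for name, spec in specs.items():
        if spec.credential_group:
            groups[spec.credential_group].append(name)
    # e.g., {"google_custom_search": ["google_search", "google_cse"]}
    return dict(groups)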

@@ -15,5 +15,22 @@ EMAIL_CREDENTIALS = {
        startup_required=False,
        help_url="https://resend.com/api-keys",
        description="API key for Resend email service",
        # Auth method support
        direct_api_key_supported=True,
        api_key_instructions="""To get a Resend API key:
1. Go to https://resend.com and create an account (or sign in)
2. Navigate to API Keys in the dashboard
3. Click "Create API Key"
4. Give it a name (e.g., "Hive Agent") and choose permissions:
   - "Sending access" is sufficient for most use cases
   - "Full access" if you also need to manage domains
5. Copy the API key (starts with re_)
6. Store it securely - you won't be able to see it again!
7. Note: You'll also need to verify a domain to send emails from custom addresses""",
        # Health check configuration
        health_check_endpoint="https://api.resend.com/domains",
        # Credential store mapping
        credential_id="resend",
        credential_key="api_key",
    ),
}

@@ -231,10 +231,213 @@ class GoogleSearchHealthChecker:
            )


class AnthropicHealthChecker:
    """Health checker for Anthropic API credentials."""

    ENDPOINT = "https://api.anthropic.com/v1/messages"
    TIMEOUT = 10.0

    def check(self, api_key: str) -> HealthCheckResult:
        """
        Validate Anthropic API key without consuming tokens.

        Sends a deliberately invalid request (empty messages) to the messages
        endpoint. A 401 means invalid key; 400 (bad request) means the key
        authenticated but the payload was rejected — confirming the key is
        valid without generating any tokens. 429 (rate limited) also
        indicates a valid key.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.post(
                    self.ENDPOINT,
                    headers={
                        "x-api-key": api_key,
                        "anthropic-version": "2023-06-01",
                        "Content-Type": "application/json",
                    },
                    # Empty messages triggers 400 (not 200), so no tokens are consumed.
                    json={
                        "model": "claude-sonnet-4-20250514",
                        "max_tokens": 1,
                        "messages": [],
                    },
                )

            if response.status_code == 200:
                return HealthCheckResult(
                    valid=True,
                    message="Anthropic API key valid",
                )
            elif response.status_code == 401:
                return HealthCheckResult(
                    valid=False,
                    message="Anthropic API key is invalid",
                    details={"status_code": 401},
                )
            elif response.status_code == 429:
                # Rate limited but key is valid
                return HealthCheckResult(
                    valid=True,
                    message="Anthropic API key valid (rate limited)",
                    details={"status_code": 429, "rate_limited": True},
                )
            elif response.status_code == 400:
                # Bad request but key authenticated - key is valid
                return HealthCheckResult(
                    valid=True,
                    message="Anthropic API key valid",
                    details={"status_code": 400},
                )
            else:
                return HealthCheckResult(
                    valid=False,
                    message=f"Anthropic API returned status {response.status_code}",
                    details={"status_code": response.status_code},
                )
        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message="Anthropic API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to Anthropic API: {e}",
                details={"error": str(e)},
            )
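
# A minimal usage sketch for the checker above (illustrative, not part of the
# diff). It assumes ANTHROPIC_API_KEY is set; callers normally go through the
# HEALTH_CHECKERS registry defined below rather than instantiating checkers
# directly:
#
#     import os
#     result = AnthropicHealthChecker().check(os.environ["ANTHROPIC_API_KEY"])
#     print(result.valid, result.message)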


class GitHubHealthChecker:
    """Health checker for GitHub Personal Access Token."""

    ENDPOINT = "https://api.github.com/user"
    TIMEOUT = 10.0

    def check(self, access_token: str) -> HealthCheckResult:
        """
        Validate GitHub token by fetching the authenticated user.

        Returns the authenticated username on success.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.get(
                    self.ENDPOINT,
                    headers={
                        "Authorization": f"Bearer {access_token}",
                        "Accept": "application/vnd.github+json",
                        "X-GitHub-Api-Version": "2022-11-28",
                    },
                )

            if response.status_code == 200:
                data = response.json()
                username = data.get("login", "unknown")
                return HealthCheckResult(
                    valid=True,
                    message=f"GitHub token valid (authenticated as {username})",
                    details={"username": username},
                )
            elif response.status_code == 401:
                return HealthCheckResult(
                    valid=False,
                    message="GitHub token is invalid or expired",
                    details={"status_code": 401},
                )
            elif response.status_code == 403:
                return HealthCheckResult(
                    valid=False,
                    message="GitHub token lacks required permissions",
                    details={"status_code": 403},
                )
            else:
                return HealthCheckResult(
                    valid=False,
                    message=f"GitHub API returned status {response.status_code}",
                    details={"status_code": response.status_code},
                )
        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message="GitHub API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to GitHub API: {e}",
                details={"error": str(e)},
            )


class ResendHealthChecker:
    """Health checker for Resend API credentials."""

    ENDPOINT = "https://api.resend.com/domains"
    TIMEOUT = 10.0

    def check(self, api_key: str) -> HealthCheckResult:
        """
        Validate Resend API key by listing domains.

        A successful response confirms the key is valid.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.get(
                    self.ENDPOINT,
                    headers={
                        "Authorization": f"Bearer {api_key}",
                        "Accept": "application/json",
                    },
                )

            if response.status_code == 200:
                return HealthCheckResult(
                    valid=True,
                    message="Resend API key valid",
                )
            elif response.status_code == 401:
                return HealthCheckResult(
                    valid=False,
                    message="Resend API key is invalid",
                    details={"status_code": 401},
                )
            elif response.status_code == 403:
                return HealthCheckResult(
                    valid=False,
                    message="Resend API key lacks required permissions",
                    details={"status_code": 403},
                )
            else:
                return HealthCheckResult(
                    valid=False,
                    message=f"Resend API returned status {response.status_code}",
                    details={"status_code": response.status_code},
                )
        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message="Resend API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to Resend API: {e}",
                details={"error": str(e)},
            )


# Registry of health checkers
HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
    "hubspot": HubSpotHealthChecker(),
    "brave_search": BraveSearchHealthChecker(),
    "google_search": GoogleSearchHealthChecker(),
    "anthropic": AnthropicHealthChecker(),
    "github": GitHubHealthChecker(),
    "resend": ResendHealthChecker(),
}
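
# A plausible sketch of the check_credential_health() dispatcher over this
# registry, inferred from the tests further below (unknown credentials are
# assumed valid, and google_search forwards its cse_id); the shipped
# implementation may differ.
def check_credential_health_sketch(credential_name, api_key, cse_id=None):
    checker = HEALTH_CHECKERS.get(credential_name)
    if checker is None:
        # No checker registered: assume valid rather than block the credential.
        return HealthCheckResult(
            valid=True,
            message=f"No health checker for '{credential_name}'",
            details={"no_checker": True},
        )
    if credential_name == "google_search":
        # Google Custom Search needs both the API key and the CSE id (cx).
        return checker.check(api_key, cse_id=cse_id)
    return checker.check(api_key)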

@@ -15,6 +15,21 @@ LLM_CREDENTIALS = {
        startup_required=False,  # MCP server doesn't need LLM credentials
        help_url="https://console.anthropic.com/settings/keys",
        description="API key for Anthropic Claude models",
        # Auth method support
        direct_api_key_supported=True,
        api_key_instructions="""To get an Anthropic API key:
1. Go to https://console.anthropic.com/settings/keys
2. Sign in or create an Anthropic account
3. Click "Create Key"
4. Give your key a descriptive name (e.g., "Hive Agent")
5. Copy the API key (starts with sk-ant-)
6. Store it securely - you won't be able to see the full key again!""",
        # Health check configuration
        health_check_endpoint="https://api.anthropic.com/v1/messages",
        health_check_method="POST",
        # Credential store mapping
        credential_id="anthropic",
        credential_key="api_key",
    ),
    # Future LLM providers:
    # "openai": CredentialSpec(

@@ -15,6 +15,20 @@ SEARCH_CREDENTIALS = {
        startup_required=False,
        help_url="https://brave.com/search/api/",
        description="API key for Brave Search",
        # Auth method support
        direct_api_key_supported=True,
        api_key_instructions="""To get a Brave Search API key:
1. Go to https://brave.com/search/api/
2. Create a Brave Search API account (or sign in)
3. Choose a plan (Free tier includes 2,000 queries/month)
4. Navigate to the API Keys section in your dashboard
5. Click "Create API Key" and give it a name
6. Copy the API key and store it securely""",
        # Health check configuration
        health_check_endpoint="https://api.search.brave.com/res/v1/web/search",
        # Credential store mapping
        credential_id="brave_search",
        credential_key="api_key",
    ),
    "google_search": CredentialSpec(
        env_var="GOOGLE_API_KEY",

@@ -22,8 +36,24 @@ SEARCH_CREDENTIALS = {
        node_types=[],
        required=True,
        startup_required=False,
        help_url="https://console.cloud.google.com/",
        help_url="https://console.cloud.google.com/apis/credentials",
        description="API key for Google Custom Search",
        # Auth method support
        direct_api_key_supported=True,
        api_key_instructions="""To get a Google Custom Search API key:
1. Go to https://console.cloud.google.com/apis/credentials
2. Create a new project (or select an existing one)
3. Enable the "Custom Search API" from the API Library
4. Go to Credentials > Create Credentials > API Key
5. Copy the generated API key
6. (Recommended) Click "Restrict Key" and limit it to the Custom Search API
7. Store the key securely""",
        # Health check configuration
        health_check_endpoint="https://www.googleapis.com/customsearch/v1",
        # Credential store mapping
        credential_id="google_search",
        credential_key="api_key",
        credential_group="google_custom_search",
    ),
    "google_cse": CredentialSpec(
        env_var="GOOGLE_CSE_ID",

@@ -31,7 +61,22 @@ SEARCH_CREDENTIALS = {
        node_types=[],
        required=True,
        startup_required=False,
        help_url="https://programmablesearchengine.google.com/",
        help_url="https://programmablesearchengine.google.com/controlpanel/all",
        description="Google Custom Search Engine ID",
        # Auth method support
        direct_api_key_supported=True,
        api_key_instructions="""To get a Google Custom Search Engine (CSE) ID:
1. Go to https://programmablesearchengine.google.com/controlpanel/all
2. Click "Add" to create a new search engine
3. Under "What to search", select "Search the entire web"
4. Give your search engine a name (e.g., "Hive Agent Search")
5. Click "Create"
6. Copy the Search Engine ID (cx value) from the overview page""",
        # Health check configuration
        health_check_endpoint="https://www.googleapis.com/customsearch/v1",
        # Credential store mapping
        credential_id="google_cse",
        credential_key="api_key",
        credential_group="google_custom_search",
    ),
}

@@ -348,6 +348,41 @@ class CredentialStoreAdapter:

    # --- Factory Methods ---

    @classmethod
    def default(
        cls,
        specs: dict[str, CredentialSpec] | None = None,
    ) -> CredentialStoreAdapter:
        """Create adapter with encrypted storage primary and env var fallback."""
        from framework.credentials import CredentialStore
        from framework.credentials.storage import (
            CompositeStorage,
            EncryptedFileStorage,
            EnvVarStorage,
        )

        if specs is None:
            from . import CREDENTIAL_SPECS

            specs = CREDENTIAL_SPECS

        env_mapping = {name: spec.env_var for name, spec in specs.items()}

        try:
            encrypted = EncryptedFileStorage()
            env = EnvVarStorage(env_mapping)
            composite = CompositeStorage(primary=encrypted, fallbacks=[env])
            store = CredentialStore(storage=composite)
        except Exception as e:
            import logging

            logging.getLogger(__name__).warning(
                "Encrypted credential storage unavailable, falling back to env vars: %s", e
            )
            store = CredentialStore.with_env_storage(env_mapping)

        return cls(store=store, specs=specs)

    @classmethod
    def for_testing(
        cls,

@@ -7,7 +7,7 @@ Usage:
    from aden_tools.credentials import CredentialStoreAdapter

    mcp = FastMCP("my-server")
    credentials = CredentialStoreAdapter.with_env_storage()
    credentials = CredentialStoreAdapter.default()
    register_all_tools(mcp, credentials=credentials)
"""

@@ -10,6 +10,17 @@ from aden_tools.credentials import (
)


@pytest.fixture(autouse=True)
def _no_dotenv(tmp_path, monkeypatch):
    """Isolate tests from the project .env file.

    EnvVarStorage falls back to reading Path.cwd()/.env when a key is
    missing from os.environ. Changing cwd to a temp dir ensures
    monkeypatch.delenv() truly simulates a missing credential.
    """
    monkeypatch.chdir(tmp_path)
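
# A minimal sketch of the .env fallback that the fixture above guards
# against, assuming an EnvVarStorage-like lookup; the parsing details here
# are hypothetical.
import os
from pathlib import Path

def env_get(env_var):
    """Return os.environ[env_var], else the matching value from Path.cwd()/.env."""
    if env_var in os.environ:
        return os.environ[env_var]
    dotenv = Path.cwd() / ".env"
    if dotenv.exists():
        for line in dotenv.read_text().splitlines():
            if line.startswith(f"{env_var}="):
                return line.split("=", 1)[1].strip()
    return None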
class TestCredentialStoreAdapter:
|
||||
"""Tests for CredentialStoreAdapter class."""
|
||||
|
||||
@@ -438,3 +449,38 @@ class TestStartupValidation:
|
||||
|
||||
# Should not raise
|
||||
creds.validate_startup()
|
||||
|
||||
|
||||
class TestSpecCompleteness:
|
||||
"""Tests that all credential specs have required fields populated."""
|
||||
|
||||
def test_direct_api_key_specs_have_instructions(self):
|
||||
"""All specs with direct_api_key_supported=True have non-empty api_key_instructions."""
|
||||
for name, spec in CREDENTIAL_SPECS.items():
|
||||
if spec.direct_api_key_supported:
|
||||
assert spec.api_key_instructions.strip(), (
|
||||
f"Credential '{name}' has direct_api_key_supported=True "
|
||||
f"but empty api_key_instructions"
|
||||
)
|
||||
|
||||
def test_all_specs_have_credential_id(self):
|
||||
"""All credential specs have a non-empty credential_id."""
|
||||
for name, spec in CREDENTIAL_SPECS.items():
|
||||
assert spec.credential_id, f"Credential '{name}' is missing credential_id"
|
||||
|
||||
def test_google_search_and_cse_share_credential_group(self):
|
||||
"""google_search and google_cse share the same credential_group."""
|
||||
google_search = CREDENTIAL_SPECS["google_search"]
|
||||
google_cse = CREDENTIAL_SPECS["google_cse"]
|
||||
|
||||
assert google_search.credential_group == "google_custom_search"
|
||||
assert google_cse.credential_group == "google_custom_search"
|
||||
assert google_search.credential_group == google_cse.credential_group
|
||||
|
||||
def test_credential_group_default_empty(self):
|
||||
"""Specs without a group have empty credential_group."""
|
||||
for name, spec in CREDENTIAL_SPECS.items():
|
||||
if name not in ("google_search", "google_cse"):
|
||||
assert spec.credential_group == "", (
|
||||
f"Credential '{name}' has unexpected credential_group='{spec.credential_group}'"
|
||||
)
|
||||
|
||||

@@ -0,0 +1,308 @@
"""Tests for credential health checkers."""

from unittest.mock import MagicMock, patch

import httpx

from aden_tools.credentials.health_check import (
    HEALTH_CHECKERS,
    AnthropicHealthChecker,
    GitHubHealthChecker,
    GoogleSearchHealthChecker,
    ResendHealthChecker,
    check_credential_health,
)


class TestHealthCheckerRegistry:
    """Tests for the HEALTH_CHECKERS registry."""

    def test_google_search_registered(self):
        """GoogleSearchHealthChecker is registered in HEALTH_CHECKERS."""
        assert "google_search" in HEALTH_CHECKERS
        assert isinstance(HEALTH_CHECKERS["google_search"], GoogleSearchHealthChecker)

    def test_anthropic_registered(self):
        """AnthropicHealthChecker is registered in HEALTH_CHECKERS."""
        assert "anthropic" in HEALTH_CHECKERS
        assert isinstance(HEALTH_CHECKERS["anthropic"], AnthropicHealthChecker)

    def test_github_registered(self):
        """GitHubHealthChecker is registered in HEALTH_CHECKERS."""
        assert "github" in HEALTH_CHECKERS
        assert isinstance(HEALTH_CHECKERS["github"], GitHubHealthChecker)

    def test_resend_registered(self):
        """ResendHealthChecker is registered in HEALTH_CHECKERS."""
        assert "resend" in HEALTH_CHECKERS
        assert isinstance(HEALTH_CHECKERS["resend"], ResendHealthChecker)

    def test_all_expected_checkers_registered(self):
        """All expected health checkers are in the registry."""
        expected = {"hubspot", "brave_search", "google_search", "anthropic", "github", "resend"}
        assert set(HEALTH_CHECKERS.keys()) == expected


class TestAnthropicHealthChecker:
    """Tests for AnthropicHealthChecker."""

    def _mock_response(self, status_code, json_data=None):
        response = MagicMock(spec=httpx.Response)
        response.status_code = status_code
        if json_data:
            response.json.return_value = json_data
        return response

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_valid_key_200(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.post.return_value = self._mock_response(200)

        checker = AnthropicHealthChecker()
        result = checker.check("sk-ant-test-key")

        assert result.valid is True
        assert "valid" in result.message.lower()

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_invalid_key_401(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.post.return_value = self._mock_response(401)

        checker = AnthropicHealthChecker()
        result = checker.check("invalid-key")

        assert result.valid is False
        assert result.details["status_code"] == 401

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_rate_limited_429(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.post.return_value = self._mock_response(429)

        checker = AnthropicHealthChecker()
        result = checker.check("sk-ant-test-key")

        assert result.valid is True
        assert result.details.get("rate_limited") is True

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_bad_request_400_still_valid(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.post.return_value = self._mock_response(400)

        checker = AnthropicHealthChecker()
        result = checker.check("sk-ant-test-key")

        assert result.valid is True

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_timeout(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.post.side_effect = httpx.TimeoutException("timed out")

        checker = AnthropicHealthChecker()
        result = checker.check("sk-ant-test-key")

        assert result.valid is False
        assert result.details["error"] == "timeout"


class TestGitHubHealthChecker:
    """Tests for GitHubHealthChecker."""

    def _mock_response(self, status_code, json_data=None):
        response = MagicMock(spec=httpx.Response)
        response.status_code = status_code
        if json_data:
            response.json.return_value = json_data
        return response

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_valid_token_200(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.get.return_value = self._mock_response(200, {"login": "testuser"})

        checker = GitHubHealthChecker()
        result = checker.check("ghp_test-token")

        assert result.valid is True
        assert "testuser" in result.message
        assert result.details["username"] == "testuser"

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_invalid_token_401(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.get.return_value = self._mock_response(401)

        checker = GitHubHealthChecker()
        result = checker.check("invalid-token")

        assert result.valid is False
        assert result.details["status_code"] == 401

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_forbidden_403(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.get.return_value = self._mock_response(403)

        checker = GitHubHealthChecker()
        result = checker.check("ghp_test-token")

        assert result.valid is False
        assert result.details["status_code"] == 403

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_timeout(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.get.side_effect = httpx.TimeoutException("timed out")

        checker = GitHubHealthChecker()
        result = checker.check("ghp_test-token")

        assert result.valid is False
        assert result.details["error"] == "timeout"

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_request_error(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.get.side_effect = httpx.RequestError("connection failed")

        checker = GitHubHealthChecker()
        result = checker.check("ghp_test-token")

        assert result.valid is False
        assert "connection failed" in result.details["error"]


class TestResendHealthChecker:
    """Tests for ResendHealthChecker."""

    def _mock_response(self, status_code, json_data=None):
        response = MagicMock(spec=httpx.Response)
        response.status_code = status_code
        if json_data:
            response.json.return_value = json_data
        return response

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_valid_key_200(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.get.return_value = self._mock_response(200)

        checker = ResendHealthChecker()
        result = checker.check("re_test-key")

        assert result.valid is True
        assert "valid" in result.message.lower()

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_invalid_key_401(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.get.return_value = self._mock_response(401)

        checker = ResendHealthChecker()
        result = checker.check("invalid-key")

        assert result.valid is False
        assert result.details["status_code"] == 401

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_forbidden_403(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.get.return_value = self._mock_response(403)

        checker = ResendHealthChecker()
        result = checker.check("re_test-key")

        assert result.valid is False
        assert result.details["status_code"] == 403

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_timeout(self, mock_client_cls):
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        mock_client.get.side_effect = httpx.TimeoutException("timed out")

        checker = ResendHealthChecker()
        result = checker.check("re_test-key")

        assert result.valid is False
        assert result.details["error"] == "timeout"


class TestCheckCredentialHealthDispatcher:
    """Tests for the check_credential_health() top-level dispatcher."""

    def test_unknown_credential_returns_valid(self):
        """Unregistered credential names are assumed valid."""
        result = check_credential_health("nonexistent_service", "some-key")

        assert result.valid is True
        assert result.details.get("no_checker") is True

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_dispatches_to_registered_checker(self, mock_client_cls):
        """Normal dispatch calls the registered checker."""
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        response = MagicMock(spec=httpx.Response)
        response.status_code = 200
        mock_client.get.return_value = response

        result = check_credential_health("brave_search", "test-key")

        assert result.valid is True
        mock_client.get.assert_called_once()

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_google_search_with_cse_id(self, mock_client_cls):
        """google_search special case passes cse_id to checker."""
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        response = MagicMock(spec=httpx.Response)
        response.status_code = 200
        mock_client.get.return_value = response

        result = check_credential_health("google_search", "api-key", cse_id="cse-123")

        assert result.valid is True
        # Verify the request included the cse_id as the cx param
        call_kwargs = mock_client.get.call_args
        assert call_kwargs[1]["params"]["cx"] == "cse-123"

    def test_google_search_without_cse_id(self):
        """google_search without cse_id does partial check (no HTTP call)."""
        result = check_credential_health("google_search", "api-key")

        assert result.valid is True
        assert result.details.get("partial_check") is True