Merge branch 'main' into feat/open-hive

This commit is contained in:
bryan
2026-02-24 07:28:42 -08:00
48 changed files with 3358 additions and 234 deletions
+5 -5
View File
@@ -378,16 +378,16 @@ flowchart TB
SA -->|"Inform"| ELN_EL
SA -->|"Starts"| B
B -->|"Report"| ELN_EL
TR -->|"Assigned"| EventLoopNode
CB -->|"Modify Worker Bee"| WorkerBees
TR -->|"Assigned"| ELN_EL
CB -->|"Modify Worker Bee"| WB_C
%% =========================================
%% SHARED MEMORY & LOGS ACCESS
%% =========================================
%% Worker Bees Access
Graph <-->|"Read/Write"| WTM
Graph <-->|"Read/Write"| SM
%% Worker Bees Access (link to node inside Graph subgraph)
AN <-->|"Read/Write"| WTM
AN <-->|"Read/Write"| SM
%% Queen Bee Access
QB_C <-->|"Read/Write"| WTM
@@ -1,17 +1,21 @@
"""
Credential Tester verify synced credentials via live API calls.
Credential Tester verify credentials (Aden OAuth + local API keys) via live API calls.
Interactive agent that lists connected accounts, lets the user pick one,
Interactive agent that lists all testable accounts, lets the user pick one,
loads the provider's tools, and runs a chat session to test the credential.
"""
from .agent import (
CredentialTesterAgent,
_list_aden_accounts,
_list_env_fallback_accounts,
_list_local_accounts,
configure_for_account,
conversation_mode,
edges,
entry_node,
entry_points,
get_tools_for_provider,
goal,
identity_prompt,
list_connected_accounts,
@@ -35,6 +39,7 @@ __all__ = [
"edges",
"entry_node",
"entry_points",
"get_tools_for_provider",
"goal",
"identity_prompt",
"list_connected_accounts",
@@ -45,4 +50,8 @@ __all__ = [
"skip_credential_validation",
"skip_guardian",
"terminal_nodes",
# Internal list helpers (exposed for testing)
"_list_aden_accounts",
"_list_local_accounts",
"_list_env_fallback_accounts",
]
+277 -53
View File
@@ -1,7 +1,8 @@
"""Credential Tester agent — verify synced credentials via live API calls.
"""Credential Tester agent — verify credentials via live API calls.
A framework agent that lets the user pick a connected account and test it
by making real API calls via the provider's tools.
Supports both Aden OAuth2-synced accounts AND locally-stored API key accounts.
Aden accounts use account="alias" routing; local accounts inject the key into
the session environment so tools read it without an account= parameter.
When loaded via AgentRunner.load() (TUI picker, ``hive run``), the module-level
``nodes`` / ``edges`` variables provide a static graph. The TUI detects
@@ -40,7 +41,7 @@ if TYPE_CHECKING:
goal = Goal(
id="credential-tester",
name="Credential Tester",
description="Verify that a synced credential can make real API calls.",
description="Verify that a credential can make real API calls.",
success_criteria=[
SuccessCriterion(
id="api-call-success",
@@ -59,52 +60,148 @@ goal = Goal(
def get_tools_for_provider(provider_name: str) -> list[str]:
    """Collect tool names for a credential by credential_id OR credential_group.

    Matches on both ``credential_id`` (e.g. "google" Gmail tools) and
    ``credential_group`` (e.g. "google_custom_search" all google search tools).

    Args:
        provider_name: Credential id or credential group to match.

    Returns:
        Sorted, de-duplicated list of tool names for the credential.
    """
    from aden_tools.credentials import CREDENTIAL_SPECS

    tools: list[str] = []
    for spec in CREDENTIAL_SPECS.values():
        # A spec matches on either its own id or its group name.
        if spec.credential_id == provider_name or spec.credential_group == provider_name:
            tools.extend(spec.tools)
    return sorted(set(tools))
def list_connected_accounts() -> list[dict]:
"""List connected accounts from GET /v1/credentials."""
def _list_aden_accounts() -> list[dict]:
"""List active accounts from the Aden platform (requires ADEN_API_KEY)."""
import os
from framework.credentials.aden.client import AdenClientConfig, AdenCredentialClient
api_key = os.environ.get("ADEN_API_KEY")
if not api_key:
return []
client = AdenCredentialClient(
AdenClientConfig(
base_url=os.environ.get("ADEN_API_URL", "https://api.adenhq.com"),
)
)
try:
integrations = client.list_integrations()
finally:
client.close()
from framework.credentials.aden.client import AdenClientConfig, AdenCredentialClient
return [
{
"provider": c.provider,
"alias": c.alias,
"identity": {"email": c.email} if c.email else {},
"integration_id": c.integration_id,
}
for c in integrations
if c.status == "active"
client = AdenCredentialClient(
AdenClientConfig(
base_url=os.environ.get("ADEN_API_URL", "https://api.adenhq.com"),
)
)
try:
integrations = client.list_integrations()
finally:
client.close()
return [
{
"provider": c.provider,
"alias": c.alias,
"identity": {"email": c.email} if c.email else {},
"integration_id": c.integration_id,
"source": "aden",
}
for c in integrations
if c.status == "active"
]
except Exception:
return []
def _list_local_accounts() -> list[dict]:
"""List named local API key accounts from LocalCredentialRegistry."""
try:
from framework.credentials.local.registry import LocalCredentialRegistry
return [
info.to_account_dict() for info in LocalCredentialRegistry.default().list_accounts()
]
except Exception:
return []
def _list_env_fallback_accounts() -> list[dict]:
"""Surface configured-but-unregistered credentials as testable entries.
Detects credentials available via env vars OR stored in the encrypted
store in the old flat format (e.g. ``brave_search`` with no alias).
These are users who haven't yet run ``save_account()`` but have a working key.
Shows with alias="default" and status="unknown".
"""
import os
from aden_tools.credentials import CREDENTIAL_SPECS
# Collect IDs in encrypted store (includes old flat entries like "brave_search")
try:
from framework.credentials.storage import EncryptedFileStorage
encrypted_ids: set[str] = set(EncryptedFileStorage().list_all())
except Exception:
encrypted_ids = set()
def _is_configured(cred_name: str, spec) -> bool:
# 1. Env var present
if os.environ.get(spec.env_var):
return True
# 2. Old flat encrypted entry (no slash — new entries have {x}/{y})
if cred_name in encrypted_ids:
return True
return False
seen_groups: set[str] = set()
accounts: list[dict] = []
for cred_name, spec in CREDENTIAL_SPECS.items():
if not spec.direct_api_key_supported or not spec.tools:
continue
if spec.credential_group:
if spec.credential_group in seen_groups:
continue
group_available = all(
_is_configured(n, s)
for n, s in CREDENTIAL_SPECS.items()
if s.credential_group == spec.credential_group
)
if not group_available:
continue
seen_groups.add(spec.credential_group)
provider = spec.credential_group
else:
if not _is_configured(cred_name, spec):
continue
provider = cred_name
accounts.append(
{
"provider": provider,
"alias": "default",
"identity": {},
"integration_id": None,
"source": "local",
"status": "unknown",
}
)
return accounts
def list_connected_accounts() -> list[dict]:
    """List all testable accounts: Aden-synced + named local + env-var fallbacks."""
    accounts = _list_aden_accounts()
    named_local = _list_local_accounts()
    accounts.extend(named_local)
    # Env-var fallbacks are suppressed for providers that already have a
    # named entry in the local registry — the registry entry wins.
    registered = {entry["provider"] for entry in named_local}
    for candidate in _list_env_fallback_accounts():
        if candidate["provider"] not in registered:
            accounts.append(candidate)
    return accounts
# ---------------------------------------------------------------------------
# Module-level hooks (read by AgentRunner.load / TUI)
@@ -123,22 +220,102 @@ requires_account_selection = True
def configure_for_account(runner: AgentRunner, account: dict) -> None:
"""Scope the tester node's tools to the selected provider.
Called by the TUI after the user picks an account from the picker.
After scoping, re-enables credential validation so the selected
provider's credentials are checked before the agent starts.
Handles both Aden accounts (account= routing) and local accounts
(session-level env var injection, no account= parameter in prompt).
"""
provider = account["provider"]
tools = get_tools_for_provider(provider)
tools.append("get_account_info")
source = account.get("source", "aden")
alias = account.get("alias", "unknown")
email = account.get("identity", {}).get("email", "")
detail = f" (email: {email})" if email else ""
identity = account.get("identity", {})
tools = get_tools_for_provider(provider)
if source == "aden":
tools.append("get_account_info")
email = identity.get("email", "")
detail = f" (email: {email})" if email else ""
_configure_aden_node(runner, provider, alias, detail, tools)
else:
status = account.get("status", "unknown")
_activate_local_account(provider, alias)
_configure_local_node(runner, provider, alias, identity, tools, status)
def _activate_local_account(credential_id: str, alias: str) -> None:
"""Inject a named local account's key into the session environment.
Handles three cases:
1. Named account in LocalCredentialRegistry (new format: {credential_id}/{alias})
2. Old flat credential in EncryptedFileStorage (id == credential_id, no alias)
3. Env var already set skip injection (nothing to do)
"""
import os
from aden_tools.credentials import CREDENTIAL_SPECS
# Collect specs for this credential (handles grouped credentials too)
group_specs = [
(cred_name, spec)
for cred_name, spec in CREDENTIAL_SPECS.items()
if spec.credential_group == credential_id
or spec.credential_id == credential_id
or cred_name == credential_id
]
# Deduplicate — credential_id and credential_group may both match the same spec
seen_env_vars: set[str] = set()
try:
from framework.credentials.local.registry import LocalCredentialRegistry
from framework.credentials.storage import EncryptedFileStorage
registry = LocalCredentialRegistry.default()
flat_storage = EncryptedFileStorage()
for _cred_name, spec in group_specs:
if spec.env_var in seen_env_vars:
continue
# If env var is already set, nothing to do for this one
if os.environ.get(spec.env_var):
seen_env_vars.add(spec.env_var)
continue
seen_env_vars.add(spec.env_var)
# Determine key name based on spec
key_name = "api_key"
if spec.credential_group and "cse" in spec.env_var.lower():
key_name = "cse_id"
key: str | None = None
# 1. Try named account in registry (new format)
if alias != "default":
key = registry.get_key(credential_id, alias, key_name)
else:
# For "default" alias, check registry first, then fall back to flat store
key = registry.get_key(credential_id, "default", key_name)
# 2. Fall back to old flat encrypted entry (id == credential_id, no alias)
if key is None:
flat_cred = flat_storage.load(credential_id)
if flat_cred is not None:
key = flat_cred.get_key(key_name) or flat_cred.get_default_key()
if key:
os.environ[spec.env_var] = key
except Exception:
pass
def _configure_aden_node(
runner: AgentRunner,
provider: str,
alias: str,
detail: str,
tools: list[str],
) -> None:
for node in runner.graph.nodes:
if node.id == "tester":
node.tools = sorted(set(tools))
# Update system prompt to be provider-specific
node.system_prompt = f"""\
You are a credential tester for the account: {provider}/{alias}{detail}
@@ -165,19 +342,60 @@ or any other identifier — always use the alias exactly as shown.
"""
break
# Set intro message for TUI display
runner.intro_message = (
f"Testing {provider}/{alias}{detail}"
f"{len(tools)} tools loaded. "
f"I'll suggest a read-only API call to verify the credential works."
"I'll suggest a read-only API call to verify the credential works."
)
def _configure_local_node(
    runner: AgentRunner,
    provider: str,
    alias: str,
    identity: dict,
    tools: list[str],
    status: str,
) -> None:
    """Scope the static "tester" node to a selected local API key account.

    Rewrites the node's tool list and system prompt for the local credential
    and sets the TUI intro message. Unlike Aden accounts, local keys are
    injected into the session environment, so the prompt instructs the model
    NOT to pass an ``account`` parameter.

    Args:
        runner: AgentRunner whose graph contains the "tester" node.
        provider: Credential name (e.g. "brave_search").
        alias: Account alias (e.g. "work", "default").
        identity: Identity metadata dict (email/username/etc.) for display.
        tools: Tool names to expose on the node.
        status: Account status; "unknown" adds a not-yet-validated note.
    """
    # Human-readable identity suffix, e.g. " (username: acme)".
    identity_parts = [f"{k}: {v}" for k, v in identity.items() if v]
    detail = f" ({', '.join(identity_parts)})" if identity_parts else ""
    # Env-var fallback accounts carry status "unknown" — surface that in the prompt.
    status_note = " [key not yet validated]" if status == "unknown" else ""
    for node in runner.graph.nodes:
        if node.id == "tester":
            node.tools = sorted(set(tools))
            node.system_prompt = f"""\
You are a credential tester for the local API key: {provider}/{alias}{detail}{status_note}
# Instructions
1. Suggest a simple test call to verify the credential works \
(e.g. search for "test", list items, get profile info).
2. Execute the call when the user agrees.
3. Report the result: success (with sample data) or failure (with error).
4. Let the user request additional API calls to further test the credential.
# Rules
- Do NOT pass an `account` parameter this credential is injected \
directly into the session environment and tools read it automatically.
- Start with read-only operations before write operations.
- Always confirm with the user before performing write operations.
- If a call fails, report the exact error this helps diagnose credential issues.
- Be concise. No emojis.
"""
            break
    # Shown by the TUI when the chat session opens.
    runner.intro_message = (
        f"Testing {provider}/{alias}{detail}"
        f"{len(tools)} tools loaded. "
        "I'll suggest a test API call to verify the credential works."
    )
# ---------------------------------------------------------------------------
# Module-level graph variables (read by AgentRunner.load)
# ---------------------------------------------------------------------------
# The static node starts with minimal tools. configure_for_account() scopes
# it to the selected provider's tools before execution.
nodes = [
NodeSpec(
@@ -195,7 +413,7 @@ nodes = [
tools=["get_account_info"],
system_prompt="""\
You are a credential tester. Your job is to help the user verify that their \
connected accounts can make real API calls.
connected accounts and API keys can make real API calls.
# Startup
@@ -208,12 +426,11 @@ connected accounts can make real API calls.
6. Report the result: success (with sample data) or failure (with error).
7. Let the user request additional API calls to further test the credential.
# Account routing
# Account routing (Aden accounts only)
IMPORTANT: Always pass the account's **alias** as the ``account`` parameter \
when calling any tool. The alias is the routing key never use the email or \
any other identifier. For example, if the alias is "Timothy", call \
``gmail_list_messages(account="Timothy", ...)``.
IMPORTANT: For Aden-synced accounts, always pass the account's **alias** as the \
``account`` parameter when calling any tool. For local API key accounts, do NOT \
pass an account parameter they are pre-injected into the session.
# Rules
@@ -234,7 +451,8 @@ terminal_nodes = [] # Forever-alive: loops until user exits
conversation_mode = "continuous"
identity_prompt = (
"You are a credential tester that verifies connected accounts can make real API calls."
"You are a credential tester that verifies connected accounts and API keys "
"can make real API calls."
)
loop_config = {
"max_iterations": 50,
@@ -255,7 +473,6 @@ class CredentialTesterAgent:
accounts = agent.list_accounts()
agent.select_account(accounts[0])
await agent.start()
# ... user chats via TUI or CLI ...
await agent.stop()
"""
@@ -267,7 +484,7 @@ class CredentialTesterAgent:
self._storage_path: Path | None = None
def list_accounts(self) -> list[dict]:
"""List connected accounts from the Aden credential store."""
"""List all testable accounts (Aden + local named + env-var fallbacks)."""
return list_connected_accounts()
def select_account(self, account: dict) -> None:
@@ -275,7 +492,7 @@ class CredentialTesterAgent:
Args:
account: Account dict from list_accounts() with
provider, alias, identity keys.
provider, alias, identity, source keys.
"""
self._selected_account = account
@@ -294,14 +511,21 @@ class CredentialTesterAgent:
def _build_graph(self) -> GraphSpec:
provider = self.selected_provider
alias = self.selected_alias
source = self._selected_account.get("source", "aden")
identity = self._selected_account.get("identity", {})
tools = get_tools_for_provider(provider)
if source == "local":
_activate_local_account(provider, alias)
elif source == "aden":
tools.append("get_account_info")
tester_node = build_tester_node(
provider=provider,
alias=alias,
tools=tools,
identity=identity,
source=source,
)
return GraphSpec(
@@ -8,18 +8,38 @@ def build_tester_node(
alias: str,
tools: list[str],
identity: dict[str, str],
source: str = "aden",
) -> NodeSpec:
"""Build the tester node dynamically for the selected account.
Args:
provider: Aden provider name (e.g. "google", "slack").
alias: User-set alias (e.g. "Timothy").
provider: Provider / credential name (e.g. "google", "brave_search").
alias: User-set alias (e.g. "Timothy", "work").
tools: Tool names available for this provider.
identity: Identity dict (email, workspace, etc.) for context.
source: "aden" or "local" controls routing instructions in the prompt.
"""
detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
if source == "aden":
routing_section = f"""\
# Account routing
IMPORTANT: Always pass `account="{alias}"` when calling any tool. \
This routes the API call to the correct credential. Never use the email \
or any other identifier always use the alias exactly as shown.
"""
else:
routing_section = """\
# Credential routing
This is a local API key credential do NOT pass an `account` parameter. \
The key is pre-injected into the session environment and tools read it automatically.
"""
account_label = "account" if source == "aden" else "local API key"
return NodeSpec(
id="tester",
name="Credential Tester",
@@ -34,22 +54,17 @@ def build_tester_node(
output_keys=[],
tools=tools,
system_prompt=f"""\
You are a credential tester for the account: {provider}/{alias}{detail}
You are a credential tester for the {account_label}: {provider}/{alias}{detail}
Your job is to help the user verify that this credential works by making \
real API calls using the available tools.
# Account routing
IMPORTANT: Always pass `account="{alias}"` when calling any tool. \
This routes the API call to the correct credential. Never use the email \
or any other identifier always use the alias exactly as shown.
{routing_section}
# Instructions
1. Start by greeting the user and confirming which account you're testing.
2. Suggest a simple, safe, read-only API call to verify the credential works \
(e.g. list messages, list channels, list contacts).
(e.g. list messages, list channels, list contacts, search for "test").
3. Execute the call when the user agrees.
4. Report the result clearly: success (with sample data) or failure (with error).
5. Let the user request additional API calls to further test the credential.
+37 -10
View File
@@ -441,14 +441,15 @@ class GraphBuilder:
self.session.test_cases.append(test)
self._save_session()
def run_test(
async def run_test_async(
self,
test: TestCase,
executor_factory: Callable,
) -> TestResult:
"""
Run a single test case.
Run a single test case asynchronously.
This method is safe to call from async contexts (Jupyter, FastAPI, etc.).
executor_factory should return a configured GraphExecutor.
"""
self._require_phase([BuildPhase.ADDING_NODES, BuildPhase.ADDING_EDGES, BuildPhase.TESTING])
@@ -460,14 +461,10 @@ class GraphBuilder:
executor = executor_factory()
# Run the test
import asyncio
result = asyncio.run(
executor.execute(
graph=graph,
goal=self.session.goal,
input_data=test.input,
)
result = await executor.execute(
graph=graph,
goal=self.session.goal,
input_data=test.input,
)
# Check result
@@ -497,6 +494,36 @@ class GraphBuilder:
return test_result
def run_test(
    self,
    test: TestCase,
    executor_factory: Callable,
) -> TestResult:
    """Synchronously run a single test case.

    Thin wrapper around run_test_async(). From an async context
    (Jupyter, FastAPI, etc.) call run_test_async() directly instead.

    executor_factory should return a configured GraphExecutor.

    Raises:
        RuntimeError: If called while an event loop is already running.
    """
    import asyncio

    # Detect whether we're inside a running event loop: get_running_loop()
    # raises RuntimeError when no loop is active in this thread.
    loop_is_running = True
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        loop_is_running = False

    if loop_is_running:
        # asyncio.run() would fail inside a live loop — fail loudly with guidance.
        raise RuntimeError(
            "Cannot call run_test() from an async context. "
            "An event loop is already running. "
            "Please use 'await builder.run_test_async(test, executor_factory)' instead."
        )
    return asyncio.run(self.run_test_async(test, executor_factory))
def run_all_tests(self, executor_factory: Callable) -> list[TestResult]:
"""Run all test cases."""
results = []
+12
View File
@@ -92,6 +92,14 @@ try:
except ImportError:
_ADEN_AVAILABLE = False
# Local credential registry (named API key accounts with identity metadata)
try:
from .local import LocalAccountInfo, LocalCredentialRegistry
_LOCAL_AVAILABLE = True
except ImportError:
_LOCAL_AVAILABLE = False
__all__ = [
# Main store
"CredentialStore",
@@ -133,7 +141,11 @@ __all__ = [
"AdenCredentialClient",
"AdenClientConfig",
"AdenCachedStorage",
# Local credential registry (optional - requires cryptography)
"LocalCredentialRegistry",
"LocalAccountInfo",
]
# Track Aden availability for runtime checks
ADEN_AVAILABLE = _ADEN_AVAILABLE
LOCAL_AVAILABLE = _LOCAL_AVAILABLE
@@ -0,0 +1,31 @@
"""
Local credential registry named API key accounts with identity metadata.
Provides feature parity with Aden OAuth credentials for locally-stored API keys:
aliases, identity metadata, status tracking, CRUD, and health validation.
Usage:
from framework.credentials.local import LocalCredentialRegistry, LocalAccountInfo
registry = LocalCredentialRegistry.default()
# Add a named account
info, health = registry.save_account("brave_search", "work", "BSA-xxx")
# List all stored local accounts
for account in registry.list_accounts():
print(f"{account.credential_id}/{account.alias}: {account.status}")
if account.identity.is_known:
print(f" Identity: {account.identity.label}")
# Re-validate a stored account
result = registry.validate_account("github", "personal")
"""
from .models import LocalAccountInfo
from .registry import LocalCredentialRegistry
__all__ = [
"LocalAccountInfo",
"LocalCredentialRegistry",
]
@@ -0,0 +1,58 @@
"""
Data models for the local credential registry.
LocalAccountInfo mirrors AdenIntegrationInfo, giving local API key credentials
the same identity/status metadata as Aden OAuth credentials.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import datetime
from framework.credentials.models import CredentialIdentity
@dataclass
class LocalAccountInfo:
    """
    A locally-stored named credential account.

    Mirrors AdenIntegrationInfo so local and Aden accounts can be treated
    uniformly in the credential tester and account selection UI.

    Attributes:
        credential_id: The logical credential name (e.g. "brave_search", "github")
        alias: User-provided name for this account (e.g. "work", "personal")
        status: "active" | "failed" | "unknown"
        identity: Email, username, workspace, or account_id extracted from health check
        last_validated: When the key was last verified against the live API
        created_at: When this account was first stored
    """

    credential_id: str
    alias: str
    status: str = "unknown"
    identity: CredentialIdentity = field(default_factory=CredentialIdentity)
    last_validated: datetime | None = None
    # NOTE(review): datetime.utcnow() is naive and deprecated since Python 3.12;
    # the registry module uses timezone-aware datetime.now(UTC). Confirm that
    # mixing naive defaults with aware timestamps is intended before comparing.
    created_at: datetime = field(default_factory=datetime.utcnow)

    @property
    def storage_id(self) -> str:
        """The key used in EncryptedFileStorage: '{credential_id}/{alias}'."""
        return f"{self.credential_id}/{self.alias}"

    def to_account_dict(self) -> dict:
        """
        Format compatible with AccountSelectionScreen and configure_for_account().

        Same shape as Aden account dicts, with source='local' added.
        """
        return {
            "provider": self.credential_id,
            "alias": self.alias,
            "identity": self.identity.to_dict(),
            "integration_id": None,
            "source": "local",
            "status": self.status,
        }
@@ -0,0 +1,326 @@
"""
Local Credential Registry.
Manages named local API key accounts stored in EncryptedFileStorage.
Mirrors the Aden integration model so local credentials have feature parity:
aliases, identity metadata, status tracking, CRUD, and health validation.
Storage convention:
{credential_id}/{alias} CredentialObject
e.g. "brave_search/work" { api_key: "BSA-xxx", _alias: "work",
_integration_type: "brave_search",
_status: "active",
_identity_username: "acme", ... }
Usage:
registry = LocalCredentialRegistry.default()
# Add a new account
info, health = registry.save_account("brave_search", "work", "BSA-xxx")
print(info.status, info.identity.label)
# List all accounts
for account in registry.list_accounts():
print(f"{account.credential_id}/{account.alias}: {account.status}")
# Get the raw API key for a specific account
key = registry.get_key("github", "personal")
# Re-validate a stored account
result = registry.validate_account("github", "personal")
"""
from __future__ import annotations
import logging
from datetime import UTC, datetime
from pathlib import Path
from typing import TYPE_CHECKING, Any
from framework.credentials.models import CredentialIdentity, CredentialObject
from framework.credentials.storage import EncryptedFileStorage
from .models import LocalAccountInfo
if TYPE_CHECKING:
from aden_tools.credentials.health_check import HealthCheckResult
logger = logging.getLogger(__name__)
_SEPARATOR = "/"
class LocalCredentialRegistry:
"""
Named local API key account store backed by EncryptedFileStorage.
Provides the same list/save/get/delete/validate surface as the Aden
client, but for locally-stored API keys.
"""
def __init__(self, storage: EncryptedFileStorage) -> None:
self._storage = storage
# ------------------------------------------------------------------
# Listing
# ------------------------------------------------------------------
def list_accounts(self, credential_id: str | None = None) -> list[LocalAccountInfo]:
"""
List all stored local accounts.
Args:
credential_id: If given, filter to this credential type only.
Returns:
List of LocalAccountInfo sorted by credential_id then alias.
"""
all_ids = self._storage.list_all()
accounts: list[LocalAccountInfo] = []
for storage_id in all_ids:
if _SEPARATOR not in storage_id:
continue # Skip legacy un-aliased entries
try:
cred_obj = self._storage.load(storage_id)
except Exception as exc:
logger.debug("Skipping unreadable credential %s: %s", storage_id, exc)
continue
if cred_obj is None:
continue
info = self._to_account_info(cred_obj)
if info is None:
continue
if credential_id and info.credential_id != credential_id:
continue
accounts.append(info)
return sorted(accounts, key=lambda a: (a.credential_id, a.alias))
# ------------------------------------------------------------------
# Save / add
# ------------------------------------------------------------------
def save_account(
self,
credential_id: str,
alias: str,
api_key: str,
run_health_check: bool = True,
extra_keys: dict[str, str] | None = None,
) -> tuple[LocalAccountInfo, HealthCheckResult | None]:
"""
Store a named account, optionally validating it first.
Args:
credential_id: Logical credential name (e.g. "brave_search").
alias: User-chosen name (e.g. "work"). Defaults to "default".
api_key: The raw API key / token value.
run_health_check: If True, verify the key against the live API
and extract identity metadata. Failure still saves with
status="failed" so the user can re-validate later.
extra_keys: Additional key/value pairs to store (e.g.
cse_id for google_custom_search).
Returns:
(LocalAccountInfo, HealthCheckResult | None)
"""
alias = alias or "default"
health_result: HealthCheckResult | None = None
identity: dict[str, str] = {}
status = "active"
if run_health_check:
try:
from aden_tools.credentials.health_check import check_credential_health
kwargs: dict[str, Any] = {}
if extra_keys and "cse_id" in extra_keys:
kwargs["cse_id"] = extra_keys["cse_id"]
health_result = check_credential_health(credential_id, api_key, **kwargs)
status = "active" if health_result.valid else "failed"
identity = health_result.details.get("identity", {})
except Exception as exc:
logger.warning("Health check failed for %s/%s: %s", credential_id, alias, exc)
status = "unknown"
storage_id = f"{credential_id}{_SEPARATOR}{alias}"
now = datetime.now(UTC)
cred_obj = CredentialObject(id=storage_id)
cred_obj.set_key("api_key", api_key)
cred_obj.set_key("_alias", alias)
cred_obj.set_key("_integration_type", credential_id)
cred_obj.set_key("_status", status)
if extra_keys:
for k, v in extra_keys.items():
cred_obj.set_key(k, v)
if identity:
valid_fields = set(CredentialIdentity.model_fields)
filtered = {k: v for k, v in identity.items() if k in valid_fields}
if filtered:
cred_obj.set_identity(**filtered)
cred_obj.last_refreshed = now if run_health_check else None
self._storage.save(cred_obj)
account_info = LocalAccountInfo(
credential_id=credential_id,
alias=alias,
status=status,
identity=cred_obj.identity,
last_validated=cred_obj.last_refreshed,
created_at=cred_obj.created_at,
)
return account_info, health_result
# ------------------------------------------------------------------
# Get
# ------------------------------------------------------------------
def get_account(self, credential_id: str, alias: str) -> CredentialObject | None:
"""Load the raw CredentialObject for a specific account."""
return self._storage.load(f"{credential_id}{_SEPARATOR}{alias}")
def get_key(self, credential_id: str, alias: str, key_name: str = "api_key") -> str | None:
"""
Return the stored secret value for a specific account.
Args:
credential_id: Logical credential name (e.g. "brave_search").
alias: Account alias (e.g. "work").
key_name: Key within the credential (default "api_key").
Returns:
The secret value, or None if not found.
"""
cred = self.get_account(credential_id, alias)
if cred is None:
return None
return cred.get_key(key_name)
def get_account_info(self, credential_id: str, alias: str) -> LocalAccountInfo | None:
"""Load a LocalAccountInfo for a specific account."""
cred = self.get_account(credential_id, alias)
if cred is None:
return None
return self._to_account_info(cred)
# ------------------------------------------------------------------
# Delete
# ------------------------------------------------------------------
def delete_account(self, credential_id: str, alias: str) -> bool:
"""
Remove a stored account.
Returns:
True if the account existed and was deleted, False otherwise.
"""
return self._storage.delete(f"{credential_id}{_SEPARATOR}{alias}")
# ------------------------------------------------------------------
# Validate
# ------------------------------------------------------------------
def validate_account(self, credential_id: str, alias: str) -> HealthCheckResult:
"""
Re-run health check for a stored account and update its status.
Args:
credential_id: Logical credential name.
alias: Account alias.
Returns:
HealthCheckResult from the live API check.
Raises:
KeyError: If the account doesn't exist.
"""
from aden_tools.credentials.health_check import HealthCheckResult, check_credential_health
cred = self.get_account(credential_id, alias)
if cred is None:
raise KeyError(f"No local account found: {credential_id}/{alias}")
api_key = cred.get_key("api_key")
if not api_key:
return HealthCheckResult(valid=False, message="No api_key stored for this account")
try:
kwargs: dict[str, Any] = {}
cse_id = cred.get_key("cse_id")
if cse_id:
kwargs["cse_id"] = cse_id
result = check_credential_health(credential_id, api_key, **kwargs)
except Exception as exc:
result = HealthCheckResult(
valid=False,
message=f"Health check error: {exc}",
details={"error": str(exc)},
)
# Update status and timestamp in-place
new_status = "active" if result.valid else "failed"
cred.set_key("_status", new_status)
cred.last_refreshed = datetime.now(UTC)
# Re-extract identity if available
identity = result.details.get("identity", {})
if identity:
valid_fields = set(CredentialIdentity.model_fields)
filtered = {k: v for k, v in identity.items() if k in valid_fields}
if filtered:
cred.set_identity(**filtered)
self._storage.save(cred)
return result
# ------------------------------------------------------------------
# Factory
# ------------------------------------------------------------------
@classmethod
def default(cls) -> LocalCredentialRegistry:
"""Create a registry using the default encrypted storage at ~/.hive/credentials."""
return cls(EncryptedFileStorage())
@classmethod
def at_path(cls, path: str | Path) -> LocalCredentialRegistry:
    """Create a registry whose encrypted storage lives at a custom base path."""
    storage = EncryptedFileStorage(base_path=path)
    return cls(storage)
# ------------------------------------------------------------------
# Internals
# ------------------------------------------------------------------
def _to_account_info(self, cred_obj: CredentialObject) -> LocalAccountInfo | None:
    """Convert a stored CredentialObject into a LocalAccountInfo summary.

    Returns:
        A LocalAccountInfo, or None when the object has no
        '_integration_type' marker key (i.e. it was not written by this
        registry and should be skipped).
    """
    type_key = cred_obj.keys.get("_integration_type")
    if type_key is None:
        return None

    alias_key = cred_obj.keys.get("_alias")
    if alias_key:
        alias = alias_key.get_secret_value()
    else:
        # Legacy fallback: recover the alias from the storage id,
        # which is formatted as "<credential_id><sep><alias>".
        alias = cred_obj.id.split(_SEPARATOR, 1)[-1]

    status_key = cred_obj.keys.get("_status")
    status = status_key.get_secret_value() if status_key else "unknown"

    return LocalAccountInfo(
        credential_id=type_key.get_secret_value(),
        alias=alias,
        status=status,
        identity=cred_obj.identity,
        last_validated=cred_obj.last_refreshed,
        created_at=cred_obj.created_at,
    )
+23 -7
View File
@@ -14,20 +14,36 @@ logger = logging.getLogger(__name__)
def ensure_credential_key_env() -> None:
"""Load HIVE_CREDENTIAL_KEY and ADEN_API_KEY from shell config if not in environment.
"""Load credentials from shell config if not in environment.
The setup-credentials skill writes these to ~/.zshrc or ~/.bashrc.
If the user hasn't sourced their config in the current shell, this reads
them directly so the runner (and any MCP subprocesses it spawns) can:
- Unlock the encrypted credential store (HIVE_CREDENTIAL_KEY)
- Enable Aden OAuth sync for Google/HubSpot/etc. (ADEN_API_KEY)
The quickstart.sh and setup-credentials skill write API keys to ~/.zshrc
or ~/.bashrc. If the user hasn't sourced their config in the current shell,
this reads them directly so the runner (and any MCP subprocesses) can use them.
Loads:
- HIVE_CREDENTIAL_KEY (encrypted credential store)
- ADEN_API_KEY (Aden OAuth sync)
- All LLM API keys (ANTHROPIC_API_KEY, OPENAI_API_KEY, ZAI_API_KEY, etc.)
"""
try:
from aden_tools.credentials.shell_config import check_env_var_in_shell_config
except ImportError:
return
for var_name in ("HIVE_CREDENTIAL_KEY", "ADEN_API_KEY"):
# Core credentials that are always checked
env_vars_to_load = ["HIVE_CREDENTIAL_KEY", "ADEN_API_KEY"]
# Add all LLM/tool API keys from CREDENTIAL_SPECS
try:
from aden_tools.credentials import CREDENTIAL_SPECS
for spec in CREDENTIAL_SPECS.values():
if spec.env_var and spec.env_var not in env_vars_to_load:
env_vars_to_load.append(spec.env_var)
except ImportError:
pass
for var_name in env_vars_to_load:
if os.environ.get(var_name):
continue
found, value = check_env_var_in_shell_config(var_name)
+1 -1
View File
@@ -422,7 +422,7 @@ class GraphSpec(BaseModel):
# Cleanup LLM for JSON extraction fallback (fast/cheap model preferred)
# If not set, uses CEREBRAS_API_KEY -> cerebras/llama-3.3-70b or
# ANTHROPIC_API_KEY -> claude-3-5-haiku as fallback
# ANTHROPIC_API_KEY -> claude-haiku-4-5 as fallback
cleanup_llm_model: str | None = None
# Execution limits
+24 -5
View File
@@ -315,8 +315,10 @@ class EventLoopNode(NodeProtocol):
f"{system_prompt}\n\n"
f"--- Your Memory ---\n{_adapt_text}\n--- End Memory ---\n\n"
'Maintain your memory by calling save_data("adapt.md", ...) '
'or edit_data("adapt.md", ...) as you work. '
"Record identity, session history, decisions, and working notes."
'or edit_data("adapt.md", ...) as you work.\n'
"IMMEDIATELY save: user rules about which account/identity to use, "
"behavioral constraints, and preferences. "
"Also record session history, decisions, and working notes."
)
conversation = NodeConversation(
@@ -2203,7 +2205,11 @@ class EventLoopNode(NodeProtocol):
)
prompt = (
"Summarize this conversation so far in 2-3 sentences, "
"preserving key decisions and results:\n\n"
"preserving key decisions and results.\n\n"
"IMPORTANT: Always preserve any user-stated rules, constraints, "
"or preferences — especially which account/identity to use, "
"formatting preferences, and behavioral instructions. "
"These MUST appear verbatim or near-verbatim in your summary.\n\n"
f"{messages_text}"
)
if tool_history:
@@ -2220,7 +2226,9 @@ class EventLoopNode(NodeProtocol):
response = await ctx.llm.acomplete(
messages=[{"role": "user", "content": prompt}],
system=(
"Summarize conversations concisely. Always preserve the tool history section."
"Summarize conversations concisely. Always preserve the tool "
"history section. Always preserve user-stated rules, constraints, "
"and account/identity preferences verbatim."
),
max_tokens=summary_budget,
)
@@ -2292,13 +2300,24 @@ class EventLoopNode(NodeProtocol):
# 5. Spillover files — list actual files so the LLM can load
# them immediately instead of having to call list_data_files first.
# Inline adapt.md (agent memory) directly — it contains user rules
# and identity preferences that must survive emergency compaction.
if self._config.spillover_dir:
try:
from pathlib import Path
data_dir = Path(self._config.spillover_dir)
if data_dir.is_dir():
files = sorted(f.name for f in data_dir.iterdir() if f.is_file())
# Inline adapt.md content directly
adapt_path = data_dir / "adapt.md"
if adapt_path.is_file():
adapt_text = adapt_path.read_text(encoding="utf-8").strip()
if adapt_text:
parts.append(f"AGENT MEMORY (adapt.md):\n{adapt_text}")
files = sorted(
f.name for f in data_dir.iterdir() if f.is_file() and f.name != "adapt.md"
)
if files:
file_list = "\n".join(f" - {f}" for f in files[:30])
parts.append("DATA FILES (use load_data to read):\n" + file_list)
+32 -2
View File
@@ -135,6 +135,8 @@ class GraphExecutor:
storage_path: str | Path | None = None,
loop_config: dict[str, Any] | None = None,
accounts_prompt: str = "",
accounts_data: list[dict] | None = None,
tool_provider_map: dict[str, str] | None = None,
):
"""
Initialize the executor.
@@ -155,6 +157,8 @@ class GraphExecutor:
storage_path: Optional base path for conversation persistence
loop_config: Optional EventLoopNode configuration (max_iterations, etc.)
accounts_prompt: Connected accounts block for system prompt injection
accounts_data: Raw account data for per-node prompt generation
tool_provider_map: Tool name to provider name mapping for account routing
"""
self.runtime = runtime
self.llm = llm
@@ -170,6 +174,8 @@ class GraphExecutor:
self._storage_path = Path(storage_path) if storage_path else None
self._loop_config = loop_config or {}
self.accounts_prompt = accounts_prompt
self.accounts_data = accounts_data
self.tool_provider_map = tool_provider_map
# Initialize output cleaner
self.cleansing_config = cleansing_config or CleansingConfig()
@@ -1166,6 +1172,7 @@ class GraphExecutor:
next_spec = graph.get_node(current_node_id)
if next_spec and next_spec.node_type == "event_loop":
from framework.graph.prompt_composer import (
build_accounts_prompt,
build_narrative,
build_transition_marker,
compose_system_prompt,
@@ -1191,12 +1198,24 @@ class GraphExecutor:
else _adapt_text
)
# Build per-node accounts prompt for the next node
_node_accounts = self.accounts_prompt or None
if self.accounts_data and self.tool_provider_map:
_node_accounts = (
build_accounts_prompt(
self.accounts_data,
self.tool_provider_map,
node_tool_names=next_spec.tools,
)
or None
)
# Compose new system prompt (Layer 1 + 2 + 3 + accounts)
new_system = compose_system_prompt(
identity_prompt=getattr(graph, "identity_prompt", None),
focus_prompt=next_spec.system_prompt,
narrative=narrative,
accounts_prompt=self.accounts_prompt or None,
accounts_prompt=_node_accounts,
)
continuous_conversation.update_system_prompt(new_system)
@@ -1523,6 +1542,17 @@ class GraphExecutor:
write_keys=node_spec.output_keys,
)
# Build per-node accounts prompt (filtered to this node's tools)
node_accounts_prompt = self.accounts_prompt
if self.accounts_data and self.tool_provider_map:
from framework.graph.prompt_composer import build_accounts_prompt
node_accounts_prompt = build_accounts_prompt(
self.accounts_data,
self.tool_provider_map,
node_tool_names=node_spec.tools,
)
return NodeContext(
runtime=self.runtime,
node_id=node_spec.id,
@@ -1540,7 +1570,7 @@ class GraphExecutor:
inherited_conversation=inherited_conversation,
cumulative_output_keys=cumulative_output_keys or [],
event_triggered=event_triggered,
accounts_prompt=self.accounts_prompt,
accounts_prompt=node_accounts_prompt,
execution_id=self.runtime.execution_id,
)
+5
View File
@@ -44,6 +44,11 @@ class SuccessCriterion(BaseModel):
metric: str = Field(
description="How to measure: 'output_contains', 'output_equals', 'llm_judge', 'custom'"
)
# NEW: runtime evaluation type (separate from metric)
type: str = Field(
default="success_rate", description="Runtime evaluation type, e.g. 'success_rate'"
)
target: Any = Field(description="The target value or condition")
weight: float = Field(default=1.0, ge=0.0, le=1.0, description="Relative importance (0-1)")
met: bool = False
+1 -1
View File
@@ -197,7 +197,7 @@ Example format:
client = anthropic.Anthropic(api_key=api_key)
message = client.messages.create(
model="claude-3-5-haiku-20241022",
model="claude-haiku-4-5-20251001",
max_tokens=500,
messages=[{"role": "user", "content": prompt}],
)
+1 -1
View File
@@ -591,7 +591,7 @@ class NodeResult:
client = anthropic.Anthropic(api_key=api_key)
message = client.messages.create(
model="claude-3-5-haiku-20241022",
model="claude-haiku-4-5-20251001",
max_tokens=200,
messages=[{"role": "user", "content": prompt}],
)
+88 -12
View File
@@ -34,29 +34,105 @@ def _with_datetime(prompt: str) -> str:
return f"{prompt}\n\n{stamp}" if prompt else stamp
def build_accounts_prompt(accounts: list[dict[str, Any]]) -> str:
def build_accounts_prompt(
accounts: list[dict[str, Any]],
tool_provider_map: dict[str, str] | None = None,
node_tool_names: list[str] | None = None,
) -> str:
"""Build a prompt section describing connected accounts.
When tool_provider_map is provided, produces structured output grouped
by provider with tool mapping, so the LLM knows which ``account`` value
to pass to which tool.
When node_tool_names is also provided, filters to only show providers
whose tools overlap with the node's tool list.
Args:
accounts: List of account info dicts from CredentialStoreAdapter.get_all_account_info().
accounts: List of account info dicts from
CredentialStoreAdapter.get_all_account_info().
tool_provider_map: Mapping of tool_name -> provider_name
(e.g. {"gmail_list_messages": "google"}).
node_tool_names: Tool names available to the current node.
When provided, only providers with matching tools are shown.
Returns:
Formatted accounts block, or empty string if no accounts.
"""
if not accounts:
return ""
lines = [
"Connected accounts (use the alias as the `account` parameter "
"when calling tools to target a specific account):"
]
# Flat format (backward compat) when no tool mapping provided
if tool_provider_map is None:
lines = [
"Connected accounts (use the alias as the `account` parameter "
"when calling tools to target a specific account):"
]
for acct in accounts:
provider = acct.get("provider", "unknown")
alias = acct.get("alias", "unknown")
identity = acct.get("identity", {})
detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
lines.append(f"- {provider}/{alias}{detail}")
return "\n".join(lines)
# --- Structured format: group by provider with tool mapping ---
# Invert tool_provider_map to provider -> [tools]
provider_tools: dict[str, list[str]] = {}
for tool_name, provider in tool_provider_map.items():
provider_tools.setdefault(provider, []).append(tool_name)
# Filter to relevant providers based on node tools
node_tool_set = set(node_tool_names) if node_tool_names else None
# Group accounts by provider
provider_accounts: dict[str, list[dict[str, Any]]] = {}
for acct in accounts:
provider = acct.get("provider", "unknown")
alias = acct.get("alias", "unknown")
identity = acct.get("identity", {})
detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
lines.append(f"- {provider}/{alias}{detail}")
return "\n".join(lines)
provider_accounts.setdefault(provider, []).append(acct)
sections: list[str] = ["Connected accounts:"]
for provider, acct_list in provider_accounts.items():
tools_for_provider = sorted(provider_tools.get(provider, []))
# If node tools specified, only show providers with overlapping tools
if node_tool_set is not None:
relevant_tools = [t for t in tools_for_provider if t in node_tool_set]
if not relevant_tools:
continue
tools_for_provider = relevant_tools
# Local-only providers: tools read from env vars, no account= routing
all_local = all(a.get("source") == "local" for a in acct_list)
# Provider header with tools
display_name = provider.replace("_", " ").title()
if tools_for_provider and not all_local:
tools_str = ", ".join(tools_for_provider)
sections.append(f'\n{display_name} (use account="<alias>" with: {tools_str}):')
elif tools_for_provider and all_local:
tools_str = ", ".join(tools_for_provider)
sections.append(f"\n{display_name} (tools: {tools_str}):")
else:
sections.append(f"\n{display_name}:")
# Account entries
for acct in acct_list:
alias = acct.get("alias", "unknown")
identity = acct.get("identity", {})
detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
source_tag = " [local]" if acct.get("source") == "local" else ""
sections.append(f" - {provider}/{alias}{detail}{source_tag}")
# If filtering removed all providers, return empty
if len(sections) <= 1:
return ""
return "\n".join(sections)
def compose_system_prompt(
+106 -46
View File
@@ -3338,6 +3338,11 @@ def store_credential(
str, "Logical credential name (e.g., 'hubspot', 'brave_search', 'anthropic')"
],
credential_value: Annotated[str, "The secret value to store (API key, token, etc.)"],
alias: Annotated[
str,
"Named alias for this account (e.g., 'work', 'personal'). Defaults to 'default'. "
"Use aliases to store multiple accounts for the same service.",
] = "default",
key_name: Annotated[
str, "Key name within the credential (e.g., 'api_key', 'access_token')"
] = "api_key",
@@ -3347,38 +3352,42 @@ def store_credential(
Store a credential securely in the local encrypted store at ~/.hive/credentials.
Uses Fernet encryption (AES-128-CBC + HMAC). Requires HIVE_CREDENTIAL_KEY env var.
Credentials are stored as {credential_name}/{alias}, allowing multiple named accounts
per service (e.g., 'brave_search/work', 'brave_search/personal').
A health check is run automatically to validate the key and extract identity metadata.
"""
try:
from pydantic import SecretStr
from framework.credentials.local.registry import LocalCredentialRegistry
from framework.credentials import CredentialKey, CredentialObject
store = _get_credential_store()
if not display_name:
display_name = credential_name.replace("_", " ").title()
cred = CredentialObject(
id=credential_name,
name=display_name,
keys={
key_name: CredentialKey(
name=key_name,
value=SecretStr(credential_value),
)
},
registry = LocalCredentialRegistry.default()
info, health_result = registry.save_account(
credential_id=credential_name,
alias=alias,
api_key=credential_value,
run_health_check=True,
)
store.save_credential(cred)
return json.dumps(
{
"success": True,
"credential": credential_name,
"key": key_name,
"location": "~/.hive/credentials",
"encrypted": True,
result: dict = {
"success": True,
"credential": credential_name,
"alias": alias,
"storage_id": info.storage_id,
"status": info.status,
"location": "~/.hive/credentials",
"encrypted": True,
}
if health_result is not None:
result["health_check"] = {
"valid": health_result.valid,
"message": health_result.message,
}
)
identity = info.identity.to_dict()
if identity:
result["identity"] = identity
return json.dumps(result)
except Exception as e:
return json.dumps({"success": False, "error": str(e)})
@@ -3388,26 +3397,28 @@ def list_stored_credentials() -> str:
"""
List all credentials currently stored in the local encrypted store.
Returns credential IDs and metadata (never returns secret values).
Returns credential IDs, aliases, status, and identity metadata (never returns secret values).
"""
try:
store = _get_credential_store()
credential_ids = store.list_credentials()
from framework.credentials.local.registry import LocalCredentialRegistry
registry = LocalCredentialRegistry.default()
accounts = registry.list_accounts()
credentials = []
for cred_id in credential_ids:
try:
cred = store.get_credential(cred_id)
credentials.append(
{
"id": cred.id,
"name": cred.name,
"keys": list(cred.keys.keys()),
"created_at": cred.created_at.isoformat() if cred.created_at else None,
}
)
except Exception:
credentials.append({"id": cred_id, "error": "Could not load"})
for info in accounts:
entry: dict = {
"credential_id": info.credential_id,
"alias": info.alias,
"storage_id": info.storage_id,
"status": info.status,
"created_at": info.created_at.isoformat() if info.created_at else None,
"last_validated": info.last_validated.isoformat() if info.last_validated else None,
}
identity = info.identity.to_dict()
if identity:
entry["identity"] = identity
credentials.append(entry)
return json.dumps(
{
@@ -3424,26 +3435,75 @@ def list_stored_credentials() -> str:
@mcp.tool()
def delete_stored_credential(
credential_name: Annotated[str, "Logical credential name to delete (e.g., 'hubspot')"],
alias: Annotated[
str,
"Alias of the account to delete (e.g., 'work', 'personal'). Defaults to 'default'.",
] = "default",
) -> str:
"""
Delete a credential from the local encrypted store.
"""
try:
store = _get_credential_store()
deleted = store.delete_credential(credential_name)
from framework.credentials.local.registry import LocalCredentialRegistry
registry = LocalCredentialRegistry.default()
storage_id = f"{credential_name}/{alias}"
deleted = registry.delete_account(credential_name, alias)
return json.dumps(
{
"success": deleted,
"credential": credential_name,
"message": f"Credential '{credential_name}' deleted"
"alias": alias,
"storage_id": storage_id,
"message": f"Credential '{storage_id}' deleted"
if deleted
else f"Credential '{credential_name}' not found",
else f"Credential '{storage_id}' not found",
}
)
except Exception as e:
return json.dumps({"success": False, "error": str(e)})
@mcp.tool()
def validate_credential(
credential_name: Annotated[
str, "Logical credential name to validate (e.g., 'brave_search', 'github')"
],
alias: Annotated[
str,
"Alias of the account to validate (e.g., 'work', 'personal'). Defaults to 'default'.",
] = "default",
) -> str:
"""
Re-run health check for a stored credential and update its status.
Makes a live API call to verify the credential is still valid and updates
the stored status and last_validated timestamp.
"""
try:
from framework.credentials.local.registry import LocalCredentialRegistry
registry = LocalCredentialRegistry.default()
result = registry.validate_account(credential_name, alias)
response: dict = {
"credential": credential_name,
"alias": alias,
"storage_id": f"{credential_name}/{alias}",
"valid": result.valid,
"status": "active" if result.valid else "failed",
"message": result.message,
}
identity = result.details.get("identity") if result.details else None
if identity:
response["identity"] = identity
return json.dumps(response)
except Exception as e:
return json.dumps({"success": False, "error": str(e)})
@mcp.tool()
def verify_credentials(
agent_path: Annotated[str, "Path to the exported agent directory (e.g., 'exports/my-agent')"],
+1 -1
View File
@@ -1099,7 +1099,7 @@ Output ONLY valid JSON, no explanation:"""
try:
message = client.messages.create(
model="claude-3-5-haiku-20241022", # Fast and cheap
model="claude-haiku-4-5-20251001", # Fast and cheap
max_tokens=500,
messages=[{"role": "user", "content": prompt}],
)
+70 -25
View File
@@ -788,31 +788,38 @@ class AgentRunner:
extra_headers={"authorization": f"Bearer {api_key}"},
)
else:
# Fall back to environment variable
# First check api_key_env_var from config (set by quickstart)
api_key_env = llm_config.get("api_key_env_var") or self._get_api_key_env_var(
self.model
)
if api_key_env and os.environ.get(api_key_env):
# Local models (e.g. Ollama) don't need an API key
if self._is_local_model(self.model):
self._llm = LiteLLMProvider(
model=self.model,
api_key=os.environ[api_key_env],
api_base=api_base,
)
else:
# Fall back to credential store
api_key = self._get_api_key_from_credential_store()
if api_key:
# Fall back to environment variable
# First check api_key_env_var from config (set by quickstart)
api_key_env = llm_config.get("api_key_env_var") or self._get_api_key_env_var(
self.model
)
if api_key_env and os.environ.get(api_key_env):
self._llm = LiteLLMProvider(
model=self.model, api_key=api_key, api_base=api_base
model=self.model,
api_key=os.environ[api_key_env],
api_base=api_base,
)
# Set env var so downstream code (e.g. cleanup LLM in
# node._extract_json) can also find it
if api_key_env:
os.environ[api_key_env] = api_key
elif api_key_env:
print(f"Warning: {api_key_env} not set. LLM calls will fail.")
print(f"Set it with: export {api_key_env}=your-api-key")
else:
# Fall back to credential store
api_key = self._get_api_key_from_credential_store()
if api_key:
self._llm = LiteLLMProvider(
model=self.model, api_key=api_key, api_base=api_base
)
# Set env var so downstream code (e.g. cleanup LLM in
# node._extract_json) can also find it
if api_key_env:
os.environ[api_key_env] = api_key
elif api_key_env:
print(f"Warning: {api_key_env} not set. LLM calls will fail.")
print(f"Set it with: export {api_key_env}=your-api-key")
# Fail fast if the agent needs an LLM but none was configured
if self._llm is None:
@@ -820,6 +827,12 @@ class AgentRunner:
if has_llm_nodes:
from framework.credentials.models import CredentialError
if self._is_local_model(self.model):
raise CredentialError(
f"Failed to initialize LLM for local model '{self.model}'. "
f"Ensure your local LLM server is running "
f"(e.g. 'ollama serve' for Ollama)."
)
api_key_env = self._get_api_key_env_var(self.model)
hint = (
f"Set it with: export {api_key_env}=your-api-key"
@@ -834,19 +847,28 @@ class AgentRunner:
# Collect connected account info for system prompt injection
accounts_prompt = ""
accounts_data: list[dict] | None = None
tool_provider_map: dict[str, str] | None = None
try:
from aden_tools.credentials.store_adapter import CredentialStoreAdapter
adapter = CredentialStoreAdapter.default()
accounts = adapter.get_all_account_info()
if accounts:
accounts_data = adapter.get_all_account_info()
tool_provider_map = adapter.get_tool_provider_map()
if accounts_data:
from framework.graph.prompt_composer import build_accounts_prompt
accounts_prompt = build_accounts_prompt(accounts)
accounts_prompt = build_accounts_prompt(accounts_data, tool_provider_map)
except Exception:
pass # Best-effort — agent works without account info
self._setup_agent_runtime(tools, tool_executor, accounts_prompt=accounts_prompt)
self._setup_agent_runtime(
tools,
tool_executor,
accounts_prompt=accounts_prompt,
accounts_data=accounts_data,
tool_provider_map=tool_provider_map,
)
def _get_api_key_env_var(self, model: str) -> str | None:
"""Get the environment variable name for the API key based on model name."""
@@ -866,8 +888,8 @@ class AgentRunner:
return "MISTRAL_API_KEY"
elif model_lower.startswith("groq/"):
return "GROQ_API_KEY"
elif model_lower.startswith("ollama/"):
return None # Ollama doesn't need an API key (local)
elif self._is_local_model(model_lower):
return None # Local models don't need an API key
elif model_lower.startswith("azure/"):
return "AZURE_API_KEY"
elif model_lower.startswith("cohere/"):
@@ -907,8 +929,29 @@ class AgentRunner:
except Exception:
return None
@staticmethod
def _is_local_model(model: str) -> bool:
"""Check if a model is a local model that doesn't require an API key.
Local providers like Ollama run on the user's machine and do not
need any authentication credentials.
"""
LOCAL_PREFIXES = (
"ollama/",
"ollama_chat/",
"vllm/",
"lm_studio/",
"llamacpp/",
)
return model.lower().startswith(LOCAL_PREFIXES)
def _setup_agent_runtime(
self, tools: list, tool_executor: Callable | None, accounts_prompt: str = ""
self,
tools: list,
tool_executor: Callable | None,
accounts_prompt: str = "",
accounts_data: list[dict] | None = None,
tool_provider_map: dict[str, str] | None = None,
) -> None:
"""Set up multi-entry-point execution using AgentRuntime."""
# Convert AsyncEntryPointSpec to EntryPointSpec for AgentRuntime
@@ -981,6 +1024,8 @@ class AgentRunner:
config=runtime_config,
graph_id=self.graph.id or self.agent_path.name,
accounts_prompt=accounts_prompt,
accounts_data=accounts_data,
tool_provider_map=tool_provider_map,
)
# Pass intro_message through for TUI display
+17
View File
@@ -127,6 +127,8 @@ class AgentRuntime:
checkpoint_config: CheckpointConfig | None = None,
graph_id: str | None = None,
accounts_prompt: str = "",
accounts_data: list[dict] | None = None,
tool_provider_map: dict[str, str] | None = None,
):
"""
Initialize agent runtime.
@@ -143,12 +145,15 @@ class AgentRuntime:
checkpoint_config: Optional checkpoint configuration for resumable sessions
graph_id: Optional identifier for the primary graph (defaults to "primary")
accounts_prompt: Connected accounts block for system prompt injection
accounts_data: Raw account data for per-node prompt generation
tool_provider_map: Tool name to provider name mapping for account routing
"""
self.graph = graph
self.goal = goal
self._config = config or AgentRuntimeConfig()
self._runtime_log_store = runtime_log_store
self._checkpoint_config = checkpoint_config
self.accounts_prompt = accounts_prompt
# Primary graph identity
self._graph_id: str = graph_id or "primary"
@@ -181,6 +186,8 @@ class AgentRuntime:
self._tools = tools or []
self._tool_executor = tool_executor
self._accounts_prompt = accounts_prompt
self._accounts_data = accounts_data
self._tool_provider_map = tool_provider_map
# Entry points and streams (primary graph)
self._entry_points: dict[str, EntryPointSpec] = {}
@@ -277,6 +284,8 @@ class AgentRuntime:
checkpoint_config=self._checkpoint_config,
graph_id=self._graph_id,
accounts_prompt=self._accounts_prompt,
accounts_data=self._accounts_data,
tool_provider_map=self._tool_provider_map,
)
await stream.start()
self._streams[ep_id] = stream
@@ -679,6 +688,8 @@ class AgentRuntime:
checkpoint_config=self._checkpoint_config,
graph_id=graph_id,
accounts_prompt=self._accounts_prompt,
accounts_data=self._accounts_data,
tool_provider_map=self._tool_provider_map,
)
if self._running:
await stream.start()
@@ -1181,6 +1192,8 @@ def create_agent_runtime(
checkpoint_config: CheckpointConfig | None = None,
graph_id: str | None = None,
accounts_prompt: str = "",
accounts_data: list[dict] | None = None,
tool_provider_map: dict[str, str] | None = None,
) -> AgentRuntime:
"""
Create and configure an AgentRuntime with entry points.
@@ -1204,6 +1217,8 @@ def create_agent_runtime(
checkpoint_config: Optional checkpoint configuration for resumable sessions.
If None, uses default checkpointing behavior.
graph_id: Optional identifier for the primary graph (defaults to "primary").
accounts_data: Raw account data for per-node prompt generation.
tool_provider_map: Tool name to provider name mapping for account routing.
Returns:
Configured AgentRuntime (not yet started)
@@ -1227,6 +1242,8 @@ def create_agent_runtime(
checkpoint_config=checkpoint_config,
graph_id=graph_id,
accounts_prompt=accounts_prompt,
accounts_data=accounts_data,
tool_provider_map=tool_provider_map,
)
for spec in entry_points:
@@ -144,6 +144,8 @@ class ExecutionStream:
checkpoint_config: CheckpointConfig | None = None,
graph_id: str | None = None,
accounts_prompt: str = "",
accounts_data: list[dict] | None = None,
tool_provider_map: dict[str, str] | None = None,
):
"""
Initialize execution stream.
@@ -165,6 +167,8 @@ class ExecutionStream:
checkpoint_config: Optional checkpoint configuration for resumable sessions
graph_id: Optional graph identifier for multi-graph sessions
accounts_prompt: Connected accounts block for system prompt injection
accounts_data: Raw account data for per-node prompt generation
tool_provider_map: Tool name to provider name mapping for account routing
"""
self.stream_id = stream_id
self.entry_spec = entry_spec
@@ -184,6 +188,8 @@ class ExecutionStream:
self._checkpoint_config = checkpoint_config
self._session_store = session_store
self._accounts_prompt = accounts_prompt
self._accounts_data = accounts_data
self._tool_provider_map = tool_provider_map
# Create stream-scoped runtime
self._runtime = StreamRuntime(
@@ -463,6 +469,8 @@ class ExecutionStream:
runtime_logger=runtime_logger,
loop_config=self.graph.loop_config,
accounts_prompt=self._accounts_prompt,
accounts_data=self._accounts_data,
tool_provider_map=self._tool_provider_map,
)
# Track executor so inject_input() can reach EventLoopNode instances
self._active_executors[execution_id] = executor
+10 -2
View File
@@ -313,7 +313,6 @@ class OutcomeAggregator:
async def _evaluate_criterion(self, criterion: Any) -> CriterionStatus:
"""
Evaluate a single success criterion.
This is a heuristic evaluation based on decision outcomes.
More sophisticated evaluation can be added per criterion type.
"""
@@ -325,6 +324,11 @@ class OutcomeAggregator:
evidence=[],
)
# Guard: only apply this heuristic to success-rate criteria
criterion_type = getattr(criterion, "type", "success_rate")
if criterion_type != "success_rate":
return status
# Get relevant decisions (those mentioning this criterion or related intents)
relevant_decisions = [
d
@@ -341,13 +345,17 @@ class OutcomeAggregator:
outcomes = [d.outcome for d in relevant_decisions if d.outcome is not None]
if outcomes:
success_count = sum(1 for o in outcomes if o.success)
# Progress is computed as raw success rate of decision outcomes.
status.progress = success_count / len(outcomes)
# Add evidence
for d in relevant_decisions[:5]: # Limit evidence
if d.outcome:
evidence = (
f"{d.decision.intent}: {'success' if d.outcome.success else 'failed'}"
f"decision_id={d.decision.id}, "
f"intent={d.decision.intent}, "
f"result={'success' if d.outcome.success else 'failed'}"
)
status.evidence.append(evidence)
+66 -1
View File
@@ -534,9 +534,74 @@ class AdenTUI(App):
if result is None:
self.exit()
return
self._handle_picker_result(result)
# Show Get Started tab on initial launch
self.push_screen(
AgentPickerScreen(agents, show_get_started=True),
callback=_on_initial_pick,
)
def _handle_picker_result(self, result: str) -> None:
"""Handle the result from the agent picker, including Get Started actions."""
if result.startswith("action:"):
action = result.removeprefix("action:")
if action == "run_examples":
# Switch to Examples tab by re-opening picker focused on examples
self._show_agent_picker_tab("examples")
elif action == "run_existing":
# Switch to Your Agents tab
self._show_agent_picker_tab("your-agents")
elif action == "build_edit":
# Launch agent builder guidance
self._show_build_edit_message()
else:
# Regular agent path - load it
self._do_load_agent(result)
self.push_screen(AgentPickerScreen(agents), callback=_on_initial_pick)
def _show_agent_picker_tab(self, tab_id: str) -> None:
"""Show the agent picker focused on a specific tab (no Get Started)."""
from framework.tui.screens.agent_picker import AgentPickerScreen, discover_agents
agents = discover_agents()
if not agents:
self.notify("No agents found", severity="error", timeout=5)
return
def _on_pick(result: str | None) -> None:
if result is None:
self.exit()
return
if result.startswith("action:"):
# Shouldn't happen but handle gracefully
self._handle_picker_result(result)
else:
self._do_load_agent(result)
screen = AgentPickerScreen(agents, show_get_started=False)
def _focus_tab() -> None:
try:
tabbed = screen.query_one(
"TabbedContent", expect_type=type(screen.query_one("TabbedContent"))
)
tabbed.active = tab_id
except Exception:
pass
self.push_screen(screen, callback=_on_pick)
self.call_later(_focus_tab)
def _show_build_edit_message(self) -> None:
    """Show guidance for building or editing agents.

    Agent building happens outside the TUI, so this only displays a toast
    pointing at the CLI workflow, then returns to the picker.
    """
    self.notify(
        "To build or edit agents, use 'hive build' from the terminal "
        "or run Claude Code with the /hive skill.",
        severity="information",
        timeout=10,
    )
    # Re-show picker so user can still select an agent
    self._show_agent_picker_initial()
def action_show_agent_picker(self) -> None:
"""Open the agent picker (Ctrl+A or /agents)."""
+13
View File
@@ -0,0 +1,13 @@
"""TUI screens package."""
from .account_selection import AccountSelectionScreen
from .add_local_credential import AddLocalCredentialScreen
from .agent_picker import AgentPickerScreen
from .credential_setup import CredentialSetupScreen
__all__ = [
"AccountSelectionScreen",
"AddLocalCredentialScreen",
"AgentPickerScreen",
"CredentialSetupScreen",
]
@@ -66,16 +66,32 @@ class AccountSelectionScreen(ModalScreen[dict | None]):
id="acct-subtitle",
)
option_list = OptionList(id="acct-list")
for i, acct in enumerate(self._accounts):
# Group: Aden accounts first, then local
aden = [a for a in self._accounts if a.get("source") != "local"]
local = [a for a in self._accounts if a.get("source") == "local"]
ordered = aden + local
for i, acct in enumerate(ordered):
provider = acct.get("provider", "unknown")
alias = acct.get("alias", "unknown")
email = acct.get("identity", {}).get("email", "")
identity = acct.get("identity", {})
source = acct.get("source", "aden")
# Build identity label: prefer email, then username/workspace
identity_label = (
identity.get("email")
or identity.get("username")
or identity.get("workspace")
or ""
)
label = Text()
label.append(f"{provider}/", style="bold")
label.append(alias, style="bold cyan")
if email:
label.append(f" ({email})", style="dim")
if source == "local":
label.append(" [local]", style="dim yellow")
if identity_label:
label.append(f" ({identity_label})", style="dim")
option_list.add_option(Option(label, id=f"acct-{i}"))
# Keep ordered list for index lookups
self._accounts = ordered
yield option_list
yield Label(
"[dim]Enter[/dim] Select [dim]Esc[/dim] Cancel",
@@ -0,0 +1,244 @@
"""Add Local Credential ModalScreen for storing named local API key accounts."""
from __future__ import annotations
from textual.app import ComposeResult
from textual.binding import Binding
from textual.containers import Vertical, VerticalScroll
from textual.screen import ModalScreen
from textual.widgets import Button, Input, Label, OptionList
from textual.widgets._option_list import Option
class AddLocalCredentialScreen(ModalScreen[dict | None]):
    """Modal screen for adding a named local API key credential.

    Phase 1: Pick credential type from list.
    Phase 2: Enter alias + API key, run health check, save.

    Returns a dict with credential_id, alias, and identity on success, or None on cancel.
    """

    BINDINGS = [
        Binding("escape", "dismiss_screen", "Cancel"),
    ]

    DEFAULT_CSS = """
    AddLocalCredentialScreen {
        align: center middle;
    }
    #alc-container {
        width: 80%;
        max-width: 90;
        height: 80%;
        background: $surface;
        border: heavy $primary;
        padding: 1 2;
    }
    #alc-title {
        text-align: center;
        text-style: bold;
        width: 100%;
        color: $text;
    }
    #alc-subtitle {
        text-align: center;
        width: 100%;
        margin-bottom: 1;
    }
    #alc-type-list {
        height: 1fr;
    }
    #alc-form {
        height: 1fr;
    }
    .alc-field {
        margin-bottom: 1;
        height: auto;
    }
    .alc-field Label {
        margin-bottom: 0;
    }
    #alc-status {
        width: 100%;
        height: auto;
        margin-top: 1;
        padding: 1;
        background: $panel;
    }
    .alc-buttons {
        height: auto;
        margin-top: 1;
        align: center middle;
    }
    .alc-buttons Button {
        margin: 0 1;
    }
    #alc-footer {
        text-align: center;
        width: 100%;
        margin-top: 1;
    }
    """

    def __init__(self) -> None:
        super().__init__()
        # Load credential specs that support direct API keys
        self._specs: list[tuple[str, object]] = self._load_specs()
        # Selected credential spec (set in phase 2)
        self._selected_id: str = ""
        self._selected_spec: object = None
        self._phase: int = 1  # 1 = type selection, 2 = form

    @staticmethod
    def _load_specs() -> list[tuple[str, object]]:
        """Return (credential_id, spec) pairs for direct-API-key credentials."""
        try:
            from aden_tools.credentials import CREDENTIAL_SPECS

            return [
                (cid, spec)
                for cid, spec in CREDENTIAL_SPECS.items()
                if getattr(spec, "direct_api_key_supported", False)
            ]
        except Exception:
            # If the specs can't be imported, offer no types rather than crash.
            return []

    # ------------------------------------------------------------------
    # Compose
    # ------------------------------------------------------------------

    def compose(self) -> ComposeResult:
        # Both phases are composed up front; _show_phase() toggles visibility.
        with Vertical(id="alc-container"):
            yield Label("Add Local Credential", id="alc-title")
            yield Label("[dim]Store a named API key account[/dim]", id="alc-subtitle")

            # Phase 1: type selection
            option_list = OptionList(id="alc-type-list")
            for cid, spec in self._specs:
                description = getattr(spec, "description", cid)
                option_list.add_option(Option(f"{cid} [dim]{description}[/dim]", id=f"type-{cid}"))
            yield option_list

            # Phase 2: form (hidden initially)
            with VerticalScroll(id="alc-form"):
                with Vertical(classes="alc-field"):
                    yield Label("[bold]Alias[/bold] [dim](e.g. work, personal)[/dim]")
                    yield Input(value="default", id="alc-alias")
                with Vertical(classes="alc-field"):
                    yield Label("[bold]API Key[/bold]")
                    yield Input(placeholder="Paste API key...", password=True, id="alc-key")
                yield Label("", id="alc-status")
                with Vertical(classes="alc-buttons"):
                    yield Button("Test & Save", variant="primary", id="btn-save")
                    yield Button("Back", variant="default", id="btn-back")

            yield Label(
                "[dim]Enter[/dim] Select [dim]Esc[/dim] Cancel",
                id="alc-footer",
            )

    def on_mount(self) -> None:
        # Start in phase 1 (type selection); hides the form widgets.
        self._show_phase(1)

    # ------------------------------------------------------------------
    # Phase switching
    # ------------------------------------------------------------------

    def _show_phase(self, phase: int) -> None:
        """Toggle visibility between the type list (1) and the form (2)."""
        self._phase = phase
        type_list = self.query_one("#alc-type-list", OptionList)
        form = self.query_one("#alc-form", VerticalScroll)
        if phase == 1:
            type_list.display = True
            form.display = False
            subtitle = self.query_one("#alc-subtitle", Label)
            subtitle.update("[dim]Select the credential type to add[/dim]")
        else:
            type_list.display = False
            form.display = True
            # Show the selected credential's id and description in the subtitle.
            spec = self._selected_spec
            description = (
                getattr(spec, "description", self._selected_id) if spec else self._selected_id
            )
            subtitle = self.query_one("#alc-subtitle", Label)
            subtitle.update(f"[dim]{self._selected_id}[/dim] {description}")
            self._clear_status()
            # Focus the alias input
            self.query_one("#alc-alias", Input).focus()

    # ------------------------------------------------------------------
    # Event handlers
    # ------------------------------------------------------------------

    def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None:
        # Only the phase-1 type list produces option selections we care about.
        if self._phase != 1:
            return
        option_id = event.option.id or ""
        if option_id.startswith("type-"):
            cid = option_id[5:]  # strip "type-" prefix
            self._selected_id = cid
            self._selected_spec = next(
                (spec for spec_id, spec in self._specs if spec_id == cid), None
            )
            self._show_phase(2)

    def on_button_pressed(self, event: Button.Pressed) -> None:
        if event.button.id == "btn-save":
            self._do_save()
        elif event.button.id == "btn-back":
            # Return to type selection without saving.
            self._show_phase(1)

    # ------------------------------------------------------------------
    # Save logic
    # ------------------------------------------------------------------

    def _do_save(self) -> None:
        """Validate inputs, persist the credential, and run a health check."""
        alias = self.query_one("#alc-alias", Input).value.strip() or "default"
        api_key = self.query_one("#alc-key", Input).value.strip()
        if not api_key:
            self._set_status("[red]API key cannot be empty.[/red]")
            return

        self._set_status("[dim]Running health check...[/dim]")
        # Disable save button while running
        btn = self.query_one("#btn-save", Button)
        btn.disabled = True
        try:
            from framework.credentials.local.registry import LocalCredentialRegistry

            registry = LocalCredentialRegistry.default()
            # Saves even when the health check fails, so the user can retry later.
            info, health_result = registry.save_account(
                credential_id=self._selected_id,
                alias=alias,
                api_key=api_key,
                run_health_check=True,
            )
            if health_result is not None and not health_result.valid:
                self._set_status(
                    f"[yellow]Saved with failed health check:[/yellow] {health_result.message}\n"
                    "[dim]You can re-validate later via validate_credential().[/dim]"
                )
            else:
                # Build a short "key: value" summary of the resolved identity.
                identity = info.identity.to_dict()
                identity_str = ""
                if identity:
                    parts = [f"{k}: {v}" for k, v in identity.items() if v]
                    identity_str = " " + ", ".join(parts) if parts else ""
                self._set_status(f"[green]Saved:[/green] {info.storage_id}{identity_str}")
                # Dismiss with result so callers can react
                self.set_timer(1.0, lambda: self.dismiss(info.to_account_dict()))
                return
        except Exception as e:
            self._set_status(f"[red]Error:[/red] {e}")
        finally:
            btn.disabled = False

    def _set_status(self, markup: str) -> None:
        """Render markup text into the status area below the form."""
        self.query_one("#alc-status", Label).update(markup)

    def _clear_status(self) -> None:
        self.query_one("#alc-status", Label).update("")

    def action_dismiss_screen(self) -> None:
        # Escape binding: cancel without a result.
        self.dismiss(None)
+71 -1
View File
@@ -4,6 +4,7 @@ from __future__ import annotations
import json
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from rich.console import Group
@@ -16,6 +17,14 @@ from textual.widgets import Label, OptionList, TabbedContent, TabPane
from textual.widgets._option_list import Option
class GetStartedAction(Enum):
"""Actions available in the Get Started tab."""
RUN_EXAMPLES = "run_examples"
RUN_EXISTING = "run_existing"
BUILD_EDIT = "build_edit"
@dataclass
class AgentEntry:
"""Lightweight agent metadata for the picker."""
@@ -167,10 +176,20 @@ def _render_agent_option(agent: AgentEntry) -> Group:
return Group(*parts)
def _render_get_started_option(title: str, description: str, icon: str = "") -> Group:
"""Build a Rich renderable for a Get Started option."""
line1 = Text()
line1.append(f"{icon} ", style="bold cyan")
line1.append(title, style="bold")
line2 = Text(description, style="dim")
return Group(line1, line2)
class AgentPickerScreen(ModalScreen[str | None]):
"""Modal screen showing available agents organized by tabbed categories.
Returns the selected agent path as a string, or None if dismissed.
For Get Started actions, returns a special prefix like "action:run_examples".
"""
BINDINGS = [
@@ -216,9 +235,14 @@ class AgentPickerScreen(ModalScreen[str | None]):
}
"""
def __init__(self, agent_groups: dict[str, list[AgentEntry]]) -> None:
def __init__(
self,
agent_groups: dict[str, list[AgentEntry]],
show_get_started: bool = False,
) -> None:
super().__init__()
self._groups = agent_groups
self._show_get_started = show_get_started
# Map (tab_id, option_index) -> AgentEntry
self._option_map: dict[str, dict[int, AgentEntry]] = {}
@@ -231,6 +255,43 @@ class AgentPickerScreen(ModalScreen[str | None]):
id="picker-subtitle",
)
with TabbedContent():
# Get Started tab (only on initial launch)
if self._show_get_started:
with TabPane("Get Started", id="get-started"):
option_list = OptionList(id="list-get-started")
option_list.add_option(
Option(
_render_get_started_option(
"Test and run example agents",
"Try pre-built example agents to learn how Hive works",
"📚",
),
id="action:run_examples",
)
)
option_list.add_option(
Option(
_render_get_started_option(
"Test and run existing agent",
"Load and run an agent you've already built (from exports/)",
"🚀",
),
id="action:run_existing",
)
)
option_list.add_option(
Option(
_render_get_started_option(
"Build or edit agent",
"Create a new agent or modify an existing one",
"🛠️ ",
),
id="action:build_edit",
)
)
yield option_list
# Agent category tabs
for category, agents in self._groups.items():
tab_id = category.lower().replace(" ", "-")
with TabPane(f"{category} ({len(agents)})", id=tab_id):
@@ -252,6 +313,15 @@ class AgentPickerScreen(ModalScreen[str | None]):
def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None:
list_id = event.option_list.id or ""
# Handle Get Started tab options
if list_id == "list-get-started":
option = event.option
if option and option.id:
self.dismiss(option.id) # Returns "action:run_examples", etc.
return
# Handle agent selection from other tabs
idx = event.option_index
agent_map = self._option_map.get(list_id, {})
agent = agent_map.get(idx)
+49
View File
@@ -826,3 +826,52 @@ class TestAsyncComplete:
assert call_thread_ids[0] != main_thread_id, (
"Base acomplete() should offload sync complete() to a thread pool"
)
# ---------------------------------------------------------------------------
# AgentRunner._is_local_model — parameterized tests
# ---------------------------------------------------------------------------
class TestIsLocalModel:
    """Parameterized tests for AgentRunner._is_local_model()."""

    @pytest.mark.parametrize(
        "model",
        [
            "ollama/llama3",
            "ollama/mistral",
            "ollama_chat/llama3",
            "vllm/mistral",
            "lm_studio/phi3",
            "llamacpp/llama-7b",
            "Ollama/Llama3",  # case-insensitive
            "VLLM/Mistral",
        ],
    )
    def test_local_models_return_true(self, model):
        """Local model prefixes should be recognized."""
        # Imported inside the test body - presumably so collecting this module
        # doesn't require the runner's import-time dependencies; confirm.
        from framework.runner.runner import AgentRunner

        assert AgentRunner._is_local_model(model) is True

    @pytest.mark.parametrize(
        "model",
        [
            "anthropic/claude-3-haiku",
            "openai/gpt-4o",
            "gpt-4o-mini",
            "claude-3-haiku-20240307",
            "gemini/gemini-1.5-flash",
            "groq/llama3-70b",
            "mistral/mistral-large",
            "azure/gpt-4",
            "cohere/command-r",
            "together/llama3-70b",
        ],
    )
    def test_cloud_models_return_false(self, model):
        """Cloud model prefixes should not be treated as local."""
        from framework.runner.runner import AgentRunner

        assert AgentRunner._is_local_model(model) is False
@@ -47,6 +47,7 @@ Call gmail_list_labels() to show the user their current Gmail labels. This helps
- set_output("rules", <the confirmed rules as a clear text description>)
- set_output("max_emails", <the confirmed max_emails as a string number, e.g. "100">)
""",
tools=["gmail_list_labels"],
)
@@ -71,23 +72,25 @@ fetch_emails_node = NodeSpec(
You are a data pipeline step. Your job is to fetch emails from Gmail and write them to emails.jsonl.
**FIRST-TIME FETCH (default path):**
1. Read "max_emails" from input context.
1. Read "max_emails" and "rules" from input context.
2. Call bulk_fetch_emails(max_emails=<value>).
3. The tool returns {"filename": "emails.jsonl"}.
4. Call set_output("emails", "emails.jsonl").
**NEXT-BATCH FETCH (when user asks for "the next N" emails):**
The user wants emails BEYOND what was already fetched. Use pagination:
1. Call gmail_list_messages(query="label:INBOX", max_results=<previous + new count>) to get message IDs. Use page_token if needed to paginate past already-fetched emails.
2. Identify message IDs NOT in the previous batch (you remember them from continuous conversation).
3. Call gmail_batch_get_messages(message_ids=<new_ids>, format="metadata") for full metadata.
4. For each message in the result, call append_data(filename="emails.jsonl", data=<JSON: {id, subject, from, to, date, snippet, labels}>).
1. Call gmail_list_messages(query="label:INBOX", max_results=<previous + new count>).
Use page_token if needed to paginate past already-fetched emails.
2. Identify message IDs NOT in the previous batch.
3. Call gmail_batch_get_messages(message_ids=<new_ids>, format="metadata").
4. For each message, call append_data(filename="emails.jsonl",
data=<JSON: {id, subject, from, to, date, snippet, labels}>).
5. Call set_output("emails", "emails.jsonl").
**TOOLS:**
- bulk_fetch_emails(max_emails) Bulk fetch from inbox, writes emails.jsonl. Use for first fetch.
- gmail_list_messages(query, max_results, page_token) List message IDs with pagination. Returns {messages, next_page_token}.
- gmail_batch_get_messages(message_ids, format) Fetch metadata for specific IDs (max 50 per call).
- bulk_fetch_emails(max_emails) Bulk fetch from inbox, writes emails.jsonl.
- gmail_list_messages(query, max_results, page_token) List message IDs.
- gmail_batch_get_messages(message_ids, format) Fetch metadata (max 50/call).
- append_data(filename, data) Append a line to a JSONL file.
Do NOT add commentary or explanation. Execute the appropriate path and call set_output when done.
@@ -118,19 +121,20 @@ classify_and_act_node = NodeSpec(
You are an inbox management assistant. Apply the user's rules to their emails and execute Gmail actions.
**YOUR TOOLS:**
- load_data(filename, limit, offset) Read emails from a local file. This is how you access the emails.
- append_data(filename, data) Append a line to a file. Use this to record actions taken.
- gmail_batch_modify_messages(message_ids, add_labels, remove_labels) Modify Gmail labels in batch. ALWAYS prefer this.
- load_data(filename, limit, offset) Read emails from a local file.
- append_data(filename, data) Append a line to a file. Record actions taken.
- gmail_batch_modify_messages(message_ids, add_labels, remove_labels) Modify labels in batch. ALWAYS prefer this.
- gmail_modify_message(message_id, add_labels, remove_labels) Modify a single message's labels.
- gmail_trash_message(message_id) Move a message to trash. No batch version; call per email.
- gmail_trash_message(message_id) Move a message to trash.
- gmail_create_draft(to, subject, body) Create a draft reply. NEVER sends automatically.
- gmail_create_label(name) Create a new Gmail label. Returns the label ID.
- gmail_list_labels() List all existing Gmail labels with their IDs.
- set_output(key, value) Set an output value. Call ONLY after all actions are executed.
**CONTEXT:**
- "rules" = the user's rule to apply (e.g. "mark all as unread")
- "emails" = a filename (e.g. "emails.jsonl") containing the fetched emails as JSONL. Each line has: id, subject, from, to, date, snippet, labels.
- "rules" = the user's rule to apply (e.g. "mark all as unread").
- "emails" = a filename (e.g. "emails.jsonl") containing the fetched emails as JSONL.
Each line has: id, subject, from, to, date, snippet, labels.
**PROCESS EMAILS ONE CHUNK AT A TIME (you will get multiple turns):**
@@ -41,6 +41,13 @@ TOOLS = {
"type": "string",
"description": "Maximum number of emails to fetch (default '100')",
},
"account": {
"type": "string",
"description": (
"Account alias to use (e.g. 'timothy-home'). "
"Required when multiple Google accounts are connected."
),
},
},
"required": [],
},
@@ -64,8 +71,13 @@ def _get_data_dir() -> str:
return ctx["data_dir"]
def _get_access_token() -> str:
"""Get Google OAuth access token from credential store."""
def _get_access_token(account: str = "") -> str:
"""Get Google OAuth access token from credential store.
Args:
account: Account alias (e.g. 'timothy-home'). When provided,
resolves the token for that specific account.
"""
import os
# Try credential store first (same pattern as gmail_tool.py)
@@ -73,7 +85,10 @@ def _get_access_token() -> str:
from aden_tools.credentials import CredentialStoreAdapter
credentials = CredentialStoreAdapter.default()
token = credentials.get("google")
if account:
token = credentials.get_by_alias("google", account)
else:
token = credentials.get("google")
if token:
return token
except Exception:
@@ -105,17 +120,21 @@ def _parse_headers(headers: list[dict]) -> dict[str, str]:
# ---------------------------------------------------------------------------
def _bulk_fetch_emails(max_emails: str = "100") -> str:
def _bulk_fetch_emails(max_emails: str = "100", account: str = "") -> str:
"""Fetch inbox emails and write them to emails.jsonl.
Uses synchronous httpx.Client since this runs as a tool call inside
an already-running async event loop.
Args:
max_emails: Maximum number of emails to fetch.
account: Account alias (e.g. 'timothy-home') for multi-account routing.
Returns:
The filename "emails.jsonl" (written to session data_dir).
"""
max_count = int(max_emails) if max_emails else 100
access_token = _get_access_token()
access_token = _get_access_token(account)
data_dir = _get_data_dir()
Path(data_dir).mkdir(parents=True, exist_ok=True)
@@ -237,7 +256,8 @@ def tool_executor(tool_use: ToolUse) -> ToolResult:
if tool_use.name == "bulk_fetch_emails":
try:
max_emails = tool_use.input.get("max_emails", "100")
filename = _bulk_fetch_emails(max_emails=max_emails)
account = tool_use.input.get("account", "")
filename = _bulk_fetch_emails(max_emails=max_emails, account=account)
return ToolResult(
tool_use_id=tool_use.id,
content=json.dumps({"filename": filename}),
+25 -11
View File
@@ -748,9 +748,14 @@ if [ ${#FOUND_PROVIDERS[@]} -gt 0 ]; then
echo -e " ${CYAN}$i)${NC} $provider"
i=$((i + 1))
done
ZAI_CHOICE=$i
echo -e " ${CYAN}$i)${NC} ZAI Code Subscription ${DIM}(use your ZAI Code plan)${NC}"
i=$((i + 1))
# Only show ZAI Code Subscription if the API key already exists
if [ -n "${ZAI_API_KEY:-}" ]; then
ZAI_CHOICE=$i
echo -e " ${CYAN}$i)${NC} ZAI Code Subscription ${DIM}(use your ZAI Code plan)${NC}"
i=$((i + 1))
else
ZAI_CHOICE=-1 # invalid choice, won't match
fi
echo -e " ${CYAN}$i)${NC} Other"
max_choice=$i
echo ""
@@ -1203,18 +1208,27 @@ if [ "$CODEX_AVAILABLE" = true ]; then
echo ""
fi
# Prompt user to source shell config or start new terminal
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${BOLD}⚠️ IMPORTANT: Load your new configuration${NC}"
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""
echo -e " Your API keys have been saved to ${CYAN}$SHELL_RC_FILE${NC}"
echo -e " To use them, either:"
echo ""
echo -e " ${GREEN}Option 1:${NC} Source your shell config now:"
echo -e " ${CYAN}source $SHELL_RC_FILE${NC}"
echo ""
echo -e " ${GREEN}Option 2:${NC} Open a new terminal window"
echo ""
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""
echo -e "${BOLD}Run an Agent:${NC}"
echo ""
echo -e " Launch the interactive dashboard to browse and run agents:"
echo -e " You can start a example agent or an agent built by yourself:"
echo -e " You can start an example agent or an agent built by yourself:"
echo -e " ${CYAN}hive tui${NC}"
echo ""
# Show shell sourcing reminder if we added environment variables
if [ -n "$SELECTED_PROVIDER_ID" ] || [ -n "$HIVE_CREDENTIAL_KEY" ]; then
echo -e "${BOLD}Note:${NC} To use the new environment variables in this shell, run:"
echo -e " ${CYAN}source $SHELL_RC_FILE${NC}"
echo ""
fi
echo -e "${DIM}Run ./quickstart.sh again to reconfigure.${NC}"
echo ""
+2
View File
@@ -102,6 +102,7 @@ python mcp_server.py
| ---- | ----------- |
| `web_search` | Search the web (Google or Brave, auto-detected) |
| `web_scrape` | Scrape and extract content from webpages |
| `search_wikipedia` | Search Wikipedia for pages and summaries |
| `scholar_search`, `scholar_get_citations`, `scholar_get_author` | Search academic papers, get citations and author profiles via SerpAPI |
| `patents_search`, `patents_get_details` | Search patents and retrieve patent details via SerpAPI |
| `exa_search`, `exa_answer`, `exa_find_similar`, `exa_get_contents` | Semantic search and content retrieval via Exa AI |
@@ -185,6 +186,7 @@ tools/
│ ├── web_search_tool/
│ ├── web_scrape_tool/
│ ├── pdf_read_tool/
│ ├── wikipedia_tool/
│ ├── time_tool/
│ └── calendar_tool/
├── tests/ # Test suite
+11 -1
View File
@@ -56,6 +56,7 @@ To add a new credential:
from .apollo import APOLLO_CREDENTIALS
from .base import CredentialError, CredentialSpec
from .bigquery import BIGQUERY_CREDENTIALS
from .brevo import BREVO_CREDENTIALS
from .browser import get_aden_auth_url, get_aden_setup_url, open_browser
from .calcom import CALCOM_CREDENTIALS
from .discord import DISCORD_CREDENTIALS
@@ -65,7 +66,12 @@ from .github import GITHUB_CREDENTIALS
from .google_calendar import GOOGLE_CALENDAR_CREDENTIALS
from .google_docs import GOOGLE_DOCS_CREDENTIALS
from .google_maps import GOOGLE_MAPS_CREDENTIALS
from .health_check import HealthCheckResult, check_credential_health
from .health_check import (
BaseHttpHealthChecker,
HealthCheckResult,
check_credential_health,
validate_integration_wiring,
)
from .hubspot import HUBSPOT_CREDENTIALS
from .llm import LLM_CREDENTIALS
from .news import NEWS_CREDENTIALS
@@ -105,6 +111,7 @@ CREDENTIAL_SPECS = {
**BIGQUERY_CREDENTIALS,
**CALCOM_CREDENTIALS,
**STRIPE_CREDENTIALS,
**BREVO_CREDENTIALS,
**POSTGRES_CREDENTIALS,
}
@@ -116,8 +123,10 @@ __all__ = [
# Credential store adapter (replaces deprecated CredentialManager)
"CredentialStoreAdapter",
# Health check utilities
"BaseHttpHealthChecker",
"HealthCheckResult",
"check_credential_health",
"validate_integration_wiring",
# Browser utilities for OAuth2 flows
"open_browser",
"get_aden_auth_url",
@@ -149,5 +158,6 @@ __all__ = [
"CALCOM_CREDENTIALS",
"DISCORD_CREDENTIALS",
"STRIPE_CREDENTIALS",
"BREVO_CREDENTIALS",
"POSTGRES_CREDENTIALS",
]
+42
View File
@@ -0,0 +1,42 @@
"""
Brevo tool credentials.
Contains credentials for Brevo (formerly Sendinblue) transactional email,
SMS, and contact management integration.
"""
from .base import CredentialSpec
# Spec-registry fragment: merged into the package-wide CREDENTIAL_SPECS mapping.
BREVO_CREDENTIALS = {
    "brevo": CredentialSpec(
        # Environment variable that can supply the key directly.
        env_var="BREVO_API_KEY",
        # Tools that depend on this credential.
        tools=[
            "brevo_send_email",
            "brevo_send_sms",
            "brevo_create_contact",
            "brevo_get_contact",
            "brevo_update_contact",
        ],
        required=True,
        startup_required=False,
        help_url="https://app.brevo.com/settings/keys/api",
        description="Brevo API key for transactional email, SMS, and contact management",
        # Auth method support
        aden_supported=False,
        direct_api_key_supported=True,
        api_key_instructions="""To get a Brevo API key:
1. Go to https://app.brevo.com and create an account (or sign in)
2. Navigate to Settings > API Keys (or visit https://app.brevo.com/settings/keys/api)
3. Click "Generate a new API key"
4. Give it a name (e.g., "Hive Agent")
5. Copy the API key (starts with xkeysib-)
6. Store it securely - you won't be able to see it again!
7. Note: For sending emails, you'll need a verified sender domain or email""",
        # Health check configuration
        # NOTE(review): Brevo's v3 API authenticates via an `api-key` request
        # header rather than a Bearer token - confirm the generic health
        # checker sends the correct header for this endpoint.
        health_check_endpoint="https://api.brevo.com/v3/account",
        health_check_method="GET",
        # Credential store mapping
        credential_id="brevo",
        credential_key="api_key",
    ),
}
@@ -239,6 +239,178 @@ class OAuthBearerHealthChecker:
)
class BaseHttpHealthChecker:
    """Configurable base class for HTTP-based credential health checkers.

    Reduces boilerplate by handling the common HTTP request/response/error pattern.
    Subclasses configure via class constants and override hooks as needed.

    Supports five auth patterns:
    - AUTH_BEARER: Authorization: Bearer <token>
    - AUTH_HEADER: Custom header name/value template
    - AUTH_QUERY: Token as query parameter
    - AUTH_BASIC: HTTP Basic Authentication
    - AUTH_URL: Token embedded in URL (e.g., Telegram)

    Example::

        class CalcomHealthChecker(BaseHttpHealthChecker):
            ENDPOINT = "https://api.cal.com/v1/me"
            SERVICE_NAME = "Cal.com"
            AUTH_TYPE = "query"
            AUTH_QUERY_PARAM_NAME = "apiKey"
    """

    # Auth pattern constants
    AUTH_BEARER = "bearer"
    AUTH_HEADER = "header"
    AUTH_QUERY = "query"
    AUTH_BASIC = "basic"
    AUTH_URL = "url"

    # Subclass configuration
    ENDPOINT: str = ""
    SERVICE_NAME: str = ""
    HTTP_METHOD: str = "GET"  # check() supports GET and POST only
    TIMEOUT: float = 10.0  # seconds, for the whole request

    # Auth configuration
    AUTH_TYPE: str = AUTH_BEARER
    AUTH_HEADER_NAME: str = "Authorization"
    AUTH_HEADER_TEMPLATE: str = "Bearer {token}"
    AUTH_QUERY_PARAM_NAME: str = "key"

    # Status code interpretation
    VALID_STATUSES: frozenset[int] = frozenset({200})
    RATE_LIMITED_STATUSES: frozenset[int] = frozenset({429})
    # Statuses that are API-level errors but still prove the credential
    # authenticated (empty by default; subclasses opt in).
    AUTHENTICATED_ERROR_STATUSES: frozenset[int] = frozenset()
    INVALID_STATUSES: frozenset[int] = frozenset({401})
    FORBIDDEN_STATUSES: frozenset[int] = frozenset({403})

    # ------------------------------------------------------------------
    # Request-building hooks (override per service as needed)
    # ------------------------------------------------------------------

    def _build_url(self, credential_value: str) -> str:
        """Build request URL. Override for URL-template auth."""
        return self.ENDPOINT

    def _build_headers(self, credential_value: str) -> dict[str, str]:
        """Build request headers based on AUTH_TYPE."""
        headers: dict[str, str] = {"Accept": "application/json"}
        if self.AUTH_TYPE == self.AUTH_BEARER:
            headers["Authorization"] = f"Bearer {credential_value}"
        elif self.AUTH_TYPE == self.AUTH_HEADER:
            headers[self.AUTH_HEADER_NAME] = self.AUTH_HEADER_TEMPLATE.format(
                token=credential_value
            )
        return headers

    def _build_params(self, credential_value: str) -> dict[str, str]:
        """Build query parameters. Includes auth param for AUTH_QUERY type."""
        if self.AUTH_TYPE == self.AUTH_QUERY:
            return {self.AUTH_QUERY_PARAM_NAME: credential_value}
        return {}

    def _build_auth(self, credential_value: str) -> tuple[str, str] | None:
        """Build HTTP Basic auth tuple for AUTH_BASIC type."""
        if self.AUTH_TYPE == self.AUTH_BASIC:
            # Credential as username with empty password (API-key-as-user scheme).
            return (credential_value, "")
        return None

    def _build_json_body(self, credential_value: str) -> dict | None:
        """Build JSON request body. Override for POST requests that need one."""
        return None

    def _extract_identity(self, data: dict) -> dict[str, str]:
        """Extract identity info from successful response. Override in subclass."""
        return {}

    # ------------------------------------------------------------------
    # Response interpretation
    # ------------------------------------------------------------------

    def _interpret_response(self, response: httpx.Response) -> HealthCheckResult:
        """Interpret HTTP response. Override for non-standard status logic."""
        status = response.status_code
        if status in self.VALID_STATUSES:
            identity: dict[str, str] = {}
            try:
                data = response.json()
                identity = self._extract_identity(data)
            except Exception:
                # Non-JSON body or extraction failure: the success status
                # alone is enough to declare the credential valid.
                pass
            return HealthCheckResult(
                valid=True,
                message=f"{self.SERVICE_NAME} credentials valid",
                details={"identity": identity} if identity else {},
            )
        elif status in self.RATE_LIMITED_STATUSES:
            # Being rate limited still proves the credential authenticated.
            return HealthCheckResult(
                valid=True,
                message=f"{self.SERVICE_NAME} credentials valid (rate limited)",
                details={"status_code": status, "rate_limited": True},
            )
        elif status in self.AUTHENTICATED_ERROR_STATUSES:
            return HealthCheckResult(
                valid=True,
                message=f"{self.SERVICE_NAME} credentials valid",
                details={"status_code": status},
            )
        elif status in self.INVALID_STATUSES:
            return HealthCheckResult(
                valid=False,
                message=f"{self.SERVICE_NAME} credentials are invalid or expired",
                details={"status_code": status},
            )
        elif status in self.FORBIDDEN_STATUSES:
            return HealthCheckResult(
                valid=False,
                message=f"{self.SERVICE_NAME} credentials lack required permissions",
                details={"status_code": status},
            )
        else:
            return HealthCheckResult(
                valid=False,
                message=f"{self.SERVICE_NAME} API returned status {status}",
                details={"status_code": status},
            )

    # ------------------------------------------------------------------
    # Entry point
    # ------------------------------------------------------------------

    def check(self, credential_value: str) -> HealthCheckResult:
        """Execute the health check. Normally not overridden.

        Args:
            credential_value: The raw API key / token to validate.

        Returns:
            HealthCheckResult describing validity. Network failures are
            reported in the result, never raised.
        """
        try:
            url = self._build_url(credential_value)
            headers = self._build_headers(credential_value)
            params = self._build_params(credential_value)
            auth = self._build_auth(credential_value)
            json_body = self._build_json_body(credential_value)

            with httpx.Client(timeout=self.TIMEOUT) as client:
                kwargs: dict[str, Any] = {"headers": headers}
                if params:
                    kwargs["params"] = params
                if auth:
                    kwargs["auth"] = auth
                if json_body is not None:
                    kwargs["json"] = json_body
                if self.HTTP_METHOD.upper() == "POST":
                    response = client.post(url, **kwargs)
                else:
                    response = client.get(url, **kwargs)

            return self._interpret_response(response)

        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message=f"{self.SERVICE_NAME} API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            error_msg = str(e)
            # Fix: for AUTH_QUERY and AUTH_URL the raw credential is embedded
            # in the request URL, and httpx includes that URL in its error
            # messages. The keyword heuristic below misses it, so scrub the
            # secret itself first.
            if credential_value and credential_value in error_msg:
                error_msg = error_msg.replace(credential_value, "[REDACTED]")
            if any(s in error_msg for s in ("Bearer", "Authorization", "api_key", "token")):
                error_msg = "Request failed (details redacted for security)"
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to {self.SERVICE_NAME}: {error_msg}",
                details={"error": error_msg},
            )
class GoogleCalendarHealthChecker(OAuthBearerHealthChecker):
"""Health checker for Google Calendar OAuth tokens."""
@@ -740,6 +912,152 @@ class GoogleGmailHealthChecker(OAuthBearerHealthChecker):
return {"email": email} if email else {}
# --- New checkers using BaseHttpHealthChecker ---
class StripeHealthChecker(BaseHttpHealthChecker):
    """Health checker for Stripe API key.

    Relies entirely on the base class defaults: GET with
    ``Authorization: Bearer <key>`` against the balance endpoint.
    """

    ENDPOINT = "https://api.stripe.com/v1/balance"
    SERVICE_NAME = "Stripe"
class ExaSearchHealthChecker(BaseHttpHealthChecker):
    """Health checker for Exa Search API key.

    POSTs a minimal one-result search using the base class's default
    bearer auth; the auth outcome, not the search result, is what matters.
    """

    ENDPOINT = "https://api.exa.ai/search"
    SERVICE_NAME = "Exa Search"
    HTTP_METHOD = "POST"

    def _build_json_body(self, credential_value: str) -> dict:
        # Smallest valid search payload.
        return {"query": "test", "numResults": 1}
class GoogleDocsHealthChecker(OAuthBearerHealthChecker):
    """Health checker for Google Docs OAuth tokens.

    Delegates to OAuthBearerHealthChecker with a fixed probe endpoint.
    NOTE(review): the probe targets document id "1", which the token likely
    cannot read - presumably the base checker distinguishes auth-level
    failures (401) from resource errors; confirm against
    OAuthBearerHealthChecker's status handling.
    """

    def __init__(self):
        super().__init__(
            endpoint="https://docs.googleapis.com/v1/documents/1",
            service_name="Google Docs",
        )
class CalcomHealthChecker(BaseHttpHealthChecker):
    """Health checker for Cal.com API key.

    Cal.com v1 expects the key as an ``apiKey`` query parameter rather than
    an Authorization header, so the checker switches the base class to
    query-parameter auth.
    """

    ENDPOINT = "https://api.cal.com/v1/me"
    SERVICE_NAME = "Cal.com"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
    AUTH_QUERY_PARAM_NAME = "apiKey"
class SerpApiHealthChecker(BaseHttpHealthChecker):
    """Health checker for SerpAPI key.

    SerpAPI authenticates via an ``api_key`` query parameter; the account
    endpoint returns account metadata without consuming a search.
    """

    ENDPOINT = "https://serpapi.com/account.json"
    SERVICE_NAME = "SerpAPI"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
    AUTH_QUERY_PARAM_NAME = "api_key"
class ApolloHealthChecker(BaseHttpHealthChecker):
    """Health checker for Apollo.io API key.

    Uses Apollo's dedicated auth-health endpoint with the key passed as an
    ``api_key`` query parameter.
    """

    ENDPOINT = "https://api.apollo.io/v1/auth/health"
    SERVICE_NAME = "Apollo"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
    AUTH_QUERY_PARAM_NAME = "api_key"
class TelegramHealthChecker(BaseHttpHealthChecker):
    """Health checker for Telegram bot tokens.

    Calls the Bot API ``getMe`` method with the token embedded in the URL
    path and inspects Telegram's ``ok`` envelope to decide validity.
    """

    SERVICE_NAME = "Telegram"
    # The token travels inside the URL, not in a header or query parameter.
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_URL

    def _build_url(self, credential_value: str) -> str:
        return f"https://api.telegram.org/bot{credential_value}/getMe"

    def _build_headers(self, credential_value: str) -> dict[str, str]:
        # No auth header required; only ask for a JSON response.
        return {"Accept": "application/json"}

    def _interpret_response(self, response: httpx.Response) -> HealthCheckResult:
        status = response.status_code
        if status == 401:
            return HealthCheckResult(
                valid=False,
                message="Telegram bot token is invalid",
                details={"status_code": 401},
            )
        if status != 200:
            return HealthCheckResult(
                valid=False,
                message=f"Telegram API returned status {status}",
                details={"status_code": status},
            )
        try:
            payload = response.json()
            if not payload.get("ok"):
                return HealthCheckResult(
                    valid=False,
                    message="Telegram bot token is invalid",
                    details={"telegram_error": payload.get("description", "")},
                )
            username = payload.get("result", {}).get("username", "unknown")
            identity = {"username": username} if username != "unknown" else {}
            return HealthCheckResult(
                valid=True,
                message=f"Telegram bot token valid (bot: @{username})",
                details={"identity": identity},
            )
        except Exception:
            # HTTP 200 with an unparseable body: keep the original
            # best-effort behavior and report the credential as valid.
            return HealthCheckResult(
                valid=True,
                message="Telegram credentials valid",
            )
class NewsdataHealthChecker(BaseHttpHealthChecker):
    """Health checker for Newsdata.io API key.

    Auth is an ``apikey`` query parameter; the news endpoint additionally
    requires a search term, which ``_build_params`` supplies.
    """

    ENDPOINT = "https://newsdata.io/api/1/news"
    SERVICE_NAME = "Newsdata"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
    AUTH_QUERY_PARAM_NAME = "apikey"

    def _build_params(self, credential_value: str) -> dict[str, str]:
        # Merge the base auth params with the mandatory query term.
        return {**super()._build_params(credential_value), "q": "test"}
class FinlightHealthChecker(BaseHttpHealthChecker):
    """Health checker for Finlight API key.

    Only the probe endpoint and display name are customized; HTTP method
    and auth handling come from the BaseHttpHealthChecker defaults.
    """

    # Authenticated news endpoint used purely to validate the key.
    ENDPOINT = "https://api.finlight.me/v1/news"
    SERVICE_NAME = "Finlight"
class BrevoHealthChecker(BaseHttpHealthChecker):
    """Health checker for Brevo API key.

    Brevo uses a bare ``api-key`` header (no ``Bearer`` prefix), and the
    account endpoint exposes the owner's email / company for identity.
    """

    ENDPOINT = "https://api.brevo.com/v3/account"
    SERVICE_NAME = "Brevo"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_HEADER
    AUTH_HEADER_NAME = "api-key"
    AUTH_HEADER_TEMPLATE = "{token}"

    def _extract_identity(self, data: dict) -> dict[str, str]:
        # Map Brevo account fields onto our identity keys, skipping blanks.
        field_map = (("email", "email"), ("companyName", "company"))
        return {label: data[src] for src, label in field_map if data.get(src)}
# Registry of health checkers
HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
"discord": DiscordHealthChecker(),
@@ -753,6 +1071,16 @@ HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
"anthropic": AnthropicHealthChecker(),
"github": GitHubHealthChecker(),
"resend": ResendHealthChecker(),
"stripe": StripeHealthChecker(),
"exa_search": ExaSearchHealthChecker(),
"google_docs": GoogleDocsHealthChecker(),
"calcom": CalcomHealthChecker(),
"serpapi": SerpApiHealthChecker(),
"apollo": ApolloHealthChecker(),
"telegram": TelegramHealthChecker(),
"newsdata": NewsdataHealthChecker(),
"finlight": FinlightHealthChecker(),
"brevo": BrevoHealthChecker(),
}
@@ -807,3 +1135,80 @@ def check_credential_health(
return checker.check(credential_value, kwargs["cse_id"])
return checker.check(credential_value)
def validate_integration_wiring(credential_name: str) -> list[str]:
    """Check that a credential integration is fully wired up.

    Returns a list of issues found. Empty list means everything is correct.

    Use during development to verify a new integration has all required pieces:
    CredentialSpec, health checker, endpoint consistency, and required fields.

    Args:
        credential_name: The credential name to validate (e.g., 'jira').

    Returns:
        List of issue descriptions. Empty if fully wired.

    Example::

        issues = validate_integration_wiring("stripe")
        for issue in issues:
            print(f" - {issue}")
    """
    from . import CREDENTIAL_SPECS

    problems: list[str] = []

    # The spec is the anchor for everything else; without it, stop here.
    spec = CREDENTIAL_SPECS.get(credential_name)
    if spec is None:
        problems.append(
            f"No CredentialSpec for '{credential_name}' in CREDENTIAL_SPECS. "
            f"Add it to the appropriate category file and import in __init__.py."
        )
        return problems

    # Required descriptive fields on the spec itself.
    if not spec.env_var:
        problems.append("CredentialSpec.env_var is empty")
    if not spec.description:
        problems.append("CredentialSpec.description is empty")
    if not spec.tools and not spec.node_types:
        problems.append("CredentialSpec has no tools or node_types")
    if not spec.help_url:
        problems.append("CredentialSpec.help_url is empty (users need this to get credentials)")
    if spec.direct_api_key_supported and not spec.api_key_instructions:
        problems.append(
            "CredentialSpec.api_key_instructions is empty but direct_api_key_supported=True"
        )

    # Health-check wiring: the spec's endpoint and any registered checker
    # must agree (comparing with query strings stripped).
    if not spec.health_check_endpoint:
        problems.append(
            "CredentialSpec.health_check_endpoint is empty. "
            "Add a lightweight API endpoint for credential validation."
        )
        return problems

    checker = HEALTH_CHECKERS.get(credential_name)
    if checker is None:
        problems.append(
            f"No entry in HEALTH_CHECKERS for '{credential_name}'. "
            f"The OAuthBearerHealthChecker fallback will be used. "
            f"Add a dedicated checker if auth is not Bearer token."
        )
        return problems

    # Checkers store their endpoint either as a class constant or instance attr.
    registered_endpoint = getattr(checker, "ENDPOINT", None) or getattr(
        checker, "endpoint", None
    )
    if registered_endpoint and spec.health_check_endpoint:
        spec_base = spec.health_check_endpoint.split("?")[0]
        checker_base = str(registered_endpoint).split("?")[0]
        if spec_base != checker_base:
            problems.append(
                f"Endpoint mismatch: spec='{spec.health_check_endpoint}' "
                f"vs checker='{registered_endpoint}'"
            )

    return problems
@@ -85,7 +85,7 @@ class CredentialStoreAdapter:
# --- Existing CredentialManager API ---
def get(self, name: str) -> str | None:
def get(self, name: str, account: str | None = None) -> str | None:
"""
Get a credential value by logical name.
@@ -94,6 +94,10 @@ class CredentialStoreAdapter:
Args:
name: Logical credential name (e.g., "brave_search")
account: Optional alias for per-call routing to a specific named local
account (e.g. "work"). When provided, looks up the named account
from LocalCredentialRegistry before falling through to the store.
This mirrors the ``account=`` routing available for Aden credentials.
Returns:
The credential value, or None if not set
@@ -104,6 +108,16 @@ class CredentialStoreAdapter:
if name not in self._specs:
raise KeyError(f"Unknown credential '{name}'. Available: {list(self._specs.keys())}")
if account is not None:
try:
from framework.credentials.local.registry import LocalCredentialRegistry
key = LocalCredentialRegistry.default().get_key(name, account)
if key is not None:
return key
except Exception:
pass # Fall through to standard store lookup
return self._store.get(name)
def get_spec(self, name: str) -> CredentialSpec:
@@ -279,19 +293,43 @@ class CredentialStoreAdapter:
def get_all_account_info(self) -> list[dict]:
"""Collect all accounts across all configured providers.
Deduplicates by provider name to avoid listing the same provider's
accounts twice when multiple specs map to the same provider.
Includes both Aden OAuth accounts and named local API key accounts.
Deduplicates by (provider, alias) to avoid listing the same account
twice when it appears in both stores.
"""
accounts: list[dict] = []
seen: set[str] = set()
seen_specs: set[str] = set()
seen_accounts: set[tuple[str, str]] = set()
for name, spec in self._specs.items():
provider = spec.credential_id or name
if provider in seen or not self.is_available(name):
if provider in seen_specs or not self.is_available(name):
continue
seen.add(provider)
accounts.extend(self._store.list_accounts(provider))
seen_specs.add(provider)
for acct in self._store.list_accounts(provider):
key = (acct.get("provider", ""), acct.get("alias", ""))
if key not in seen_accounts:
seen_accounts.add(key)
accounts.append(acct)
# Include named local API key accounts
for acct in self.list_local_accounts():
key = (acct.get("provider", ""), acct.get("alias", ""))
if key not in seen_accounts:
seen_accounts.add(key)
accounts.append(acct)
return accounts
def get_tool_provider_map(self) -> dict[str, str]:
    """Map tool names to provider names for account routing.

    Returns:
        Dict mapping tool_name -> provider_name
        (e.g. {"gmail_list_messages": "google", "slack_send_message": "slack"})
    """
    # Hand back a shallow copy so callers cannot mutate the adapter's
    # internal tool-to-credential mapping.
    snapshot: dict[str, str] = {}
    snapshot.update(self._tool_to_cred)
    return snapshot
def get_by_alias(self, provider_name: str, alias: str) -> str | None:
"""Resolve a specific account's token by alias."""
cred = self._store.get_credential_by_alias(provider_name, alias)
@@ -301,6 +339,58 @@ class CredentialStoreAdapter:
"""Alias for get_by_alias (backward compat)."""
return self.get_by_alias(provider_name, label)
# --- Local credential registry ---
def list_local_accounts(self, credential_id: str | None = None) -> list[dict]:
"""
List named local API key accounts from LocalCredentialRegistry.
Args:
credential_id: If given, filter to this credential type only.
Returns:
List of account dicts (same shape as Aden account dicts, source='local').
"""
try:
from framework.credentials.local.registry import LocalCredentialRegistry
registry = LocalCredentialRegistry.default()
return [info.to_account_dict() for info in registry.list_accounts(credential_id)]
except Exception:
return []
def activate_local_account(self, credential_id: str, alias: str) -> bool:
    """
    Inject a named local account's API key into the environment for this session.

    This enables session-level routing: select an account, inject its key as
    the env var that tools already read. No tool signature changes required.

    Args:
        credential_id: Logical credential name (e.g. "brave_search").
        alias: Account alias (e.g. "work").

    Returns:
        True if the key was found and injected, False otherwise.
    """
    import os

    try:
        from framework.credentials.local.registry import LocalCredentialRegistry

        api_key = LocalCredentialRegistry.default().get_key(credential_id, alias)
        if api_key is None:
            return False
        spec = self._specs.get(credential_id)
        if spec is None:
            # No spec means we don't know which env var tools read.
            return False
        os.environ[spec.env_var] = api_key
        return True
    except Exception:
        # Best-effort: any registry failure is treated as "not activated".
        return False
@property
def store(self) -> CredentialStore:
"""Access the underlying credential store for advanced operations."""
+6
View File
@@ -25,6 +25,7 @@ from .account_info_tool import register_tools as register_account_info
from .apollo_tool import register_tools as register_apollo
from .arxiv_tool import register_tools as register_arxiv
from .bigquery_tool import register_tools as register_bigquery
from .brevo_tool import register_tools as register_brevo
from .calcom_tool import register_tools as register_calcom
from .calendar_tool import register_tools as register_calendar
from .csv_tool import register_tools as register_csv
@@ -76,6 +77,9 @@ from .vision_tool import register_tools as register_vision
from .web_scrape_tool import register_tools as register_web_scrape
from .web_search_tool import register_tools as register_web_search
# Web and PDF tools
from .wikipedia_tool import register_tools as register_wikipedia
def register_all_tools(
mcp: FastMCP,
@@ -98,6 +102,7 @@ def register_all_tools(
register_pdf_read(mcp)
register_time(mcp)
register_runtime_logs(mcp)
register_wikipedia(mcp)
register_arxiv(mcp)
# Tools that need credentials (pass credentials if provided)
@@ -147,6 +152,7 @@ def register_all_tools(
register_subdomain_enumerator(mcp)
register_risk_scorer(mcp)
register_stripe(mcp, credentials=credentials)
register_brevo(mcp, credentials=credentials)
# Postgres tool
register_postgres(mcp, credentials=credentials)
@@ -0,0 +1,5 @@
"""Brevo (formerly Sendinblue) tool - transactional email, SMS, and contacts."""
from .brevo_tool import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,487 @@
"""
Brevo Tool - Send transactional emails, SMS, and manage contacts via Brevo API.
Supports:
- Transactional email sending
- Transactional SMS sending
- Contact create/read/update
API Reference: https://developers.brevo.com/reference
"""
from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any
import httpx
from fastmcp import FastMCP
if TYPE_CHECKING:
from aden_tools.credentials import CredentialStoreAdapter
BREVO_API_BASE = "https://api.brevo.com/v3"
class _BrevoClient:
"""Internal client wrapping Brevo API v3 calls."""
def __init__(self, api_key: str):
self._api_key = api_key
@property
def _headers(self) -> dict[str, str]:
return {
"api-key": self._api_key,
"Content-Type": "application/json",
"Accept": "application/json",
}
def _handle_response(self, response: httpx.Response) -> dict[str, Any]:
"""Handle common HTTP error codes."""
if response.status_code == 401:
return {"error": "Invalid Brevo API key"}
if response.status_code == 400:
try:
detail = response.json()
msg = detail.get("message", response.text)
except Exception:
msg = response.text
return {"error": f"Bad request: {msg}"}
if response.status_code == 403:
return {"error": "Brevo API key lacks required permissions"}
if response.status_code == 404:
return {"error": "Resource not found"}
if response.status_code == 429:
return {"error": "Rate limit exceeded. Try again later."}
if response.status_code >= 400:
try:
detail = response.json().get("message", response.text)
except Exception:
detail = response.text
return {"error": f"Brevo API error (HTTP {response.status_code}): {detail}"}
# Success (200, 201, 204)
if response.status_code == 204:
return {"success": True}
try:
return response.json()
except Exception:
return {"success": True}
def send_email(
self,
to: list[dict[str, str]],
subject: str,
html_content: str,
sender: dict[str, str],
text_content: str | None = None,
cc: list[dict[str, str]] | None = None,
bcc: list[dict[str, str]] | None = None,
reply_to: dict[str, str] | None = None,
tags: list[str] | None = None,
) -> dict[str, Any]:
"""Send a transactional email."""
payload: dict[str, Any] = {
"to": to,
"subject": subject,
"htmlContent": html_content,
"sender": sender,
}
if text_content:
payload["textContent"] = text_content
if cc:
payload["cc"] = cc
if bcc:
payload["bcc"] = bcc
if reply_to:
payload["replyTo"] = reply_to
if tags:
payload["tags"] = tags
response = httpx.post(
f"{BREVO_API_BASE}/smtp/email",
headers=self._headers,
json=payload,
timeout=30.0,
)
return self._handle_response(response)
def send_sms(
self,
sender: str,
recipient: str,
content: str,
sms_type: str = "transactional",
tag: str | None = None,
) -> dict[str, Any]:
"""Send a transactional SMS."""
payload: dict[str, Any] = {
"sender": sender,
"recipient": recipient,
"content": content,
"type": sms_type,
}
if tag:
payload["tag"] = tag
response = httpx.post(
f"{BREVO_API_BASE}/transactionalSMS/send",
headers=self._headers,
json=payload,
timeout=30.0,
)
return self._handle_response(response)
def create_contact(
self,
email: str | None = None,
attributes: dict[str, Any] | None = None,
list_ids: list[int] | None = None,
update_enabled: bool = False,
) -> dict[str, Any]:
"""Create a new contact."""
payload: dict[str, Any] = {}
if email:
payload["email"] = email
if attributes:
payload["attributes"] = attributes
if list_ids:
payload["listIds"] = list_ids
if update_enabled:
payload["updateEnabled"] = True
response = httpx.post(
f"{BREVO_API_BASE}/contacts",
headers=self._headers,
json=payload,
timeout=30.0,
)
return self._handle_response(response)
def get_contact(self, identifier: str) -> dict[str, Any]:
"""Get a contact by email or ID."""
response = httpx.get(
f"{BREVO_API_BASE}/contacts/{identifier}",
headers=self._headers,
timeout=30.0,
)
return self._handle_response(response)
def update_contact(
self,
identifier: str,
attributes: dict[str, Any] | None = None,
list_ids: list[int] | None = None,
unlink_list_ids: list[int] | None = None,
) -> dict[str, Any]:
"""Update a contact."""
payload: dict[str, Any] = {}
if attributes:
payload["attributes"] = attributes
if list_ids:
payload["listIds"] = list_ids
if unlink_list_ids:
payload["unlinkListIds"] = unlink_list_ids
response = httpx.put(
f"{BREVO_API_BASE}/contacts/{identifier}",
headers=self._headers,
json=payload,
timeout=30.0,
)
return self._handle_response(response)
def register_tools(
    mcp: FastMCP,
    credentials: CredentialStoreAdapter | None = None,
) -> None:
    """Register Brevo tools with the MCP server.

    Registers five tools (send email, send SMS, create/get/update contact).
    Each tool resolves the API key lazily per call, so a key configured
    after registration is still picked up.
    """

    def _get_api_key() -> str | None:
        """Get Brevo API key from credential store or environment.

        The credential store, when provided, takes precedence; the
        BREVO_API_KEY env var is only consulted when no store is wired in.
        """
        if credentials is not None:
            key = credentials.get("brevo")
            # Defensive: the store must hand back a string (or None), never
            # a structured credential object.
            if key is not None and not isinstance(key, str):
                raise TypeError(
                    f"Expected string from credentials.get('brevo'), got {type(key).__name__}"
                )
            return key
        return os.getenv("BREVO_API_KEY")

    def _get_client() -> _BrevoClient | dict[str, str]:
        """Get a Brevo client, or return an error dict if no credentials.

        Callers must check ``isinstance(result, dict)`` and pass the error
        dict straight through to the user.
        """
        api_key = _get_api_key()
        if not api_key:
            return {
                "error": "Brevo API key not configured",
                "help": (
                    "Set BREVO_API_KEY environment variable or configure via "
                    "credential store. Get your key at https://app.brevo.com/settings/keys/api"
                ),
            }
        return _BrevoClient(api_key)

    @mcp.tool()
    def brevo_send_email(
        to: list[dict[str, str]],
        subject: str,
        html_content: str,
        sender_email: str,
        sender_name: str = "",
        text_content: str = "",
        cc: list[dict[str, str]] | None = None,
        bcc: list[dict[str, str]] | None = None,
        reply_to_email: str = "",
        tags: list[str] | None = None,
    ) -> dict[str, Any]:
        """
        Send a transactional email via Brevo.

        Use this for notifications, alerts, confirmations, or any triggered email.

        Args:
            to: Recipients list. Each item: {"email": "user@example.com", "name": "User Name"}.
                Name is optional.
            subject: Email subject line.
            html_content: Email body as HTML string.
            sender_email: Sender email address (must be a verified sender in Brevo).
            sender_name: Sender display name. Optional.
            text_content: Plain text alternative body. Optional.
            cc: CC recipients. Same format as 'to'. Optional.
            bcc: BCC recipients. Same format as 'to'. Optional.
            reply_to_email: Reply-to email address. Optional.
            tags: Tags for categorizing the email. Optional.

        Returns:
            Dict with messageId on success, or error dict on failure.
        """
        # _get_client returns an error dict when no API key is configured.
        client = _get_client()
        if isinstance(client, dict):
            return client
        # Validate required fields up front so no request is made for
        # obviously malformed input.
        if not to:
            return {"error": "At least one recipient is required"}
        if not subject:
            return {"error": "Subject is required"}
        if not html_content:
            return {"error": "HTML content is required"}
        if not sender_email:
            return {"error": "Sender email is required"}

        sender: dict[str, str] = {"email": sender_email}
        if sender_name:
            sender["name"] = sender_name

        reply_to = {"email": reply_to_email} if reply_to_email else None

        try:
            result = client.send_email(
                to=to,
                subject=subject,
                html_content=html_content,
                sender=sender,
                text_content=text_content if text_content else None,
                cc=cc,
                bcc=bcc,
                reply_to=reply_to,
                tags=tags,
            )
            # API-level errors are passed through unchanged.
            if "error" in result:
                return result
            return {
                "success": True,
                "message_id": result.get("messageId", ""),
                "to": [r.get("email") for r in to],
                "subject": subject,
            }
        except httpx.TimeoutException:
            return {"error": "Brevo request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}

    @mcp.tool()
    def brevo_send_sms(
        sender: str,
        recipient: str,
        content: str,
        sms_type: str = "transactional",
        tag: str = "",
    ) -> dict[str, Any]:
        """
        Send a transactional SMS via Brevo.

        Use this for SMS notifications, alerts, or verification messages.

        Args:
            sender: Sender name (max 11 alphanumeric chars) or phone number (max 15 digits).
            recipient: Recipient phone number with country code (e.g., "33612345678").
            content: SMS message text. Messages over 160 chars are sent as multiple SMS.
            sms_type: Either "transactional" or "marketing". Defaults to "transactional".
            tag: Optional tag for categorizing the SMS.

        Returns:
            Dict with messageId on success, or error dict on failure.
        """
        client = _get_client()
        if isinstance(client, dict):
            return client
        if not sender:
            return {"error": "Sender is required"}
        if not recipient:
            return {"error": "Recipient phone number is required"}
        if not content:
            return {"error": "SMS content is required"}

        try:
            result = client.send_sms(
                sender=sender,
                recipient=recipient,
                content=content,
                sms_type=sms_type,
                # Empty-string tag is normalized to None so it is omitted
                # from the request payload.
                tag=tag if tag else None,
            )
            if "error" in result:
                return result
            return {
                "success": True,
                "message_id": result.get("messageId", ""),
                "recipient": recipient,
            }
        except httpx.TimeoutException:
            return {"error": "Brevo request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}

    @mcp.tool()
    def brevo_create_contact(
        email: str,
        attributes: dict[str, Any] | None = None,
        list_ids: list[int] | None = None,
        update_enabled: bool = False,
    ) -> dict[str, Any]:
        """
        Create a contact in Brevo.

        Use this to add new contacts to your Brevo account for email/SMS campaigns.

        Args:
            email: Contact email address.
            attributes: Contact attributes in UPPERCASE (e.g., {"FNAME": "John", "LNAME": "Doe"}).
                Standard attributes: FNAME, LNAME, SMS (phone with country code like +33xxxxxxxxxx).
            list_ids: List IDs to add the contact to. Optional.
            update_enabled: If True, updates the contact if it already exists. Defaults to False.

        Returns:
            Dict with contact id on success, or error dict on failure.
        """
        client = _get_client()
        if isinstance(client, dict):
            return client
        if not email:
            return {"error": "Email is required"}

        try:
            result = client.create_contact(
                email=email,
                attributes=attributes,
                list_ids=list_ids,
                update_enabled=update_enabled,
            )
            if "error" in result:
                return result
            return {
                "success": True,
                "id": result.get("id"),
                "email": email,
            }
        except httpx.TimeoutException:
            return {"error": "Brevo request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}

    @mcp.tool()
    def brevo_get_contact(
        identifier: str,
    ) -> dict[str, Any]:
        """
        Get a contact from Brevo by email address or contact ID.

        Args:
            identifier: Contact email address or numeric contact ID.

        Returns:
            Dict with contact details (email, attributes, listIds, statistics)
            or error dict on failure.
        """
        client = _get_client()
        if isinstance(client, dict):
            return client
        if not identifier:
            return {"error": "Contact identifier (email or ID) is required"}

        try:
            result = client.get_contact(identifier)
            if "error" in result:
                return result
            # Flatten the Brevo response into a stable snake_case shape.
            return {
                "success": True,
                "id": result.get("id"),
                "email": result.get("email"),
                "attributes": result.get("attributes", {}),
                "list_ids": result.get("listIds", []),
                "email_blacklisted": result.get("emailBlacklisted", False),
                "sms_blacklisted": result.get("smsBlacklisted", False),
            }
        except httpx.TimeoutException:
            return {"error": "Brevo request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}

    @mcp.tool()
    def brevo_update_contact(
        identifier: str,
        attributes: dict[str, Any] | None = None,
        list_ids: list[int] | None = None,
        unlink_list_ids: list[int] | None = None,
    ) -> dict[str, Any]:
        """
        Update a contact in Brevo.

        Args:
            identifier: Contact email address or numeric contact ID.
            attributes: Attributes to update in UPPERCASE (e.g., {"FNAME": "Jane"}).
            list_ids: List IDs to add the contact to. Optional.
            unlink_list_ids: List IDs to remove the contact from. Optional.

        Returns:
            Dict with success status, or error dict on failure.
        """
        client = _get_client()
        if isinstance(client, dict):
            return client
        if not identifier:
            return {"error": "Contact identifier (email or ID) is required"}

        try:
            result = client.update_contact(
                identifier=identifier,
                attributes=attributes,
                list_ids=list_ids,
                unlink_list_ids=unlink_list_ids,
            )
            if "error" in result:
                return result
            return {
                "success": True,
                "identifier": identifier,
                "message": "Contact updated successfully",
            }
        except httpx.TimeoutException:
            return {"error": "Brevo request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}
@@ -114,10 +114,15 @@ def register_tools(
"subject": subject,
}
def _get_credential(provider: Literal["resend", "gmail"]) -> str | None:
def _get_credential(
provider: Literal["resend", "gmail"],
account: str = "",
) -> str | None:
"""Get the credential for the requested provider."""
if provider == "gmail":
if credentials is not None:
if account:
return credentials.get_by_alias("google", account)
return credentials.get("google")
return os.getenv("GOOGLE_ACCESS_TOKEN")
# resend
@@ -150,6 +155,7 @@ def register_tools(
from_email: str | None = None,
cc: str | list[str] | None = None,
bcc: str | list[str] | None = None,
account: str = "",
) -> dict:
"""Core email sending logic, callable by other tools."""
from_email = _resolve_from_email(from_email)
@@ -182,7 +188,7 @@ def register_tools(
"help": "Pass from_email or set EMAIL_FROM environment variable",
}
credential = _get_credential(provider)
credential = _get_credential(provider, account)
if not credential:
if provider == "gmail":
return {
@@ -215,6 +221,7 @@ def register_tools(
from_email: str | None = None,
cc: str | list[str] | None = None,
bcc: str | list[str] | None = None,
account: str = "",
) -> dict:
"""
Send an email.
@@ -232,12 +239,14 @@ def register_tools(
Optional for Gmail (defaults to authenticated user's address).
cc: CC recipient(s). Single string or list of strings. Optional.
bcc: BCC recipient(s). Single string or list of strings. Optional.
account: Account alias for multi-account routing (e.g. "timothy-home").
Only used with Gmail provider. Optional.
Returns:
Dict with send result including provider used and message ID,
or error dict with "error" and optional "help" keys.
"""
return _send_email_impl(to, subject, html, provider, from_email, cc, bcc)
return _send_email_impl(to, subject, html, provider, from_email, cc, bcc, account)
def _fetch_original_message(access_token: str, message_id: str) -> dict:
"""Fetch the original message to extract threading info."""
@@ -278,6 +287,7 @@ def register_tools(
html: str,
cc: str | list[str] | None = None,
bcc: str | list[str] | None = None,
account: str = "",
) -> dict:
"""
Reply to a Gmail message, keeping it in the same thread.
@@ -291,6 +301,8 @@ def register_tools(
html: Reply body as HTML string.
cc: CC recipient(s). Single string or list of strings. Optional.
bcc: BCC recipient(s). Single string or list of strings. Optional.
account: Account alias for multi-account routing (e.g. "timothy-home").
Optional.
Returns:
Dict with send result including reply message ID and threadId,
@@ -305,7 +317,7 @@ def register_tools(
if not html:
return {"error": "Reply body (html) is required"}
credential = _get_credential("gmail")
credential = _get_credential("gmail", account)
if not credential:
return {
"error": "Gmail credentials not configured",
@@ -0,0 +1,54 @@
# Wikipedia Search Tool
This tool allows agents to search Wikipedia and retrieve article summaries without needing an external API key.
## Features
- **Search**: Find relevant Wikipedia articles by query.
- **Summaries**: Get concise descriptions and excerpts for search results.
- **Multilingual**: Supports searching in different languages (default: English).
- **No API Key**: Uses the public Wikipedia REST API.
## Usage
### As an MCP Tool
```python
result = await call_tool(
"search_wikipedia",
arguments={
"query": "Artificial Intelligence",
"num_results": 3,
"lang": "en"
}
)
```
### Parameters
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `query` | `str` | Required | The search term to look for. |
| `num_results` | `int` | `3` | Number of results to return (max 10). |
| `lang` | `str` | `"en"` | Wikipedia language code (e.g., "en", "es", "fr"). |
## Response Format
The tool returns a dictionary with the following structure:
```json
{
"query": "Artificial Intelligence",
"lang": "en",
"count": 3,
"results": [
{
"title": "Artificial intelligence",
"url": "https://en.wikipedia.org/wiki/Artificial_intelligence",
"description": "Intelligence of machines",
"snippet": "Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly the computer systems..."
},
...
]
}
```
@@ -0,0 +1,3 @@
from .wikipedia_tool import register_tools
__all__ = ["register_tools"]
@@ -0,0 +1,88 @@
"""
Wikipedia Search Tool - Search and retrieve summaries from Wikipedia.
Uses the Wikipedia Public API (REST) to find relevant articles and get their intros.
No external 'wikipedia' library required, uses standard `httpx`.
"""
from __future__ import annotations
import re
import httpx
from fastmcp import FastMCP
def register_tools(mcp: FastMCP) -> None:
    """Register wikipedia tool with the MCP server."""

    # Wikipedia language subdomains are short codes like "en", "pt-br" or
    # "simple". Validating `lang` prevents a crafted value from redirecting
    # the request to an attacker-controlled host, since the value is
    # interpolated directly into the request URL below.
    _LANG_RE = re.compile(r"^[a-z][a-z0-9-]{0,19}$")

    def _strip_html(text: str) -> str:
        """Remove HTML tags from a string."""
        if not text:
            return ""
        return re.sub(r"<[^>]+>", "", text)

    @mcp.tool()
    def search_wikipedia(query: str, lang: str = "en", num_results: int = 3) -> dict:
        """
        Search Wikipedia for a given query and return summaries of top matching articles.

        Args:
            query: The search term (e.g. "Artificial Intelligence")
            lang: Language code (default: "en")
            num_results: Number of pages to retrieve (default: 3, max: 10)

        Returns:
            Dict containing query metadata and list of results (title, summary, url),
            or a dict with an "error" key on failure.
        """
        if not query:
            return {"error": "Query cannot be empty"}

        # Normalize then validate the language code before it touches the URL.
        lang = lang.strip().lower()
        if not _LANG_RE.match(lang):
            return {"error": f"Invalid language code: {lang!r}"}

        # Clamp to the documented 1..10 range.
        num_results = max(1, min(num_results, 10))

        base_url = f"https://{lang}.wikipedia.org/w/rest.php/v1/search/page"

        try:
            # 1. Search for pages
            response = httpx.get(
                base_url,
                params={"q": query, "limit": num_results},
                timeout=10.0,
                headers={"User-Agent": "AdenAgentFramework/1.0 (https://adenhq.com)"},
            )

            if response.status_code != 200:
                return {"error": f"Wikipedia API error: {response.status_code}", "query": query}

            data = response.json()
            pages = data.get("pages", [])

            results = []
            for page in pages:
                # Basic info
                title = page.get("title", "")
                key = page.get("key", "")

                # Use description or excerpt for summary
                description = page.get("description") or "No description available."
                excerpt = page.get("excerpt") or ""

                # Clean up HTML from excerpt (e.g. <span class="searchmatch">)
                snippet = _strip_html(excerpt)

                results.append(
                    {
                        "title": title,
                        "url": f"https://{lang}.wikipedia.org/wiki/{key}",
                        "description": description,
                        "snippet": snippet,
                    }
                )

            return {"query": query, "lang": lang, "count": len(results), "results": results}

        except httpx.TimeoutException:
            return {"error": "Request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {str(e)}"}
        except Exception as e:
            # Tool boundary: surface any unexpected failure as an error dict.
            return {"error": f"Search failed: {str(e)}"}
+116
View File
@@ -0,0 +1,116 @@
"""Tests that enforce credential registry completeness and consistency.
These tests run in CI and catch common mistakes when adding new integrations:
- Missing health checker for a spec with health_check_endpoint
- Orphaned entries in HEALTH_CHECKERS (no corresponding spec)
- CredentialSpec fields that are incomplete
- Duplicate env var conflicts
"""
import pytest
from aden_tools.credentials import CREDENTIAL_SPECS
from aden_tools.credentials.health_check import HEALTH_CHECKERS, validate_integration_wiring
class TestRegistryCompleteness:
    """Every credential with a health_check_endpoint must have a registered checker."""

    # Credentials that intentionally don't have their own dedicated checker:
    # - google_cse: shares google_search checker (same credential_group)
    # - razorpay/razorpay_secret: requires HTTP Basic auth with TWO credentials,
    #   which the single-value health check dispatcher can't support
    KNOWN_EXCEPTIONS = {"google_cse", "razorpay", "razorpay_secret"}

    def test_specs_with_endpoint_have_checkers(self):
        """Every CredentialSpec with health_check_endpoint has a HEALTH_CHECKERS entry."""
        missing = [
            f"{name}: has endpoint '{spec.health_check_endpoint}' "
            f"but no dedicated health checker"
            for name, spec in CREDENTIAL_SPECS.items()
            if name not in self.KNOWN_EXCEPTIONS
            and spec.health_check_endpoint
            and name not in HEALTH_CHECKERS
        ]
        assert not missing, (
            f"{len(missing)} credential(s) have health_check_endpoint but no checker:\n"
            + "\n".join(f" - {m}" for m in missing)
        )

    def test_checkers_have_corresponding_specs(self):
        """Every key in HEALTH_CHECKERS matches a CREDENTIAL_SPECS entry."""
        stray = [name for name in HEALTH_CHECKERS if name not in CREDENTIAL_SPECS]
        assert not stray, f"HEALTH_CHECKERS has entries with no CREDENTIAL_SPECS: {stray}"
class TestSpecRequiredFields:
    """Every CredentialSpec should have minimum required fields."""

    # Evaluated once at class-body time; decorators below can see these names.
    _ALL_SPECS = list(CREDENTIAL_SPECS.items())
    _ALL_IDS = list(CREDENTIAL_SPECS.keys())

    @pytest.mark.parametrize("cred_name,spec", _ALL_SPECS, ids=_ALL_IDS)
    def test_has_env_var(self, cred_name, spec):
        """env_var is mandatory on every spec."""
        assert spec.env_var, f"{cred_name}: missing env_var"

    @pytest.mark.parametrize("cred_name,spec", _ALL_SPECS, ids=_ALL_IDS)
    def test_has_description(self, cred_name, spec):
        """description is mandatory on every spec."""
        assert spec.description, f"{cred_name}: missing description"

    @pytest.mark.parametrize("cred_name,spec", _ALL_SPECS, ids=_ALL_IDS)
    def test_has_tools_or_node_types(self, cred_name, spec):
        """A spec must be wired to at least one tool or node_type."""
        assert spec.tools or spec.node_types, (
            f"{cred_name}: must have at least one tool or node_type"
        )
class TestNoDuplicateEnvVars:
    """No two credential specs should use the same env_var (unless in same credential_group)."""

    def test_no_accidental_env_var_collisions(self):
        # Bucket credential names by the env var each spec declares.
        by_env_var: dict[str, list[str]] = {}
        for name, spec in CREDENTIAL_SPECS.items():
            by_env_var.setdefault(spec.env_var, []).append(name)

        duplicates = {}
        for env_var, names in by_env_var.items():
            if len(names) > 1:
                # Sharing is intentional only when every colliding spec sits in
                # the same non-empty credential_group.
                groups = {CREDENTIAL_SPECS[n].credential_group for n in names}
                intentional = len(groups) == 1 and groups != {""}
                if not intentional:
                    duplicates[env_var] = names
        assert not duplicates, f"Duplicate env_vars across unrelated credentials: {duplicates}"
class TestIntegrationWiring:
    """validate_integration_wiring() catches wiring issues."""

    def test_nonexistent_credential(self):
        """An unknown credential name is reported as missing its CredentialSpec."""
        problems = validate_integration_wiring("nonexistent_service_xyz")
        assert any("No CredentialSpec" in problem for problem in problems)

    def test_known_credential_no_critical_issues(self):
        """A well-wired credential (e.g. 'hubspot') should have no issues."""
        problems = validate_integration_wiring("hubspot")
        assert not problems, f"Unexpected issues for hubspot: {problems}"

    @pytest.mark.parametrize("cred_name", list(HEALTH_CHECKERS.keys()))
    def test_all_checkers_pass_wiring(self, cred_name):
        """Every registered checker should pass wiring validation."""
        problems = validate_integration_wiring(cred_name)
        report = "\n".join(f" - {problem}" for problem in problems)
        assert not problems, f"Wiring issues for '{cred_name}':\n" + report
+216
View File
@@ -7,12 +7,22 @@ import httpx
from aden_tools.credentials.health_check import (
HEALTH_CHECKERS,
AnthropicHealthChecker,
ApolloHealthChecker,
BrevoHealthChecker,
CalcomHealthChecker,
DiscordHealthChecker,
ExaSearchHealthChecker,
FinlightHealthChecker,
GitHubHealthChecker,
GoogleCalendarHealthChecker,
GoogleDocsHealthChecker,
GoogleMapsHealthChecker,
GoogleSearchHealthChecker,
NewsdataHealthChecker,
ResendHealthChecker,
SerpApiHealthChecker,
StripeHealthChecker,
TelegramHealthChecker,
check_credential_health,
)
@@ -69,6 +79,16 @@ class TestHealthCheckerRegistry:
"google",
"slack",
"discord",
"stripe",
"exa_search",
"google_docs",
"calcom",
"serpapi",
"apollo",
"telegram",
"newsdata",
"finlight",
"brevo",
}
assert set(HEALTH_CHECKERS.keys()) == expected
@@ -485,3 +505,199 @@ class TestGoogleCalendarHealthCheckerTokenSanitization:
assert not result.valid
assert "Connection refused" in result.message
# ---------------------------------------------------------------------------
# HealthCheckerTestSuite: reusable base class for standard test scenarios
# ---------------------------------------------------------------------------
class HealthCheckerTestSuite:
    """Reusable test mixin that auto-generates standard health check scenarios.

    Subclass this and set ``CHECKER_CLASS`` and ``HTTP_METHOD`` to get 6 tests
    for free. Add checker-specific tests alongside as needed.

    Example::

        class TestMyNewChecker(HealthCheckerTestSuite):
            CHECKER_CLASS = MyNewHealthChecker
            HTTP_METHOD = "get"
    """

    CHECKER_CLASS: type | None = None
    HTTP_METHOD: str = "get"
    CHECKER_KWARGS: dict = {}

    # Override these if the checker uses non-standard valid-status logic
    EXPECT_200_VALID: bool = True
    EXPECT_401_INVALID: bool = True
    EXPECT_403_INVALID: bool = True
    EXPECT_429_VALID: bool = True

    def _make_checker(self):
        # Instantiate the checker under test from the subclass configuration.
        checker_cls = self.CHECKER_CLASS
        assert checker_cls is not None, "Set CHECKER_CLASS in subclass"
        return checker_cls(**self.CHECKER_KWARGS)

    def _mock_response(self, status_code, json_data=None):
        # Build a stand-in httpx.Response with the requested status and payload.
        response = MagicMock(spec=httpx.Response)
        response.status_code = status_code
        # ``or {}`` covers both None and an explicitly empty payload.
        response.json.return_value = json_data or {}
        return response

    def _setup_mock(self, mock_client_cls, status_code=200, json_data=None):
        # Wire the patched httpx.Client context manager so the checker's HTTP
        # call (named by self.HTTP_METHOD) yields the canned response.
        client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        method = getattr(client, self.HTTP_METHOD)
        method.return_value = self._mock_response(status_code, json_data)
        return client, method

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_valid_credential_200(self, mock_client_cls):
        """200 response means valid credential."""
        if self.EXPECT_200_VALID:
            self._setup_mock(mock_client_cls, 200)
            result = self._make_checker().check("test-credential")
            assert result.valid is True

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_invalid_credential_401(self, mock_client_cls):
        """401 response means invalid credential."""
        if self.EXPECT_401_INVALID:
            self._setup_mock(mock_client_cls, 401)
            result = self._make_checker().check("bad-credential")
            assert result.valid is False
            assert result.details.get("status_code") == 401

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_forbidden_403(self, mock_client_cls):
        """403 response means insufficient permissions."""
        if self.EXPECT_403_INVALID:
            self._setup_mock(mock_client_cls, 403)
            result = self._make_checker().check("test-credential")
            assert result.valid is False
            assert result.details.get("status_code") == 403

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_rate_limited_429(self, mock_client_cls):
        """429 (rate limited) typically means the credential is valid."""
        if self.EXPECT_429_VALID:
            self._setup_mock(mock_client_cls, 429)
            result = self._make_checker().check("test-credential")
            assert result.valid is True

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_timeout(self, mock_client_cls):
        """Timeout is handled gracefully."""
        client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        getattr(client, self.HTTP_METHOD).side_effect = httpx.TimeoutException("timed out")
        result = self._make_checker().check("test-credential")
        assert result.valid is False
        assert result.details.get("error") == "timeout"

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_network_error(self, mock_client_cls):
        """Network errors are handled gracefully."""
        client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        getattr(client, self.HTTP_METHOD).side_effect = httpx.RequestError("connection refused")
        result = self._make_checker().check("test-credential")
        assert result.valid is False
        assert "error" in result.details
# ---------------------------------------------------------------------------
# Tests for new checkers (using HealthCheckerTestSuite)
# ---------------------------------------------------------------------------
class TestStripeHealthChecker(HealthCheckerTestSuite):
    # GET-based checker; inherits all six standard scenarios unchanged.
    CHECKER_CLASS = StripeHealthChecker
    HTTP_METHOD = "get"
class TestExaSearchHealthChecker(HealthCheckerTestSuite):
    # Exa's health probe is a POST request, so the suite mocks client.post.
    CHECKER_CLASS = ExaSearchHealthChecker
    HTTP_METHOD = "post"
class TestGoogleDocsHealthChecker(HealthCheckerTestSuite):
    CHECKER_CLASS = GoogleDocsHealthChecker
    HTTP_METHOD = "get"
    # OAuthBearerHealthChecker doesn't treat 429 as valid, so that inherited
    # scenario is disabled for this checker.
    EXPECT_429_VALID = False
class TestCalcomHealthChecker(HealthCheckerTestSuite):
    # GET-based checker; inherits all six standard scenarios unchanged.
    CHECKER_CLASS = CalcomHealthChecker
    HTTP_METHOD = "get"
class TestSerpApiHealthChecker(HealthCheckerTestSuite):
    # GET-based checker; inherits all six standard scenarios unchanged.
    CHECKER_CLASS = SerpApiHealthChecker
    HTTP_METHOD = "get"
class TestApolloHealthChecker(HealthCheckerTestSuite):
    # GET-based checker; inherits all six standard scenarios unchanged.
    CHECKER_CLASS = ApolloHealthChecker
    HTTP_METHOD = "get"
class TestTelegramHealthChecker(HealthCheckerTestSuite):
    CHECKER_CLASS = TelegramHealthChecker
    HTTP_METHOD = "get"
    # Telegram signals success/failure via {"ok": true/false} in a 200 body
    # rather than via HTTP status codes, so the 429-is-valid scenario is off.
    EXPECT_429_VALID = False

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_valid_credential_200(self, mock_client_cls):
        """200 with ok=true means valid bot token."""
        payload = {"ok": True, "result": {"username": "testbot"}}
        self._setup_mock(mock_client_cls, 200, payload)
        outcome = self._make_checker().check("123:ABC")
        assert outcome.valid is True
        assert "testbot" in outcome.message

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_ok_false_invalid(self, mock_client_cls):
        """200 with ok=false means invalid bot token."""
        payload = {"ok": False, "description": "Unauthorized"}
        self._setup_mock(mock_client_cls, 200, payload)
        outcome = self._make_checker().check("bad-token")
        assert outcome.valid is False
class TestNewsdataHealthChecker(HealthCheckerTestSuite):
    # GET-based checker; inherits all six standard scenarios unchanged.
    CHECKER_CLASS = NewsdataHealthChecker
    HTTP_METHOD = "get"
class TestFinlightHealthChecker(HealthCheckerTestSuite):
    # GET-based checker; inherits all six standard scenarios unchanged.
    CHECKER_CLASS = FinlightHealthChecker
    HTTP_METHOD = "get"
class TestBrevoHealthChecker(HealthCheckerTestSuite):
    # GET-based checker; inherits all six standard scenarios unchanged.
    CHECKER_CLASS = BrevoHealthChecker
    HTTP_METHOD = "get"
-2
View File
@@ -132,7 +132,6 @@ class TestPgQuery:
assert "error" in result
def test_query_timeout(self, pg_query_fn, monkeypatch):
class TimeoutCursor:
def execute(self, *args, **kwargs):
raise psycopg.errors.QueryCanceled()
@@ -190,7 +189,6 @@ class TestPgListTables:
class TestPgDescribeTable:
def test_describe_table_success(self, pg_describe_table_fn, monkeypatch):
class DescribeCursor:
def execute(self, *args, **kwargs):
pass
+109
View File
@@ -0,0 +1,109 @@
from unittest.mock import MagicMock, patch
import pytest
from fastmcp import FastMCP
from aden_tools.tools.wikipedia_tool.wikipedia_tool import register_tools
@pytest.fixture
def mcp():
    """Fresh FastMCP server instance for each test."""
    return FastMCP("test-server")
@pytest.fixture
def tool_func(mcp):
    """Return the undecorated ``search_wikipedia`` callable.

    ``register_tools`` defines its tool function inside its own scope and
    exposes it only through ``@mcp.tool()``, so there is no direct handle to
    the plain callable.  We register twice:

    1. against the real FastMCP server (smoke check: registration must not
       raise), and
    2. against a stub whose ``tool`` decorator captures each decorated
       function by name so the logic can be unit-tested directly.
    """
    register_tools(mcp)  # smoke check against a real FastMCP instance

    captured = {}
    stub_mcp = MagicMock()

    def capturing_tool(*args, **kwargs):
        # Mirrors FastMCP's decorator-factory shape; accepts any registration
        # arguments (e.g. name=...) and simply records the wrapped function.
        def decorator(f):
            captured[f.__name__] = f
            return f

        return decorator

    stub_mcp.tool = capturing_tool
    register_tools(stub_mcp)
    return captured["search_wikipedia"]
def test_search_wikipedia_success(tool_func):
    """Happy path: two pages come back mapped to title/url/snippet dicts."""
    pages = [
        {
            "title": "Artificial Intelligence",
            "key": "Artificial_Intelligence",
            "description": "Intelligence demonstrated by machines",
            "excerpt": "<b>Artificial intelligence</b> (<b>AI</b>)...",
        },
        {
            "title": "AI Winter",
            "key": "AI_Winter",
            "description": "Period of reduced funding",
            "excerpt": "In the history of AI...",
        },
    ]
    fake_response = MagicMock()
    fake_response.status_code = 200
    fake_response.json.return_value = {"pages": pages}

    patch_target = "aden_tools.tools.wikipedia_tool.wikipedia_tool.httpx.get"
    with patch(patch_target, return_value=fake_response) as mock_get:
        result = tool_func(query="AI")

    assert result["query"] == "AI"
    assert result["count"] == 2
    top = result["results"][0]
    assert top["title"] == "Artificial Intelligence"
    assert "Artificial_Intelligence" in top["url"]
    # HTML tags from the excerpt must be stripped out of the snippet.
    assert "<b>" not in top["snippet"]
    assert "Artificial intelligence (AI)..." in top["snippet"]
    mock_get.assert_called_once()
    _, kwargs = mock_get.call_args
    assert kwargs["params"]["q"] == "AI"
def test_search_wikipedia_empty_query(tool_func):
result = tool_func(query="")
assert "error" in result
assert result["error"] == "Query cannot be empty"
def test_search_wikipedia_api_error(tool_func):
    """A non-200 upstream status is surfaced as a Wikipedia API error."""
    failing_response = MagicMock()
    failing_response.status_code = 500

    patch_target = "aden_tools.tools.wikipedia_tool.wikipedia_tool.httpx.get"
    with patch(patch_target, return_value=failing_response):
        outcome = tool_func(query="Error")

    assert "error" in outcome
    assert "Wikipedia API error: 500" in outcome["error"]
def test_search_wikipedia_timeout(tool_func):
    """An httpx timeout is caught and reported as a friendly error."""
    import httpx

    patch_target = "aden_tools.tools.wikipedia_tool.wikipedia_tool.httpx.get"
    with patch(patch_target, side_effect=httpx.TimeoutException("Timeout")):
        outcome = tool_func(query="Timeout")

    assert "error" in outcome
    assert "Request timed out" in outcome["error"]