Merge branch 'main' into feature/concurrent-judge-runtime
This commit is contained in:
@@ -378,16 +378,16 @@ flowchart TB
|
||||
SA -->|"Inform"| ELN_EL
|
||||
SA -->|"Starts"| B
|
||||
B -->|"Report"| ELN_EL
|
||||
TR -->|"Assigned"| EventLoopNode
|
||||
CB -->|"Modify Worker Bee"| WorkerBees
|
||||
TR -->|"Assigned"| ELN_EL
|
||||
CB -->|"Modify Worker Bee"| WB_C
|
||||
|
||||
%% =========================================
|
||||
%% SHARED MEMORY & LOGS ACCESS
|
||||
%% =========================================
|
||||
|
||||
%% Worker Bees Access
|
||||
Graph <-->|"Read/Write"| WTM
|
||||
Graph <-->|"Read/Write"| SM
|
||||
%% Worker Bees Access (link to node inside Graph subgraph)
|
||||
AN <-->|"Read/Write"| WTM
|
||||
AN <-->|"Read/Write"| SM
|
||||
|
||||
%% Queen Bee Access
|
||||
QB_C <-->|"Read/Write"| WTM
|
||||
|
||||
@@ -1,17 +1,21 @@
|
||||
"""
|
||||
Credential Tester — verify synced credentials via live API calls.
|
||||
Credential Tester — verify credentials (Aden OAuth + local API keys) via live API calls.
|
||||
|
||||
Interactive agent that lists connected accounts, lets the user pick one,
|
||||
Interactive agent that lists all testable accounts, lets the user pick one,
|
||||
loads the provider's tools, and runs a chat session to test the credential.
|
||||
"""
|
||||
|
||||
from .agent import (
|
||||
CredentialTesterAgent,
|
||||
_list_aden_accounts,
|
||||
_list_env_fallback_accounts,
|
||||
_list_local_accounts,
|
||||
configure_for_account,
|
||||
conversation_mode,
|
||||
edges,
|
||||
entry_node,
|
||||
entry_points,
|
||||
get_tools_for_provider,
|
||||
goal,
|
||||
identity_prompt,
|
||||
list_connected_accounts,
|
||||
@@ -34,6 +38,7 @@ __all__ = [
|
||||
"edges",
|
||||
"entry_node",
|
||||
"entry_points",
|
||||
"get_tools_for_provider",
|
||||
"goal",
|
||||
"identity_prompt",
|
||||
"list_connected_accounts",
|
||||
@@ -43,4 +48,8 @@ __all__ = [
|
||||
"requires_account_selection",
|
||||
"skip_credential_validation",
|
||||
"terminal_nodes",
|
||||
# Internal list helpers (exposed for testing)
|
||||
"_list_aden_accounts",
|
||||
"_list_local_accounts",
|
||||
"_list_env_fallback_accounts",
|
||||
]
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
"""Credential Tester agent — verify synced credentials via live API calls.
|
||||
"""Credential Tester agent — verify credentials via live API calls.
|
||||
|
||||
A framework agent that lets the user pick a connected account and test it
|
||||
by making real API calls via the provider's tools.
|
||||
Supports both Aden OAuth2-synced accounts AND locally-stored API key accounts.
|
||||
Aden accounts use account="alias" routing; local accounts inject the key into
|
||||
the session environment so tools read it without an account= parameter.
|
||||
|
||||
When loaded via AgentRunner.load() (TUI picker, ``hive run``), the module-level
|
||||
``nodes`` / ``edges`` variables provide a static graph. The TUI detects
|
||||
@@ -40,7 +41,7 @@ if TYPE_CHECKING:
|
||||
goal = Goal(
|
||||
id="credential-tester",
|
||||
name="Credential Tester",
|
||||
description="Verify that a synced credential can make real API calls.",
|
||||
description="Verify that a credential can make real API calls.",
|
||||
success_criteria=[
|
||||
SuccessCriterion(
|
||||
id="api-call-success",
|
||||
@@ -59,52 +60,148 @@ goal = Goal(
|
||||
|
||||
|
||||
def get_tools_for_provider(provider_name: str) -> list[str]:
|
||||
"""Collect tool names for a specific Aden credential by credential_id.
|
||||
"""Collect tool names for a credential by credential_id OR credential_group.
|
||||
|
||||
Matches on ``credential_id`` (e.g. "google" → Gmail tools only),
|
||||
NOT ``aden_provider_name`` which can be shared across products
|
||||
(e.g. both google and google_docs have aden_provider_name="google").
|
||||
Matches on both ``credential_id`` (e.g. "google" → Gmail tools) and
|
||||
``credential_group`` (e.g. "google_custom_search" → all google search tools).
|
||||
"""
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
|
||||
tools: list[str] = []
|
||||
for spec in CREDENTIAL_SPECS.values():
|
||||
if spec.credential_id == provider_name:
|
||||
if spec.credential_id == provider_name or spec.credential_group == provider_name:
|
||||
tools.extend(spec.tools)
|
||||
return sorted(set(tools))
|
||||
|
||||
|
||||
def list_connected_accounts() -> list[dict]:
|
||||
"""List connected accounts from GET /v1/credentials."""
|
||||
def _list_aden_accounts() -> list[dict]:
|
||||
"""List active accounts from the Aden platform (requires ADEN_API_KEY)."""
|
||||
import os
|
||||
|
||||
from framework.credentials.aden.client import AdenClientConfig, AdenCredentialClient
|
||||
|
||||
api_key = os.environ.get("ADEN_API_KEY")
|
||||
if not api_key:
|
||||
return []
|
||||
|
||||
client = AdenCredentialClient(
|
||||
AdenClientConfig(
|
||||
base_url=os.environ.get("ADEN_API_URL", "https://api.adenhq.com"),
|
||||
)
|
||||
)
|
||||
try:
|
||||
integrations = client.list_integrations()
|
||||
finally:
|
||||
client.close()
|
||||
from framework.credentials.aden.client import AdenClientConfig, AdenCredentialClient
|
||||
|
||||
return [
|
||||
{
|
||||
"provider": c.provider,
|
||||
"alias": c.alias,
|
||||
"identity": {"email": c.email} if c.email else {},
|
||||
"integration_id": c.integration_id,
|
||||
}
|
||||
for c in integrations
|
||||
if c.status == "active"
|
||||
client = AdenCredentialClient(
|
||||
AdenClientConfig(
|
||||
base_url=os.environ.get("ADEN_API_URL", "https://api.adenhq.com"),
|
||||
)
|
||||
)
|
||||
try:
|
||||
integrations = client.list_integrations()
|
||||
finally:
|
||||
client.close()
|
||||
|
||||
return [
|
||||
{
|
||||
"provider": c.provider,
|
||||
"alias": c.alias,
|
||||
"identity": {"email": c.email} if c.email else {},
|
||||
"integration_id": c.integration_id,
|
||||
"source": "aden",
|
||||
}
|
||||
for c in integrations
|
||||
if c.status == "active"
|
||||
]
|
||||
except Exception:
|
||||
return []
|
||||
|
||||
|
||||
def _list_local_accounts() -> list[dict]:
|
||||
"""List named local API key accounts from LocalCredentialRegistry."""
|
||||
try:
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
|
||||
return [
|
||||
info.to_account_dict() for info in LocalCredentialRegistry.default().list_accounts()
|
||||
]
|
||||
except Exception:
|
||||
return []
|
||||
|
||||
|
||||
def _list_env_fallback_accounts() -> list[dict]:
|
||||
"""Surface configured-but-unregistered credentials as testable entries.
|
||||
|
||||
Detects credentials available via env vars OR stored in the encrypted
|
||||
store in the old flat format (e.g. ``brave_search`` with no alias).
|
||||
These are users who haven't yet run ``save_account()`` but have a working key.
|
||||
Shows with alias="default" and status="unknown".
|
||||
"""
|
||||
import os
|
||||
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
|
||||
# Collect IDs in encrypted store (includes old flat entries like "brave_search")
|
||||
try:
|
||||
from framework.credentials.storage import EncryptedFileStorage
|
||||
|
||||
encrypted_ids: set[str] = set(EncryptedFileStorage().list_all())
|
||||
except Exception:
|
||||
encrypted_ids = set()
|
||||
|
||||
def _is_configured(cred_name: str, spec) -> bool:
|
||||
# 1. Env var present
|
||||
if os.environ.get(spec.env_var):
|
||||
return True
|
||||
# 2. Old flat encrypted entry (no slash — new entries have {x}/{y})
|
||||
if cred_name in encrypted_ids:
|
||||
return True
|
||||
return False
|
||||
|
||||
seen_groups: set[str] = set()
|
||||
accounts: list[dict] = []
|
||||
|
||||
for cred_name, spec in CREDENTIAL_SPECS.items():
|
||||
if not spec.direct_api_key_supported or not spec.tools:
|
||||
continue
|
||||
|
||||
if spec.credential_group:
|
||||
if spec.credential_group in seen_groups:
|
||||
continue
|
||||
group_available = all(
|
||||
_is_configured(n, s)
|
||||
for n, s in CREDENTIAL_SPECS.items()
|
||||
if s.credential_group == spec.credential_group
|
||||
)
|
||||
if not group_available:
|
||||
continue
|
||||
seen_groups.add(spec.credential_group)
|
||||
provider = spec.credential_group
|
||||
else:
|
||||
if not _is_configured(cred_name, spec):
|
||||
continue
|
||||
provider = cred_name
|
||||
|
||||
accounts.append(
|
||||
{
|
||||
"provider": provider,
|
||||
"alias": "default",
|
||||
"identity": {},
|
||||
"integration_id": None,
|
||||
"source": "local",
|
||||
"status": "unknown",
|
||||
}
|
||||
)
|
||||
|
||||
return accounts
|
||||
|
||||
|
||||
def list_connected_accounts() -> list[dict]:
|
||||
"""List all testable accounts: Aden-synced + named local + env-var fallbacks."""
|
||||
aden = _list_aden_accounts()
|
||||
local = _list_local_accounts()
|
||||
|
||||
# Show env-var fallbacks only for credentials not already in the named registry
|
||||
local_providers = {a["provider"] for a in local}
|
||||
env_fallbacks = [
|
||||
a for a in _list_env_fallback_accounts() if a["provider"] not in local_providers
|
||||
]
|
||||
|
||||
return aden + local + env_fallbacks
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Module-level hooks (read by AgentRunner.load / TUI)
|
||||
@@ -120,22 +217,102 @@ requires_account_selection = True
|
||||
def configure_for_account(runner: AgentRunner, account: dict) -> None:
|
||||
"""Scope the tester node's tools to the selected provider.
|
||||
|
||||
Called by the TUI after the user picks an account from the picker.
|
||||
After scoping, re-enables credential validation so the selected
|
||||
provider's credentials are checked before the agent starts.
|
||||
Handles both Aden accounts (account= routing) and local accounts
|
||||
(session-level env var injection, no account= parameter in prompt).
|
||||
"""
|
||||
provider = account["provider"]
|
||||
tools = get_tools_for_provider(provider)
|
||||
tools.append("get_account_info")
|
||||
|
||||
source = account.get("source", "aden")
|
||||
alias = account.get("alias", "unknown")
|
||||
email = account.get("identity", {}).get("email", "")
|
||||
detail = f" (email: {email})" if email else ""
|
||||
identity = account.get("identity", {})
|
||||
tools = get_tools_for_provider(provider)
|
||||
|
||||
if source == "aden":
|
||||
tools.append("get_account_info")
|
||||
email = identity.get("email", "")
|
||||
detail = f" (email: {email})" if email else ""
|
||||
_configure_aden_node(runner, provider, alias, detail, tools)
|
||||
else:
|
||||
status = account.get("status", "unknown")
|
||||
_activate_local_account(provider, alias)
|
||||
_configure_local_node(runner, provider, alias, identity, tools, status)
|
||||
|
||||
|
||||
def _activate_local_account(credential_id: str, alias: str) -> None:
|
||||
"""Inject a named local account's key into the session environment.
|
||||
|
||||
Handles three cases:
|
||||
1. Named account in LocalCredentialRegistry (new format: {credential_id}/{alias})
|
||||
2. Old flat credential in EncryptedFileStorage (id == credential_id, no alias)
|
||||
3. Env var already set — skip injection (nothing to do)
|
||||
"""
|
||||
import os
|
||||
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
|
||||
# Collect specs for this credential (handles grouped credentials too)
|
||||
group_specs = [
|
||||
(cred_name, spec)
|
||||
for cred_name, spec in CREDENTIAL_SPECS.items()
|
||||
if spec.credential_group == credential_id
|
||||
or spec.credential_id == credential_id
|
||||
or cred_name == credential_id
|
||||
]
|
||||
# Deduplicate — credential_id and credential_group may both match the same spec
|
||||
seen_env_vars: set[str] = set()
|
||||
|
||||
try:
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
from framework.credentials.storage import EncryptedFileStorage
|
||||
|
||||
registry = LocalCredentialRegistry.default()
|
||||
flat_storage = EncryptedFileStorage()
|
||||
|
||||
for _cred_name, spec in group_specs:
|
||||
if spec.env_var in seen_env_vars:
|
||||
continue
|
||||
# If env var is already set, nothing to do for this one
|
||||
if os.environ.get(spec.env_var):
|
||||
seen_env_vars.add(spec.env_var)
|
||||
continue
|
||||
|
||||
seen_env_vars.add(spec.env_var)
|
||||
|
||||
# Determine key name based on spec
|
||||
key_name = "api_key"
|
||||
if spec.credential_group and "cse" in spec.env_var.lower():
|
||||
key_name = "cse_id"
|
||||
|
||||
key: str | None = None
|
||||
|
||||
# 1. Try named account in registry (new format)
|
||||
if alias != "default":
|
||||
key = registry.get_key(credential_id, alias, key_name)
|
||||
else:
|
||||
# For "default" alias, check registry first, then fall back to flat store
|
||||
key = registry.get_key(credential_id, "default", key_name)
|
||||
|
||||
# 2. Fall back to old flat encrypted entry (id == credential_id, no alias)
|
||||
if key is None:
|
||||
flat_cred = flat_storage.load(credential_id)
|
||||
if flat_cred is not None:
|
||||
key = flat_cred.get_key(key_name) or flat_cred.get_default_key()
|
||||
|
||||
if key:
|
||||
os.environ[spec.env_var] = key
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def _configure_aden_node(
|
||||
runner: AgentRunner,
|
||||
provider: str,
|
||||
alias: str,
|
||||
detail: str,
|
||||
tools: list[str],
|
||||
) -> None:
|
||||
for node in runner.graph.nodes:
|
||||
if node.id == "tester":
|
||||
node.tools = sorted(set(tools))
|
||||
# Update system prompt to be provider-specific
|
||||
node.system_prompt = f"""\
|
||||
You are a credential tester for the account: {provider}/{alias}{detail}
|
||||
|
||||
@@ -162,19 +339,60 @@ or any other identifier — always use the alias exactly as shown.
|
||||
"""
|
||||
break
|
||||
|
||||
# Set intro message for TUI display
|
||||
runner.intro_message = (
|
||||
f"Testing {provider}/{alias}{detail} — "
|
||||
f"{len(tools)} tools loaded. "
|
||||
f"I'll suggest a read-only API call to verify the credential works."
|
||||
"I'll suggest a read-only API call to verify the credential works."
|
||||
)
|
||||
|
||||
|
||||
def _configure_local_node(
|
||||
runner: AgentRunner,
|
||||
provider: str,
|
||||
alias: str,
|
||||
identity: dict,
|
||||
tools: list[str],
|
||||
status: str,
|
||||
) -> None:
|
||||
identity_parts = [f"{k}: {v}" for k, v in identity.items() if v]
|
||||
detail = f" ({', '.join(identity_parts)})" if identity_parts else ""
|
||||
status_note = " [key not yet validated]" if status == "unknown" else ""
|
||||
|
||||
for node in runner.graph.nodes:
|
||||
if node.id == "tester":
|
||||
node.tools = sorted(set(tools))
|
||||
node.system_prompt = f"""\
|
||||
You are a credential tester for the local API key: {provider}/{alias}{detail}{status_note}
|
||||
|
||||
# Instructions
|
||||
|
||||
1. Suggest a simple test call to verify the credential works \
|
||||
(e.g. search for "test", list items, get profile info).
|
||||
2. Execute the call when the user agrees.
|
||||
3. Report the result: success (with sample data) or failure (with error).
|
||||
4. Let the user request additional API calls to further test the credential.
|
||||
|
||||
# Rules
|
||||
|
||||
- Do NOT pass an `account` parameter — this credential is injected \
|
||||
directly into the session environment and tools read it automatically.
|
||||
- Start with read-only operations before write operations.
|
||||
- Always confirm with the user before performing write operations.
|
||||
- If a call fails, report the exact error — this helps diagnose credential issues.
|
||||
- Be concise. No emojis.
|
||||
"""
|
||||
break
|
||||
|
||||
runner.intro_message = (
|
||||
f"Testing {provider}/{alias}{detail} — "
|
||||
f"{len(tools)} tools loaded. "
|
||||
"I'll suggest a test API call to verify the credential works."
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Module-level graph variables (read by AgentRunner.load)
|
||||
# ---------------------------------------------------------------------------
|
||||
# The static node starts with minimal tools. configure_for_account() scopes
|
||||
# it to the selected provider's tools before execution.
|
||||
|
||||
nodes = [
|
||||
NodeSpec(
|
||||
@@ -192,7 +410,7 @@ nodes = [
|
||||
tools=["get_account_info"],
|
||||
system_prompt="""\
|
||||
You are a credential tester. Your job is to help the user verify that their \
|
||||
connected accounts can make real API calls.
|
||||
connected accounts and API keys can make real API calls.
|
||||
|
||||
# Startup
|
||||
|
||||
@@ -205,12 +423,11 @@ connected accounts can make real API calls.
|
||||
6. Report the result: success (with sample data) or failure (with error).
|
||||
7. Let the user request additional API calls to further test the credential.
|
||||
|
||||
# Account routing
|
||||
# Account routing (Aden accounts only)
|
||||
|
||||
IMPORTANT: Always pass the account's **alias** as the ``account`` parameter \
|
||||
when calling any tool. The alias is the routing key — never use the email or \
|
||||
any other identifier. For example, if the alias is "Timothy", call \
|
||||
``gmail_list_messages(account="Timothy", ...)``.
|
||||
IMPORTANT: For Aden-synced accounts, always pass the account's **alias** as the \
|
||||
``account`` parameter when calling any tool. For local API key accounts, do NOT \
|
||||
pass an account parameter — they are pre-injected into the session.
|
||||
|
||||
# Rules
|
||||
|
||||
@@ -231,7 +448,8 @@ terminal_nodes = [] # Forever-alive: loops until user exits
|
||||
|
||||
conversation_mode = "continuous"
|
||||
identity_prompt = (
|
||||
"You are a credential tester that verifies connected accounts can make real API calls."
|
||||
"You are a credential tester that verifies connected accounts and API keys "
|
||||
"can make real API calls."
|
||||
)
|
||||
loop_config = {
|
||||
"max_iterations": 50,
|
||||
@@ -252,7 +470,6 @@ class CredentialTesterAgent:
|
||||
accounts = agent.list_accounts()
|
||||
agent.select_account(accounts[0])
|
||||
await agent.start()
|
||||
# ... user chats via TUI or CLI ...
|
||||
await agent.stop()
|
||||
"""
|
||||
|
||||
@@ -264,7 +481,7 @@ class CredentialTesterAgent:
|
||||
self._storage_path: Path | None = None
|
||||
|
||||
def list_accounts(self) -> list[dict]:
|
||||
"""List connected accounts from the Aden credential store."""
|
||||
"""List all testable accounts (Aden + local named + env-var fallbacks)."""
|
||||
return list_connected_accounts()
|
||||
|
||||
def select_account(self, account: dict) -> None:
|
||||
@@ -272,7 +489,7 @@ class CredentialTesterAgent:
|
||||
|
||||
Args:
|
||||
account: Account dict from list_accounts() with
|
||||
provider, alias, identity keys.
|
||||
provider, alias, identity, source keys.
|
||||
"""
|
||||
self._selected_account = account
|
||||
|
||||
@@ -291,14 +508,21 @@ class CredentialTesterAgent:
|
||||
def _build_graph(self) -> GraphSpec:
|
||||
provider = self.selected_provider
|
||||
alias = self.selected_alias
|
||||
source = self._selected_account.get("source", "aden")
|
||||
identity = self._selected_account.get("identity", {})
|
||||
tools = get_tools_for_provider(provider)
|
||||
|
||||
if source == "local":
|
||||
_activate_local_account(provider, alias)
|
||||
elif source == "aden":
|
||||
tools.append("get_account_info")
|
||||
|
||||
tester_node = build_tester_node(
|
||||
provider=provider,
|
||||
alias=alias,
|
||||
tools=tools,
|
||||
identity=identity,
|
||||
source=source,
|
||||
)
|
||||
|
||||
return GraphSpec(
|
||||
|
||||
@@ -8,18 +8,38 @@ def build_tester_node(
|
||||
alias: str,
|
||||
tools: list[str],
|
||||
identity: dict[str, str],
|
||||
source: str = "aden",
|
||||
) -> NodeSpec:
|
||||
"""Build the tester node dynamically for the selected account.
|
||||
|
||||
Args:
|
||||
provider: Aden provider name (e.g. "google", "slack").
|
||||
alias: User-set alias (e.g. "Timothy").
|
||||
provider: Provider / credential name (e.g. "google", "brave_search").
|
||||
alias: User-set alias (e.g. "Timothy", "work").
|
||||
tools: Tool names available for this provider.
|
||||
identity: Identity dict (email, workspace, etc.) for context.
|
||||
source: "aden" or "local" — controls routing instructions in the prompt.
|
||||
"""
|
||||
detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
|
||||
detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
|
||||
|
||||
if source == "aden":
|
||||
routing_section = f"""\
|
||||
# Account routing
|
||||
|
||||
IMPORTANT: Always pass `account="{alias}"` when calling any tool. \
|
||||
This routes the API call to the correct credential. Never use the email \
|
||||
or any other identifier — always use the alias exactly as shown.
|
||||
"""
|
||||
else:
|
||||
routing_section = """\
|
||||
# Credential routing
|
||||
|
||||
This is a local API key credential — do NOT pass an `account` parameter. \
|
||||
The key is pre-injected into the session environment and tools read it automatically.
|
||||
"""
|
||||
|
||||
account_label = "account" if source == "aden" else "local API key"
|
||||
|
||||
return NodeSpec(
|
||||
id="tester",
|
||||
name="Credential Tester",
|
||||
@@ -34,22 +54,17 @@ def build_tester_node(
|
||||
output_keys=[],
|
||||
tools=tools,
|
||||
system_prompt=f"""\
|
||||
You are a credential tester for the account: {provider}/{alias}{detail}
|
||||
You are a credential tester for the {account_label}: {provider}/{alias}{detail}
|
||||
|
||||
Your job is to help the user verify that this credential works by making \
|
||||
real API calls using the available tools.
|
||||
|
||||
# Account routing
|
||||
|
||||
IMPORTANT: Always pass `account="{alias}"` when calling any tool. \
|
||||
This routes the API call to the correct credential. Never use the email \
|
||||
or any other identifier — always use the alias exactly as shown.
|
||||
|
||||
{routing_section}
|
||||
# Instructions
|
||||
|
||||
1. Start by greeting the user and confirming which account you're testing.
|
||||
2. Suggest a simple, safe, read-only API call to verify the credential works \
|
||||
(e.g. list messages, list channels, list contacts).
|
||||
(e.g. list messages, list channels, list contacts, search for "test").
|
||||
3. Execute the call when the user agrees.
|
||||
4. Report the result clearly: success (with sample data) or failure (with error).
|
||||
5. Let the user request additional API calls to further test the credential.
|
||||
|
||||
@@ -441,14 +441,15 @@ class GraphBuilder:
|
||||
self.session.test_cases.append(test)
|
||||
self._save_session()
|
||||
|
||||
def run_test(
|
||||
async def run_test_async(
|
||||
self,
|
||||
test: TestCase,
|
||||
executor_factory: Callable,
|
||||
) -> TestResult:
|
||||
"""
|
||||
Run a single test case.
|
||||
Run a single test case asynchronously.
|
||||
|
||||
This method is safe to call from async contexts (Jupyter, FastAPI, etc.).
|
||||
executor_factory should return a configured GraphExecutor.
|
||||
"""
|
||||
self._require_phase([BuildPhase.ADDING_NODES, BuildPhase.ADDING_EDGES, BuildPhase.TESTING])
|
||||
@@ -460,14 +461,10 @@ class GraphBuilder:
|
||||
executor = executor_factory()
|
||||
|
||||
# Run the test
|
||||
import asyncio
|
||||
|
||||
result = asyncio.run(
|
||||
executor.execute(
|
||||
graph=graph,
|
||||
goal=self.session.goal,
|
||||
input_data=test.input,
|
||||
)
|
||||
result = await executor.execute(
|
||||
graph=graph,
|
||||
goal=self.session.goal,
|
||||
input_data=test.input,
|
||||
)
|
||||
|
||||
# Check result
|
||||
@@ -497,6 +494,36 @@ class GraphBuilder:
|
||||
|
||||
return test_result
|
||||
|
||||
def run_test(
|
||||
self,
|
||||
test: TestCase,
|
||||
executor_factory: Callable,
|
||||
) -> TestResult:
|
||||
"""
|
||||
Run a single test case.
|
||||
|
||||
This is a synchronous wrapper around run_test_async().
|
||||
If called from an async context (Jupyter, FastAPI, etc.), use run_test_async() instead.
|
||||
|
||||
executor_factory should return a configured GraphExecutor.
|
||||
"""
|
||||
import asyncio
|
||||
|
||||
# Check if an event loop is already running
|
||||
# get_running_loop() returns a loop if one exists, or raises RuntimeError if none exists
|
||||
try:
|
||||
asyncio.get_running_loop()
|
||||
except RuntimeError:
|
||||
# No event loop running - safe to use asyncio.run()
|
||||
return asyncio.run(self.run_test_async(test, executor_factory))
|
||||
|
||||
# Event loop is running - cannot use asyncio.run()
|
||||
raise RuntimeError(
|
||||
"Cannot call run_test() from an async context. "
|
||||
"An event loop is already running. "
|
||||
"Please use 'await builder.run_test_async(test, executor_factory)' instead."
|
||||
)
|
||||
|
||||
def run_all_tests(self, executor_factory: Callable) -> list[TestResult]:
|
||||
"""Run all test cases."""
|
||||
results = []
|
||||
|
||||
@@ -92,6 +92,14 @@ try:
|
||||
except ImportError:
|
||||
_ADEN_AVAILABLE = False
|
||||
|
||||
# Local credential registry (named API key accounts with identity metadata)
|
||||
try:
|
||||
from .local import LocalAccountInfo, LocalCredentialRegistry
|
||||
|
||||
_LOCAL_AVAILABLE = True
|
||||
except ImportError:
|
||||
_LOCAL_AVAILABLE = False
|
||||
|
||||
__all__ = [
|
||||
# Main store
|
||||
"CredentialStore",
|
||||
@@ -133,7 +141,11 @@ __all__ = [
|
||||
"AdenCredentialClient",
|
||||
"AdenClientConfig",
|
||||
"AdenCachedStorage",
|
||||
# Local credential registry (optional - requires cryptography)
|
||||
"LocalCredentialRegistry",
|
||||
"LocalAccountInfo",
|
||||
]
|
||||
|
||||
# Track Aden availability for runtime checks
|
||||
ADEN_AVAILABLE = _ADEN_AVAILABLE
|
||||
LOCAL_AVAILABLE = _LOCAL_AVAILABLE
|
||||
|
||||
@@ -0,0 +1,31 @@
|
||||
"""
|
||||
Local credential registry — named API key accounts with identity metadata.
|
||||
|
||||
Provides feature parity with Aden OAuth credentials for locally-stored API keys:
|
||||
aliases, identity metadata, status tracking, CRUD, and health validation.
|
||||
|
||||
Usage:
|
||||
from framework.credentials.local import LocalCredentialRegistry, LocalAccountInfo
|
||||
|
||||
registry = LocalCredentialRegistry.default()
|
||||
|
||||
# Add a named account
|
||||
info, health = registry.save_account("brave_search", "work", "BSA-xxx")
|
||||
|
||||
# List all stored local accounts
|
||||
for account in registry.list_accounts():
|
||||
print(f"{account.credential_id}/{account.alias}: {account.status}")
|
||||
if account.identity.is_known:
|
||||
print(f" Identity: {account.identity.label}")
|
||||
|
||||
# Re-validate a stored account
|
||||
result = registry.validate_account("github", "personal")
|
||||
"""
|
||||
|
||||
from .models import LocalAccountInfo
|
||||
from .registry import LocalCredentialRegistry
|
||||
|
||||
__all__ = [
|
||||
"LocalAccountInfo",
|
||||
"LocalCredentialRegistry",
|
||||
]
|
||||
@@ -0,0 +1,58 @@
|
||||
"""
|
||||
Data models for the local credential registry.
|
||||
|
||||
LocalAccountInfo mirrors AdenIntegrationInfo, giving local API key credentials
|
||||
the same identity/status metadata as Aden OAuth credentials.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
|
||||
from framework.credentials.models import CredentialIdentity
|
||||
|
||||
|
||||
@dataclass
|
||||
class LocalAccountInfo:
|
||||
"""
|
||||
A locally-stored named credential account.
|
||||
|
||||
Mirrors AdenIntegrationInfo so local and Aden accounts can be treated
|
||||
uniformly in the credential tester and account selection UI.
|
||||
|
||||
Attributes:
|
||||
credential_id: The logical credential name (e.g. "brave_search", "github")
|
||||
alias: User-provided name for this account (e.g. "work", "personal")
|
||||
status: "active" | "failed" | "unknown"
|
||||
identity: Email, username, workspace, or account_id extracted from health check
|
||||
last_validated: When the key was last verified against the live API
|
||||
created_at: When this account was first stored
|
||||
"""
|
||||
|
||||
credential_id: str
|
||||
alias: str
|
||||
status: str = "unknown"
|
||||
identity: CredentialIdentity = field(default_factory=CredentialIdentity)
|
||||
last_validated: datetime | None = None
|
||||
created_at: datetime = field(default_factory=datetime.utcnow)
|
||||
|
||||
@property
|
||||
def storage_id(self) -> str:
|
||||
"""The key used in EncryptedFileStorage: '{credential_id}/{alias}'."""
|
||||
return f"{self.credential_id}/{self.alias}"
|
||||
|
||||
def to_account_dict(self) -> dict:
|
||||
"""
|
||||
Format compatible with AccountSelectionScreen and configure_for_account().
|
||||
|
||||
Same shape as Aden account dicts, with source='local' added.
|
||||
"""
|
||||
return {
|
||||
"provider": self.credential_id,
|
||||
"alias": self.alias,
|
||||
"identity": self.identity.to_dict(),
|
||||
"integration_id": None,
|
||||
"source": "local",
|
||||
"status": self.status,
|
||||
}
|
||||
@@ -0,0 +1,326 @@
|
||||
"""
|
||||
Local Credential Registry.
|
||||
|
||||
Manages named local API key accounts stored in EncryptedFileStorage.
|
||||
Mirrors the Aden integration model so local credentials have feature parity:
|
||||
aliases, identity metadata, status tracking, CRUD, and health validation.
|
||||
|
||||
Storage convention:
|
||||
{credential_id}/{alias} → CredentialObject
|
||||
e.g. "brave_search/work" → { api_key: "BSA-xxx", _alias: "work",
|
||||
_integration_type: "brave_search",
|
||||
_status: "active",
|
||||
_identity_username: "acme", ... }
|
||||
|
||||
Usage:
|
||||
registry = LocalCredentialRegistry.default()
|
||||
|
||||
# Add a new account
|
||||
info, health = registry.save_account("brave_search", "work", "BSA-xxx")
|
||||
print(info.status, info.identity.label)
|
||||
|
||||
# List all accounts
|
||||
for account in registry.list_accounts():
|
||||
print(f"{account.credential_id}/{account.alias}: {account.status}")
|
||||
|
||||
# Get the raw API key for a specific account
|
||||
key = registry.get_key("github", "personal")
|
||||
|
||||
# Re-validate a stored account
|
||||
result = registry.validate_account("github", "personal")
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from framework.credentials.models import CredentialIdentity, CredentialObject
|
||||
from framework.credentials.storage import EncryptedFileStorage
|
||||
|
||||
from .models import LocalAccountInfo
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from aden_tools.credentials.health_check import HealthCheckResult
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_SEPARATOR = "/"
|
||||
|
||||
|
||||
class LocalCredentialRegistry:
|
||||
"""
|
||||
Named local API key account store backed by EncryptedFileStorage.
|
||||
|
||||
Provides the same list/save/get/delete/validate surface as the Aden
|
||||
client, but for locally-stored API keys.
|
||||
"""
|
||||
|
||||
def __init__(self, storage: EncryptedFileStorage) -> None:
|
||||
self._storage = storage
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Listing
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def list_accounts(self, credential_id: str | None = None) -> list[LocalAccountInfo]:
|
||||
"""
|
||||
List all stored local accounts.
|
||||
|
||||
Args:
|
||||
credential_id: If given, filter to this credential type only.
|
||||
|
||||
Returns:
|
||||
List of LocalAccountInfo sorted by credential_id then alias.
|
||||
"""
|
||||
all_ids = self._storage.list_all()
|
||||
accounts: list[LocalAccountInfo] = []
|
||||
|
||||
for storage_id in all_ids:
|
||||
if _SEPARATOR not in storage_id:
|
||||
continue # Skip legacy un-aliased entries
|
||||
|
||||
try:
|
||||
cred_obj = self._storage.load(storage_id)
|
||||
except Exception as exc:
|
||||
logger.debug("Skipping unreadable credential %s: %s", storage_id, exc)
|
||||
continue
|
||||
|
||||
if cred_obj is None:
|
||||
continue
|
||||
|
||||
info = self._to_account_info(cred_obj)
|
||||
if info is None:
|
||||
continue
|
||||
|
||||
if credential_id and info.credential_id != credential_id:
|
||||
continue
|
||||
|
||||
accounts.append(info)
|
||||
|
||||
return sorted(accounts, key=lambda a: (a.credential_id, a.alias))
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Save / add
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def save_account(
|
||||
self,
|
||||
credential_id: str,
|
||||
alias: str,
|
||||
api_key: str,
|
||||
run_health_check: bool = True,
|
||||
extra_keys: dict[str, str] | None = None,
|
||||
) -> tuple[LocalAccountInfo, HealthCheckResult | None]:
|
||||
"""
|
||||
Store a named account, optionally validating it first.
|
||||
|
||||
Args:
|
||||
credential_id: Logical credential name (e.g. "brave_search").
|
||||
alias: User-chosen name (e.g. "work"). Defaults to "default".
|
||||
api_key: The raw API key / token value.
|
||||
run_health_check: If True, verify the key against the live API
|
||||
and extract identity metadata. Failure still saves with
|
||||
status="failed" so the user can re-validate later.
|
||||
extra_keys: Additional key/value pairs to store (e.g.
|
||||
cse_id for google_custom_search).
|
||||
|
||||
Returns:
|
||||
(LocalAccountInfo, HealthCheckResult | None)
|
||||
"""
|
||||
alias = alias or "default"
|
||||
health_result: HealthCheckResult | None = None
|
||||
identity: dict[str, str] = {}
|
||||
status = "active"
|
||||
|
||||
if run_health_check:
|
||||
try:
|
||||
from aden_tools.credentials.health_check import check_credential_health
|
||||
|
||||
kwargs: dict[str, Any] = {}
|
||||
if extra_keys and "cse_id" in extra_keys:
|
||||
kwargs["cse_id"] = extra_keys["cse_id"]
|
||||
|
||||
health_result = check_credential_health(credential_id, api_key, **kwargs)
|
||||
status = "active" if health_result.valid else "failed"
|
||||
identity = health_result.details.get("identity", {})
|
||||
except Exception as exc:
|
||||
logger.warning("Health check failed for %s/%s: %s", credential_id, alias, exc)
|
||||
status = "unknown"
|
||||
|
||||
storage_id = f"{credential_id}{_SEPARATOR}{alias}"
|
||||
now = datetime.now(UTC)
|
||||
|
||||
cred_obj = CredentialObject(id=storage_id)
|
||||
cred_obj.set_key("api_key", api_key)
|
||||
cred_obj.set_key("_alias", alias)
|
||||
cred_obj.set_key("_integration_type", credential_id)
|
||||
cred_obj.set_key("_status", status)
|
||||
|
||||
if extra_keys:
|
||||
for k, v in extra_keys.items():
|
||||
cred_obj.set_key(k, v)
|
||||
|
||||
if identity:
|
||||
valid_fields = set(CredentialIdentity.model_fields)
|
||||
filtered = {k: v for k, v in identity.items() if k in valid_fields}
|
||||
if filtered:
|
||||
cred_obj.set_identity(**filtered)
|
||||
|
||||
cred_obj.last_refreshed = now if run_health_check else None
|
||||
self._storage.save(cred_obj)
|
||||
|
||||
account_info = LocalAccountInfo(
|
||||
credential_id=credential_id,
|
||||
alias=alias,
|
||||
status=status,
|
||||
identity=cred_obj.identity,
|
||||
last_validated=cred_obj.last_refreshed,
|
||||
created_at=cred_obj.created_at,
|
||||
)
|
||||
return account_info, health_result
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Get
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def get_account(self, credential_id: str, alias: str) -> CredentialObject | None:
|
||||
"""Load the raw CredentialObject for a specific account."""
|
||||
return self._storage.load(f"{credential_id}{_SEPARATOR}{alias}")
|
||||
|
||||
def get_key(self, credential_id: str, alias: str, key_name: str = "api_key") -> str | None:
|
||||
"""
|
||||
Return the stored secret value for a specific account.
|
||||
|
||||
Args:
|
||||
credential_id: Logical credential name (e.g. "brave_search").
|
||||
alias: Account alias (e.g. "work").
|
||||
key_name: Key within the credential (default "api_key").
|
||||
|
||||
Returns:
|
||||
The secret value, or None if not found.
|
||||
"""
|
||||
cred = self.get_account(credential_id, alias)
|
||||
if cred is None:
|
||||
return None
|
||||
return cred.get_key(key_name)
|
||||
|
||||
def get_account_info(self, credential_id: str, alias: str) -> LocalAccountInfo | None:
|
||||
"""Load a LocalAccountInfo for a specific account."""
|
||||
cred = self.get_account(credential_id, alias)
|
||||
if cred is None:
|
||||
return None
|
||||
return self._to_account_info(cred)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Delete
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def delete_account(self, credential_id: str, alias: str) -> bool:
|
||||
"""
|
||||
Remove a stored account.
|
||||
|
||||
Returns:
|
||||
True if the account existed and was deleted, False otherwise.
|
||||
"""
|
||||
return self._storage.delete(f"{credential_id}{_SEPARATOR}{alias}")
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Validate
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def validate_account(self, credential_id: str, alias: str) -> HealthCheckResult:
|
||||
"""
|
||||
Re-run health check for a stored account and update its status.
|
||||
|
||||
Args:
|
||||
credential_id: Logical credential name.
|
||||
alias: Account alias.
|
||||
|
||||
Returns:
|
||||
HealthCheckResult from the live API check.
|
||||
|
||||
Raises:
|
||||
KeyError: If the account doesn't exist.
|
||||
"""
|
||||
from aden_tools.credentials.health_check import HealthCheckResult, check_credential_health
|
||||
|
||||
cred = self.get_account(credential_id, alias)
|
||||
if cred is None:
|
||||
raise KeyError(f"No local account found: {credential_id}/{alias}")
|
||||
|
||||
api_key = cred.get_key("api_key")
|
||||
if not api_key:
|
||||
return HealthCheckResult(valid=False, message="No api_key stored for this account")
|
||||
|
||||
try:
|
||||
kwargs: dict[str, Any] = {}
|
||||
cse_id = cred.get_key("cse_id")
|
||||
if cse_id:
|
||||
kwargs["cse_id"] = cse_id
|
||||
|
||||
result = check_credential_health(credential_id, api_key, **kwargs)
|
||||
except Exception as exc:
|
||||
result = HealthCheckResult(
|
||||
valid=False,
|
||||
message=f"Health check error: {exc}",
|
||||
details={"error": str(exc)},
|
||||
)
|
||||
|
||||
# Update status and timestamp in-place
|
||||
new_status = "active" if result.valid else "failed"
|
||||
cred.set_key("_status", new_status)
|
||||
cred.last_refreshed = datetime.now(UTC)
|
||||
|
||||
# Re-extract identity if available
|
||||
identity = result.details.get("identity", {})
|
||||
if identity:
|
||||
valid_fields = set(CredentialIdentity.model_fields)
|
||||
filtered = {k: v for k, v in identity.items() if k in valid_fields}
|
||||
if filtered:
|
||||
cred.set_identity(**filtered)
|
||||
|
||||
self._storage.save(cred)
|
||||
return result
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Factory
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
@classmethod
|
||||
def default(cls) -> LocalCredentialRegistry:
|
||||
"""Create a registry using the default encrypted storage at ~/.hive/credentials."""
|
||||
return cls(EncryptedFileStorage())
|
||||
|
||||
@classmethod
|
||||
def at_path(cls, path: str | Path) -> LocalCredentialRegistry:
|
||||
"""Create a registry using a custom storage path."""
|
||||
return cls(EncryptedFileStorage(base_path=path))
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Internals
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _to_account_info(self, cred_obj: CredentialObject) -> LocalAccountInfo | None:
|
||||
"""Build LocalAccountInfo from a CredentialObject."""
|
||||
cred_type_key = cred_obj.keys.get("_integration_type")
|
||||
if cred_type_key is None:
|
||||
return None
|
||||
cred_id = cred_type_key.get_secret_value()
|
||||
|
||||
alias_key = cred_obj.keys.get("_alias")
|
||||
alias = alias_key.get_secret_value() if alias_key else cred_obj.id.split(_SEPARATOR, 1)[-1]
|
||||
|
||||
status_key = cred_obj.keys.get("_status")
|
||||
status = status_key.get_secret_value() if status_key else "unknown"
|
||||
|
||||
return LocalAccountInfo(
|
||||
credential_id=cred_id,
|
||||
alias=alias,
|
||||
status=status,
|
||||
identity=cred_obj.identity,
|
||||
last_validated=cred_obj.last_refreshed,
|
||||
created_at=cred_obj.created_at,
|
||||
)
|
||||
@@ -14,20 +14,36 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def ensure_credential_key_env() -> None:
|
||||
"""Load HIVE_CREDENTIAL_KEY and ADEN_API_KEY from shell config if not in environment.
|
||||
"""Load credentials from shell config if not in environment.
|
||||
|
||||
The setup-credentials skill writes these to ~/.zshrc or ~/.bashrc.
|
||||
If the user hasn't sourced their config in the current shell, this reads
|
||||
them directly so the runner (and any MCP subprocesses it spawns) can:
|
||||
- Unlock the encrypted credential store (HIVE_CREDENTIAL_KEY)
|
||||
- Enable Aden OAuth sync for Google/HubSpot/etc. (ADEN_API_KEY)
|
||||
The quickstart.sh and setup-credentials skill write API keys to ~/.zshrc
|
||||
or ~/.bashrc. If the user hasn't sourced their config in the current shell,
|
||||
this reads them directly so the runner (and any MCP subprocesses) can use them.
|
||||
|
||||
Loads:
|
||||
- HIVE_CREDENTIAL_KEY (encrypted credential store)
|
||||
- ADEN_API_KEY (Aden OAuth sync)
|
||||
- All LLM API keys (ANTHROPIC_API_KEY, OPENAI_API_KEY, ZAI_API_KEY, etc.)
|
||||
"""
|
||||
try:
|
||||
from aden_tools.credentials.shell_config import check_env_var_in_shell_config
|
||||
except ImportError:
|
||||
return
|
||||
|
||||
for var_name in ("HIVE_CREDENTIAL_KEY", "ADEN_API_KEY"):
|
||||
# Core credentials that are always checked
|
||||
env_vars_to_load = ["HIVE_CREDENTIAL_KEY", "ADEN_API_KEY"]
|
||||
|
||||
# Add all LLM/tool API keys from CREDENTIAL_SPECS
|
||||
try:
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
|
||||
for spec in CREDENTIAL_SPECS.values():
|
||||
if spec.env_var and spec.env_var not in env_vars_to_load:
|
||||
env_vars_to_load.append(spec.env_var)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
for var_name in env_vars_to_load:
|
||||
if os.environ.get(var_name):
|
||||
continue
|
||||
found, value = check_env_var_in_shell_config(var_name)
|
||||
|
||||
@@ -428,7 +428,7 @@ class GraphSpec(BaseModel):
|
||||
|
||||
# Cleanup LLM for JSON extraction fallback (fast/cheap model preferred)
|
||||
# If not set, uses CEREBRAS_API_KEY -> cerebras/llama-3.3-70b or
|
||||
# ANTHROPIC_API_KEY -> claude-3-5-haiku as fallback
|
||||
# ANTHROPIC_API_KEY -> claude-haiku-4-5 as fallback
|
||||
cleanup_llm_model: str | None = None
|
||||
|
||||
# Execution limits
|
||||
|
||||
@@ -327,8 +327,10 @@ class EventLoopNode(NodeProtocol):
|
||||
f"{system_prompt}\n\n"
|
||||
f"--- Your Memory ---\n{_adapt_text}\n--- End Memory ---\n\n"
|
||||
'Maintain your memory by calling save_data("adapt.md", ...) '
|
||||
'or edit_data("adapt.md", ...) as you work. '
|
||||
"Record identity, session history, decisions, and working notes."
|
||||
'or edit_data("adapt.md", ...) as you work.\n'
|
||||
"IMMEDIATELY save: user rules about which account/identity to use, "
|
||||
"behavioral constraints, and preferences. "
|
||||
"Also record session history, decisions, and working notes."
|
||||
)
|
||||
|
||||
conversation = NodeConversation(
|
||||
@@ -2210,7 +2212,11 @@ class EventLoopNode(NodeProtocol):
|
||||
)
|
||||
prompt = (
|
||||
"Summarize this conversation so far in 2-3 sentences, "
|
||||
"preserving key decisions and results:\n\n"
|
||||
"preserving key decisions and results.\n\n"
|
||||
"IMPORTANT: Always preserve any user-stated rules, constraints, "
|
||||
"or preferences — especially which account/identity to use, "
|
||||
"formatting preferences, and behavioral instructions. "
|
||||
"These MUST appear verbatim or near-verbatim in your summary.\n\n"
|
||||
f"{messages_text}"
|
||||
)
|
||||
if tool_history:
|
||||
@@ -2227,7 +2233,9 @@ class EventLoopNode(NodeProtocol):
|
||||
response = await ctx.llm.acomplete(
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
system=(
|
||||
"Summarize conversations concisely. Always preserve the tool history section."
|
||||
"Summarize conversations concisely. Always preserve the tool "
|
||||
"history section. Always preserve user-stated rules, constraints, "
|
||||
"and account/identity preferences verbatim."
|
||||
),
|
||||
max_tokens=summary_budget,
|
||||
)
|
||||
@@ -2299,13 +2307,24 @@ class EventLoopNode(NodeProtocol):
|
||||
|
||||
# 5. Spillover files — list actual files so the LLM can load
|
||||
# them immediately instead of having to call list_data_files first.
|
||||
# Inline adapt.md (agent memory) directly — it contains user rules
|
||||
# and identity preferences that must survive emergency compaction.
|
||||
if self._config.spillover_dir:
|
||||
try:
|
||||
from pathlib import Path
|
||||
|
||||
data_dir = Path(self._config.spillover_dir)
|
||||
if data_dir.is_dir():
|
||||
files = sorted(f.name for f in data_dir.iterdir() if f.is_file())
|
||||
# Inline adapt.md content directly
|
||||
adapt_path = data_dir / "adapt.md"
|
||||
if adapt_path.is_file():
|
||||
adapt_text = adapt_path.read_text(encoding="utf-8").strip()
|
||||
if adapt_text:
|
||||
parts.append(f"AGENT MEMORY (adapt.md):\n{adapt_text}")
|
||||
|
||||
files = sorted(
|
||||
f.name for f in data_dir.iterdir() if f.is_file() and f.name != "adapt.md"
|
||||
)
|
||||
if files:
|
||||
file_list = "\n".join(f" - {f}" for f in files[:30])
|
||||
parts.append("DATA FILES (use load_data to read):\n" + file_list)
|
||||
|
||||
@@ -135,6 +135,8 @@ class GraphExecutor:
|
||||
storage_path: str | Path | None = None,
|
||||
loop_config: dict[str, Any] | None = None,
|
||||
accounts_prompt: str = "",
|
||||
accounts_data: list[dict] | None = None,
|
||||
tool_provider_map: dict[str, str] | None = None,
|
||||
):
|
||||
"""
|
||||
Initialize the executor.
|
||||
@@ -155,6 +157,8 @@ class GraphExecutor:
|
||||
storage_path: Optional base path for conversation persistence
|
||||
loop_config: Optional EventLoopNode configuration (max_iterations, etc.)
|
||||
accounts_prompt: Connected accounts block for system prompt injection
|
||||
accounts_data: Raw account data for per-node prompt generation
|
||||
tool_provider_map: Tool name to provider name mapping for account routing
|
||||
"""
|
||||
self.runtime = runtime
|
||||
self.llm = llm
|
||||
@@ -170,6 +174,8 @@ class GraphExecutor:
|
||||
self._storage_path = Path(storage_path) if storage_path else None
|
||||
self._loop_config = loop_config or {}
|
||||
self.accounts_prompt = accounts_prompt
|
||||
self.accounts_data = accounts_data
|
||||
self.tool_provider_map = tool_provider_map
|
||||
|
||||
# Initialize output cleaner
|
||||
self.cleansing_config = cleansing_config or CleansingConfig()
|
||||
@@ -1184,6 +1190,7 @@ class GraphExecutor:
|
||||
next_spec = graph.get_node(current_node_id)
|
||||
if next_spec and next_spec.node_type == "event_loop":
|
||||
from framework.graph.prompt_composer import (
|
||||
build_accounts_prompt,
|
||||
build_narrative,
|
||||
build_transition_marker,
|
||||
compose_system_prompt,
|
||||
@@ -1209,12 +1216,24 @@ class GraphExecutor:
|
||||
else _adapt_text
|
||||
)
|
||||
|
||||
# Build per-node accounts prompt for the next node
|
||||
_node_accounts = self.accounts_prompt or None
|
||||
if self.accounts_data and self.tool_provider_map:
|
||||
_node_accounts = (
|
||||
build_accounts_prompt(
|
||||
self.accounts_data,
|
||||
self.tool_provider_map,
|
||||
node_tool_names=next_spec.tools,
|
||||
)
|
||||
or None
|
||||
)
|
||||
|
||||
# Compose new system prompt (Layer 1 + 2 + 3 + accounts)
|
||||
new_system = compose_system_prompt(
|
||||
identity_prompt=getattr(graph, "identity_prompt", None),
|
||||
focus_prompt=next_spec.system_prompt,
|
||||
narrative=narrative,
|
||||
accounts_prompt=self.accounts_prompt or None,
|
||||
accounts_prompt=_node_accounts,
|
||||
)
|
||||
continuous_conversation.update_system_prompt(new_system)
|
||||
|
||||
@@ -1529,6 +1548,17 @@ class GraphExecutor:
|
||||
write_keys=node_spec.output_keys,
|
||||
)
|
||||
|
||||
# Build per-node accounts prompt (filtered to this node's tools)
|
||||
node_accounts_prompt = self.accounts_prompt
|
||||
if self.accounts_data and self.tool_provider_map:
|
||||
from framework.graph.prompt_composer import build_accounts_prompt
|
||||
|
||||
node_accounts_prompt = build_accounts_prompt(
|
||||
self.accounts_data,
|
||||
self.tool_provider_map,
|
||||
node_tool_names=node_spec.tools,
|
||||
)
|
||||
|
||||
return NodeContext(
|
||||
runtime=self.runtime,
|
||||
node_id=node_spec.id,
|
||||
@@ -1546,7 +1576,7 @@ class GraphExecutor:
|
||||
inherited_conversation=inherited_conversation,
|
||||
cumulative_output_keys=cumulative_output_keys or [],
|
||||
event_triggered=event_triggered,
|
||||
accounts_prompt=self.accounts_prompt,
|
||||
accounts_prompt=node_accounts_prompt,
|
||||
execution_id=self.runtime.execution_id,
|
||||
)
|
||||
|
||||
|
||||
@@ -44,6 +44,11 @@ class SuccessCriterion(BaseModel):
|
||||
metric: str = Field(
|
||||
description="How to measure: 'output_contains', 'output_equals', 'llm_judge', 'custom'"
|
||||
)
|
||||
# NEW: runtime evaluation type (separate from metric)
|
||||
type: str = Field(
|
||||
default="success_rate", description="Runtime evaluation type, e.g. 'success_rate'"
|
||||
)
|
||||
|
||||
target: Any = Field(description="The target value or condition")
|
||||
weight: float = Field(default=1.0, ge=0.0, le=1.0, description="Relative importance (0-1)")
|
||||
met: bool = False
|
||||
|
||||
@@ -197,7 +197,7 @@ Example format:
|
||||
|
||||
client = anthropic.Anthropic(api_key=api_key)
|
||||
message = client.messages.create(
|
||||
model="claude-3-5-haiku-20241022",
|
||||
model="claude-haiku-4-5-20251001",
|
||||
max_tokens=500,
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
)
|
||||
|
||||
@@ -591,7 +591,7 @@ class NodeResult:
|
||||
|
||||
client = anthropic.Anthropic(api_key=api_key)
|
||||
message = client.messages.create(
|
||||
model="claude-3-5-haiku-20241022",
|
||||
model="claude-haiku-4-5-20251001",
|
||||
max_tokens=200,
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
)
|
||||
|
||||
@@ -34,29 +34,105 @@ def _with_datetime(prompt: str) -> str:
|
||||
return f"{prompt}\n\n{stamp}" if prompt else stamp
|
||||
|
||||
|
||||
def build_accounts_prompt(accounts: list[dict[str, Any]]) -> str:
|
||||
def build_accounts_prompt(
|
||||
accounts: list[dict[str, Any]],
|
||||
tool_provider_map: dict[str, str] | None = None,
|
||||
node_tool_names: list[str] | None = None,
|
||||
) -> str:
|
||||
"""Build a prompt section describing connected accounts.
|
||||
|
||||
When tool_provider_map is provided, produces structured output grouped
|
||||
by provider with tool mapping, so the LLM knows which ``account`` value
|
||||
to pass to which tool.
|
||||
|
||||
When node_tool_names is also provided, filters to only show providers
|
||||
whose tools overlap with the node's tool list.
|
||||
|
||||
Args:
|
||||
accounts: List of account info dicts from CredentialStoreAdapter.get_all_account_info().
|
||||
accounts: List of account info dicts from
|
||||
CredentialStoreAdapter.get_all_account_info().
|
||||
tool_provider_map: Mapping of tool_name -> provider_name
|
||||
(e.g. {"gmail_list_messages": "google"}).
|
||||
node_tool_names: Tool names available to the current node.
|
||||
When provided, only providers with matching tools are shown.
|
||||
|
||||
Returns:
|
||||
Formatted accounts block, or empty string if no accounts.
|
||||
"""
|
||||
if not accounts:
|
||||
return ""
|
||||
lines = [
|
||||
"Connected accounts (use the alias as the `account` parameter "
|
||||
"when calling tools to target a specific account):"
|
||||
]
|
||||
|
||||
# Flat format (backward compat) when no tool mapping provided
|
||||
if tool_provider_map is None:
|
||||
lines = [
|
||||
"Connected accounts (use the alias as the `account` parameter "
|
||||
"when calling tools to target a specific account):"
|
||||
]
|
||||
for acct in accounts:
|
||||
provider = acct.get("provider", "unknown")
|
||||
alias = acct.get("alias", "unknown")
|
||||
identity = acct.get("identity", {})
|
||||
detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
|
||||
detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
|
||||
lines.append(f"- {provider}/{alias}{detail}")
|
||||
return "\n".join(lines)
|
||||
|
||||
# --- Structured format: group by provider with tool mapping ---
|
||||
|
||||
# Invert tool_provider_map to provider -> [tools]
|
||||
provider_tools: dict[str, list[str]] = {}
|
||||
for tool_name, provider in tool_provider_map.items():
|
||||
provider_tools.setdefault(provider, []).append(tool_name)
|
||||
|
||||
# Filter to relevant providers based on node tools
|
||||
node_tool_set = set(node_tool_names) if node_tool_names else None
|
||||
|
||||
# Group accounts by provider
|
||||
provider_accounts: dict[str, list[dict[str, Any]]] = {}
|
||||
for acct in accounts:
|
||||
provider = acct.get("provider", "unknown")
|
||||
alias = acct.get("alias", "unknown")
|
||||
identity = acct.get("identity", {})
|
||||
detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
|
||||
detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
|
||||
lines.append(f"- {provider}/{alias}{detail}")
|
||||
return "\n".join(lines)
|
||||
provider_accounts.setdefault(provider, []).append(acct)
|
||||
|
||||
sections: list[str] = ["Connected accounts:"]
|
||||
|
||||
for provider, acct_list in provider_accounts.items():
|
||||
tools_for_provider = sorted(provider_tools.get(provider, []))
|
||||
|
||||
# If node tools specified, only show providers with overlapping tools
|
||||
if node_tool_set is not None:
|
||||
relevant_tools = [t for t in tools_for_provider if t in node_tool_set]
|
||||
if not relevant_tools:
|
||||
continue
|
||||
tools_for_provider = relevant_tools
|
||||
|
||||
# Local-only providers: tools read from env vars, no account= routing
|
||||
all_local = all(a.get("source") == "local" for a in acct_list)
|
||||
|
||||
# Provider header with tools
|
||||
display_name = provider.replace("_", " ").title()
|
||||
if tools_for_provider and not all_local:
|
||||
tools_str = ", ".join(tools_for_provider)
|
||||
sections.append(f'\n{display_name} (use account="<alias>" with: {tools_str}):')
|
||||
elif tools_for_provider and all_local:
|
||||
tools_str = ", ".join(tools_for_provider)
|
||||
sections.append(f"\n{display_name} (tools: {tools_str}):")
|
||||
else:
|
||||
sections.append(f"\n{display_name}:")
|
||||
|
||||
# Account entries
|
||||
for acct in acct_list:
|
||||
alias = acct.get("alias", "unknown")
|
||||
identity = acct.get("identity", {})
|
||||
detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
|
||||
detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
|
||||
source_tag = " [local]" if acct.get("source") == "local" else ""
|
||||
sections.append(f" - {provider}/{alias}{detail}{source_tag}")
|
||||
|
||||
# If filtering removed all providers, return empty
|
||||
if len(sections) <= 1:
|
||||
return ""
|
||||
|
||||
return "\n".join(sections)
|
||||
|
||||
|
||||
def compose_system_prompt(
|
||||
|
||||
@@ -3338,6 +3338,11 @@ def store_credential(
|
||||
str, "Logical credential name (e.g., 'hubspot', 'brave_search', 'anthropic')"
|
||||
],
|
||||
credential_value: Annotated[str, "The secret value to store (API key, token, etc.)"],
|
||||
alias: Annotated[
|
||||
str,
|
||||
"Named alias for this account (e.g., 'work', 'personal'). Defaults to 'default'. "
|
||||
"Use aliases to store multiple accounts for the same service.",
|
||||
] = "default",
|
||||
key_name: Annotated[
|
||||
str, "Key name within the credential (e.g., 'api_key', 'access_token')"
|
||||
] = "api_key",
|
||||
@@ -3347,38 +3352,42 @@ def store_credential(
|
||||
Store a credential securely in the local encrypted store at ~/.hive/credentials.
|
||||
|
||||
Uses Fernet encryption (AES-128-CBC + HMAC). Requires HIVE_CREDENTIAL_KEY env var.
|
||||
|
||||
Credentials are stored as {credential_name}/{alias}, allowing multiple named accounts
|
||||
per service (e.g., 'brave_search/work', 'brave_search/personal').
|
||||
A health check is run automatically to validate the key and extract identity metadata.
|
||||
"""
|
||||
try:
|
||||
from pydantic import SecretStr
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
|
||||
from framework.credentials import CredentialKey, CredentialObject
|
||||
|
||||
store = _get_credential_store()
|
||||
|
||||
if not display_name:
|
||||
display_name = credential_name.replace("_", " ").title()
|
||||
|
||||
cred = CredentialObject(
|
||||
id=credential_name,
|
||||
name=display_name,
|
||||
keys={
|
||||
key_name: CredentialKey(
|
||||
name=key_name,
|
||||
value=SecretStr(credential_value),
|
||||
)
|
||||
},
|
||||
registry = LocalCredentialRegistry.default()
|
||||
info, health_result = registry.save_account(
|
||||
credential_id=credential_name,
|
||||
alias=alias,
|
||||
api_key=credential_value,
|
||||
run_health_check=True,
|
||||
)
|
||||
store.save_credential(cred)
|
||||
|
||||
return json.dumps(
|
||||
{
|
||||
"success": True,
|
||||
"credential": credential_name,
|
||||
"key": key_name,
|
||||
"location": "~/.hive/credentials",
|
||||
"encrypted": True,
|
||||
result: dict = {
|
||||
"success": True,
|
||||
"credential": credential_name,
|
||||
"alias": alias,
|
||||
"storage_id": info.storage_id,
|
||||
"status": info.status,
|
||||
"location": "~/.hive/credentials",
|
||||
"encrypted": True,
|
||||
}
|
||||
|
||||
if health_result is not None:
|
||||
result["health_check"] = {
|
||||
"valid": health_result.valid,
|
||||
"message": health_result.message,
|
||||
}
|
||||
)
|
||||
identity = info.identity.to_dict()
|
||||
if identity:
|
||||
result["identity"] = identity
|
||||
|
||||
return json.dumps(result)
|
||||
except Exception as e:
|
||||
return json.dumps({"success": False, "error": str(e)})
|
||||
|
||||
@@ -3388,26 +3397,28 @@ def list_stored_credentials() -> str:
|
||||
"""
|
||||
List all credentials currently stored in the local encrypted store.
|
||||
|
||||
Returns credential IDs and metadata (never returns secret values).
|
||||
Returns credential IDs, aliases, status, and identity metadata (never returns secret values).
|
||||
"""
|
||||
try:
|
||||
store = _get_credential_store()
|
||||
credential_ids = store.list_credentials()
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
|
||||
registry = LocalCredentialRegistry.default()
|
||||
accounts = registry.list_accounts()
|
||||
|
||||
credentials = []
|
||||
for cred_id in credential_ids:
|
||||
try:
|
||||
cred = store.get_credential(cred_id)
|
||||
credentials.append(
|
||||
{
|
||||
"id": cred.id,
|
||||
"name": cred.name,
|
||||
"keys": list(cred.keys.keys()),
|
||||
"created_at": cred.created_at.isoformat() if cred.created_at else None,
|
||||
}
|
||||
)
|
||||
except Exception:
|
||||
credentials.append({"id": cred_id, "error": "Could not load"})
|
||||
for info in accounts:
|
||||
entry: dict = {
|
||||
"credential_id": info.credential_id,
|
||||
"alias": info.alias,
|
||||
"storage_id": info.storage_id,
|
||||
"status": info.status,
|
||||
"created_at": info.created_at.isoformat() if info.created_at else None,
|
||||
"last_validated": info.last_validated.isoformat() if info.last_validated else None,
|
||||
}
|
||||
identity = info.identity.to_dict()
|
||||
if identity:
|
||||
entry["identity"] = identity
|
||||
credentials.append(entry)
|
||||
|
||||
return json.dumps(
|
||||
{
|
||||
@@ -3424,26 +3435,75 @@ def list_stored_credentials() -> str:
|
||||
@mcp.tool()
|
||||
def delete_stored_credential(
|
||||
credential_name: Annotated[str, "Logical credential name to delete (e.g., 'hubspot')"],
|
||||
alias: Annotated[
|
||||
str,
|
||||
"Alias of the account to delete (e.g., 'work', 'personal'). Defaults to 'default'.",
|
||||
] = "default",
|
||||
) -> str:
|
||||
"""
|
||||
Delete a credential from the local encrypted store.
|
||||
"""
|
||||
try:
|
||||
store = _get_credential_store()
|
||||
deleted = store.delete_credential(credential_name)
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
|
||||
registry = LocalCredentialRegistry.default()
|
||||
storage_id = f"{credential_name}/{alias}"
|
||||
deleted = registry.delete_account(credential_name, alias)
|
||||
return json.dumps(
|
||||
{
|
||||
"success": deleted,
|
||||
"credential": credential_name,
|
||||
"message": f"Credential '{credential_name}' deleted"
|
||||
"alias": alias,
|
||||
"storage_id": storage_id,
|
||||
"message": f"Credential '{storage_id}' deleted"
|
||||
if deleted
|
||||
else f"Credential '{credential_name}' not found",
|
||||
else f"Credential '{storage_id}' not found",
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
return json.dumps({"success": False, "error": str(e)})
|
||||
|
||||
|
||||
@mcp.tool()
|
||||
def validate_credential(
|
||||
credential_name: Annotated[
|
||||
str, "Logical credential name to validate (e.g., 'brave_search', 'github')"
|
||||
],
|
||||
alias: Annotated[
|
||||
str,
|
||||
"Alias of the account to validate (e.g., 'work', 'personal'). Defaults to 'default'.",
|
||||
] = "default",
|
||||
) -> str:
|
||||
"""
|
||||
Re-run health check for a stored credential and update its status.
|
||||
|
||||
Makes a live API call to verify the credential is still valid and updates
|
||||
the stored status and last_validated timestamp.
|
||||
"""
|
||||
try:
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
|
||||
registry = LocalCredentialRegistry.default()
|
||||
result = registry.validate_account(credential_name, alias)
|
||||
|
||||
response: dict = {
|
||||
"credential": credential_name,
|
||||
"alias": alias,
|
||||
"storage_id": f"{credential_name}/{alias}",
|
||||
"valid": result.valid,
|
||||
"status": "active" if result.valid else "failed",
|
||||
"message": result.message,
|
||||
}
|
||||
|
||||
identity = result.details.get("identity") if result.details else None
|
||||
if identity:
|
||||
response["identity"] = identity
|
||||
|
||||
return json.dumps(response)
|
||||
except Exception as e:
|
||||
return json.dumps({"success": False, "error": str(e)})
|
||||
|
||||
|
||||
@mcp.tool()
|
||||
def verify_credentials(
|
||||
agent_path: Annotated[str, "Path to the exported agent directory (e.g., 'exports/my-agent')"],
|
||||
|
||||
@@ -1047,7 +1047,7 @@ Output ONLY valid JSON, no explanation:"""
|
||||
|
||||
try:
|
||||
message = client.messages.create(
|
||||
model="claude-3-5-haiku-20241022", # Fast and cheap
|
||||
model="claude-haiku-4-5-20251001", # Fast and cheap
|
||||
max_tokens=500,
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
)
|
||||
|
||||
@@ -783,31 +783,38 @@ class AgentRunner:
|
||||
extra_headers={"authorization": f"Bearer {api_key}"},
|
||||
)
|
||||
else:
|
||||
# Fall back to environment variable
|
||||
# First check api_key_env_var from config (set by quickstart)
|
||||
api_key_env = llm_config.get("api_key_env_var") or self._get_api_key_env_var(
|
||||
self.model
|
||||
)
|
||||
if api_key_env and os.environ.get(api_key_env):
|
||||
# Local models (e.g. Ollama) don't need an API key
|
||||
if self._is_local_model(self.model):
|
||||
self._llm = LiteLLMProvider(
|
||||
model=self.model,
|
||||
api_key=os.environ[api_key_env],
|
||||
api_base=api_base,
|
||||
)
|
||||
else:
|
||||
# Fall back to credential store
|
||||
api_key = self._get_api_key_from_credential_store()
|
||||
if api_key:
|
||||
# Fall back to environment variable
|
||||
# First check api_key_env_var from config (set by quickstart)
|
||||
api_key_env = llm_config.get("api_key_env_var") or self._get_api_key_env_var(
|
||||
self.model
|
||||
)
|
||||
if api_key_env and os.environ.get(api_key_env):
|
||||
self._llm = LiteLLMProvider(
|
||||
model=self.model, api_key=api_key, api_base=api_base
|
||||
model=self.model,
|
||||
api_key=os.environ[api_key_env],
|
||||
api_base=api_base,
|
||||
)
|
||||
# Set env var so downstream code (e.g. cleanup LLM in
|
||||
# node._extract_json) can also find it
|
||||
if api_key_env:
|
||||
os.environ[api_key_env] = api_key
|
||||
elif api_key_env:
|
||||
print(f"Warning: {api_key_env} not set. LLM calls will fail.")
|
||||
print(f"Set it with: export {api_key_env}=your-api-key")
|
||||
else:
|
||||
# Fall back to credential store
|
||||
api_key = self._get_api_key_from_credential_store()
|
||||
if api_key:
|
||||
self._llm = LiteLLMProvider(
|
||||
model=self.model, api_key=api_key, api_base=api_base
|
||||
)
|
||||
# Set env var so downstream code (e.g. cleanup LLM in
|
||||
# node._extract_json) can also find it
|
||||
if api_key_env:
|
||||
os.environ[api_key_env] = api_key
|
||||
elif api_key_env:
|
||||
print(f"Warning: {api_key_env} not set. LLM calls will fail.")
|
||||
print(f"Set it with: export {api_key_env}=your-api-key")
|
||||
|
||||
# Fail fast if the agent needs an LLM but none was configured
|
||||
if self._llm is None:
|
||||
@@ -815,6 +822,12 @@ class AgentRunner:
|
||||
if has_llm_nodes:
|
||||
from framework.credentials.models import CredentialError
|
||||
|
||||
if self._is_local_model(self.model):
|
||||
raise CredentialError(
|
||||
f"Failed to initialize LLM for local model '{self.model}'. "
|
||||
f"Ensure your local LLM server is running "
|
||||
f"(e.g. 'ollama serve' for Ollama)."
|
||||
)
|
||||
api_key_env = self._get_api_key_env_var(self.model)
|
||||
hint = (
|
||||
f"Set it with: export {api_key_env}=your-api-key"
|
||||
@@ -829,19 +842,28 @@ class AgentRunner:
|
||||
|
||||
# Collect connected account info for system prompt injection
|
||||
accounts_prompt = ""
|
||||
accounts_data: list[dict] | None = None
|
||||
tool_provider_map: dict[str, str] | None = None
|
||||
try:
|
||||
from aden_tools.credentials.store_adapter import CredentialStoreAdapter
|
||||
|
||||
adapter = CredentialStoreAdapter.default()
|
||||
accounts = adapter.get_all_account_info()
|
||||
if accounts:
|
||||
accounts_data = adapter.get_all_account_info()
|
||||
tool_provider_map = adapter.get_tool_provider_map()
|
||||
if accounts_data:
|
||||
from framework.graph.prompt_composer import build_accounts_prompt
|
||||
|
||||
accounts_prompt = build_accounts_prompt(accounts)
|
||||
accounts_prompt = build_accounts_prompt(accounts_data, tool_provider_map)
|
||||
except Exception:
|
||||
pass # Best-effort — agent works without account info
|
||||
|
||||
self._setup_agent_runtime(tools, tool_executor, accounts_prompt=accounts_prompt)
|
||||
self._setup_agent_runtime(
|
||||
tools,
|
||||
tool_executor,
|
||||
accounts_prompt=accounts_prompt,
|
||||
accounts_data=accounts_data,
|
||||
tool_provider_map=tool_provider_map,
|
||||
)
|
||||
|
||||
def _get_api_key_env_var(self, model: str) -> str | None:
|
||||
"""Get the environment variable name for the API key based on model name."""
|
||||
@@ -861,8 +883,8 @@ class AgentRunner:
|
||||
return "MISTRAL_API_KEY"
|
||||
elif model_lower.startswith("groq/"):
|
||||
return "GROQ_API_KEY"
|
||||
elif model_lower.startswith("ollama/"):
|
||||
return None # Ollama doesn't need an API key (local)
|
||||
elif self._is_local_model(model_lower):
|
||||
return None # Local models don't need an API key
|
||||
elif model_lower.startswith("azure/"):
|
||||
return "AZURE_API_KEY"
|
||||
elif model_lower.startswith("cohere/"):
|
||||
@@ -902,8 +924,29 @@ class AgentRunner:
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _is_local_model(model: str) -> bool:
|
||||
"""Check if a model is a local model that doesn't require an API key.
|
||||
|
||||
Local providers like Ollama run on the user's machine and do not
|
||||
need any authentication credentials.
|
||||
"""
|
||||
LOCAL_PREFIXES = (
|
||||
"ollama/",
|
||||
"ollama_chat/",
|
||||
"vllm/",
|
||||
"lm_studio/",
|
||||
"llamacpp/",
|
||||
)
|
||||
return model.lower().startswith(LOCAL_PREFIXES)
|
||||
|
||||
def _setup_agent_runtime(
|
||||
self, tools: list, tool_executor: Callable | None, accounts_prompt: str = ""
|
||||
self,
|
||||
tools: list,
|
||||
tool_executor: Callable | None,
|
||||
accounts_prompt: str = "",
|
||||
accounts_data: list[dict] | None = None,
|
||||
tool_provider_map: dict[str, str] | None = None,
|
||||
) -> None:
|
||||
"""Set up multi-entry-point execution using AgentRuntime."""
|
||||
# Convert AsyncEntryPointSpec to EntryPointSpec for AgentRuntime
|
||||
@@ -976,6 +1019,8 @@ class AgentRunner:
|
||||
config=runtime_config,
|
||||
graph_id=self.graph.id or self.agent_path.name,
|
||||
accounts_prompt=accounts_prompt,
|
||||
accounts_data=accounts_data,
|
||||
tool_provider_map=tool_provider_map,
|
||||
)
|
||||
|
||||
# Pass intro_message through for TUI display
|
||||
|
||||
@@ -128,6 +128,8 @@ class AgentRuntime:
|
||||
checkpoint_config: CheckpointConfig | None = None,
|
||||
graph_id: str | None = None,
|
||||
accounts_prompt: str = "",
|
||||
accounts_data: list[dict] | None = None,
|
||||
tool_provider_map: dict[str, str] | None = None,
|
||||
):
|
||||
"""
|
||||
Initialize agent runtime.
|
||||
@@ -144,12 +146,15 @@ class AgentRuntime:
|
||||
checkpoint_config: Optional checkpoint configuration for resumable sessions
|
||||
graph_id: Optional identifier for the primary graph (defaults to "primary")
|
||||
accounts_prompt: Connected accounts block for system prompt injection
|
||||
accounts_data: Raw account data for per-node prompt generation
|
||||
tool_provider_map: Tool name to provider name mapping for account routing
|
||||
"""
|
||||
self.graph = graph
|
||||
self.goal = goal
|
||||
self._config = config or AgentRuntimeConfig()
|
||||
self._runtime_log_store = runtime_log_store
|
||||
self._checkpoint_config = checkpoint_config
|
||||
self.accounts_prompt = accounts_prompt
|
||||
|
||||
# Primary graph identity
|
||||
self._graph_id: str = graph_id or "primary"
|
||||
@@ -182,6 +187,8 @@ class AgentRuntime:
|
||||
self._tools = tools or []
|
||||
self._tool_executor = tool_executor
|
||||
self._accounts_prompt = accounts_prompt
|
||||
self._accounts_data = accounts_data
|
||||
self._tool_provider_map = tool_provider_map
|
||||
|
||||
# Entry points and streams (primary graph)
|
||||
self._entry_points: dict[str, EntryPointSpec] = {}
|
||||
@@ -278,6 +285,8 @@ class AgentRuntime:
|
||||
checkpoint_config=self._checkpoint_config,
|
||||
graph_id=self._graph_id,
|
||||
accounts_prompt=self._accounts_prompt,
|
||||
accounts_data=self._accounts_data,
|
||||
tool_provider_map=self._tool_provider_map,
|
||||
)
|
||||
await stream.start()
|
||||
self._streams[ep_id] = stream
|
||||
@@ -752,6 +761,8 @@ class AgentRuntime:
|
||||
checkpoint_config=self._checkpoint_config,
|
||||
graph_id=graph_id,
|
||||
accounts_prompt=self._accounts_prompt,
|
||||
accounts_data=self._accounts_data,
|
||||
tool_provider_map=self._tool_provider_map,
|
||||
)
|
||||
if self._running:
|
||||
await stream.start()
|
||||
@@ -1306,6 +1317,8 @@ def create_agent_runtime(
|
||||
checkpoint_config: CheckpointConfig | None = None,
|
||||
graph_id: str | None = None,
|
||||
accounts_prompt: str = "",
|
||||
accounts_data: list[dict] | None = None,
|
||||
tool_provider_map: dict[str, str] | None = None,
|
||||
) -> AgentRuntime:
|
||||
"""
|
||||
Create and configure an AgentRuntime with entry points.
|
||||
@@ -1329,6 +1342,8 @@ def create_agent_runtime(
|
||||
checkpoint_config: Optional checkpoint configuration for resumable sessions.
|
||||
If None, uses default checkpointing behavior.
|
||||
graph_id: Optional identifier for the primary graph (defaults to "primary").
|
||||
accounts_data: Raw account data for per-node prompt generation.
|
||||
tool_provider_map: Tool name to provider name mapping for account routing.
|
||||
|
||||
Returns:
|
||||
Configured AgentRuntime (not yet started)
|
||||
@@ -1352,6 +1367,8 @@ def create_agent_runtime(
|
||||
checkpoint_config=checkpoint_config,
|
||||
graph_id=graph_id,
|
||||
accounts_prompt=accounts_prompt,
|
||||
accounts_data=accounts_data,
|
||||
tool_provider_map=tool_provider_map,
|
||||
)
|
||||
|
||||
for spec in entry_points:
|
||||
|
||||
@@ -166,6 +166,8 @@ class ExecutionStream:
|
||||
checkpoint_config: CheckpointConfig | None = None,
|
||||
graph_id: str | None = None,
|
||||
accounts_prompt: str = "",
|
||||
accounts_data: list[dict] | None = None,
|
||||
tool_provider_map: dict[str, str] | None = None,
|
||||
):
|
||||
"""
|
||||
Initialize execution stream.
|
||||
@@ -187,6 +189,8 @@ class ExecutionStream:
|
||||
checkpoint_config: Optional checkpoint configuration for resumable sessions
|
||||
graph_id: Optional graph identifier for multi-graph sessions
|
||||
accounts_prompt: Connected accounts block for system prompt injection
|
||||
accounts_data: Raw account data for per-node prompt generation
|
||||
tool_provider_map: Tool name to provider name mapping for account routing
|
||||
"""
|
||||
self.stream_id = stream_id
|
||||
self.entry_spec = entry_spec
|
||||
@@ -206,6 +210,8 @@ class ExecutionStream:
|
||||
self._checkpoint_config = checkpoint_config
|
||||
self._session_store = session_store
|
||||
self._accounts_prompt = accounts_prompt
|
||||
self._accounts_data = accounts_data
|
||||
self._tool_provider_map = tool_provider_map
|
||||
|
||||
# Create stream-scoped runtime
|
||||
self._runtime = StreamRuntime(
|
||||
@@ -486,6 +492,8 @@ class ExecutionStream:
|
||||
runtime_logger=runtime_logger,
|
||||
loop_config=self.graph.loop_config,
|
||||
accounts_prompt=self._accounts_prompt,
|
||||
accounts_data=self._accounts_data,
|
||||
tool_provider_map=self._tool_provider_map,
|
||||
)
|
||||
# Track executor so inject_input() can reach EventLoopNode instances
|
||||
self._active_executors[execution_id] = executor
|
||||
|
||||
@@ -313,7 +313,6 @@ class OutcomeAggregator:
|
||||
async def _evaluate_criterion(self, criterion: Any) -> CriterionStatus:
|
||||
"""
|
||||
Evaluate a single success criterion.
|
||||
|
||||
This is a heuristic evaluation based on decision outcomes.
|
||||
More sophisticated evaluation can be added per criterion type.
|
||||
"""
|
||||
@@ -325,6 +324,11 @@ class OutcomeAggregator:
|
||||
evidence=[],
|
||||
)
|
||||
|
||||
# Guard: only apply this heuristic to success-rate criteria
|
||||
criterion_type = getattr(criterion, "type", "success_rate")
|
||||
if criterion_type != "success_rate":
|
||||
return status
|
||||
|
||||
# Get relevant decisions (those mentioning this criterion or related intents)
|
||||
relevant_decisions = [
|
||||
d
|
||||
@@ -341,13 +345,17 @@ class OutcomeAggregator:
|
||||
outcomes = [d.outcome for d in relevant_decisions if d.outcome is not None]
|
||||
if outcomes:
|
||||
success_count = sum(1 for o in outcomes if o.success)
|
||||
|
||||
# Progress is computed as raw success rate of decision outcomes.
|
||||
status.progress = success_count / len(outcomes)
|
||||
|
||||
# Add evidence
|
||||
for d in relevant_decisions[:5]: # Limit evidence
|
||||
if d.outcome:
|
||||
evidence = (
|
||||
f"{d.decision.intent}: {'success' if d.outcome.success else 'failed'}"
|
||||
f"decision_id={d.decision.id}, "
|
||||
f"intent={d.decision.intent}, "
|
||||
f"result={'success' if d.outcome.success else 'failed'}"
|
||||
)
|
||||
status.evidence.append(evidence)
|
||||
|
||||
|
||||
@@ -841,9 +841,74 @@ class AdenTUI(App):
|
||||
if result is None:
|
||||
self.exit()
|
||||
return
|
||||
self._handle_picker_result(result)
|
||||
|
||||
# Show Get Started tab on initial launch
|
||||
self.push_screen(
|
||||
AgentPickerScreen(agents, show_get_started=True),
|
||||
callback=_on_initial_pick,
|
||||
)
|
||||
|
||||
def _handle_picker_result(self, result: str) -> None:
|
||||
"""Handle the result from the agent picker, including Get Started actions."""
|
||||
if result.startswith("action:"):
|
||||
action = result.removeprefix("action:")
|
||||
if action == "run_examples":
|
||||
# Switch to Examples tab by re-opening picker focused on examples
|
||||
self._show_agent_picker_tab("examples")
|
||||
elif action == "run_existing":
|
||||
# Switch to Your Agents tab
|
||||
self._show_agent_picker_tab("your-agents")
|
||||
elif action == "build_edit":
|
||||
# Launch agent builder guidance
|
||||
self._show_build_edit_message()
|
||||
else:
|
||||
# Regular agent path - load it
|
||||
self._do_load_agent(result)
|
||||
|
||||
self.push_screen(AgentPickerScreen(agents), callback=_on_initial_pick)
|
||||
def _show_agent_picker_tab(self, tab_id: str) -> None:
|
||||
"""Show the agent picker focused on a specific tab (no Get Started)."""
|
||||
from framework.tui.screens.agent_picker import AgentPickerScreen, discover_agents
|
||||
|
||||
agents = discover_agents()
|
||||
if not agents:
|
||||
self.notify("No agents found", severity="error", timeout=5)
|
||||
return
|
||||
|
||||
def _on_pick(result: str | None) -> None:
|
||||
if result is None:
|
||||
self.exit()
|
||||
return
|
||||
if result.startswith("action:"):
|
||||
# Shouldn't happen but handle gracefully
|
||||
self._handle_picker_result(result)
|
||||
else:
|
||||
self._do_load_agent(result)
|
||||
|
||||
screen = AgentPickerScreen(agents, show_get_started=False)
|
||||
|
||||
def _focus_tab() -> None:
|
||||
try:
|
||||
tabbed = screen.query_one(
|
||||
"TabbedContent", expect_type=type(screen.query_one("TabbedContent"))
|
||||
)
|
||||
tabbed.active = tab_id
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
self.push_screen(screen, callback=_on_pick)
|
||||
self.call_later(_focus_tab)
|
||||
|
||||
def _show_build_edit_message(self) -> None:
|
||||
"""Show guidance for building or editing agents."""
|
||||
self.notify(
|
||||
"To build or edit agents, use 'hive build' from the terminal "
|
||||
"or run Claude Code with the /hive skill.",
|
||||
severity="information",
|
||||
timeout=10,
|
||||
)
|
||||
# Re-show picker so user can still select an agent
|
||||
self._show_agent_picker_initial()
|
||||
|
||||
def action_show_agent_picker(self) -> None:
|
||||
"""Open the agent picker (Ctrl+A or /agents)."""
|
||||
|
||||
@@ -0,0 +1,13 @@
|
||||
"""TUI screens package."""
|
||||
|
||||
from .account_selection import AccountSelectionScreen
|
||||
from .add_local_credential import AddLocalCredentialScreen
|
||||
from .agent_picker import AgentPickerScreen
|
||||
from .credential_setup import CredentialSetupScreen
|
||||
|
||||
__all__ = [
|
||||
"AccountSelectionScreen",
|
||||
"AddLocalCredentialScreen",
|
||||
"AgentPickerScreen",
|
||||
"CredentialSetupScreen",
|
||||
]
|
||||
|
||||
@@ -66,16 +66,32 @@ class AccountSelectionScreen(ModalScreen[dict | None]):
|
||||
id="acct-subtitle",
|
||||
)
|
||||
option_list = OptionList(id="acct-list")
|
||||
for i, acct in enumerate(self._accounts):
|
||||
# Group: Aden accounts first, then local
|
||||
aden = [a for a in self._accounts if a.get("source") != "local"]
|
||||
local = [a for a in self._accounts if a.get("source") == "local"]
|
||||
ordered = aden + local
|
||||
for i, acct in enumerate(ordered):
|
||||
provider = acct.get("provider", "unknown")
|
||||
alias = acct.get("alias", "unknown")
|
||||
email = acct.get("identity", {}).get("email", "")
|
||||
identity = acct.get("identity", {})
|
||||
source = acct.get("source", "aden")
|
||||
# Build identity label: prefer email, then username/workspace
|
||||
identity_label = (
|
||||
identity.get("email")
|
||||
or identity.get("username")
|
||||
or identity.get("workspace")
|
||||
or ""
|
||||
)
|
||||
label = Text()
|
||||
label.append(f"{provider}/", style="bold")
|
||||
label.append(alias, style="bold cyan")
|
||||
if email:
|
||||
label.append(f" ({email})", style="dim")
|
||||
if source == "local":
|
||||
label.append(" [local]", style="dim yellow")
|
||||
if identity_label:
|
||||
label.append(f" ({identity_label})", style="dim")
|
||||
option_list.add_option(Option(label, id=f"acct-{i}"))
|
||||
# Keep ordered list for index lookups
|
||||
self._accounts = ordered
|
||||
yield option_list
|
||||
yield Label(
|
||||
"[dim]Enter[/dim] Select [dim]Esc[/dim] Cancel",
|
||||
|
||||
@@ -0,0 +1,244 @@
|
||||
"""Add Local Credential ModalScreen for storing named local API key accounts."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from textual.app import ComposeResult
|
||||
from textual.binding import Binding
|
||||
from textual.containers import Vertical, VerticalScroll
|
||||
from textual.screen import ModalScreen
|
||||
from textual.widgets import Button, Input, Label, OptionList
|
||||
from textual.widgets._option_list import Option
|
||||
|
||||
|
||||
class AddLocalCredentialScreen(ModalScreen[dict | None]):
|
||||
"""Modal screen for adding a named local API key credential.
|
||||
|
||||
Phase 1: Pick credential type from list.
|
||||
Phase 2: Enter alias + API key, run health check, save.
|
||||
|
||||
Returns a dict with credential_id, alias, and identity on success, or None on cancel.
|
||||
"""
|
||||
|
||||
BINDINGS = [
|
||||
Binding("escape", "dismiss_screen", "Cancel"),
|
||||
]
|
||||
|
||||
DEFAULT_CSS = """
|
||||
AddLocalCredentialScreen {
|
||||
align: center middle;
|
||||
}
|
||||
#alc-container {
|
||||
width: 80%;
|
||||
max-width: 90;
|
||||
height: 80%;
|
||||
background: $surface;
|
||||
border: heavy $primary;
|
||||
padding: 1 2;
|
||||
}
|
||||
#alc-title {
|
||||
text-align: center;
|
||||
text-style: bold;
|
||||
width: 100%;
|
||||
color: $text;
|
||||
}
|
||||
#alc-subtitle {
|
||||
text-align: center;
|
||||
width: 100%;
|
||||
margin-bottom: 1;
|
||||
}
|
||||
#alc-type-list {
|
||||
height: 1fr;
|
||||
}
|
||||
#alc-form {
|
||||
height: 1fr;
|
||||
}
|
||||
.alc-field {
|
||||
margin-bottom: 1;
|
||||
height: auto;
|
||||
}
|
||||
.alc-field Label {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
#alc-status {
|
||||
width: 100%;
|
||||
height: auto;
|
||||
margin-top: 1;
|
||||
padding: 1;
|
||||
background: $panel;
|
||||
}
|
||||
.alc-buttons {
|
||||
height: auto;
|
||||
margin-top: 1;
|
||||
align: center middle;
|
||||
}
|
||||
.alc-buttons Button {
|
||||
margin: 0 1;
|
||||
}
|
||||
#alc-footer {
|
||||
text-align: center;
|
||||
width: 100%;
|
||||
margin-top: 1;
|
||||
}
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
# Load credential specs that support direct API keys
|
||||
self._specs: list[tuple[str, object]] = self._load_specs()
|
||||
# Selected credential spec (set in phase 2)
|
||||
self._selected_id: str = ""
|
||||
self._selected_spec: object = None
|
||||
self._phase: int = 1 # 1 = type selection, 2 = form
|
||||
|
||||
@staticmethod
|
||||
def _load_specs() -> list[tuple[str, object]]:
|
||||
"""Return (credential_id, spec) pairs for direct-API-key credentials."""
|
||||
try:
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
|
||||
return [
|
||||
(cid, spec)
|
||||
for cid, spec in CREDENTIAL_SPECS.items()
|
||||
if getattr(spec, "direct_api_key_supported", False)
|
||||
]
|
||||
except Exception:
|
||||
return []
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Compose
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
with Vertical(id="alc-container"):
|
||||
yield Label("Add Local Credential", id="alc-title")
|
||||
yield Label("[dim]Store a named API key account[/dim]", id="alc-subtitle")
|
||||
# Phase 1: type selection
|
||||
option_list = OptionList(id="alc-type-list")
|
||||
for cid, spec in self._specs:
|
||||
description = getattr(spec, "description", cid)
|
||||
option_list.add_option(Option(f"{cid} [dim]{description}[/dim]", id=f"type-{cid}"))
|
||||
yield option_list
|
||||
# Phase 2: form (hidden initially)
|
||||
with VerticalScroll(id="alc-form"):
|
||||
with Vertical(classes="alc-field"):
|
||||
yield Label("[bold]Alias[/bold] [dim](e.g. work, personal)[/dim]")
|
||||
yield Input(value="default", id="alc-alias")
|
||||
with Vertical(classes="alc-field"):
|
||||
yield Label("[bold]API Key[/bold]")
|
||||
yield Input(placeholder="Paste API key...", password=True, id="alc-key")
|
||||
yield Label("", id="alc-status")
|
||||
with Vertical(classes="alc-buttons"):
|
||||
yield Button("Test & Save", variant="primary", id="btn-save")
|
||||
yield Button("Back", variant="default", id="btn-back")
|
||||
yield Label(
|
||||
"[dim]Enter[/dim] Select [dim]Esc[/dim] Cancel",
|
||||
id="alc-footer",
|
||||
)
|
||||
|
||||
def on_mount(self) -> None:
|
||||
self._show_phase(1)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Phase switching
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _show_phase(self, phase: int) -> None:
|
||||
self._phase = phase
|
||||
type_list = self.query_one("#alc-type-list", OptionList)
|
||||
form = self.query_one("#alc-form", VerticalScroll)
|
||||
if phase == 1:
|
||||
type_list.display = True
|
||||
form.display = False
|
||||
subtitle = self.query_one("#alc-subtitle", Label)
|
||||
subtitle.update("[dim]Select the credential type to add[/dim]")
|
||||
else:
|
||||
type_list.display = False
|
||||
form.display = True
|
||||
spec = self._selected_spec
|
||||
description = (
|
||||
getattr(spec, "description", self._selected_id) if spec else self._selected_id
|
||||
)
|
||||
subtitle = self.query_one("#alc-subtitle", Label)
|
||||
subtitle.update(f"[dim]{self._selected_id}[/dim] {description}")
|
||||
self._clear_status()
|
||||
# Focus the alias input
|
||||
self.query_one("#alc-alias", Input).focus()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Event handlers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None:
|
||||
if self._phase != 1:
|
||||
return
|
||||
option_id = event.option.id or ""
|
||||
if option_id.startswith("type-"):
|
||||
cid = option_id[5:] # strip "type-" prefix
|
||||
self._selected_id = cid
|
||||
self._selected_spec = next(
|
||||
(spec for spec_id, spec in self._specs if spec_id == cid), None
|
||||
)
|
||||
self._show_phase(2)
|
||||
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
if event.button.id == "btn-save":
|
||||
self._do_save()
|
||||
elif event.button.id == "btn-back":
|
||||
self._show_phase(1)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Save logic
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _do_save(self) -> None:
|
||||
alias = self.query_one("#alc-alias", Input).value.strip() or "default"
|
||||
api_key = self.query_one("#alc-key", Input).value.strip()
|
||||
|
||||
if not api_key:
|
||||
self._set_status("[red]API key cannot be empty.[/red]")
|
||||
return
|
||||
|
||||
self._set_status("[dim]Running health check...[/dim]")
|
||||
# Disable save button while running
|
||||
btn = self.query_one("#btn-save", Button)
|
||||
btn.disabled = True
|
||||
|
||||
try:
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
|
||||
registry = LocalCredentialRegistry.default()
|
||||
info, health_result = registry.save_account(
|
||||
credential_id=self._selected_id,
|
||||
alias=alias,
|
||||
api_key=api_key,
|
||||
run_health_check=True,
|
||||
)
|
||||
|
||||
if health_result is not None and not health_result.valid:
|
||||
self._set_status(
|
||||
f"[yellow]Saved with failed health check:[/yellow] {health_result.message}\n"
|
||||
"[dim]You can re-validate later via validate_credential().[/dim]"
|
||||
)
|
||||
else:
|
||||
identity = info.identity.to_dict()
|
||||
identity_str = ""
|
||||
if identity:
|
||||
parts = [f"{k}: {v}" for k, v in identity.items() if v]
|
||||
identity_str = " " + ", ".join(parts) if parts else ""
|
||||
self._set_status(f"[green]Saved:[/green] {info.storage_id}{identity_str}")
|
||||
# Dismiss with result so callers can react
|
||||
self.set_timer(1.0, lambda: self.dismiss(info.to_account_dict()))
|
||||
return
|
||||
except Exception as e:
|
||||
self._set_status(f"[red]Error:[/red] {e}")
|
||||
finally:
|
||||
btn.disabled = False
|
||||
|
||||
def _set_status(self, markup: str) -> None:
|
||||
self.query_one("#alc-status", Label).update(markup)
|
||||
|
||||
def _clear_status(self) -> None:
|
||||
self.query_one("#alc-status", Label).update("")
|
||||
|
||||
def action_dismiss_screen(self) -> None:
|
||||
self.dismiss(None)
|
||||
@@ -4,6 +4,7 @@ from __future__ import annotations
|
||||
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
from rich.console import Group
|
||||
@@ -16,6 +17,14 @@ from textual.widgets import Label, OptionList, TabbedContent, TabPane
|
||||
from textual.widgets._option_list import Option
|
||||
|
||||
|
||||
class GetStartedAction(Enum):
|
||||
"""Actions available in the Get Started tab."""
|
||||
|
||||
RUN_EXAMPLES = "run_examples"
|
||||
RUN_EXISTING = "run_existing"
|
||||
BUILD_EDIT = "build_edit"
|
||||
|
||||
|
||||
@dataclass
|
||||
class AgentEntry:
|
||||
"""Lightweight agent metadata for the picker."""
|
||||
@@ -139,10 +148,20 @@ def _render_agent_option(agent: AgentEntry) -> Group:
|
||||
return Group(*parts)
|
||||
|
||||
|
||||
def _render_get_started_option(title: str, description: str, icon: str = "→") -> Group:
|
||||
"""Build a Rich renderable for a Get Started option."""
|
||||
line1 = Text()
|
||||
line1.append(f"{icon} ", style="bold cyan")
|
||||
line1.append(title, style="bold")
|
||||
line2 = Text(description, style="dim")
|
||||
return Group(line1, line2)
|
||||
|
||||
|
||||
class AgentPickerScreen(ModalScreen[str | None]):
|
||||
"""Modal screen showing available agents organized by tabbed categories.
|
||||
|
||||
Returns the selected agent path as a string, or None if dismissed.
|
||||
For Get Started actions, returns a special prefix like "action:run_examples".
|
||||
"""
|
||||
|
||||
BINDINGS = [
|
||||
@@ -188,9 +207,14 @@ class AgentPickerScreen(ModalScreen[str | None]):
|
||||
}
|
||||
"""
|
||||
|
||||
def __init__(self, agent_groups: dict[str, list[AgentEntry]]) -> None:
|
||||
def __init__(
|
||||
self,
|
||||
agent_groups: dict[str, list[AgentEntry]],
|
||||
show_get_started: bool = False,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
self._groups = agent_groups
|
||||
self._show_get_started = show_get_started
|
||||
# Map (tab_id, option_index) -> AgentEntry
|
||||
self._option_map: dict[str, dict[int, AgentEntry]] = {}
|
||||
|
||||
@@ -203,6 +227,43 @@ class AgentPickerScreen(ModalScreen[str | None]):
|
||||
id="picker-subtitle",
|
||||
)
|
||||
with TabbedContent():
|
||||
# Get Started tab (only on initial launch)
|
||||
if self._show_get_started:
|
||||
with TabPane("Get Started", id="get-started"):
|
||||
option_list = OptionList(id="list-get-started")
|
||||
option_list.add_option(
|
||||
Option(
|
||||
_render_get_started_option(
|
||||
"Test and run example agents",
|
||||
"Try pre-built example agents to learn how Hive works",
|
||||
"📚",
|
||||
),
|
||||
id="action:run_examples",
|
||||
)
|
||||
)
|
||||
option_list.add_option(
|
||||
Option(
|
||||
_render_get_started_option(
|
||||
"Test and run existing agent",
|
||||
"Load and run an agent you've already built (from exports/)",
|
||||
"🚀",
|
||||
),
|
||||
id="action:run_existing",
|
||||
)
|
||||
)
|
||||
option_list.add_option(
|
||||
Option(
|
||||
_render_get_started_option(
|
||||
"Build or edit agent",
|
||||
"Create a new agent or modify an existing one",
|
||||
"🛠️ ",
|
||||
),
|
||||
id="action:build_edit",
|
||||
)
|
||||
)
|
||||
yield option_list
|
||||
|
||||
# Agent category tabs
|
||||
for category, agents in self._groups.items():
|
||||
tab_id = category.lower().replace(" ", "-")
|
||||
with TabPane(f"{category} ({len(agents)})", id=tab_id):
|
||||
@@ -224,6 +285,15 @@ class AgentPickerScreen(ModalScreen[str | None]):
|
||||
|
||||
def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None:
|
||||
list_id = event.option_list.id or ""
|
||||
|
||||
# Handle Get Started tab options
|
||||
if list_id == "list-get-started":
|
||||
option = event.option
|
||||
if option and option.id:
|
||||
self.dismiss(option.id) # Returns "action:run_examples", etc.
|
||||
return
|
||||
|
||||
# Handle agent selection from other tabs
|
||||
idx = event.option_index
|
||||
agent_map = self._option_map.get(list_id, {})
|
||||
agent = agent_map.get(idx)
|
||||
|
||||
@@ -826,3 +826,52 @@ class TestAsyncComplete:
|
||||
assert call_thread_ids[0] != main_thread_id, (
|
||||
"Base acomplete() should offload sync complete() to a thread pool"
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# AgentRunner._is_local_model — parameterized tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestIsLocalModel:
|
||||
"""Parameterized tests for AgentRunner._is_local_model()."""
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"model",
|
||||
[
|
||||
"ollama/llama3",
|
||||
"ollama/mistral",
|
||||
"ollama_chat/llama3",
|
||||
"vllm/mistral",
|
||||
"lm_studio/phi3",
|
||||
"llamacpp/llama-7b",
|
||||
"Ollama/Llama3", # case-insensitive
|
||||
"VLLM/Mistral",
|
||||
],
|
||||
)
|
||||
def test_local_models_return_true(self, model):
|
||||
"""Local model prefixes should be recognized."""
|
||||
from framework.runner.runner import AgentRunner
|
||||
|
||||
assert AgentRunner._is_local_model(model) is True
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"model",
|
||||
[
|
||||
"anthropic/claude-3-haiku",
|
||||
"openai/gpt-4o",
|
||||
"gpt-4o-mini",
|
||||
"claude-3-haiku-20240307",
|
||||
"gemini/gemini-1.5-flash",
|
||||
"groq/llama3-70b",
|
||||
"mistral/mistral-large",
|
||||
"azure/gpt-4",
|
||||
"cohere/command-r",
|
||||
"together/llama3-70b",
|
||||
],
|
||||
)
|
||||
def test_cloud_models_return_false(self, model):
|
||||
"""Cloud model prefixes should not be treated as local."""
|
||||
from framework.runner.runner import AgentRunner
|
||||
|
||||
assert AgentRunner._is_local_model(model) is False
|
||||
|
||||
@@ -0,0 +1,81 @@
|
||||
# Competitive Intelligence Agent (Community)
|
||||
## Built by https://github.com/nafiyad
|
||||
|
||||
An autonomous agent that monitors competitor websites, news sources, and GitHub repositories to deliver structured digests with key insights and trend analysis.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **Python 3.11+** with `uv`
|
||||
- **ANTHROPIC_API_KEY** — set in your `.env` or environment
|
||||
- **GITHUB_TOKEN** *(optional)* — for GitHub activity monitoring
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Interactive Shell
|
||||
```bash
|
||||
cd examples/templates
|
||||
uv run python -m competitive_intel_agent shell
|
||||
```
|
||||
|
||||
### CLI Run
|
||||
```bash
|
||||
# With inline JSON
|
||||
uv run python -m competitive_intel_agent run \
|
||||
--competitors '[{"name":"Acme","website":"https://acme.com","github":"acme-org"},{"name":"Beta Inc","website":"https://beta.io","github":null}]' \
|
||||
--focus-areas "pricing,features,partnerships,hiring" \
|
||||
--frequency weekly
|
||||
|
||||
# From a file
|
||||
uv run python -m competitive_intel_agent run --competitors competitors.json
|
||||
```
|
||||
|
||||
### TUI Dashboard
|
||||
```bash
|
||||
uv run python -m competitive_intel_agent tui
|
||||
```
|
||||
|
||||
### Validate & Info
|
||||
```bash
|
||||
uv run python -m competitive_intel_agent validate
|
||||
uv run python -m competitive_intel_agent info
|
||||
```
|
||||
|
||||
## Agent Graph
|
||||
|
||||
```
|
||||
intake → web-scraper → news-search → github-monitor → aggregator → analysis → report
|
||||
↑
|
||||
(skipped if no competitors have GitHub)
|
||||
```
|
||||
|
||||
| Node | Purpose | Tools | Client-Facing |
|
||||
|------|---------|-------|:---:|
|
||||
| **intake** | Collect competitor list & focus areas | — | ✅ |
|
||||
| **web-scraper** | Scrape competitor websites | web_search, web_scrape | |
|
||||
| **news-search** | Search news & press releases | web_search, web_scrape | |
|
||||
| **github-monitor** | Track public GitHub activity | github_* | |
|
||||
| **aggregator** | Merge, deduplicate, persist | save_data, load_data | |
|
||||
| **analysis** | Extract insights & trends | load_data, save_data | |
|
||||
| **report** | Generate HTML digest | save_data, serve_file | ✅ |
|
||||
|
||||
## Input Format
|
||||
|
||||
```json
|
||||
{
|
||||
"competitors": [
|
||||
{"name": "CompetitorA", "website": "https://competitor-a.com", "github": "competitor-a"},
|
||||
{"name": "CompetitorB", "website": "https://competitor-b.com", "github": null}
|
||||
],
|
||||
"focus_areas": ["pricing", "new_features", "hiring", "partnerships"],
|
||||
"report_frequency": "weekly"
|
||||
}
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
The agent produces an HTML report saved to `~/.hive/agents/competitive_intel_agent/` with:
|
||||
- 🔥 **Key Highlights** — most significant competitive moves
|
||||
- 📊 **Per-Competitor Tables** — category, update, source, date
|
||||
- 📈 **30-Day Trends** — patterns across competitors over time
|
||||
|
||||
Historical snapshots are stored for trend comparison on subsequent runs.
|
||||
@@ -0,0 +1,24 @@
|
||||
"""
|
||||
Competitive Intelligence Agent — Automated competitor monitoring and reporting.
|
||||
|
||||
Monitors competitor websites, news sources, and GitHub repositories to deliver
|
||||
structured weekly digests with key insights and 30-day trend analysis for
|
||||
product and marketing teams.
|
||||
"""
|
||||
|
||||
from .agent import CompetitiveIntelAgent, default_agent, goal, nodes, edges
|
||||
from .config import RuntimeConfig, AgentMetadata, default_config, metadata
|
||||
|
||||
__version__ = "1.0.0"
|
||||
|
||||
__all__ = [
|
||||
"CompetitiveIntelAgent",
|
||||
"default_agent",
|
||||
"goal",
|
||||
"nodes",
|
||||
"edges",
|
||||
"RuntimeConfig",
|
||||
"AgentMetadata",
|
||||
"default_config",
|
||||
"metadata",
|
||||
]
|
||||
@@ -0,0 +1,278 @@
|
||||
"""
|
||||
CLI entry point for Competitive Intelligence Agent.
|
||||
|
||||
Uses AgentRuntime for multi-entrypoint support with HITL pause/resume.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import sys
|
||||
from typing import Any
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
from .agent import CompetitiveIntelAgent, default_agent
|
||||
|
||||
|
||||
def setup_logging(verbose: bool = False, debug: bool = False) -> None:
    """Configure logging for execution visibility.

    Args:
        verbose: Show INFO-level execution details.
        debug: Show DEBUG-level logging with timestamps (wins over verbose).
    """
    if debug:
        level = logging.DEBUG
        fmt = "%(asctime)s %(name)s: %(message)s"
    elif verbose:
        level = logging.INFO
        fmt = "%(message)s"
    else:
        level = logging.WARNING
        fmt = "%(levelname)s: %(message)s"
    # Log to stderr so stdout stays clean for result JSON.
    logging.basicConfig(level=level, format=fmt, stream=sys.stderr)
    logging.getLogger("framework").setLevel(level)
|
||||
|
||||
|
||||
@click.group()
@click.version_option(version="1.0.0")
def cli() -> None:
    """Competitive Intelligence Agent - Monitor competitors and deliver weekly digests."""
    # Root click group; subcommands (run/tui/info/validate/shell) attach via @cli.command().
    pass
|
||||
|
||||
|
||||
def _parse_competitors(raw: str) -> Any:
    """Parse the --competitors argument.

    Accepts either an inline JSON document or a path to a JSON file.

    Args:
        raw: The raw CLI argument string.

    Returns:
        The decoded JSON value (typically a list of competitor dicts).

    Raises:
        ValueError: If the argument is neither valid JSON nor a readable JSON file.
    """
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        pass
    # Not inline JSON — treat it as a file path. OSError also covers
    # directories and permission problems, not just a missing file.
    try:
        with open(raw) as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError) as e:
        raise ValueError(str(e)) from e


@cli.command()
@click.option(
    "--competitors",
    "-c",
    type=str,
    required=True,
    help='Competitors JSON string or file path (e.g. \'[{"name":"Acme","website":"https://acme.com"}]\')',
)
@click.option(
    "--focus-areas",
    "-f",
    type=str,
    default="pricing,features,partnerships,hiring",
    help="Comma-separated focus areas (default: pricing,features,partnerships,hiring)",
)
@click.option(
    "--frequency",
    type=click.Choice(["weekly", "daily", "monthly"]),
    default="weekly",
    help="Report frequency (default: weekly)",
)
@click.option("--quiet", "-q", is_flag=True, help="Only output result JSON")
@click.option("--verbose", "-v", is_flag=True, help="Show execution details")
@click.option("--debug", is_flag=True, help="Show debug logging")
def run(
    competitors: str,
    focus_areas: str,
    frequency: str,
    quiet: bool,
    verbose: bool,
    debug: bool,
) -> None:
    """Execute competitive intelligence gathering and report generation."""
    if not quiet:
        setup_logging(verbose=verbose, debug=debug)

    # Parse competitors — accept JSON string or file path.
    try:
        competitors_data = _parse_competitors(competitors)
    except ValueError as e:
        click.echo(f"Error parsing competitors: {e}", err=True)
        sys.exit(1)

    # The intake node expects a single JSON payload under "competitors_input".
    context: dict[str, Any] = {
        "competitors_input": json.dumps({
            "competitors": competitors_data,
            "focus_areas": [a.strip() for a in focus_areas.split(",")],
            "report_frequency": frequency,
        })
    }

    result = asyncio.run(default_agent.run(context))

    output_data: dict[str, Any] = {
        "success": result.success,
        "steps_executed": result.steps_executed,
        "output": result.output,
    }
    if result.error:
        output_data["error"] = result.error

    # default=str: result.output may contain non-JSON-native values (dates, paths).
    click.echo(json.dumps(output_data, indent=2, default=str))
    sys.exit(0 if result.success else 1)
|
||||
|
||||
|
||||
@cli.command()
@click.option("--verbose", "-v", is_flag=True, help="Show execution details")
@click.option("--debug", is_flag=True, help="Show debug logging")
def tui(verbose: bool, debug: bool) -> None:
    """Launch the TUI dashboard for interactive competitive intelligence."""
    setup_logging(verbose=verbose, debug=debug)

    # The TUI depends on the optional 'textual' package; fail with a hint if absent.
    try:
        from framework.tui.app import AdenTUI
    except ImportError:
        click.echo(
            "TUI requires the 'textual' package. Install with: pip install textual"
        )
        sys.exit(1)

    # Imported lazily so the heavier runtime stack loads only for this command.
    from framework.llm import LiteLLMProvider
    from framework.runner.tool_registry import ToolRegistry
    from framework.runtime.agent_runtime import create_agent_runtime
    from framework.runtime.event_bus import EventBus
    from framework.runtime.execution_stream import EntryPointSpec

    async def run_with_tui() -> None:
        # Wire the agent's internals by hand (instead of agent.start()) because
        # the TUI drives execution through an AgentRuntime rather than the
        # agent's own GraphExecutor.
        agent = CompetitiveIntelAgent()

        # Build graph and tools
        agent._event_bus = EventBus()
        agent._tool_registry = ToolRegistry()

        storage_path = Path.home() / ".hive" / "agents" / "competitive_intel_agent"
        storage_path.mkdir(parents=True, exist_ok=True)

        # Optional MCP tool servers configured next to this module.
        mcp_config_path = Path(__file__).parent / "mcp_servers.json"
        if mcp_config_path.exists():
            agent._tool_registry.load_mcp_config(mcp_config_path)

        llm = LiteLLMProvider(
            model=agent.config.model,
            api_key=agent.config.api_key,
            api_base=agent.config.api_base,
        )

        tools = list(agent._tool_registry.get_tools().values())
        tool_executor = agent._tool_registry.get_executor()
        graph = agent._build_graph()

        # Single manual entry point matching the graph's "start" -> "intake" mapping.
        runtime = create_agent_runtime(
            graph=graph,
            goal=agent.goal,
            storage_path=storage_path,
            entry_points=[
                EntryPointSpec(
                    id="start",
                    name="Start Competitive Analysis",
                    entry_node="intake",
                    trigger_type="manual",
                    isolation_level="isolated",
                ),
            ],
            llm=llm,
            tools=tools,
            tool_executor=tool_executor,
        )

        await runtime.start()

        # Ensure the runtime is stopped even if the TUI exits with an error.
        try:
            app = AdenTUI(runtime)
            await app.run_async()
        finally:
            await runtime.stop()

    asyncio.run(run_with_tui())
|
||||
|
||||
|
||||
@cli.command()
@click.option("--json", "output_json", is_flag=True)
def info(output_json: bool) -> None:
    """Show agent information."""
    data = default_agent.info()

    # Machine-readable path: dump everything and stop.
    if output_json:
        click.echo(json.dumps(data, indent=2))
        return

    # Human-readable summary, one field per line.
    click.echo(f"Agent: {data['name']}")
    click.echo(f"Version: {data['version']}")
    click.echo(f"Description: {data['description']}")
    click.echo(f"\nGoal: {data['goal']['name']}")
    click.echo(f"  {data['goal']['description']}")
    click.echo(f"\nNodes: {', '.join(data['nodes'])}")
    click.echo(f"Entry: {data['entry_node']}")
    click.echo(f"Terminal: {', '.join(data['terminal_nodes'])}")
    click.echo(f"Edges: {len(data['edges'])}")
|
||||
|
||||
|
||||
@cli.command()
def validate() -> None:
    """Validate agent structure."""
    result = default_agent.validate()
    is_valid = result["valid"]

    if not is_valid:
        # Structural problems: list every error explicitly.
        click.echo("❌ Agent has errors:")
        for error in result["errors"]:
            click.echo(f" ERROR: {error}")
    else:
        click.echo("✅ Agent is valid")
        # Warnings are non-fatal; show them without affecting the exit code.
        for warning in result["warnings"]:
            click.echo(f" ⚠️ {warning}")

    sys.exit(0 if is_valid else 1)
|
||||
|
||||
|
||||
@cli.command()
@click.option("--verbose", "-v", is_flag=True)
def shell(verbose: bool) -> None:
    """Interactive competitive intelligence session (CLI, no TUI)."""
    # Thin sync wrapper: the real REPL lives in the async helper below.
    asyncio.run(_interactive_shell(verbose=verbose))
|
||||
|
||||
|
||||
async def _interactive_shell(verbose: bool = False) -> None:
    """Async interactive shell.

    Reads competitor input in a REPL loop and runs the full pipeline per entry.

    Args:
        verbose: Enable INFO-level logging for execution visibility.
    """
    setup_logging(verbose=verbose)

    click.echo("=== Competitive Intelligence Agent ===")
    click.echo("Provide competitor details to begin analysis (or 'quit' to exit):\n")

    agent = CompetitiveIntelAgent()
    await agent.start()

    try:
        while True:
            try:
                # Blocking input() would stall the event loop; run it in the
                # default executor. get_running_loop() replaces the deprecated
                # get_event_loop() inside coroutines (Python 3.10+).
                user_input = await asyncio.get_running_loop().run_in_executor(
                    None, input, "Competitors> "
                )
                if user_input.lower() in ["quit", "exit", "q"]:
                    click.echo("Goodbye!")
                    break

                if not user_input.strip():
                    continue

                click.echo("\nGathering competitive intelligence...\n")

                result = await agent.trigger_and_wait(
                    "start", {"competitors_input": user_input}
                )

                # None signals a timeout from trigger_and_wait.
                if result is None:
                    click.echo("\n[Execution timed out]\n")
                    continue

                if result.success:
                    output = result.output
                    status = output.get("delivery_status", "unknown")
                    click.echo(f"\nAnalysis complete (status: {status})\n")
                else:
                    click.echo(f"\nAnalysis failed: {result.error}\n")

            except KeyboardInterrupt:
                click.echo("\nGoodbye!")
                break
            except Exception as e:
                # Keep the REPL alive on unexpected errors; show the traceback
                # so the user can report/debug it.
                click.echo(f"Error: {e}", err=True)
                import traceback

                traceback.print_exc()
    finally:
        await agent.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Module executed directly: dispatch to the click command group.
    cli()
|
||||
@@ -0,0 +1,403 @@
|
||||
{
|
||||
"agent": {
|
||||
"id": "competitive_intel_agent",
|
||||
"name": "Competitive Intelligence Report",
|
||||
"version": "1.0.0",
|
||||
"description": "Monitor competitor websites, news sources, and GitHub repositories to produce a structured weekly digest with key insights, detailed findings per competitor, and 30-day trend analysis."
|
||||
},
|
||||
"graph": {
|
||||
"id": "competitive_intel_agent-graph",
|
||||
"goal_id": "competitive-intelligence-report",
|
||||
"version": "1.0.0",
|
||||
"entry_node": "intake",
|
||||
"entry_points": {
|
||||
"start": "intake"
|
||||
},
|
||||
"pause_nodes": [],
|
||||
"terminal_nodes": [
|
||||
"report"
|
||||
],
|
||||
"nodes": [
|
||||
{
|
||||
"id": "intake",
|
||||
"name": "Competitor Intake",
|
||||
"description": "Collect competitor list, focus areas, and report preferences from the user",
|
||||
"node_type": "event_loop",
|
||||
"input_keys": [
|
||||
"competitors_input"
|
||||
],
|
||||
"output_keys": [
|
||||
"competitors",
|
||||
"focus_areas",
|
||||
"report_frequency",
|
||||
"has_github_competitors"
|
||||
],
|
||||
"nullable_output_keys": [],
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
"system_prompt": null,
|
||||
"tools": [],
|
||||
"model": null,
|
||||
"function": null,
|
||||
"routes": {},
|
||||
"max_retries": 3,
|
||||
"retry_on": [],
|
||||
"max_node_visits": 1,
|
||||
"output_model": null,
|
||||
"max_validation_retries": 2,
|
||||
"client_facing": true
|
||||
},
|
||||
{
|
||||
"id": "web-scraper",
|
||||
"name": "Website Monitor",
|
||||
"description": "Scrape competitor websites for pricing, features, and announcements",
|
||||
"node_type": "event_loop",
|
||||
"input_keys": [
|
||||
"competitors",
|
||||
"focus_areas"
|
||||
],
|
||||
"output_keys": [
|
||||
"web_findings"
|
||||
],
|
||||
"nullable_output_keys": [],
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
"system_prompt": null,
|
||||
"tools": [
|
||||
"web_search",
|
||||
"web_scrape"
|
||||
],
|
||||
"model": null,
|
||||
"function": null,
|
||||
"routes": {},
|
||||
"max_retries": 3,
|
||||
"retry_on": [],
|
||||
"max_node_visits": 1,
|
||||
"output_model": null,
|
||||
"max_validation_retries": 2,
|
||||
"client_facing": false
|
||||
},
|
||||
{
|
||||
"id": "news-search",
|
||||
"name": "News & Press Monitor",
|
||||
"description": "Search for competitor mentions in news, press releases, and industry publications",
|
||||
"node_type": "event_loop",
|
||||
"input_keys": [
|
||||
"competitors",
|
||||
"focus_areas"
|
||||
],
|
||||
"output_keys": [
|
||||
"news_findings"
|
||||
],
|
||||
"nullable_output_keys": [],
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
"system_prompt": null,
|
||||
"tools": [
|
||||
"web_search",
|
||||
"web_scrape"
|
||||
],
|
||||
"model": null,
|
||||
"function": null,
|
||||
"routes": {},
|
||||
"max_retries": 3,
|
||||
"retry_on": [],
|
||||
"max_node_visits": 1,
|
||||
"output_model": null,
|
||||
"max_validation_retries": 2,
|
||||
"client_facing": false
|
||||
},
|
||||
{
|
||||
"id": "github-monitor",
|
||||
"name": "GitHub Activity Monitor",
|
||||
"description": "Track public GitHub repository activity for competitors with GitHub presence",
|
||||
"node_type": "event_loop",
|
||||
"input_keys": [
|
||||
"competitors"
|
||||
],
|
||||
"output_keys": [
|
||||
"github_findings"
|
||||
],
|
||||
"nullable_output_keys": [],
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
"system_prompt": null,
|
||||
"tools": [
|
||||
"github_list_repos",
|
||||
"github_get_repo",
|
||||
"github_search_repos"
|
||||
],
|
||||
"model": null,
|
||||
"function": null,
|
||||
"routes": {},
|
||||
"max_retries": 3,
|
||||
"retry_on": [],
|
||||
"max_node_visits": 1,
|
||||
"output_model": null,
|
||||
"max_validation_retries": 2,
|
||||
"client_facing": false
|
||||
},
|
||||
{
|
||||
"id": "aggregator",
|
||||
"name": "Data Aggregator",
|
||||
"description": "Combine findings from all sources, deduplicate, and structure for analysis",
|
||||
"node_type": "event_loop",
|
||||
"input_keys": [
|
||||
"competitors",
|
||||
"web_findings",
|
||||
"news_findings",
|
||||
"github_findings"
|
||||
],
|
||||
"output_keys": [
|
||||
"aggregated_findings",
|
||||
"github_findings"
|
||||
],
|
||||
"nullable_output_keys": [
|
||||
"github_findings"
|
||||
],
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
"system_prompt": null,
|
||||
"tools": [
|
||||
"save_data",
|
||||
"load_data",
|
||||
"list_data_files"
|
||||
],
|
||||
"model": null,
|
||||
"function": null,
|
||||
"routes": {},
|
||||
"max_retries": 3,
|
||||
"retry_on": [],
|
||||
"max_node_visits": 1,
|
||||
"output_model": null,
|
||||
"max_validation_retries": 2,
|
||||
"client_facing": false
|
||||
},
|
||||
{
|
||||
"id": "analysis",
|
||||
"name": "Insight Analysis",
|
||||
"description": "Extract key insights, detect trends, and compare with historical data",
|
||||
"node_type": "event_loop",
|
||||
"input_keys": [
|
||||
"aggregated_findings",
|
||||
"competitors",
|
||||
"focus_areas"
|
||||
],
|
||||
"output_keys": [
|
||||
"key_highlights",
|
||||
"trend_analysis",
|
||||
"detailed_findings"
|
||||
],
|
||||
"nullable_output_keys": [],
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
"system_prompt": null,
|
||||
"tools": [
|
||||
"load_data",
|
||||
"save_data",
|
||||
"list_data_files"
|
||||
],
|
||||
"model": null,
|
||||
"function": null,
|
||||
"routes": {},
|
||||
"max_retries": 3,
|
||||
"retry_on": [],
|
||||
"max_node_visits": 1,
|
||||
"output_model": null,
|
||||
"max_validation_retries": 2,
|
||||
"client_facing": false
|
||||
},
|
||||
{
|
||||
"id": "report",
|
||||
"name": "Report Generator",
|
||||
"description": "Generate and deliver the competitive intelligence digest as an HTML report",
|
||||
"node_type": "event_loop",
|
||||
"input_keys": [
|
||||
"key_highlights",
|
||||
"trend_analysis",
|
||||
"detailed_findings",
|
||||
"competitors"
|
||||
],
|
||||
"output_keys": [
|
||||
"delivery_status"
|
||||
],
|
||||
"nullable_output_keys": [],
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
"system_prompt": null,
|
||||
"tools": [
|
||||
"save_data",
|
||||
"load_data",
|
||||
"serve_file_to_user",
|
||||
"list_data_files"
|
||||
],
|
||||
"model": null,
|
||||
"function": null,
|
||||
"routes": {},
|
||||
"max_retries": 3,
|
||||
"retry_on": [],
|
||||
"max_node_visits": 1,
|
||||
"output_model": null,
|
||||
"max_validation_retries": 2,
|
||||
"client_facing": true
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "intake-to-web-scraper",
|
||||
"source": "intake",
|
||||
"target": "web-scraper",
|
||||
"condition": "on_success",
|
||||
"condition_expr": null,
|
||||
"priority": 1,
|
||||
"input_mapping": {}
|
||||
},
|
||||
{
|
||||
"id": "web-scraper-to-news-search",
|
||||
"source": "web-scraper",
|
||||
"target": "news-search",
|
||||
"condition": "on_success",
|
||||
"condition_expr": null,
|
||||
"priority": 1,
|
||||
"input_mapping": {}
|
||||
},
|
||||
{
|
||||
"id": "news-search-to-github-monitor",
|
||||
"source": "news-search",
|
||||
"target": "github-monitor",
|
||||
"condition": "conditional",
|
||||
"condition_expr": "str(has_github_competitors).lower() == 'true'",
|
||||
"priority": 2,
|
||||
"input_mapping": {}
|
||||
},
|
||||
{
|
||||
"id": "news-search-to-aggregator-skip-github",
|
||||
"source": "news-search",
|
||||
"target": "aggregator",
|
||||
"condition": "conditional",
|
||||
"condition_expr": "str(has_github_competitors).lower() != 'true'",
|
||||
"priority": 1,
|
||||
"input_mapping": {}
|
||||
},
|
||||
{
|
||||
"id": "github-monitor-to-aggregator",
|
||||
"source": "github-monitor",
|
||||
"target": "aggregator",
|
||||
"condition": "on_success",
|
||||
"condition_expr": null,
|
||||
"priority": 1,
|
||||
"input_mapping": {}
|
||||
},
|
||||
{
|
||||
"id": "aggregator-to-analysis",
|
||||
"source": "aggregator",
|
||||
"target": "analysis",
|
||||
"condition": "on_success",
|
||||
"condition_expr": null,
|
||||
"priority": 1,
|
||||
"input_mapping": {}
|
||||
},
|
||||
{
|
||||
"id": "analysis-to-report",
|
||||
"source": "analysis",
|
||||
"target": "report",
|
||||
"condition": "on_success",
|
||||
"condition_expr": null,
|
||||
"priority": 1,
|
||||
"input_mapping": {}
|
||||
}
|
||||
],
|
||||
"max_steps": 100,
|
||||
"max_retries_per_node": 3,
|
||||
"description": "Monitor competitor websites, news sources, and GitHub repositories to produce a structured weekly digest with key insights, detailed findings per competitor, and 30-day trend analysis.",
|
||||
"created_at": "2026-02-22T21:09:31.647779"
|
||||
},
|
||||
"goal": {
|
||||
"id": "competitive-intelligence-report",
|
||||
"name": "Competitive Intelligence Report",
|
||||
"description": "Monitor competitor websites, news sources, and GitHub repositories to produce a structured weekly digest with key insights, detailed findings per competitor, and 30-day trend analysis.",
|
||||
"status": "draft",
|
||||
"success_criteria": [
|
||||
{
|
||||
"id": "sc-source-coverage",
|
||||
"description": "Check multiple source types per competitor",
|
||||
"metric": "sources_per_competitor",
|
||||
"target": ">=3",
|
||||
"weight": 0.25,
|
||||
"met": false
|
||||
},
|
||||
{
|
||||
"id": "sc-findings-structured",
|
||||
"description": "All findings structured with competitor, category, update, source, and date",
|
||||
"metric": "findings_structured",
|
||||
"target": "true",
|
||||
"weight": 0.25,
|
||||
"met": false
|
||||
},
|
||||
{
|
||||
"id": "sc-historical-comparison",
|
||||
"description": "Uses stored data to compare with previous reports for trend analysis",
|
||||
"metric": "historical_comparison",
|
||||
"target": "true",
|
||||
"weight": 0.25,
|
||||
"met": false
|
||||
},
|
||||
{
|
||||
"id": "sc-report-delivered",
|
||||
"description": "User receives a formatted, readable competitive intelligence digest",
|
||||
"metric": "report_delivered",
|
||||
"target": "true",
|
||||
"weight": 0.25,
|
||||
"met": false
|
||||
}
|
||||
],
|
||||
"constraints": [
|
||||
{
|
||||
"id": "c-no-fabrication",
|
||||
"description": "Never fabricate findings, news, or data",
|
||||
"constraint_type": "hard",
|
||||
"category": "quality",
|
||||
"check": ""
|
||||
},
|
||||
{
|
||||
"id": "c-source-attribution",
|
||||
"description": "Every finding must include a source URL",
|
||||
"constraint_type": "hard",
|
||||
"category": "quality",
|
||||
"check": ""
|
||||
},
|
||||
{
|
||||
"id": "c-recency",
|
||||
"description": "Prioritize findings from the past 7 days; include up to 30 days",
|
||||
"constraint_type": "soft",
|
||||
"category": "quality",
|
||||
"check": ""
|
||||
}
|
||||
],
|
||||
"context": {},
|
||||
"required_capabilities": [],
|
||||
"input_schema": {},
|
||||
"output_schema": {},
|
||||
"version": "1.0.0",
|
||||
"parent_version": null,
|
||||
"evolution_reason": null,
|
||||
"created_at": "2026-02-22 21:09:31.601236",
|
||||
"updated_at": "2026-02-22 21:09:31.601240"
|
||||
},
|
||||
"required_tools": [
|
||||
"github_get_repo",
|
||||
"github_search_repos",
|
||||
"list_data_files",
|
||||
"github_list_repos",
|
||||
"serve_file_to_user",
|
||||
"save_data",
|
||||
"web_search",
|
||||
"load_data",
|
||||
"web_scrape"
|
||||
],
|
||||
"metadata": {
|
||||
"created_at": "2026-02-22T21:09:31.647803",
|
||||
"node_count": 7,
|
||||
"edge_count": 7
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,375 @@
|
||||
"""Agent graph construction for Competitive Intelligence Agent."""
|
||||
|
||||
from typing import Any, TYPE_CHECKING
|
||||
from framework.graph import EdgeSpec, EdgeCondition, Goal, SuccessCriterion, Constraint, NodeSpec
|
||||
from framework.graph.edge import GraphSpec
|
||||
from framework.graph.executor import ExecutionResult, GraphExecutor
|
||||
from framework.runtime.event_bus import EventBus
|
||||
from framework.runtime.core import Runtime
|
||||
from framework.llm import LiteLLMProvider
|
||||
from framework.runner.tool_registry import ToolRegistry
|
||||
|
||||
from .config import default_config, metadata, RuntimeConfig
|
||||
from .nodes import (
|
||||
intake_node,
|
||||
web_scraper_node,
|
||||
news_search_node,
|
||||
github_monitor_node,
|
||||
aggregator_node,
|
||||
analysis_node,
|
||||
report_node,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from framework.config import RuntimeConfig
|
||||
|
||||
# Goal definition
|
||||
goal: Goal = Goal(
    id="competitive-intelligence-report",
    name="Competitive Intelligence Report",
    description=(
        "Monitor competitor websites, news sources, and GitHub repositories "
        "to produce a structured weekly digest with key insights, detailed "
        "findings per competitor, and 30-day trend analysis."
    ),
    # Four equally-weighted criteria (0.25 each, summing to 1.0).
    success_criteria=[
        SuccessCriterion(
            id="sc-source-coverage",
            description="Check multiple source types per competitor (website, news, GitHub)",
            metric="sources_per_competitor",
            target=">=3",
            weight=0.25,
        ),
        SuccessCriterion(
            id="sc-findings-structured",
            description="All findings structured with competitor, category, update, source, and date",
            metric="findings_structured",
            target="true",
            weight=0.25,
        ),
        SuccessCriterion(
            id="sc-historical-comparison",
            description="Uses stored data to compare with previous reports for trend analysis",
            metric="historical_comparison",
            target="true",
            weight=0.25,
        ),
        SuccessCriterion(
            id="sc-report-delivered",
            description="User receives a formatted, readable competitive intelligence digest",
            metric="report_delivered",
            target="true",
            weight=0.25,
        ),
    ],
    # Hard constraints must hold; the "soft" recency constraint is best-effort.
    constraints=[
        Constraint(
            id="c-no-fabrication",
            description="Never fabricate findings, news, or data — only report what was found",
            constraint_type="hard",
            category="quality",
        ),
        Constraint(
            id="c-source-attribution",
            description="Every finding must include a source URL",
            constraint_type="hard",
            category="quality",
        ),
        Constraint(
            id="c-recency",
            description="Prioritize findings from the past 7 days; include up to 30 days",
            constraint_type="soft",
            category="quality",
        ),
    ],
)
|
||||
|
||||
# Node list
|
||||
# Pipeline order: intake -> web-scraper -> news-search -> github-monitor
# (conditionally skipped) -> aggregator -> analysis -> report.
nodes: list[NodeSpec] = [
    intake_node,
    web_scraper_node,
    news_search_node,
    github_monitor_node,
    aggregator_node,
    analysis_node,
    report_node,
]
|
||||
|
||||
# Edge definitions
|
||||
edges: list[EdgeSpec] = [
    EdgeSpec(
        id="intake-to-web-scraper",
        source="intake",
        target="web-scraper",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    EdgeSpec(
        id="web-scraper-to-news-search",
        source="web-scraper",
        target="news-search",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    # Branch: take the GitHub monitor only when intake flagged at least one
    # competitor with a GitHub presence. The two conditional edges below are
    # mutually exclusive; higher priority (2) is evaluated first.
    EdgeSpec(
        id="news-search-to-github-monitor",
        source="news-search",
        target="github-monitor",
        condition=EdgeCondition.CONDITIONAL,
        condition_expr="str(has_github_competitors).lower() == 'true'",
        priority=2,
    ),
    # Complementary branch: skip straight to the aggregator when no
    # competitor has GitHub.
    EdgeSpec(
        id="news-search-to-aggregator-skip-github",
        source="news-search",
        target="aggregator",
        condition=EdgeCondition.CONDITIONAL,
        condition_expr="str(has_github_competitors).lower() != 'true'",
        priority=1,
    ),
    EdgeSpec(
        id="github-monitor-to-aggregator",
        source="github-monitor",
        target="aggregator",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    EdgeSpec(
        id="aggregator-to-analysis",
        source="aggregator",
        target="analysis",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    EdgeSpec(
        id="analysis-to-report",
        source="analysis",
        target="report",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
]
|
||||
|
||||
# Graph configuration
|
||||
entry_node: str = "intake"  # default first node for a plain run
entry_points: dict[str, str] = {"start": "intake"}  # named triggers -> entry node
pause_nodes: list[str] = []  # no HITL pause points in this pipeline
terminal_nodes: list[str] = ["report"]  # execution ends after the report node
|
||||
|
||||
|
||||
class CompetitiveIntelAgent:
    """
    Competitive Intelligence Agent — 7-node pipeline.

    Flow: intake -> web-scraper -> news-search -> github-monitor -> aggregator -> analysis -> report
                                          (github-monitor skipped if no GitHub competitors)
    """

    def __init__(self, config: RuntimeConfig | None = None) -> None:
        """
        Initialize the Competitive Intelligence Agent.

        Args:
            config: Optional runtime configuration. Defaults to default_config.
        """
        self.config = config or default_config
        self.goal = goal
        self.nodes = nodes
        self.edges = edges
        self.entry_node = entry_node
        self.entry_points = entry_points
        self.pause_nodes = pause_nodes
        self.terminal_nodes = terminal_nodes
        # Lazily-built execution state; populated by _setup() via start().
        self._executor: GraphExecutor | None = None
        self._graph: GraphSpec | None = None
        self._event_bus: EventBus | None = None
        self._tool_registry: ToolRegistry | None = None

    def _build_graph(self) -> GraphSpec:
        """
        Build the GraphSpec for the competitive intelligence workflow.

        Returns:
            A GraphSpec defining the agent's logic.
        """
        return GraphSpec(
            id="competitive-intel-agent-graph",
            goal_id=self.goal.id,
            version="1.0.0",
            entry_node=self.entry_node,
            entry_points=self.entry_points,
            terminal_nodes=self.terminal_nodes,
            pause_nodes=self.pause_nodes,
            nodes=self.nodes,
            edges=self.edges,
            default_model=self.config.model,
            max_tokens=self.config.max_tokens,
            loop_config={
                "max_iterations": 100,
                "max_tool_calls_per_turn": 20,
                "max_history_tokens": 32000,
            },
        )

    def _setup(self) -> GraphExecutor:
        """
        Set up the executor with all components (runtime, LLM, tools).

        Returns:
            An initialized GraphExecutor instance.
        """
        from pathlib import Path

        storage_path = Path.home() / ".hive" / "agents" / "competitive_intel_agent"
        storage_path.mkdir(parents=True, exist_ok=True)

        self._event_bus = EventBus()
        self._tool_registry = ToolRegistry()

        # Optional MCP tool servers configured next to this module.
        mcp_config_path = Path(__file__).parent / "mcp_servers.json"
        if mcp_config_path.exists():
            self._tool_registry.load_mcp_config(mcp_config_path)

        llm = LiteLLMProvider(
            model=self.config.model,
            api_key=self.config.api_key,
            api_base=self.config.api_base,
        )

        tool_executor = self._tool_registry.get_executor()
        tools = list(self._tool_registry.get_tools().values())

        self._graph = self._build_graph()
        runtime = Runtime(storage_path)

        self._executor = GraphExecutor(
            runtime=runtime,
            llm=llm,
            tools=tools,
            tool_executor=tool_executor,
            event_bus=self._event_bus,
            storage_path=storage_path,
            loop_config=self._graph.loop_config,
        )

        return self._executor

    async def start(self) -> None:
        """Set up the agent (initialize executor and tools). Idempotent."""
        if self._executor is None:
            self._setup()

    async def stop(self) -> None:
        """Clean up resources.

        Resets all execution state created by _setup() — previously only
        _executor and _event_bus were cleared, leaving a stale _graph and
        _tool_registry behind after shutdown.
        """
        self._executor = None
        self._graph = None
        self._event_bus = None
        self._tool_registry = None

    async def trigger_and_wait(
        self,
        entry_point: str,
        input_data: dict[str, Any],
        timeout: float | None = None,
        session_state: dict[str, Any] | None = None,
    ) -> ExecutionResult | None:
        """
        Execute the graph and wait for completion.

        Args:
            entry_point: The graph entry point to trigger.
            input_data: Data to pass to the entry node.
            timeout: Optional execution timeout.
            session_state: Optional initial session state.

        Returns:
            The execution result, or None if it timed out.

        Raises:
            RuntimeError: If start() was not called first.
        """
        if self._executor is None:
            raise RuntimeError("Agent not started. Call start() first.")
        if self._graph is None:
            raise RuntimeError("Graph not built. Call start() first.")

        return await self._executor.execute(
            graph=self._graph,
            goal=self.goal,
            input_data=input_data,
            session_state=session_state,
        )

    async def run(self, context: dict[str, Any], session_state: dict[str, Any] | None = None) -> ExecutionResult:
        """
        Run the agent (convenience method for single execution).

        Args:
            context: The input context for the agent.
            session_state: Optional initial session state.

        Returns:
            The final execution result.
        """
        await self.start()
        try:
            result = await self.trigger_and_wait(
                "start", context, session_state=session_state
            )
            # trigger_and_wait returns None on timeout; normalize to a failure result.
            return result or ExecutionResult(success=False, error="Execution timeout")
        finally:
            await self.stop()

    def info(self) -> dict[str, Any]:
        """Get agent information for introspection."""
        return {
            "name": metadata.name,
            "version": metadata.version,
            "description": metadata.description,
            "goal": {
                "name": self.goal.name,
                "description": self.goal.description,
            },
            "nodes": [n.id for n in self.nodes],
            "edges": [e.id for e in self.edges],
            "entry_node": self.entry_node,
            "entry_points": self.entry_points,
            "pause_nodes": self.pause_nodes,
            "terminal_nodes": self.terminal_nodes,
            "client_facing_nodes": [n.id for n in self.nodes if n.client_facing],
        }

    def validate(self) -> dict[str, Any]:
        """
        Validate agent structure for cycles, missing nodes, or invalid edges.

        Returns:
            A dict with 'valid' (bool), 'errors' (list), and 'warnings' (list).
        """
        errors = []
        warnings = []

        node_ids = {node.id for node in self.nodes}
        for edge in self.edges:
            if edge.source not in node_ids:
                errors.append(f"Edge {edge.id}: source '{edge.source}' not found")
            if edge.target not in node_ids:
                errors.append(f"Edge {edge.id}: target '{edge.target}' not found")

        if self.entry_node not in node_ids:
            errors.append(f"Entry node '{self.entry_node}' not found")

        for terminal in self.terminal_nodes:
            if terminal not in node_ids:
                errors.append(f"Terminal node '{terminal}' not found")

        for ep_id, node_id in self.entry_points.items():
            if node_id not in node_ids:
                errors.append(
                    f"Entry point '{ep_id}' references unknown node '{node_id}'"
                )

        return {
            "valid": len(errors) == 0,
            "errors": errors,
            "warnings": warnings,
        }


# Create default instance
default_agent: CompetitiveIntelAgent = CompetitiveIntelAgent()
|
||||
@@ -0,0 +1,24 @@
|
||||
"""Runtime configuration for Competitive Intelligence Agent."""
|
||||
|
||||
from dataclasses import dataclass
|
||||
from framework.config import RuntimeConfig
|
||||
|
||||
default_config: RuntimeConfig = RuntimeConfig()
|
||||
|
||||
@dataclass
|
||||
class AgentMetadata:
|
||||
"""Metadata for the Competitive Intelligence Agent."""
|
||||
name: str = "Competitive Intelligence Agent"
|
||||
version: str = "1.0.0"
|
||||
description: str = (
|
||||
"Monitors competitor websites, news sources, and GitHub repositories "
|
||||
"to deliver automated weekly digests with key insights and trend analysis "
|
||||
"for product and marketing teams."
|
||||
)
|
||||
intro_message: str = (
|
||||
"Hi! I'm your competitive intelligence assistant. Tell me which competitors "
|
||||
"to monitor and what areas to focus on (pricing, features, hiring, partnerships, etc.) "
|
||||
"and I'll research them across websites, news, and GitHub to produce a detailed digest."
|
||||
)
|
||||
|
||||
metadata: AgentMetadata = AgentMetadata()
|
||||
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"hive-tools": {
|
||||
"transport": "stdio",
|
||||
"command": "uv",
|
||||
"args": [
|
||||
"run",
|
||||
"python",
|
||||
"mcp_server.py",
|
||||
"--stdio"
|
||||
],
|
||||
"cwd": "../../../tools",
|
||||
"description": "Hive tools MCP server providing web_search, web_scrape, github tools, and file utilities"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,319 @@
|
||||
"""Node definitions for Competitive Intelligence Agent."""
|
||||
|
||||
from framework.graph import NodeSpec
|
||||
|
||||
# Node 1: Intake (client-facing)
|
||||
intake_node: NodeSpec = NodeSpec(
|
||||
id="intake",
|
||||
name="Competitor Intake",
|
||||
description="Collect competitor list, focus areas, and report preferences from the user",
|
||||
node_type="event_loop",
|
||||
client_facing=True,
|
||||
input_keys=["competitors_input"],
|
||||
output_keys=["competitors", "focus_areas", "report_frequency", "has_github_competitors"],
|
||||
system_prompt="""\
|
||||
You are a competitive intelligence intake specialist. Your job is to gather the
|
||||
information needed to run a competitive analysis.
|
||||
|
||||
**STEP 1 — Read the input and respond (text only, NO tool calls):**
|
||||
|
||||
The user may provide input in several forms:
|
||||
- A JSON object with "competitors", "focus_areas", and "report_frequency"
|
||||
- A natural-language description of competitors to track
|
||||
- Just company names
|
||||
|
||||
If the input is clear, confirm what you understood and ask the user to confirm.
|
||||
If it's vague, ask 1-2 clarifying questions:
|
||||
- Which competitors? (name + website URL at minimum)
|
||||
- What focus areas? (pricing, features, hiring, partnerships, messaging, etc.)
|
||||
- Do any competitors have public GitHub organizations/repos?
|
||||
|
||||
After your message, call ask_user() to wait for the user's response.
|
||||
|
||||
**STEP 2 — After the user confirms, call set_output for each key:**
|
||||
|
||||
Structure the data and set outputs:
|
||||
- set_output("competitors", <JSON list of {name, website, github (or null)}>)
|
||||
- set_output("focus_areas", <JSON list of strings like ["pricing", "features", "hiring"]>)
|
||||
- set_output("report_frequency", "weekly")
|
||||
- set_output("has_github_competitors", "true" or "false")
|
||||
|
||||
Set has_github_competitors to "true" if at least one competitor has a non-null github field.
|
||||
""",
|
||||
tools=[],
|
||||
)
|
||||
|
||||
# Node 2: Web Scraper
|
||||
web_scraper_node: NodeSpec = NodeSpec(
|
||||
id="web-scraper",
|
||||
name="Website Monitor",
|
||||
description="Scrape competitor websites for pricing, features, and announcements",
|
||||
node_type="event_loop",
|
||||
input_keys=["competitors", "focus_areas"],
|
||||
output_keys=["web_findings"],
|
||||
system_prompt="""\
|
||||
You are a web intelligence agent. For each competitor, systematically check their
|
||||
online presence for updates related to the focus areas.
|
||||
|
||||
**Process for each competitor:**
|
||||
1. Use web_search to find their current pricing page, product page, changelog,
|
||||
and blog. Try queries like:
|
||||
- "{competitor_name} pricing"
|
||||
- "{competitor_name} changelog OR release notes OR what's new"
|
||||
- "{competitor_name} blog announcements"
|
||||
- "site:{competitor_website} pricing OR features"
|
||||
|
||||
2. Use web_scrape on the most relevant URLs to extract actual content.
|
||||
Focus on: pricing tiers, feature lists, recent announcements, messaging.
|
||||
|
||||
3. For each finding, note:
|
||||
- competitor: which competitor
|
||||
- category: pricing / features / announcement / messaging / other
|
||||
- update: what changed or what you found
|
||||
- source: the URL
|
||||
- date: when it was published/updated (if available, otherwise "unknown")
|
||||
|
||||
**Important:**
|
||||
- Work through competitors one at a time
|
||||
- Skip URLs that fail to load; move on
|
||||
- Prioritize recent content (last 7-30 days)
|
||||
- Be factual — only report what you actually see on the page
|
||||
|
||||
When done, call:
|
||||
- set_output("web_findings", <JSON list of finding objects>)
|
||||
""",
|
||||
tools=["web_search", "web_scrape"],
|
||||
)
|
||||
|
||||
# Node 3: News Search
|
||||
news_search_node: NodeSpec = NodeSpec(
|
||||
id="news-search",
|
||||
name="News & Press Monitor",
|
||||
description="Search for competitor mentions in news, press releases, and industry publications",
|
||||
node_type="event_loop",
|
||||
input_keys=["competitors", "focus_areas"],
|
||||
output_keys=["news_findings"],
|
||||
system_prompt="""\
|
||||
You are a news intelligence agent. Search for recent news, press releases, and
|
||||
industry coverage about each competitor.
|
||||
|
||||
**Process for each competitor:**
|
||||
1. Use web_search with news-focused queries:
|
||||
- "{competitor_name} news"
|
||||
- "{competitor_name} press release 2026"
|
||||
- "{competitor_name} partnership OR acquisition OR funding"
|
||||
- "{competitor_name} {focus_area}" for each focus area
|
||||
|
||||
2. Use web_scrape on the most relevant news articles (aim for 2-3 per competitor).
|
||||
Extract the headline, key details, and publication date.
|
||||
|
||||
3. For each finding, note:
|
||||
- competitor: which competitor
|
||||
- category: partnership / funding / hiring / press_release / industry_news
|
||||
- update: summary of the news item
|
||||
- source: the article URL
|
||||
- date: publication date
|
||||
|
||||
**Important:**
|
||||
- Prioritize news from the last 7 days, but include last 30 days if sparse
|
||||
- Include press releases, blog posts, and industry analyst coverage
|
||||
- Skip paywalled content gracefully
|
||||
- Do NOT fabricate news — only report what you find
|
||||
|
||||
When done, call:
|
||||
- set_output("news_findings", <JSON list of finding objects>)
|
||||
""",
|
||||
tools=["web_search", "web_scrape"],
|
||||
)
|
||||
|
||||
# Node 4: GitHub Monitor
|
||||
github_monitor_node: NodeSpec = NodeSpec(
|
||||
id="github-monitor",
|
||||
name="GitHub Activity Monitor",
|
||||
description="Track public GitHub repository activity for competitors with GitHub presence",
|
||||
node_type="event_loop",
|
||||
input_keys=["competitors"],
|
||||
output_keys=["github_findings"],
|
||||
system_prompt="""\
|
||||
You are a GitHub intelligence agent. For each competitor that has a GitHub
|
||||
organization or username, check their recent public activity.
|
||||
|
||||
**Process for each competitor with a GitHub handle:**
|
||||
1. Use github_get_repo or github_list_repos to find their main repositories.
|
||||
2. Note key metrics:
|
||||
- New repositories created recently
|
||||
- Star count changes (if you have historical data)
|
||||
- Recent commit activity (last 7 days)
|
||||
- Open issues/PRs count
|
||||
- Any new releases or tags
|
||||
|
||||
3. For each notable finding, note:
|
||||
- competitor: which competitor
|
||||
- category: github_activity / new_repo / release / open_source
|
||||
- update: what you found (e.g. "3 new commits to main repo", "Released v2.1")
|
||||
- source: GitHub URL
|
||||
- date: date of activity
|
||||
|
||||
**Important:**
|
||||
- Only process competitors that have a non-null "github" field
|
||||
- Focus on activity that signals product direction or engineering investment
|
||||
- If a competitor has many repos, focus on the most starred / most active ones
|
||||
- If no GitHub tool is available or auth fails, set output with an empty list
|
||||
|
||||
When done, call:
|
||||
- set_output("github_findings", <JSON list of finding objects>)
|
||||
""",
|
||||
tools=["github_list_repos", "github_get_repo", "github_search_repos"],
|
||||
)
|
||||
|
||||
# Node 5: Aggregator
|
||||
aggregator_node: NodeSpec = NodeSpec(
|
||||
id="aggregator",
|
||||
name="Data Aggregator",
|
||||
description="Combine findings from all sources, deduplicate, and structure for analysis",
|
||||
node_type="event_loop",
|
||||
input_keys=["competitors", "web_findings", "news_findings", "github_findings"],
|
||||
output_keys=["aggregated_findings"],
|
||||
nullable_output_keys=["github_findings"],
|
||||
system_prompt="""\
|
||||
You are a data aggregation specialist. Combine all the findings from the web
|
||||
scraper, news search, and GitHub monitor into a single, clean dataset.
|
||||
|
||||
**Steps:**
|
||||
1. Merge all findings into one list, preserving the source attribution.
|
||||
2. Deduplicate: if the same update appears from multiple searches, keep the
|
||||
most detailed version and note multiple sources.
|
||||
3. Categorize each finding consistently using these categories:
|
||||
- pricing, features, partnership, hiring, funding, press_release,
|
||||
- github_activity, messaging, product_launch, other
|
||||
4. Sort findings by competitor, then by date (most recent first).
|
||||
5. Save the aggregated data for historical tracking:
|
||||
save_data(filename="findings_latest.json", data=<aggregated JSON>)
|
||||
|
||||
When done, call:
|
||||
- set_output("aggregated_findings", <JSON list of deduplicated finding objects>)
|
||||
|
||||
Each finding should have: competitor, category, update, source, date.
|
||||
""",
|
||||
tools=["save_data", "load_data", "list_data_files"],
|
||||
)
|
||||
|
||||
# Node 6: Analysis
|
||||
analysis_node: NodeSpec = NodeSpec(
|
||||
id="analysis",
|
||||
name="Insight Analysis",
|
||||
description="Extract key insights, detect trends, and compare with historical data",
|
||||
node_type="event_loop",
|
||||
input_keys=["aggregated_findings", "competitors", "focus_areas"],
|
||||
output_keys=["key_highlights", "trend_analysis", "detailed_findings"],
|
||||
system_prompt="""\
|
||||
You are a competitive intelligence analyst. Analyze the aggregated findings and
|
||||
produce actionable insights.
|
||||
|
||||
**Steps:**
|
||||
|
||||
1. **Load historical data** (if available):
|
||||
- Use list_data_files() to see past snapshots
|
||||
- Use load_data() to load the most recent previous snapshot
|
||||
- Compare current findings with previous data to identify CHANGES
|
||||
|
||||
2. **Extract Key Highlights** (the most important 3-5 items):
|
||||
- Significant pricing changes
|
||||
- Major feature launches or product updates
|
||||
- Strategic moves (partnerships, acquisitions, funding)
|
||||
- Anything that requires immediate attention
|
||||
|
||||
3. **Trend Analysis** (30-day view):
|
||||
- Is a competitor investing more in enterprise features?
|
||||
- Are multiple competitors moving in the same direction?
|
||||
- Any shifts in pricing strategy across the market?
|
||||
- Engineering investment signals from GitHub activity
|
||||
|
||||
4. **Save current snapshot for future comparison:**
|
||||
save_data(filename="snapshot_YYYY-MM-DD.json", data=<current findings + analysis>)
|
||||
|
||||
When done, call:
|
||||
- set_output("key_highlights", <JSON list of highlight strings>)
|
||||
- set_output("trend_analysis", <JSON list of trend observation strings>)
|
||||
- set_output("detailed_findings", <JSON: per-competitor structured findings>)
|
||||
""",
|
||||
tools=["load_data", "save_data", "list_data_files"],
|
||||
)
|
||||
|
||||
# Node 7: Report Generator (client-facing)
|
||||
report_node: NodeSpec = NodeSpec(
|
||||
id="report",
|
||||
name="Report Generator",
|
||||
description="Generate and deliver the competitive intelligence digest as an HTML report",
|
||||
node_type="event_loop",
|
||||
client_facing=True,
|
||||
input_keys=["key_highlights", "trend_analysis", "detailed_findings", "competitors"],
|
||||
output_keys=["delivery_status"],
|
||||
system_prompt="""\
|
||||
You are a report generation specialist. Create a polished, self-contained HTML
|
||||
competitive intelligence report and deliver it to the user.
|
||||
|
||||
**STEP 1 — Build the HTML report (tool calls, NO text to user yet):**
|
||||
|
||||
Create a complete, well-styled HTML document. Use this structure:
|
||||
|
||||
```html
|
||||
<h1>Competitive Intelligence Report</h1>
|
||||
<p>Week of [date range]</p>
|
||||
|
||||
<h2>🔥 Key Highlights</h2>
|
||||
<!-- Bulleted list of the most important findings -->
|
||||
|
||||
<h2>📊 Detailed Findings</h2>
|
||||
<!-- For each competitor: -->
|
||||
<h3>[Competitor Name]</h3>
|
||||
<table>
|
||||
<tr><th>Category</th><th>Update</th><th>Source</th><th>Date</th></tr>
|
||||
<!-- One row per finding -->
|
||||
</table>
|
||||
|
||||
<h2>📈 30-Day Trends</h2>
|
||||
<!-- Bulleted list of trend observations -->
|
||||
|
||||
<footer>Generated by Competitive Intelligence Agent</footer>
|
||||
```
|
||||
|
||||
Design requirements:
|
||||
- Modern, readable styling with a dark header and clean tables
|
||||
- Color-coded categories (pricing=blue, features=green, partnerships=purple, etc.)
|
||||
- Clickable source links
|
||||
- Responsive layout
|
||||
|
||||
Save the report:
|
||||
save_data(filename="report_YYYY-MM-DD.html", data=<your_html>)
|
||||
|
||||
Serve it to the user:
|
||||
serve_file_to_user(filename="report_YYYY-MM-DD.html", label="Competitive Intelligence Report")
|
||||
|
||||
**STEP 2 — Present to the user (text only, NO tool calls):**
|
||||
|
||||
Tell the user the report is ready and include the file link. Provide a brief
|
||||
summary of the most important findings. Ask if they want to:
|
||||
- Dig deeper into any specific competitor
|
||||
- Adjust focus areas for next time
|
||||
- See historical trends
|
||||
|
||||
After presenting, call ask_user() to wait for the user's response.
|
||||
|
||||
**STEP 3 — After the user responds:**
|
||||
- Answer follow-up questions from the research material
|
||||
- Call ask_user() again if they might have more questions
|
||||
- When satisfied: set_output("delivery_status", "completed")
|
||||
""",
|
||||
tools=["save_data", "load_data", "serve_file_to_user", "list_data_files"],
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"intake_node",
|
||||
"web_scraper_node",
|
||||
"news_search_node",
|
||||
"github_monitor_node",
|
||||
"aggregator_node",
|
||||
"analysis_node",
|
||||
"report_node",
|
||||
]
|
||||
@@ -47,6 +47,7 @@ Call gmail_list_labels() to show the user their current Gmail labels. This helps
|
||||
|
||||
- set_output("rules", <the confirmed rules as a clear text description>)
|
||||
- set_output("max_emails", <the confirmed max_emails as a string number, e.g. "100">)
|
||||
|
||||
""",
|
||||
tools=["gmail_list_labels"],
|
||||
)
|
||||
@@ -71,23 +72,25 @@ fetch_emails_node = NodeSpec(
|
||||
You are a data pipeline step. Your job is to fetch emails from Gmail and write them to emails.jsonl.
|
||||
|
||||
**FIRST-TIME FETCH (default path):**
|
||||
1. Read "max_emails" from input context.
|
||||
1. Read "max_emails" and "rules" from input context.
|
||||
2. Call bulk_fetch_emails(max_emails=<value>).
|
||||
3. The tool returns {"filename": "emails.jsonl"}.
|
||||
4. Call set_output("emails", "emails.jsonl").
|
||||
|
||||
**NEXT-BATCH FETCH (when user asks for "the next N" emails):**
|
||||
The user wants emails BEYOND what was already fetched. Use pagination:
|
||||
1. Call gmail_list_messages(query="label:INBOX", max_results=<previous + new count>) to get message IDs. Use page_token if needed to paginate past already-fetched emails.
|
||||
2. Identify message IDs NOT in the previous batch (you remember them from continuous conversation).
|
||||
3. Call gmail_batch_get_messages(message_ids=<new_ids>, format="metadata") for full metadata.
|
||||
4. For each message in the result, call append_data(filename="emails.jsonl", data=<JSON: {id, subject, from, to, date, snippet, labels}>).
|
||||
1. Call gmail_list_messages(query="label:INBOX", max_results=<previous + new count>).
|
||||
Use page_token if needed to paginate past already-fetched emails.
|
||||
2. Identify message IDs NOT in the previous batch.
|
||||
3. Call gmail_batch_get_messages(message_ids=<new_ids>, format="metadata").
|
||||
4. For each message, call append_data(filename="emails.jsonl",
|
||||
data=<JSON: {id, subject, from, to, date, snippet, labels}>).
|
||||
5. Call set_output("emails", "emails.jsonl").
|
||||
|
||||
**TOOLS:**
|
||||
- bulk_fetch_emails(max_emails) — Bulk fetch from inbox, writes emails.jsonl. Use for first fetch.
|
||||
- gmail_list_messages(query, max_results, page_token) — List message IDs with pagination. Returns {messages, next_page_token}.
|
||||
- gmail_batch_get_messages(message_ids, format) — Fetch metadata for specific IDs (max 50 per call).
|
||||
- bulk_fetch_emails(max_emails) — Bulk fetch from inbox, writes emails.jsonl.
|
||||
- gmail_list_messages(query, max_results, page_token) — List message IDs.
|
||||
- gmail_batch_get_messages(message_ids, format) — Fetch metadata (max 50/call).
|
||||
- append_data(filename, data) — Append a line to a JSONL file.
|
||||
|
||||
Do NOT add commentary or explanation. Execute the appropriate path and call set_output when done.
|
||||
@@ -118,19 +121,20 @@ classify_and_act_node = NodeSpec(
|
||||
You are an inbox management assistant. Apply the user's rules to their emails and execute Gmail actions.
|
||||
|
||||
**YOUR TOOLS:**
|
||||
- load_data(filename, limit, offset) — Read emails from a local file. This is how you access the emails.
|
||||
- append_data(filename, data) — Append a line to a file. Use this to record actions taken.
|
||||
- gmail_batch_modify_messages(message_ids, add_labels, remove_labels) — Modify Gmail labels in batch. ALWAYS prefer this.
|
||||
- load_data(filename, limit, offset) — Read emails from a local file.
|
||||
- append_data(filename, data) — Append a line to a file. Record actions taken.
|
||||
- gmail_batch_modify_messages(message_ids, add_labels, remove_labels) — Modify labels in batch. ALWAYS prefer this.
|
||||
- gmail_modify_message(message_id, add_labels, remove_labels) — Modify a single message's labels.
|
||||
- gmail_trash_message(message_id) — Move a message to trash. No batch version; call per email.
|
||||
- gmail_trash_message(message_id) — Move a message to trash.
|
||||
- gmail_create_draft(to, subject, body) — Create a draft reply. NEVER sends automatically.
|
||||
- gmail_create_label(name) — Create a new Gmail label. Returns the label ID.
|
||||
- gmail_list_labels() — List all existing Gmail labels with their IDs.
|
||||
- set_output(key, value) — Set an output value. Call ONLY after all actions are executed.
|
||||
|
||||
**CONTEXT:**
|
||||
- "rules" = the user's rule to apply (e.g. "mark all as unread")
|
||||
- "emails" = a filename (e.g. "emails.jsonl") containing the fetched emails as JSONL. Each line has: id, subject, from, to, date, snippet, labels.
|
||||
- "rules" = the user's rule to apply (e.g. "mark all as unread").
|
||||
- "emails" = a filename (e.g. "emails.jsonl") containing the fetched emails as JSONL.
|
||||
Each line has: id, subject, from, to, date, snippet, labels.
|
||||
|
||||
**PROCESS EMAILS ONE CHUNK AT A TIME (you will get multiple turns):**
|
||||
|
||||
|
||||
@@ -41,6 +41,13 @@ TOOLS = {
|
||||
"type": "string",
|
||||
"description": "Maximum number of emails to fetch (default '100')",
|
||||
},
|
||||
"account": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Account alias to use (e.g. 'timothy-home'). "
|
||||
"Required when multiple Google accounts are connected."
|
||||
),
|
||||
},
|
||||
},
|
||||
"required": [],
|
||||
},
|
||||
@@ -64,8 +71,13 @@ def _get_data_dir() -> str:
|
||||
return ctx["data_dir"]
|
||||
|
||||
|
||||
def _get_access_token() -> str:
|
||||
"""Get Google OAuth access token from credential store."""
|
||||
def _get_access_token(account: str = "") -> str:
|
||||
"""Get Google OAuth access token from credential store.
|
||||
|
||||
Args:
|
||||
account: Account alias (e.g. 'timothy-home'). When provided,
|
||||
resolves the token for that specific account.
|
||||
"""
|
||||
import os
|
||||
|
||||
# Try credential store first (same pattern as gmail_tool.py)
|
||||
@@ -73,7 +85,10 @@ def _get_access_token() -> str:
|
||||
from aden_tools.credentials import CredentialStoreAdapter
|
||||
|
||||
credentials = CredentialStoreAdapter.default()
|
||||
token = credentials.get("google")
|
||||
if account:
|
||||
token = credentials.get_by_alias("google", account)
|
||||
else:
|
||||
token = credentials.get("google")
|
||||
if token:
|
||||
return token
|
||||
except Exception:
|
||||
@@ -105,17 +120,21 @@ def _parse_headers(headers: list[dict]) -> dict[str, str]:
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _bulk_fetch_emails(max_emails: str = "100") -> str:
|
||||
def _bulk_fetch_emails(max_emails: str = "100", account: str = "") -> str:
|
||||
"""Fetch inbox emails and write them to emails.jsonl.
|
||||
|
||||
Uses synchronous httpx.Client since this runs as a tool call inside
|
||||
an already-running async event loop.
|
||||
|
||||
Args:
|
||||
max_emails: Maximum number of emails to fetch.
|
||||
account: Account alias (e.g. 'timothy-home') for multi-account routing.
|
||||
|
||||
Returns:
|
||||
The filename "emails.jsonl" (written to session data_dir).
|
||||
"""
|
||||
max_count = int(max_emails) if max_emails else 100
|
||||
access_token = _get_access_token()
|
||||
access_token = _get_access_token(account)
|
||||
data_dir = _get_data_dir()
|
||||
Path(data_dir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
@@ -237,7 +256,8 @@ def tool_executor(tool_use: ToolUse) -> ToolResult:
|
||||
if tool_use.name == "bulk_fetch_emails":
|
||||
try:
|
||||
max_emails = tool_use.input.get("max_emails", "100")
|
||||
filename = _bulk_fetch_emails(max_emails=max_emails)
|
||||
account = tool_use.input.get("account", "")
|
||||
filename = _bulk_fetch_emails(max_emails=max_emails, account=account)
|
||||
return ToolResult(
|
||||
tool_use_id=tool_use.id,
|
||||
content=json.dumps({"filename": filename}),
|
||||
|
||||
+25
-11
@@ -748,9 +748,14 @@ if [ ${#FOUND_PROVIDERS[@]} -gt 0 ]; then
|
||||
echo -e " ${CYAN}$i)${NC} $provider"
|
||||
i=$((i + 1))
|
||||
done
|
||||
ZAI_CHOICE=$i
|
||||
echo -e " ${CYAN}$i)${NC} ZAI Code Subscription ${DIM}(use your ZAI Code plan)${NC}"
|
||||
i=$((i + 1))
|
||||
# Only show ZAI Code Subscription if the API key already exists
|
||||
if [ -n "${ZAI_API_KEY:-}" ]; then
|
||||
ZAI_CHOICE=$i
|
||||
echo -e " ${CYAN}$i)${NC} ZAI Code Subscription ${DIM}(use your ZAI Code plan)${NC}"
|
||||
i=$((i + 1))
|
||||
else
|
||||
ZAI_CHOICE=-1 # invalid choice, won't match
|
||||
fi
|
||||
echo -e " ${CYAN}$i)${NC} Other"
|
||||
max_choice=$i
|
||||
echo ""
|
||||
@@ -1203,18 +1208,27 @@ if [ "$CODEX_AVAILABLE" = true ]; then
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Prompt user to source shell config or start new terminal
|
||||
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
|
||||
echo -e "${BOLD}⚠️ IMPORTANT: Load your new configuration${NC}"
|
||||
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
|
||||
echo ""
|
||||
echo -e " Your API keys have been saved to ${CYAN}$SHELL_RC_FILE${NC}"
|
||||
echo -e " To use them, either:"
|
||||
echo ""
|
||||
echo -e " ${GREEN}Option 1:${NC} Source your shell config now:"
|
||||
echo -e " ${CYAN}source $SHELL_RC_FILE${NC}"
|
||||
echo ""
|
||||
echo -e " ${GREEN}Option 2:${NC} Open a new terminal window"
|
||||
echo ""
|
||||
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
|
||||
echo ""
|
||||
|
||||
echo -e "${BOLD}Run an Agent:${NC}"
|
||||
echo ""
|
||||
echo -e " Launch the interactive dashboard to browse and run agents:"
|
||||
echo -e " You can start a example agent or an agent built by yourself:"
|
||||
echo -e " You can start an example agent or an agent built by yourself:"
|
||||
echo -e " ${CYAN}hive tui${NC}"
|
||||
echo ""
|
||||
# Show shell sourcing reminder if we added environment variables
|
||||
if [ -n "$SELECTED_PROVIDER_ID" ] || [ -n "$HIVE_CREDENTIAL_KEY" ]; then
|
||||
echo -e "${BOLD}Note:${NC} To use the new environment variables in this shell, run:"
|
||||
echo -e " ${CYAN}source $SHELL_RC_FILE${NC}"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
echo -e "${DIM}Run ./quickstart.sh again to reconfigure.${NC}"
|
||||
echo ""
|
||||
|
||||
@@ -102,6 +102,7 @@ python mcp_server.py
|
||||
| ---- | ----------- |
|
||||
| `web_search` | Search the web (Google or Brave, auto-detected) |
|
||||
| `web_scrape` | Scrape and extract content from webpages |
|
||||
| `search_wikipedia` | Search Wikipedia for pages and summaries |
|
||||
| `scholar_search`, `scholar_get_citations`, `scholar_get_author` | Search academic papers, get citations and author profiles via SerpAPI |
|
||||
| `patents_search`, `patents_get_details` | Search patents and retrieve patent details via SerpAPI |
|
||||
| `exa_search`, `exa_answer`, `exa_find_similar`, `exa_get_contents` | Semantic search and content retrieval via Exa AI |
|
||||
@@ -185,6 +186,7 @@ tools/
|
||||
│ ├── web_search_tool/
|
||||
│ ├── web_scrape_tool/
|
||||
│ ├── pdf_read_tool/
|
||||
│ ├── wikipedia_tool/
|
||||
│ ├── time_tool/
|
||||
│ └── calendar_tool/
|
||||
├── tests/ # Test suite
|
||||
|
||||
@@ -56,6 +56,7 @@ To add a new credential:
|
||||
from .apollo import APOLLO_CREDENTIALS
|
||||
from .base import CredentialError, CredentialSpec
|
||||
from .bigquery import BIGQUERY_CREDENTIALS
|
||||
from .brevo import BREVO_CREDENTIALS
|
||||
from .browser import get_aden_auth_url, get_aden_setup_url, open_browser
|
||||
from .calcom import CALCOM_CREDENTIALS
|
||||
from .discord import DISCORD_CREDENTIALS
|
||||
@@ -65,7 +66,12 @@ from .github import GITHUB_CREDENTIALS
|
||||
from .google_calendar import GOOGLE_CALENDAR_CREDENTIALS
|
||||
from .google_docs import GOOGLE_DOCS_CREDENTIALS
|
||||
from .google_maps import GOOGLE_MAPS_CREDENTIALS
|
||||
from .health_check import HealthCheckResult, check_credential_health
|
||||
from .health_check import (
|
||||
BaseHttpHealthChecker,
|
||||
HealthCheckResult,
|
||||
check_credential_health,
|
||||
validate_integration_wiring,
|
||||
)
|
||||
from .hubspot import HUBSPOT_CREDENTIALS
|
||||
from .llm import LLM_CREDENTIALS
|
||||
from .news import NEWS_CREDENTIALS
|
||||
@@ -105,6 +111,7 @@ CREDENTIAL_SPECS = {
|
||||
**BIGQUERY_CREDENTIALS,
|
||||
**CALCOM_CREDENTIALS,
|
||||
**STRIPE_CREDENTIALS,
|
||||
**BREVO_CREDENTIALS,
|
||||
**POSTGRES_CREDENTIALS,
|
||||
}
|
||||
|
||||
@@ -116,8 +123,10 @@ __all__ = [
|
||||
# Credential store adapter (replaces deprecated CredentialManager)
|
||||
"CredentialStoreAdapter",
|
||||
# Health check utilities
|
||||
"BaseHttpHealthChecker",
|
||||
"HealthCheckResult",
|
||||
"check_credential_health",
|
||||
"validate_integration_wiring",
|
||||
# Browser utilities for OAuth2 flows
|
||||
"open_browser",
|
||||
"get_aden_auth_url",
|
||||
@@ -149,5 +158,6 @@ __all__ = [
|
||||
"CALCOM_CREDENTIALS",
|
||||
"DISCORD_CREDENTIALS",
|
||||
"STRIPE_CREDENTIALS",
|
||||
"BREVO_CREDENTIALS",
|
||||
"POSTGRES_CREDENTIALS",
|
||||
]
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
"""
|
||||
Brevo tool credentials.
|
||||
|
||||
Contains credentials for Brevo (formerly Sendinblue) transactional email,
|
||||
SMS, and contact management integration.
|
||||
"""
|
||||
|
||||
from .base import CredentialSpec
|
||||
|
||||
BREVO_CREDENTIALS = {
|
||||
"brevo": CredentialSpec(
|
||||
env_var="BREVO_API_KEY",
|
||||
tools=[
|
||||
"brevo_send_email",
|
||||
"brevo_send_sms",
|
||||
"brevo_create_contact",
|
||||
"brevo_get_contact",
|
||||
"brevo_update_contact",
|
||||
],
|
||||
required=True,
|
||||
startup_required=False,
|
||||
help_url="https://app.brevo.com/settings/keys/api",
|
||||
description="Brevo API key for transactional email, SMS, and contact management",
|
||||
# Auth method support
|
||||
aden_supported=False,
|
||||
direct_api_key_supported=True,
|
||||
api_key_instructions="""To get a Brevo API key:
|
||||
1. Go to https://app.brevo.com and create an account (or sign in)
|
||||
2. Navigate to Settings > API Keys (or visit https://app.brevo.com/settings/keys/api)
|
||||
3. Click "Generate a new API key"
|
||||
4. Give it a name (e.g., "Hive Agent")
|
||||
5. Copy the API key (starts with xkeysib-)
|
||||
6. Store it securely - you won't be able to see it again!
|
||||
7. Note: For sending emails, you'll need a verified sender domain or email""",
|
||||
# Health check configuration
|
||||
health_check_endpoint="https://api.brevo.com/v3/account",
|
||||
health_check_method="GET",
|
||||
# Credential store mapping
|
||||
credential_id="brevo",
|
||||
credential_key="api_key",
|
||||
),
|
||||
}
|
||||
@@ -239,6 +239,178 @@ class OAuthBearerHealthChecker:
|
||||
)
|
||||
|
||||
|
||||
class BaseHttpHealthChecker:
    """Configurable base class for HTTP-based credential health checkers.

    Reduces boilerplate by handling the common HTTP request/response/error pattern.
    Subclasses configure via class constants and override hooks as needed.

    Supports five auth patterns:
    - AUTH_BEARER: Authorization: Bearer <token>
    - AUTH_HEADER: Custom header name/value template
    - AUTH_QUERY: Token as query parameter
    - AUTH_BASIC: HTTP Basic Authentication
    - AUTH_URL: Token embedded in URL (e.g., Telegram)

    Example::

        class CalcomHealthChecker(BaseHttpHealthChecker):
            ENDPOINT = "https://api.cal.com/v1/me"
            SERVICE_NAME = "Cal.com"
            AUTH_TYPE = "query"
            AUTH_QUERY_PARAM_NAME = "apiKey"
    """

    # Auth pattern constants
    AUTH_BEARER = "bearer"
    AUTH_HEADER = "header"
    AUTH_QUERY = "query"
    AUTH_BASIC = "basic"
    AUTH_URL = "url"

    # Subclass configuration
    ENDPOINT: str = ""          # full URL of the probe endpoint
    SERVICE_NAME: str = ""      # human-readable name used in result messages
    HTTP_METHOD: str = "GET"    # only GET and POST are dispatched (see check())
    TIMEOUT: float = 10.0       # per-request timeout in seconds

    # Auth configuration
    AUTH_TYPE: str = AUTH_BEARER
    AUTH_HEADER_NAME: str = "Authorization"
    AUTH_HEADER_TEMPLATE: str = "Bearer {token}"
    AUTH_QUERY_PARAM_NAME: str = "key"

    # Status code interpretation
    VALID_STATUSES: frozenset[int] = frozenset({200})
    RATE_LIMITED_STATUSES: frozenset[int] = frozenset({429})
    # Error statuses that nonetheless prove the credential authenticated.
    AUTHENTICATED_ERROR_STATUSES: frozenset[int] = frozenset()
    INVALID_STATUSES: frozenset[int] = frozenset({401})
    FORBIDDEN_STATUSES: frozenset[int] = frozenset({403})

    def _build_url(self, credential_value: str) -> str:
        """Build request URL. Override for URL-template auth."""
        return self.ENDPOINT

    def _build_headers(self, credential_value: str) -> dict[str, str]:
        """Build request headers based on AUTH_TYPE."""
        headers: dict[str, str] = {"Accept": "application/json"}
        if self.AUTH_TYPE == self.AUTH_BEARER:
            headers["Authorization"] = f"Bearer {credential_value}"
        elif self.AUTH_TYPE == self.AUTH_HEADER:
            headers[self.AUTH_HEADER_NAME] = self.AUTH_HEADER_TEMPLATE.format(
                token=credential_value
            )
        return headers

    def _build_params(self, credential_value: str) -> dict[str, str]:
        """Build query parameters. Includes auth param for AUTH_QUERY type."""
        if self.AUTH_TYPE == self.AUTH_QUERY:
            return {self.AUTH_QUERY_PARAM_NAME: credential_value}
        return {}

    def _build_auth(self, credential_value: str) -> tuple[str, str] | None:
        """Build HTTP Basic auth tuple for AUTH_BASIC type.

        The credential is the username; the password is empty.
        """
        if self.AUTH_TYPE == self.AUTH_BASIC:
            return (credential_value, "")
        return None

    def _build_json_body(self, credential_value: str) -> dict | None:
        """Build JSON request body. Override for POST requests that need one."""
        return None

    def _extract_identity(self, data: dict) -> dict[str, str]:
        """Extract identity info from successful response. Override in subclass."""
        return {}

    def _interpret_response(self, response: httpx.Response) -> HealthCheckResult:
        """Interpret HTTP response. Override for non-standard status logic."""
        status = response.status_code

        if status in self.VALID_STATUSES:
            identity: dict[str, str] = {}
            # Best-effort identity extraction; an unparseable body does not
            # invalidate an otherwise-successful check.
            try:
                data = response.json()
                identity = self._extract_identity(data)
            except Exception:
                pass
            return HealthCheckResult(
                valid=True,
                message=f"{self.SERVICE_NAME} credentials valid",
                details={"identity": identity} if identity else {},
            )
        elif status in self.RATE_LIMITED_STATUSES:
            # Being rate limited still proves the credential authenticated.
            return HealthCheckResult(
                valid=True,
                message=f"{self.SERVICE_NAME} credentials valid (rate limited)",
                details={"status_code": status, "rate_limited": True},
            )
        elif status in self.AUTHENTICATED_ERROR_STATUSES:
            return HealthCheckResult(
                valid=True,
                message=f"{self.SERVICE_NAME} credentials valid",
                details={"status_code": status},
            )
        elif status in self.INVALID_STATUSES:
            return HealthCheckResult(
                valid=False,
                message=f"{self.SERVICE_NAME} credentials are invalid or expired",
                details={"status_code": status},
            )
        elif status in self.FORBIDDEN_STATUSES:
            return HealthCheckResult(
                valid=False,
                message=f"{self.SERVICE_NAME} credentials lack required permissions",
                details={"status_code": status},
            )
        else:
            return HealthCheckResult(
                valid=False,
                message=f"{self.SERVICE_NAME} API returned status {status}",
                details={"status_code": status},
            )

    def check(self, credential_value: str) -> HealthCheckResult:
        """Execute the health check. Normally not overridden.

        Builds URL/headers/params/auth/body via the hook methods, performs
        the request, and maps the result through ``_interpret_response``.
        Network failures are converted into invalid HealthCheckResults with
        credential material scrubbed from the error text.
        """
        try:
            url = self._build_url(credential_value)
            headers = self._build_headers(credential_value)
            params = self._build_params(credential_value)
            auth = self._build_auth(credential_value)
            json_body = self._build_json_body(credential_value)

            with httpx.Client(timeout=self.TIMEOUT) as client:
                kwargs: dict[str, Any] = {"headers": headers}
                if params:
                    kwargs["params"] = params
                if auth:
                    kwargs["auth"] = auth
                if json_body is not None:
                    kwargs["json"] = json_body

                if self.HTTP_METHOD.upper() == "POST":
                    response = client.post(url, **kwargs)
                else:
                    response = client.get(url, **kwargs)

            return self._interpret_response(response)

        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message=f"{self.SERVICE_NAME} API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            error_msg = str(e)
            # FIX: for AUTH_URL and AUTH_QUERY the secret is embedded in the
            # request URL, and httpx RequestError text includes the URL — the
            # keyword check below would miss it. Scrub the raw credential first.
            if credential_value and credential_value in error_msg:
                error_msg = error_msg.replace(credential_value, "[REDACTED]")
            if any(s in error_msg for s in ("Bearer", "Authorization", "api_key", "token")):
                error_msg = "Request failed (details redacted for security)"
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to {self.SERVICE_NAME}: {error_msg}",
                details={"error": error_msg},
            )
|
||||
|
||||
|
||||
class GoogleCalendarHealthChecker(OAuthBearerHealthChecker):
|
||||
"""Health checker for Google Calendar OAuth tokens."""
|
||||
|
||||
@@ -740,6 +912,152 @@ class GoogleGmailHealthChecker(OAuthBearerHealthChecker):
|
||||
return {"email": email} if email else {}
|
||||
|
||||
|
||||
# --- New checkers using BaseHttpHealthChecker ---
|
||||
|
||||
|
||||
class StripeHealthChecker(BaseHttpHealthChecker):
    """Health checker for Stripe API key.

    Uses the base-class defaults: GET with ``Authorization: Bearer <key>``,
    200 = valid / 401 = invalid.
    """

    # Lightweight read-only endpoint that requires a valid secret key.
    ENDPOINT = "https://api.stripe.com/v1/balance"
    SERVICE_NAME = "Stripe"
|
||||
|
||||
|
||||
class ExaSearchHealthChecker(BaseHttpHealthChecker):
    """Health checker for Exa Search API key.

    POSTs a minimal search request using the base class's Bearer-token
    header. NOTE(review): Exa's docs also describe an ``x-api-key`` header —
    confirm Bearer auth is accepted if checks start failing.
    """

    ENDPOINT = "https://api.exa.ai/search"
    SERVICE_NAME = "Exa Search"
    HTTP_METHOD = "POST"  # the search endpoint does not accept GET

    def _build_json_body(self, credential_value: str) -> dict:
        """Smallest valid search payload; only the auth outcome matters."""
        return {"query": "test", "numResults": 1}
|
||||
|
||||
|
||||
class GoogleDocsHealthChecker(OAuthBearerHealthChecker):
    """Health checker for Google Docs OAuth tokens."""

    def __init__(self):
        # Probes document ID "1", which will not exist for the caller.
        # NOTE(review): this relies on OAuthBearerHealthChecker treating an
        # authenticated error (403/404) as proof the token works — confirm.
        super().__init__(
            endpoint="https://docs.googleapis.com/v1/documents/1",
            service_name="Google Docs",
        )
|
||||
|
||||
|
||||
class CalcomHealthChecker(BaseHttpHealthChecker):
    """Health checker for Cal.com API key.

    Cal.com's v1 API authenticates via an ``apiKey`` query parameter
    rather than a header, hence AUTH_QUERY.
    """

    ENDPOINT = "https://api.cal.com/v1/me"
    SERVICE_NAME = "Cal.com"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
    AUTH_QUERY_PARAM_NAME = "apiKey"
|
||||
|
||||
|
||||
class SerpApiHealthChecker(BaseHttpHealthChecker):
    """Health checker for SerpAPI key.

    Probes the account endpoint using SerpAPI's ``api_key``
    query-parameter auth.
    """

    ENDPOINT = "https://serpapi.com/account.json"
    SERVICE_NAME = "SerpAPI"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
    AUTH_QUERY_PARAM_NAME = "api_key"
|
||||
|
||||
|
||||
class ApolloHealthChecker(BaseHttpHealthChecker):
    """Health checker for Apollo.io API key.

    Uses Apollo's dedicated auth health endpoint with ``api_key``
    query-parameter auth.
    """

    ENDPOINT = "https://api.apollo.io/v1/auth/health"
    SERVICE_NAME = "Apollo"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
    AUTH_QUERY_PARAM_NAME = "api_key"
|
||||
|
||||
|
||||
class TelegramHealthChecker(BaseHttpHealthChecker):
    """Health checker for Telegram bot tokens via the Bot API ``getMe`` call."""

    SERVICE_NAME = "Telegram"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_URL

    def _build_url(self, credential_value: str) -> str:
        """The bot token is a path segment of every Bot API URL."""
        return f"https://api.telegram.org/bot{credential_value}/getMe"

    def _build_headers(self, credential_value: str) -> dict[str, str]:
        """No auth header — the token already travels in the URL."""
        return {"Accept": "application/json"}

    def _interpret_response(self, response: httpx.Response) -> HealthCheckResult:
        """Telegram wraps results in an envelope: data["ok"] / data["result"]."""
        status = response.status_code

        if status == 401:
            return HealthCheckResult(
                valid=False,
                message="Telegram bot token is invalid",
                details={"status_code": 401},
            )

        if status != 200:
            return HealthCheckResult(
                valid=False,
                message=f"Telegram API returned status {status}",
                details={"status_code": status},
            )

        try:
            payload = response.json()
            if payload.get("ok"):
                bot_name = payload.get("result", {}).get("username", "unknown")
                who = {"username": bot_name} if bot_name != "unknown" else {}
                return HealthCheckResult(
                    valid=True,
                    message=f"Telegram bot token valid (bot: @{bot_name})",
                    details={"identity": who},
                )
            return HealthCheckResult(
                valid=False,
                message="Telegram bot token is invalid",
                details={"telegram_error": payload.get("description", "")},
            )
        except Exception:
            # Unparseable 200 body — optimistically treat the token as valid.
            return HealthCheckResult(
                valid=True,
                message="Telegram credentials valid",
            )
|
||||
|
||||
|
||||
class NewsdataHealthChecker(BaseHttpHealthChecker):
    """Health checker for Newsdata.io API key (``apikey`` query-param auth)."""

    ENDPOINT = "https://newsdata.io/api/1/news"
    SERVICE_NAME = "Newsdata"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
    AUTH_QUERY_PARAM_NAME = "apikey"

    def _build_params(self, credential_value: str) -> dict[str, str]:
        """Add a minimal search term alongside the auth parameter."""
        # NOTE(review): assumes the /news endpoint rejects requests without
        # any filter params — confirm against the Newsdata docs.
        params = super()._build_params(credential_value)
        params["q"] = "test"
        return params
|
||||
|
||||
|
||||
class FinlightHealthChecker(BaseHttpHealthChecker):
    """Health checker for Finlight API key.

    Uses base-class defaults (GET + Bearer token). NOTE(review): confirm
    Finlight accepts Bearer auth — some of its docs show an ``X-API-KEY``
    header instead.
    """

    ENDPOINT = "https://api.finlight.me/v1/news"
    SERVICE_NAME = "Finlight"
|
||||
|
||||
|
||||
class BrevoHealthChecker(BaseHttpHealthChecker):
    """Health checker for Brevo API key (custom ``api-key`` header auth)."""

    ENDPOINT = "https://api.brevo.com/v3/account"
    SERVICE_NAME = "Brevo"
    AUTH_TYPE = BaseHttpHealthChecker.AUTH_HEADER
    AUTH_HEADER_NAME = "api-key"
    AUTH_HEADER_TEMPLATE = "{token}"

    # (response field, identity key) pairs reported from the /account payload.
    _IDENTITY_FIELDS = (("email", "email"), ("companyName", "company"))

    def _extract_identity(self, data: dict) -> dict[str, str]:
        """Pull account email/company out of the /account response."""
        return {
            out_key: data[src_key]
            for src_key, out_key in self._IDENTITY_FIELDS
            if data.get(src_key)
        }
|
||||
|
||||
|
||||
# Registry of health checkers
|
||||
HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
|
||||
"discord": DiscordHealthChecker(),
|
||||
@@ -753,6 +1071,16 @@ HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
|
||||
"anthropic": AnthropicHealthChecker(),
|
||||
"github": GitHubHealthChecker(),
|
||||
"resend": ResendHealthChecker(),
|
||||
"stripe": StripeHealthChecker(),
|
||||
"exa_search": ExaSearchHealthChecker(),
|
||||
"google_docs": GoogleDocsHealthChecker(),
|
||||
"calcom": CalcomHealthChecker(),
|
||||
"serpapi": SerpApiHealthChecker(),
|
||||
"apollo": ApolloHealthChecker(),
|
||||
"telegram": TelegramHealthChecker(),
|
||||
"newsdata": NewsdataHealthChecker(),
|
||||
"finlight": FinlightHealthChecker(),
|
||||
"brevo": BrevoHealthChecker(),
|
||||
}
|
||||
|
||||
|
||||
@@ -807,3 +1135,80 @@ def check_credential_health(
|
||||
return checker.check(credential_value, kwargs["cse_id"])
|
||||
|
||||
return checker.check(credential_value)
|
||||
|
||||
|
||||
def validate_integration_wiring(credential_name: str) -> list[str]:
    """Verify that a credential integration has every required piece.

    Development-time helper: checks that a CredentialSpec exists, that its
    required fields are populated, that a health checker is registered, and
    that the spec's and checker's endpoints agree.

    Args:
        credential_name: The credential name to validate (e.g., 'jira').

    Returns:
        Human-readable issue descriptions; an empty list means fully wired.

    Example::

        issues = validate_integration_wiring("stripe")
        for issue in issues:
            print(f" - {issue}")
    """
    from . import CREDENTIAL_SPECS

    # 1. Spec must exist before anything else can be checked.
    spec = CREDENTIAL_SPECS.get(credential_name)
    if spec is None:
        return [
            f"No CredentialSpec for '{credential_name}' in CREDENTIAL_SPECS. "
            f"Add it to the appropriate category file and import in __init__.py."
        ]

    issues: list[str] = []

    # 2. Required fields, expressed as (is_missing, message) pairs.
    field_checks = [
        (not spec.env_var, "CredentialSpec.env_var is empty"),
        (not spec.description, "CredentialSpec.description is empty"),
        (
            not spec.tools and not spec.node_types,
            "CredentialSpec has no tools or node_types",
        ),
        (
            not spec.help_url,
            "CredentialSpec.help_url is empty (users need this to get credentials)",
        ),
        (
            spec.direct_api_key_supported and not spec.api_key_instructions,
            "CredentialSpec.api_key_instructions is empty but direct_api_key_supported=True",
        ),
    ]
    issues.extend(msg for failed, msg in field_checks if failed)

    # 3. Health-check wiring.
    if not spec.health_check_endpoint:
        issues.append(
            "CredentialSpec.health_check_endpoint is empty. "
            "Add a lightweight API endpoint for credential validation."
        )
        return issues

    checker = HEALTH_CHECKERS.get(credential_name)
    if checker is None:
        issues.append(
            f"No entry in HEALTH_CHECKERS for '{credential_name}'. "
            f"The OAuthBearerHealthChecker fallback will be used. "
            f"Add a dedicated checker if auth is not Bearer token."
        )
        return issues

    # Endpoint consistency: class-constant (ENDPOINT) or instance (endpoint) style.
    checker_endpoint = getattr(checker, "ENDPOINT", None) or getattr(checker, "endpoint", None)
    if checker_endpoint and spec.health_check_endpoint:
        spec_base = spec.health_check_endpoint.split("?")[0]
        checker_base = str(checker_endpoint).split("?")[0]
        if spec_base != checker_base:
            issues.append(
                f"Endpoint mismatch: spec='{spec.health_check_endpoint}' "
                f"vs checker='{checker_endpoint}'"
            )

    return issues
|
||||
|
||||
@@ -85,7 +85,7 @@ class CredentialStoreAdapter:
|
||||
|
||||
# --- Existing CredentialManager API ---
|
||||
|
||||
def get(self, name: str) -> str | None:
|
||||
def get(self, name: str, account: str | None = None) -> str | None:
|
||||
"""
|
||||
Get a credential value by logical name.
|
||||
|
||||
@@ -94,6 +94,10 @@ class CredentialStoreAdapter:
|
||||
|
||||
Args:
|
||||
name: Logical credential name (e.g., "brave_search")
|
||||
account: Optional alias for per-call routing to a specific named local
|
||||
account (e.g. "work"). When provided, looks up the named account
|
||||
from LocalCredentialRegistry before falling through to the store.
|
||||
This mirrors the ``account=`` routing available for Aden credentials.
|
||||
|
||||
Returns:
|
||||
The credential value, or None if not set
|
||||
@@ -104,6 +108,16 @@ class CredentialStoreAdapter:
|
||||
if name not in self._specs:
|
||||
raise KeyError(f"Unknown credential '{name}'. Available: {list(self._specs.keys())}")
|
||||
|
||||
if account is not None:
|
||||
try:
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
|
||||
key = LocalCredentialRegistry.default().get_key(name, account)
|
||||
if key is not None:
|
||||
return key
|
||||
except Exception:
|
||||
pass # Fall through to standard store lookup
|
||||
|
||||
return self._store.get(name)
|
||||
|
||||
def get_spec(self, name: str) -> CredentialSpec:
|
||||
@@ -279,19 +293,43 @@ class CredentialStoreAdapter:
|
||||
def get_all_account_info(self) -> list[dict]:
    """Collect all accounts across all configured providers.

    Includes both Aden OAuth accounts and named local API key accounts.
    Deduplicates by (provider, alias) to avoid listing the same account
    twice when it appears in both stores.
    """
    accounts: list[dict] = []
    # Providers whose store accounts have already been pulled.
    seen_specs: set[str] = set()
    # (provider, alias) pairs already emitted, across both stores.
    seen_accounts: set[tuple[str, str]] = set()

    for name, spec in self._specs.items():
        provider = spec.credential_id or name
        # Skip duplicate specs mapping to the same provider, and providers
        # whose credential is not configured.
        if provider in seen_specs or not self.is_available(name):
            continue
        seen_specs.add(provider)
        for acct in self._store.list_accounts(provider):
            key = (acct.get("provider", ""), acct.get("alias", ""))
            if key not in seen_accounts:
                seen_accounts.add(key)
                accounts.append(acct)

    # Include named local API key accounts
    for acct in self.list_local_accounts():
        key = (acct.get("provider", ""), acct.get("alias", ""))
        if key not in seen_accounts:
            seen_accounts.add(key)
            accounts.append(acct)

    return accounts
|
||||
|
||||
def get_tool_provider_map(self) -> dict[str, str]:
    """Map tool names to provider names for account routing.

    Returns:
        Dict mapping tool_name -> provider_name
        (e.g. {"gmail_list_messages": "google", "slack_send_message": "slack"})
    """
    # Defensive copy so callers cannot mutate the adapter's internal mapping.
    return dict(self._tool_to_cred)
|
||||
|
||||
def get_by_alias(self, provider_name: str, alias: str) -> str | None:
|
||||
"""Resolve a specific account's token by alias."""
|
||||
cred = self._store.get_credential_by_alias(provider_name, alias)
|
||||
@@ -301,6 +339,58 @@ class CredentialStoreAdapter:
|
||||
"""Alias for get_by_alias (backward compat)."""
|
||||
return self.get_by_alias(provider_name, label)
|
||||
|
||||
# --- Local credential registry ---
|
||||
|
||||
def list_local_accounts(self, credential_id: str | None = None) -> list[dict]:
|
||||
"""
|
||||
List named local API key accounts from LocalCredentialRegistry.
|
||||
|
||||
Args:
|
||||
credential_id: If given, filter to this credential type only.
|
||||
|
||||
Returns:
|
||||
List of account dicts (same shape as Aden account dicts, source='local').
|
||||
"""
|
||||
try:
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
|
||||
registry = LocalCredentialRegistry.default()
|
||||
return [info.to_account_dict() for info in registry.list_accounts(credential_id)]
|
||||
except Exception:
|
||||
return []
|
||||
|
||||
def activate_local_account(self, credential_id: str, alias: str) -> bool:
    """
    Inject a named local account's API key into the environment for this session.

    Session-level routing: selecting an account injects its key as the env var
    that tools already read, so no tool signature changes are required.

    Args:
        credential_id: Logical credential name (e.g. "brave_search").
        alias: Account alias (e.g. "work").

    Returns:
        True if the key was found and injected, False otherwise.
    """
    import os

    try:
        from framework.credentials.local.registry import LocalCredentialRegistry

        key = LocalCredentialRegistry.default().get_key(credential_id, alias)
        spec = self._specs.get(credential_id) if key is not None else None
        if key is None or spec is None:
            # Unknown account alias or unknown credential spec.
            return False
        os.environ[spec.env_var] = key
        return True
    except Exception:
        # Best-effort: any failure (missing registry, lookup error) -> not activated.
        return False
|
||||
|
||||
@property
|
||||
def store(self) -> CredentialStore:
|
||||
"""Access the underlying credential store for advanced operations."""
|
||||
|
||||
@@ -25,6 +25,7 @@ from .account_info_tool import register_tools as register_account_info
|
||||
from .apollo_tool import register_tools as register_apollo
|
||||
from .arxiv_tool import register_tools as register_arxiv
|
||||
from .bigquery_tool import register_tools as register_bigquery
|
||||
from .brevo_tool import register_tools as register_brevo
|
||||
from .calcom_tool import register_tools as register_calcom
|
||||
from .calendar_tool import register_tools as register_calendar
|
||||
from .csv_tool import register_tools as register_csv
|
||||
@@ -76,6 +77,9 @@ from .vision_tool import register_tools as register_vision
|
||||
from .web_scrape_tool import register_tools as register_web_scrape
|
||||
from .web_search_tool import register_tools as register_web_search
|
||||
|
||||
# Web and PDF tools
|
||||
from .wikipedia_tool import register_tools as register_wikipedia
|
||||
|
||||
|
||||
def register_all_tools(
|
||||
mcp: FastMCP,
|
||||
@@ -98,6 +102,7 @@ def register_all_tools(
|
||||
register_pdf_read(mcp)
|
||||
register_time(mcp)
|
||||
register_runtime_logs(mcp)
|
||||
register_wikipedia(mcp)
|
||||
register_arxiv(mcp)
|
||||
|
||||
# Tools that need credentials (pass credentials if provided)
|
||||
@@ -147,6 +152,7 @@ def register_all_tools(
|
||||
register_subdomain_enumerator(mcp)
|
||||
register_risk_scorer(mcp)
|
||||
register_stripe(mcp, credentials=credentials)
|
||||
register_brevo(mcp, credentials=credentials)
|
||||
|
||||
# Postgres tool
|
||||
register_postgres(mcp, credentials=credentials)
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
"""Brevo (formerly Sendinblue) tool - transactional email, SMS, and contacts."""
|
||||
|
||||
from .brevo_tool import register_tools
|
||||
|
||||
__all__ = ["register_tools"]
|
||||
@@ -0,0 +1,487 @@
|
||||
"""
|
||||
Brevo Tool - Send transactional emails, SMS, and manage contacts via Brevo API.
|
||||
|
||||
Supports:
|
||||
- Transactional email sending
|
||||
- Transactional SMS sending
|
||||
- Contact create/read/update
|
||||
|
||||
API Reference: https://developers.brevo.com/reference
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import httpx
|
||||
from fastmcp import FastMCP
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from aden_tools.credentials import CredentialStoreAdapter
|
||||
|
||||
BREVO_API_BASE = "https://api.brevo.com/v3"
|
||||
|
||||
|
||||
class _BrevoClient:
    """Internal client wrapping Brevo API v3 calls.

    Each public method performs one synchronous HTTP request and returns
    either the decoded JSON payload or an ``{"error": ...}`` dict produced
    by ``_handle_response``. Network-level exceptions (timeouts, connection
    errors) are NOT caught here — callers handle httpx exceptions.
    """

    # Per-request timeout (seconds) applied to every Brevo call.
    _TIMEOUT = 30.0

    def __init__(self, api_key: str):
        self._api_key = api_key

    @property
    def _headers(self) -> dict[str, str]:
        """Standard Brevo v3 headers; auth travels in the ``api-key`` header."""
        return {
            "api-key": self._api_key,
            "Content-Type": "application/json",
            "Accept": "application/json",
        }

    def _handle_response(self, response: httpx.Response) -> dict[str, Any]:
        """Translate an HTTP response into a result dict.

        Known error statuses map to friendly ``{"error": ...}`` messages;
        success returns the JSON body, or ``{"success": True}`` for empty
        (204) or non-JSON bodies.
        """
        if response.status_code == 401:
            return {"error": "Invalid Brevo API key"}
        if response.status_code == 400:
            try:
                detail = response.json()
                msg = detail.get("message", response.text)
            except Exception:
                msg = response.text
            return {"error": f"Bad request: {msg}"}
        if response.status_code == 403:
            return {"error": "Brevo API key lacks required permissions"}
        if response.status_code == 404:
            return {"error": "Resource not found"}
        if response.status_code == 429:
            return {"error": "Rate limit exceeded. Try again later."}
        if response.status_code >= 400:
            try:
                detail = response.json().get("message", response.text)
            except Exception:
                detail = response.text
            return {"error": f"Brevo API error (HTTP {response.status_code}): {detail}"}
        # Success (200, 201, 204)
        if response.status_code == 204:
            return {"success": True}
        try:
            return response.json()
        except Exception:
            return {"success": True}

    def _request(
        self,
        method: str,
        path: str,
        payload: dict[str, Any] | None = None,
    ) -> dict[str, Any]:
        """Issue a single Brevo API call and normalize the response.

        Centralizes the URL building, headers, timeout, and response
        handling that every endpoint method previously duplicated.
        """
        response = httpx.request(
            method,
            f"{BREVO_API_BASE}{path}",
            headers=self._headers,
            json=payload,
            timeout=self._TIMEOUT,
        )
        return self._handle_response(response)

    def send_email(
        self,
        to: list[dict[str, str]],
        subject: str,
        html_content: str,
        sender: dict[str, str],
        text_content: str | None = None,
        cc: list[dict[str, str]] | None = None,
        bcc: list[dict[str, str]] | None = None,
        reply_to: dict[str, str] | None = None,
        tags: list[str] | None = None,
    ) -> dict[str, Any]:
        """Send a transactional email.

        Args:
            to: Recipient dicts ({"email": ..., "name": ...}).
            subject: Subject line.
            html_content: HTML body.
            sender: Sender dict ({"email": ..., "name": ...}).
            text_content: Optional plain-text alternative body.
            cc: Optional CC recipients, same shape as ``to``.
            bcc: Optional BCC recipients, same shape as ``to``.
            reply_to: Optional reply-to dict.
            tags: Optional categorization tags.

        Returns:
            Brevo response JSON (contains ``messageId``) or an error dict.
        """
        payload: dict[str, Any] = {
            "to": to,
            "subject": subject,
            "htmlContent": html_content,
            "sender": sender,
        }
        if text_content:
            payload["textContent"] = text_content
        if cc:
            payload["cc"] = cc
        if bcc:
            payload["bcc"] = bcc
        if reply_to:
            payload["replyTo"] = reply_to
        if tags:
            payload["tags"] = tags
        return self._request("POST", "/smtp/email", payload)

    def send_sms(
        self,
        sender: str,
        recipient: str,
        content: str,
        sms_type: str = "transactional",
        tag: str | None = None,
    ) -> dict[str, Any]:
        """Send a transactional SMS.

        Args:
            sender: Sender name or phone number.
            recipient: Recipient phone number with country code.
            content: Message text.
            sms_type: "transactional" (default) or "marketing".
            tag: Optional categorization tag.
        """
        payload: dict[str, Any] = {
            "sender": sender,
            "recipient": recipient,
            "content": content,
            "type": sms_type,
        }
        if tag:
            payload["tag"] = tag
        return self._request("POST", "/transactionalSMS/send", payload)

    def create_contact(
        self,
        email: str | None = None,
        attributes: dict[str, Any] | None = None,
        list_ids: list[int] | None = None,
        update_enabled: bool = False,
    ) -> dict[str, Any]:
        """Create a new contact (upserts when ``update_enabled`` is True)."""
        payload: dict[str, Any] = {}
        if email:
            payload["email"] = email
        if attributes:
            payload["attributes"] = attributes
        if list_ids:
            payload["listIds"] = list_ids
        if update_enabled:
            payload["updateEnabled"] = True
        return self._request("POST", "/contacts", payload)

    def get_contact(self, identifier: str) -> dict[str, Any]:
        """Get a contact by email or ID."""
        return self._request("GET", f"/contacts/{identifier}")

    def update_contact(
        self,
        identifier: str,
        attributes: dict[str, Any] | None = None,
        list_ids: list[int] | None = None,
        unlink_list_ids: list[int] | None = None,
    ) -> dict[str, Any]:
        """Update a contact's attributes and list memberships."""
        payload: dict[str, Any] = {}
        if attributes:
            payload["attributes"] = attributes
        if list_ids:
            payload["listIds"] = list_ids
        if unlink_list_ids:
            payload["unlinkListIds"] = unlink_list_ids
        return self._request("PUT", f"/contacts/{identifier}", payload)
|
||||
|
||||
|
||||
def register_tools(
|
||||
mcp: FastMCP,
|
||||
credentials: CredentialStoreAdapter | None = None,
|
||||
) -> None:
|
||||
"""Register Brevo tools with the MCP server."""
|
||||
|
||||
def _get_api_key() -> str | None:
|
||||
"""Get Brevo API key from credential store or environment."""
|
||||
if credentials is not None:
|
||||
key = credentials.get("brevo")
|
||||
if key is not None and not isinstance(key, str):
|
||||
raise TypeError(
|
||||
f"Expected string from credentials.get('brevo'), got {type(key).__name__}"
|
||||
)
|
||||
return key
|
||||
return os.getenv("BREVO_API_KEY")
|
||||
|
||||
def _get_client() -> _BrevoClient | dict[str, str]:
|
||||
"""Get a Brevo client, or return an error dict if no credentials."""
|
||||
api_key = _get_api_key()
|
||||
if not api_key:
|
||||
return {
|
||||
"error": "Brevo API key not configured",
|
||||
"help": (
|
||||
"Set BREVO_API_KEY environment variable or configure via "
|
||||
"credential store. Get your key at https://app.brevo.com/settings/keys/api"
|
||||
),
|
||||
}
|
||||
return _BrevoClient(api_key)
|
||||
|
||||
@mcp.tool()
|
||||
def brevo_send_email(
|
||||
to: list[dict[str, str]],
|
||||
subject: str,
|
||||
html_content: str,
|
||||
sender_email: str,
|
||||
sender_name: str = "",
|
||||
text_content: str = "",
|
||||
cc: list[dict[str, str]] | None = None,
|
||||
bcc: list[dict[str, str]] | None = None,
|
||||
reply_to_email: str = "",
|
||||
tags: list[str] | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Send a transactional email via Brevo.
|
||||
|
||||
Use this for notifications, alerts, confirmations, or any triggered email.
|
||||
|
||||
Args:
|
||||
to: Recipients list. Each item: {"email": "user@example.com", "name": "User Name"}.
|
||||
Name is optional.
|
||||
subject: Email subject line.
|
||||
html_content: Email body as HTML string.
|
||||
sender_email: Sender email address (must be a verified sender in Brevo).
|
||||
sender_name: Sender display name. Optional.
|
||||
text_content: Plain text alternative body. Optional.
|
||||
cc: CC recipients. Same format as 'to'. Optional.
|
||||
bcc: BCC recipients. Same format as 'to'. Optional.
|
||||
reply_to_email: Reply-to email address. Optional.
|
||||
tags: Tags for categorizing the email. Optional.
|
||||
|
||||
Returns:
|
||||
Dict with messageId on success, or error dict on failure.
|
||||
"""
|
||||
client = _get_client()
|
||||
if isinstance(client, dict):
|
||||
return client
|
||||
|
||||
if not to:
|
||||
return {"error": "At least one recipient is required"}
|
||||
if not subject:
|
||||
return {"error": "Subject is required"}
|
||||
if not html_content:
|
||||
return {"error": "HTML content is required"}
|
||||
if not sender_email:
|
||||
return {"error": "Sender email is required"}
|
||||
|
||||
sender: dict[str, str] = {"email": sender_email}
|
||||
if sender_name:
|
||||
sender["name"] = sender_name
|
||||
|
||||
reply_to = {"email": reply_to_email} if reply_to_email else None
|
||||
|
||||
try:
|
||||
result = client.send_email(
|
||||
to=to,
|
||||
subject=subject,
|
||||
html_content=html_content,
|
||||
sender=sender,
|
||||
text_content=text_content if text_content else None,
|
||||
cc=cc,
|
||||
bcc=bcc,
|
||||
reply_to=reply_to,
|
||||
tags=tags,
|
||||
)
|
||||
if "error" in result:
|
||||
return result
|
||||
return {
|
||||
"success": True,
|
||||
"message_id": result.get("messageId", ""),
|
||||
"to": [r.get("email") for r in to],
|
||||
"subject": subject,
|
||||
}
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Brevo request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": f"Network error: {e}"}
|
||||
|
||||
@mcp.tool()
def brevo_send_sms(
    sender: str,
    recipient: str,
    content: str,
    sms_type: str = "transactional",
    tag: str = "",
) -> dict[str, Any]:
    """
    Send a transactional SMS via Brevo.

    Use this for SMS notifications, alerts, or verification messages.

    Args:
        sender: Sender name (max 11 alphanumeric chars) or phone number (max 15 digits).
        recipient: Recipient phone number with country code (e.g., "33612345678").
        content: SMS message text. Messages over 160 chars are sent as multiple SMS.
        sms_type: Either "transactional" or "marketing". Defaults to "transactional".
        tag: Optional tag for categorizing the SMS.

    Returns:
        Dict with messageId on success, or error dict on failure.
    """
    client = _get_client()
    if isinstance(client, dict):
        # _get_client returned an error payload instead of a usable client.
        return client

    # Required-field checks, preserving the original error precedence.
    required = (
        (sender, "Sender is required"),
        (recipient, "Recipient phone number is required"),
        (content, "SMS content is required"),
    )
    for value, error_message in required:
        if not value:
            return {"error": error_message}

    try:
        api_result = client.send_sms(
            sender=sender,
            recipient=recipient,
            content=content,
            sms_type=sms_type,
            tag=tag or None,
        )
        # Brevo-level failures come back as a dict carrying an "error" key.
        if "error" in api_result:
            return api_result
        return {
            "success": True,
            "message_id": api_result.get("messageId", ""),
            "recipient": recipient,
        }
    except httpx.TimeoutException:
        return {"error": "Brevo request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
|
||||
|
||||
@mcp.tool()
def brevo_create_contact(
    email: str,
    attributes: dict[str, Any] | None = None,
    list_ids: list[int] | None = None,
    update_enabled: bool = False,
) -> dict[str, Any]:
    """
    Create a contact in Brevo.

    Use this to add new contacts to your Brevo account for email/SMS campaigns.

    Args:
        email: Contact email address.
        attributes: Contact attributes in UPPERCASE (e.g., {"FNAME": "John", "LNAME": "Doe"}).
            Standard attributes: FNAME, LNAME, SMS (phone with country code like +33xxxxxxxxxx).
        list_ids: List IDs to add the contact to. Optional.
        update_enabled: If True, updates the contact if it already exists. Defaults to False.

    Returns:
        Dict with contact id on success, or error dict on failure.
    """
    client = _get_client()
    if isinstance(client, dict):
        # Client construction failed; the dict is already an error payload.
        return client

    if not email:
        return {"error": "Email is required"}

    try:
        api_result = client.create_contact(
            email=email,
            attributes=attributes,
            list_ids=list_ids,
            update_enabled=update_enabled,
        )
        # Brevo-level failures come back as a dict carrying an "error" key.
        if "error" in api_result:
            return api_result
        return {"success": True, "id": api_result.get("id"), "email": email}
    except httpx.TimeoutException:
        return {"error": "Brevo request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
|
||||
|
||||
@mcp.tool()
def brevo_get_contact(
    identifier: str,
) -> dict[str, Any]:
    """
    Get a contact from Brevo by email address or contact ID.

    Args:
        identifier: Contact email address or numeric contact ID.

    Returns:
        Dict with contact details (email, attributes, listIds, statistics)
        or error dict on failure.
    """
    client = _get_client()
    if isinstance(client, dict):
        # _get_client surfaced a configuration error; pass it straight through.
        return client

    if not identifier:
        return {"error": "Contact identifier (email or ID) is required"}

    try:
        contact = client.get_contact(identifier)
        if "error" in contact:
            return contact
        # Normalise the Brevo payload into snake_case keys for callers.
        summary: dict[str, Any] = {"success": True}
        summary["id"] = contact.get("id")
        summary["email"] = contact.get("email")
        summary["attributes"] = contact.get("attributes", {})
        summary["list_ids"] = contact.get("listIds", [])
        summary["email_blacklisted"] = contact.get("emailBlacklisted", False)
        summary["sms_blacklisted"] = contact.get("smsBlacklisted", False)
        return summary
    except httpx.TimeoutException:
        return {"error": "Brevo request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
|
||||
|
||||
@mcp.tool()
def brevo_update_contact(
    identifier: str,
    attributes: dict[str, Any] | None = None,
    list_ids: list[int] | None = None,
    unlink_list_ids: list[int] | None = None,
) -> dict[str, Any]:
    """
    Update a contact in Brevo.

    Args:
        identifier: Contact email address or numeric contact ID.
        attributes: Attributes to update in UPPERCASE (e.g., {"FNAME": "Jane"}).
        list_ids: List IDs to add the contact to. Optional.
        unlink_list_ids: List IDs to remove the contact from. Optional.

    Returns:
        Dict with success status, or error dict on failure.
    """
    client = _get_client()
    if isinstance(client, dict):
        # _get_client surfaced a configuration error; pass it straight through.
        return client

    if not identifier:
        return {"error": "Contact identifier (email or ID) is required"}

    update_kwargs = {
        "identifier": identifier,
        "attributes": attributes,
        "list_ids": list_ids,
        "unlink_list_ids": unlink_list_ids,
    }
    try:
        outcome = client.update_contact(**update_kwargs)
        # Brevo-level failures come back as a dict carrying an "error" key.
        if "error" in outcome:
            return outcome
        return {
            "success": True,
            "identifier": identifier,
            "message": "Contact updated successfully",
        }
    except httpx.TimeoutException:
        return {"error": "Brevo request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
|
||||
@@ -114,10 +114,15 @@ def register_tools(
|
||||
"subject": subject,
|
||||
}
|
||||
|
||||
def _get_credential(provider: Literal["resend", "gmail"]) -> str | None:
|
||||
def _get_credential(
|
||||
provider: Literal["resend", "gmail"],
|
||||
account: str = "",
|
||||
) -> str | None:
|
||||
"""Get the credential for the requested provider."""
|
||||
if provider == "gmail":
|
||||
if credentials is not None:
|
||||
if account:
|
||||
return credentials.get_by_alias("google", account)
|
||||
return credentials.get("google")
|
||||
return os.getenv("GOOGLE_ACCESS_TOKEN")
|
||||
# resend
|
||||
@@ -150,6 +155,7 @@ def register_tools(
|
||||
from_email: str | None = None,
|
||||
cc: str | list[str] | None = None,
|
||||
bcc: str | list[str] | None = None,
|
||||
account: str = "",
|
||||
) -> dict:
|
||||
"""Core email sending logic, callable by other tools."""
|
||||
from_email = _resolve_from_email(from_email)
|
||||
@@ -182,7 +188,7 @@ def register_tools(
|
||||
"help": "Pass from_email or set EMAIL_FROM environment variable",
|
||||
}
|
||||
|
||||
credential = _get_credential(provider)
|
||||
credential = _get_credential(provider, account)
|
||||
if not credential:
|
||||
if provider == "gmail":
|
||||
return {
|
||||
@@ -215,6 +221,7 @@ def register_tools(
|
||||
from_email: str | None = None,
|
||||
cc: str | list[str] | None = None,
|
||||
bcc: str | list[str] | None = None,
|
||||
account: str = "",
|
||||
) -> dict:
|
||||
"""
|
||||
Send an email.
|
||||
@@ -232,12 +239,14 @@ def register_tools(
|
||||
Optional for Gmail (defaults to authenticated user's address).
|
||||
cc: CC recipient(s). Single string or list of strings. Optional.
|
||||
bcc: BCC recipient(s). Single string or list of strings. Optional.
|
||||
account: Account alias for multi-account routing (e.g. "timothy-home").
|
||||
Only used with Gmail provider. Optional.
|
||||
|
||||
Returns:
|
||||
Dict with send result including provider used and message ID,
|
||||
or error dict with "error" and optional "help" keys.
|
||||
"""
|
||||
return _send_email_impl(to, subject, html, provider, from_email, cc, bcc)
|
||||
return _send_email_impl(to, subject, html, provider, from_email, cc, bcc, account)
|
||||
|
||||
def _fetch_original_message(access_token: str, message_id: str) -> dict:
|
||||
"""Fetch the original message to extract threading info."""
|
||||
@@ -278,6 +287,7 @@ def register_tools(
|
||||
html: str,
|
||||
cc: str | list[str] | None = None,
|
||||
bcc: str | list[str] | None = None,
|
||||
account: str = "",
|
||||
) -> dict:
|
||||
"""
|
||||
Reply to a Gmail message, keeping it in the same thread.
|
||||
@@ -291,6 +301,8 @@ def register_tools(
|
||||
html: Reply body as HTML string.
|
||||
cc: CC recipient(s). Single string or list of strings. Optional.
|
||||
bcc: BCC recipient(s). Single string or list of strings. Optional.
|
||||
account: Account alias for multi-account routing (e.g. "timothy-home").
|
||||
Optional.
|
||||
|
||||
Returns:
|
||||
Dict with send result including reply message ID and threadId,
|
||||
@@ -305,7 +317,7 @@ def register_tools(
|
||||
if not html:
|
||||
return {"error": "Reply body (html) is required"}
|
||||
|
||||
credential = _get_credential("gmail")
|
||||
credential = _get_credential("gmail", account)
|
||||
if not credential:
|
||||
return {
|
||||
"error": "Gmail credentials not configured",
|
||||
|
||||
@@ -0,0 +1,54 @@
|
||||
# Wikipedia Search Tool
|
||||
|
||||
This tool allows agents to search Wikipedia and retrieve article summaries without needing an external API key.
|
||||
|
||||
## Features
|
||||
|
||||
- **Search**: Find relevant Wikipedia articles by query.
|
||||
- **Summaries**: Get concise descriptions and excerpts for search results.
|
||||
- **Multilingual**: Supports searching in different languages (default: English).
|
||||
- **No API Key**: Uses the public Wikipedia REST API.
|
||||
|
||||
## Usage
|
||||
|
||||
### As an MCP Tool
|
||||
|
||||
```python
|
||||
result = await call_tool(
|
||||
"search_wikipedia",
|
||||
arguments={
|
||||
"query": "Artificial Intelligence",
|
||||
"num_results": 3,
|
||||
"lang": "en"
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
### Parameters
|
||||
|
||||
| Parameter | Type | Default | Description |
|
||||
|-----------|------|---------|-------------|
|
||||
| `query` | `str` | Required | The search term to look for. |
|
||||
| `num_results` | `int` | `3` | Number of results to return (max 10). |
|
||||
| `lang` | `str` | `"en"` | Wikipedia language code (e.g., "en", "es", "fr"). |
|
||||
|
||||
## Response Format
|
||||
|
||||
The tool returns a dictionary with the following structure:
|
||||
|
||||
```json
|
||||
{
|
||||
"query": "Artificial Intelligence",
|
||||
"lang": "en",
|
||||
"count": 3,
|
||||
"results": [
|
||||
{
|
||||
"title": "Artificial intelligence",
|
||||
"url": "https://en.wikipedia.org/wiki/Artificial_intelligence",
|
||||
"description": "Intelligence of machines",
|
||||
"snippet": "Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly the computer systems..."
|
||||
},
|
||||
...
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -0,0 +1,3 @@
|
||||
from .wikipedia_tool import register_tools
|
||||
|
||||
__all__ = ["register_tools"]
|
||||
@@ -0,0 +1,88 @@
|
||||
"""
|
||||
Wikipedia Search Tool - Search and retrieve summaries from Wikipedia.
|
||||
|
||||
Uses the Wikipedia Public API (REST) to find relevant articles and get their intros.
|
||||
No external 'wikipedia' library required, uses standard `httpx`.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
|
||||
import httpx
|
||||
from fastmcp import FastMCP
|
||||
|
||||
|
||||
def register_tools(mcp: FastMCP) -> None:
    """Register the wikipedia search tool with the MCP server."""

    def _strip_html(text: str) -> str:
        """Remove HTML tags from a string."""
        return re.sub(r"<[^>]+>", "", text) if text else ""

    @mcp.tool()
    def search_wikipedia(query: str, lang: str = "en", num_results: int = 3) -> dict:
        """
        Search Wikipedia for a given query and return summaries of top matching articles.

        Args:
            query: The search term (e.g. "Artificial Intelligence")
            lang: Language code (default: "en")
            num_results: Number of pages to retrieve (default: 3, max: 10)

        Returns:
            Dict containing query metadata and a list of results, each with
            title, url, description and snippet.
        """
        if not query:
            return {"error": "Query cannot be empty"}

        # Clamp the requested page count to the API's supported window of 1..10.
        limit = max(1, min(num_results, 10))
        search_endpoint = f"https://{lang}.wikipedia.org/w/rest.php/v1/search/page"

        def _to_entry(page: dict) -> dict:
            """Convert one REST search hit into the tool's result schema."""
            page_key = page.get("key", "")
            return {
                "title": page.get("title", ""),
                "url": f"https://{lang}.wikipedia.org/wiki/{page_key}",
                "description": page.get("description") or "No description available.",
                # Excerpts embed markup such as <span class="searchmatch">; strip it.
                "snippet": _strip_html(page.get("excerpt") or ""),
            }

        try:
            response = httpx.get(
                search_endpoint,
                params={"q": query, "limit": limit},
                timeout=10.0,
                headers={"User-Agent": "AdenAgentFramework/1.0 (https://adenhq.com)"},
            )

            if response.status_code != 200:
                return {"error": f"Wikipedia API error: {response.status_code}", "query": query}

            results = [_to_entry(page) for page in response.json().get("pages", [])]
            return {"query": query, "lang": lang, "count": len(results), "results": results}

        except httpx.TimeoutException:
            return {"error": "Request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}
        except Exception as e:  # Defensive: surface unexpected failures as tool errors.
            return {"error": f"Search failed: {e}"}
|
||||
@@ -0,0 +1,116 @@
|
||||
"""Tests that enforce credential registry completeness and consistency.
|
||||
|
||||
These tests run in CI and catch common mistakes when adding new integrations:
|
||||
- Missing health checker for a spec with health_check_endpoint
|
||||
- Orphaned entries in HEALTH_CHECKERS (no corresponding spec)
|
||||
- CredentialSpec fields that are incomplete
|
||||
- Duplicate env var conflicts
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
from aden_tools.credentials.health_check import HEALTH_CHECKERS, validate_integration_wiring
|
||||
|
||||
|
||||
class TestRegistryCompleteness:
    """Every credential with a health_check_endpoint must have a registered checker."""

    # Credentials that intentionally don't have their own dedicated checker:
    # - google_cse: shares google_search checker (same credential_group)
    # - razorpay/razorpay_secret: requires HTTP Basic auth with TWO credentials,
    #   which the single-value health check dispatcher can't support
    KNOWN_EXCEPTIONS = {"google_cse", "razorpay", "razorpay_secret"}

    def test_specs_with_endpoint_have_checkers(self):
        """Every CredentialSpec with health_check_endpoint has a HEALTH_CHECKERS entry."""
        # Collect all offenders first so a single failure names every missing checker.
        missing = []
        for name, spec in CREDENTIAL_SPECS.items():
            if name in self.KNOWN_EXCEPTIONS:
                continue
            if spec.health_check_endpoint and name not in HEALTH_CHECKERS:
                missing.append(
                    f"{name}: has endpoint '{spec.health_check_endpoint}' "
                    f"but no dedicated health checker"
                )
        assert not missing, (
            f"{len(missing)} credential(s) have health_check_endpoint but no checker:\n"
            + "\n".join(f"  - {m}" for m in missing)
        )

    def test_checkers_have_corresponding_specs(self):
        """Every key in HEALTH_CHECKERS matches a CREDENTIAL_SPECS entry."""
        # The reverse direction: a checker keyed on a name with no spec is dead code.
        orphaned = [name for name in HEALTH_CHECKERS if name not in CREDENTIAL_SPECS]
        assert not orphaned, f"HEALTH_CHECKERS has entries with no CREDENTIAL_SPECS: {orphaned}"
|
||||
|
||||
|
||||
class TestSpecRequiredFields:
    """Every CredentialSpec should have minimum required fields."""

    # Each check is parametrized over all specs so a failure names the exact
    # credential, and one bad spec doesn't mask problems in the others.
    @pytest.mark.parametrize(
        "cred_name,spec",
        list(CREDENTIAL_SPECS.items()),
        ids=list(CREDENTIAL_SPECS.keys()),
    )
    def test_has_env_var(self, cred_name, spec):
        """Every spec declares the environment variable its value is read from."""
        assert spec.env_var, f"{cred_name}: missing env_var"

    @pytest.mark.parametrize(
        "cred_name,spec",
        list(CREDENTIAL_SPECS.items()),
        ids=list(CREDENTIAL_SPECS.keys()),
    )
    def test_has_description(self, cred_name, spec):
        """Every spec carries a human-readable description."""
        assert spec.description, f"{cred_name}: missing description"

    @pytest.mark.parametrize(
        "cred_name,spec",
        list(CREDENTIAL_SPECS.items()),
        ids=list(CREDENTIAL_SPECS.keys()),
    )
    def test_has_tools_or_node_types(self, cred_name, spec):
        """Every spec is wired to at least one tool or node type."""
        assert spec.tools or spec.node_types, (
            f"{cred_name}: must have at least one tool or node_type"
        )
|
||||
|
||||
|
||||
class TestNoDuplicateEnvVars:
    """No two credential specs should use the same env_var (unless in same credential_group)."""

    def test_no_accidental_env_var_collisions(self):
        """Group specs by env_var and flag any sharing across unrelated credentials."""
        # Map each env var to every credential name that declares it.
        by_env_var: dict[str, list[str]] = {}
        for cred_name, cred_spec in CREDENTIAL_SPECS.items():
            by_env_var.setdefault(cred_spec.env_var, []).append(cred_name)

        def _is_intentional(names: list[str]) -> bool:
            # Sharing is intentional when all specs belong to one non-empty group.
            groups = {CREDENTIAL_SPECS[n].credential_group for n in names}
            return len(groups) == 1 and groups != {""}

        duplicates = {
            env_var: names
            for env_var, names in by_env_var.items()
            if len(names) > 1 and not _is_intentional(names)
        }

        assert not duplicates, f"Duplicate env_vars across unrelated credentials: {duplicates}"
|
||||
|
||||
|
||||
class TestIntegrationWiring:
    """validate_integration_wiring() catches wiring issues."""

    def test_nonexistent_credential(self):
        """An unknown credential name is reported as having no CredentialSpec."""
        issues = validate_integration_wiring("nonexistent_service_xyz")
        assert any("No CredentialSpec" in i for i in issues)

    def test_known_credential_no_critical_issues(self):
        """A well-wired credential (e.g. 'hubspot') should have no issues."""
        issues = validate_integration_wiring("hubspot")
        assert not issues, f"Unexpected issues for hubspot: {issues}"

    # One case per registered checker, so every new integration is validated
    # automatically as soon as it lands in HEALTH_CHECKERS.
    @pytest.mark.parametrize("cred_name", list(HEALTH_CHECKERS.keys()))
    def test_all_checkers_pass_wiring(self, cred_name):
        """Every registered checker should pass wiring validation."""
        issues = validate_integration_wiring(cred_name)
        assert not issues, f"Wiring issues for '{cred_name}':\n" + "\n".join(
            f"  - {i}" for i in issues
        )
|
||||
@@ -7,12 +7,22 @@ import httpx
|
||||
from aden_tools.credentials.health_check import (
|
||||
HEALTH_CHECKERS,
|
||||
AnthropicHealthChecker,
|
||||
ApolloHealthChecker,
|
||||
BrevoHealthChecker,
|
||||
CalcomHealthChecker,
|
||||
DiscordHealthChecker,
|
||||
ExaSearchHealthChecker,
|
||||
FinlightHealthChecker,
|
||||
GitHubHealthChecker,
|
||||
GoogleCalendarHealthChecker,
|
||||
GoogleDocsHealthChecker,
|
||||
GoogleMapsHealthChecker,
|
||||
GoogleSearchHealthChecker,
|
||||
NewsdataHealthChecker,
|
||||
ResendHealthChecker,
|
||||
SerpApiHealthChecker,
|
||||
StripeHealthChecker,
|
||||
TelegramHealthChecker,
|
||||
check_credential_health,
|
||||
)
|
||||
|
||||
@@ -69,6 +79,16 @@ class TestHealthCheckerRegistry:
|
||||
"google",
|
||||
"slack",
|
||||
"discord",
|
||||
"stripe",
|
||||
"exa_search",
|
||||
"google_docs",
|
||||
"calcom",
|
||||
"serpapi",
|
||||
"apollo",
|
||||
"telegram",
|
||||
"newsdata",
|
||||
"finlight",
|
||||
"brevo",
|
||||
}
|
||||
assert set(HEALTH_CHECKERS.keys()) == expected
|
||||
|
||||
@@ -485,3 +505,199 @@ class TestGoogleCalendarHealthCheckerTokenSanitization:
|
||||
|
||||
assert not result.valid
|
||||
assert "Connection refused" in result.message
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# HealthCheckerTestSuite: reusable base class for standard test scenarios
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class HealthCheckerTestSuite:
    """Reusable test mixin that auto-generates standard health check scenarios.

    Subclass this and set ``CHECKER_CLASS`` and ``HTTP_METHOD`` to get 6 tests
    for free. Add checker-specific tests alongside as needed.

    Example::

        class TestMyNewChecker(HealthCheckerTestSuite):
            CHECKER_CLASS = MyNewHealthChecker
            HTTP_METHOD = "get"
    """

    # Checker class under test; subclasses must override.
    CHECKER_CLASS: type | None = None
    # Name of the mocked httpx client method the checker is expected to call.
    HTTP_METHOD: str = "get"
    # Extra constructor kwargs for the checker. NOTE(review): class-level
    # mutable default — subclasses should override, never mutate in place.
    CHECKER_KWARGS: dict = {}

    # Override these if the checker uses non-standard valid-status logic
    EXPECT_200_VALID: bool = True
    EXPECT_401_INVALID: bool = True
    EXPECT_403_INVALID: bool = True
    EXPECT_429_VALID: bool = True

    def _make_checker(self):
        """Instantiate the configured checker, failing loudly if unconfigured."""
        assert self.CHECKER_CLASS is not None, "Set CHECKER_CLASS in subclass"
        return self.CHECKER_CLASS(**self.CHECKER_KWARGS)

    def _mock_response(self, status_code, json_data=None):
        """Build a MagicMock httpx.Response with the given status and JSON body."""
        response = MagicMock(spec=httpx.Response)
        response.status_code = status_code
        if json_data:
            response.json.return_value = json_data
        else:
            # Falsy json_data (None or {}) yields an empty JSON body.
            response.json.return_value = {}
        return response

    def _setup_mock(self, mock_client_cls, status_code=200, json_data=None):
        """Wire the patched client class so its context manager yields a mock
        whose HTTP_METHOD call returns the canned response."""
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        http_method = getattr(mock_client, self.HTTP_METHOD)
        http_method.return_value = self._mock_response(status_code, json_data)
        return mock_client, http_method

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_valid_credential_200(self, mock_client_cls):
        """200 response means valid credential."""
        if not self.EXPECT_200_VALID:
            # Scenario disabled by the subclass; treat as a no-op pass.
            return
        self._setup_mock(mock_client_cls, 200)
        result = self._make_checker().check("test-credential")
        assert result.valid is True

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_invalid_credential_401(self, mock_client_cls):
        """401 response means invalid credential."""
        if not self.EXPECT_401_INVALID:
            return
        self._setup_mock(mock_client_cls, 401)
        result = self._make_checker().check("bad-credential")
        assert result.valid is False
        assert result.details.get("status_code") == 401

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_forbidden_403(self, mock_client_cls):
        """403 response means insufficient permissions."""
        if not self.EXPECT_403_INVALID:
            return
        self._setup_mock(mock_client_cls, 403)
        result = self._make_checker().check("test-credential")
        assert result.valid is False
        assert result.details.get("status_code") == 403

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_rate_limited_429(self, mock_client_cls):
        """429 (rate limited) typically means the credential is valid."""
        if not self.EXPECT_429_VALID:
            return
        self._setup_mock(mock_client_cls, 429)
        result = self._make_checker().check("test-credential")
        assert result.valid is True

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_timeout(self, mock_client_cls):
        """Timeout is handled gracefully."""
        # Manual wiring (not _setup_mock) because the method must raise, not return.
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        getattr(mock_client, self.HTTP_METHOD).side_effect = httpx.TimeoutException("timed out")

        result = self._make_checker().check("test-credential")
        assert result.valid is False
        assert result.details.get("error") == "timeout"

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_network_error(self, mock_client_cls):
        """Network errors are handled gracefully."""
        mock_client = MagicMock()
        mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
        mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
        getattr(mock_client, self.HTTP_METHOD).side_effect = httpx.RequestError(
            "connection refused"
        )

        result = self._make_checker().check("test-credential")
        assert result.valid is False
        assert "error" in result.details
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests for new checkers (using HealthCheckerTestSuite)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestStripeHealthChecker(HealthCheckerTestSuite):
    """Standard suite scenarios for StripeHealthChecker (GET-based check)."""

    CHECKER_CLASS = StripeHealthChecker
    HTTP_METHOD = "get"
|
||||
|
||||
|
||||
class TestExaSearchHealthChecker(HealthCheckerTestSuite):
    """Standard suite scenarios for ExaSearchHealthChecker (POST-based check)."""

    CHECKER_CLASS = ExaSearchHealthChecker
    HTTP_METHOD = "post"
|
||||
|
||||
|
||||
class TestGoogleDocsHealthChecker(HealthCheckerTestSuite):
    """Standard suite scenarios for GoogleDocsHealthChecker (GET-based check)."""

    CHECKER_CLASS = GoogleDocsHealthChecker
    HTTP_METHOD = "get"
    # OAuthBearerHealthChecker doesn't treat 429 as valid
    EXPECT_429_VALID = False
|
||||
|
||||
|
||||
class TestCalcomHealthChecker(HealthCheckerTestSuite):
    """Standard suite scenarios for CalcomHealthChecker (GET-based check)."""

    CHECKER_CLASS = CalcomHealthChecker
    HTTP_METHOD = "get"
|
||||
|
||||
|
||||
class TestSerpApiHealthChecker(HealthCheckerTestSuite):
    """Standard suite scenarios for SerpApiHealthChecker (GET-based check)."""

    CHECKER_CLASS = SerpApiHealthChecker
    HTTP_METHOD = "get"
|
||||
|
||||
|
||||
class TestApolloHealthChecker(HealthCheckerTestSuite):
    """Standard suite scenarios for ApolloHealthChecker (GET-based check)."""

    CHECKER_CLASS = ApolloHealthChecker
    HTTP_METHOD = "get"
|
||||
|
||||
|
||||
class TestTelegramHealthChecker(HealthCheckerTestSuite):
    """Suite scenarios for TelegramHealthChecker, plus Telegram-specific cases."""

    CHECKER_CLASS = TelegramHealthChecker
    HTTP_METHOD = "get"
    # Telegram returns 200 with {"ok": true/false} rather than using HTTP status codes
    EXPECT_429_VALID = False

    # Overrides the mixin's test_valid_credential_200: a bare 200 is not
    # enough for Telegram; the body must also report ok=true.
    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_valid_credential_200(self, mock_client_cls):
        """200 with ok=true means valid bot token."""
        self._setup_mock(
            mock_client_cls,
            200,
            {"ok": True, "result": {"username": "testbot"}},
        )
        result = self._make_checker().check("123:ABC")
        assert result.valid is True
        assert "testbot" in result.message

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_ok_false_invalid(self, mock_client_cls):
        """200 with ok=false means invalid bot token."""
        self._setup_mock(
            mock_client_cls,
            200,
            {"ok": False, "description": "Unauthorized"},
        )
        result = self._make_checker().check("bad-token")
        assert result.valid is False
|
||||
|
||||
|
||||
class TestNewsdataHealthChecker(HealthCheckerTestSuite):
    """Standard suite scenarios for NewsdataHealthChecker (GET-based check)."""

    CHECKER_CLASS = NewsdataHealthChecker
    HTTP_METHOD = "get"
|
||||
|
||||
|
||||
class TestFinlightHealthChecker(HealthCheckerTestSuite):
    """Standard suite scenarios for FinlightHealthChecker (GET-based check)."""

    CHECKER_CLASS = FinlightHealthChecker
    HTTP_METHOD = "get"
|
||||
|
||||
|
||||
class TestBrevoHealthChecker(HealthCheckerTestSuite):
    """Standard suite scenarios for BrevoHealthChecker (GET-based check)."""

    CHECKER_CLASS = BrevoHealthChecker
    HTTP_METHOD = "get"
|
||||
|
||||
@@ -132,7 +132,6 @@ class TestPgQuery:
|
||||
assert "error" in result
|
||||
|
||||
def test_query_timeout(self, pg_query_fn, monkeypatch):
|
||||
|
||||
class TimeoutCursor:
|
||||
def execute(self, *args, **kwargs):
|
||||
raise psycopg.errors.QueryCanceled()
|
||||
@@ -190,7 +189,6 @@ class TestPgListTables:
|
||||
|
||||
class TestPgDescribeTable:
|
||||
def test_describe_table_success(self, pg_describe_table_fn, monkeypatch):
|
||||
|
||||
class DescribeCursor:
|
||||
def execute(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
@@ -0,0 +1,109 @@
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from aden_tools.tools.wikipedia_tool.wikipedia_tool import register_tools
|
||||
|
||||
|
||||
@pytest.fixture
def mcp():
    """Provide a fresh FastMCP server instance for each test."""
    return FastMCP("test-server")
|
||||
|
||||
|
||||
@pytest.fixture
def tool_func():
    """Capture and return the ``search_wikipedia`` callable registered by
    ``register_tools``.

    The tool is defined inside ``register_tools`` and only exposed through the
    ``@mcp.tool()`` decorator, so we hand ``register_tools`` a minimal stand-in
    whose ``tool()`` decorator records each registered function by name. This
    replaces the previous version, which also registered the tools on a real
    FastMCP instance and then discarded that work.
    """

    class _CapturingMCP:
        """Just enough of the FastMCP surface for register_tools()."""

        def __init__(self) -> None:
            self.tools: dict[str, object] = {}

        def tool(self):
            # Mimic @mcp.tool(): return a decorator that records the function.
            def decorator(func):
                self.tools[func.__name__] = func
                return func

            return decorator

    server = _CapturingMCP()
    register_tools(server)
    return server.tools["search_wikipedia"]
|
||||
|
||||
|
||||
def test_search_wikipedia_success(tool_func):
    """Happy path: hits are returned, URLs built from page keys, HTML stripped."""
    # Canned Wikipedia REST /v1/search/page payload with two hits.
    mock_response = MagicMock()
    mock_response.status_code = 200
    mock_response.json.return_value = {
        "pages": [
            {
                "title": "Artificial Intelligence",
                "key": "Artificial_Intelligence",
                "description": "Intelligence demonstrated by machines",
                "excerpt": "<b>Artificial intelligence</b> (<b>AI</b>)...",
            },
            {
                "title": "AI Winter",
                "key": "AI_Winter",
                "description": "Period of reduced funding",
                "excerpt": "In the history of AI...",
            },
        ]
    }

    patch_target = "aden_tools.tools.wikipedia_tool.wikipedia_tool.httpx.get"
    with patch(patch_target, return_value=mock_response) as mock_get:
        result = tool_func(query="AI")

        assert result["query"] == "AI"
        assert result["count"] == 2
        assert result["results"][0]["title"] == "Artificial Intelligence"
        assert "Artificial_Intelligence" in result["results"][0]["url"]
        # Verify HTML stripping
        assert "<b>" not in result["results"][0]["snippet"]
        assert "Artificial intelligence (AI)..." in result["results"][0]["snippet"]

        # The tool should forward the query as the REST API's "q" parameter.
        mock_get.assert_called_once()
        args, kwargs = mock_get.call_args
        assert kwargs["params"]["q"] == "AI"
|
||||
|
||||
|
||||
def test_search_wikipedia_empty_query(tool_func):
|
||||
result = tool_func(query="")
|
||||
assert "error" in result
|
||||
assert result["error"] == "Query cannot be empty"
|
||||
|
||||
|
||||
def test_search_wikipedia_api_error(tool_func):
    """Non-200 responses surface as a 'Wikipedia API error' message."""
    failing_response = MagicMock()
    failing_response.status_code = 500

    patch_target = "aden_tools.tools.wikipedia_tool.wikipedia_tool.httpx.get"
    with patch(patch_target, return_value=failing_response):
        outcome = tool_func(query="Error")
        assert "error" in outcome
        assert "Wikipedia API error: 500" in outcome["error"]
|
||||
|
||||
|
||||
def test_search_wikipedia_timeout(tool_func):
    """httpx timeouts are converted into a 'Request timed out' error."""
    import httpx

    patch_target = "aden_tools.tools.wikipedia_tool.wikipedia_tool.httpx.get"
    with patch(patch_target, side_effect=httpx.TimeoutException("Timeout")):
        outcome = tool_func(query="Timeout")
        assert "error" in outcome
        assert "Request timed out" in outcome["error"]
|
||||
Reference in New Issue
Block a user