refactor: remove the remaining old trigger format and change the trigger format in examples to the latest format

This commit is contained in:
Richard Tang
2026-03-11 16:13:37 -07:00
parent 0c7f43f595
commit 6cb73a6fea
15 changed files with 71 additions and 606 deletions
@@ -476,8 +476,8 @@ nodes/__init__.py
- Goal description, success criteria values, constraint values, edge \
definitions, identity_prompt in agent.py
- CLI options in __main__.py
- For async entry points (timers/webhooks), add AsyncEntryPointSpec \
and AgentRuntimeConfig to agent.py
- For triggers (timers/webhooks), add entries to triggers.json in the \
agent's export directory
Do NOT modify or rewrite:
- Import statements at top of agent.py (they are correct)
@@ -332,81 +332,46 @@ class MyAgent:
default_agent = MyAgent()
```
## agent.py — Async Entry Points Variant
## triggers.json — Timer and Webhook Triggers
When an agent needs timers, webhooks, or event-driven triggers, add
`async_entry_points` and optionally `runtime_config` as module-level variables.
These are IN ADDITION to the standard variables above.
When an agent needs timers, webhooks, or event-driven triggers, create a
`triggers.json` file in the agent's directory (alongside `agent.py`).
The queen loads these at session start and the user can manage them via
the `set_trigger` / `remove_trigger` tools at runtime.
```python
# Additional imports for async entry points
from framework.graph.edge import GraphSpec, AsyncEntryPointSpec
from framework.runtime.agent_runtime import (
AgentRuntime, AgentRuntimeConfig, create_agent_runtime,
)
# ... (goal, nodes, edges, entry_node, entry_points, etc. as above) ...
# Async entry points — event-driven triggers
async_entry_points = [
# Timer with cron: daily at 9am
AsyncEntryPointSpec(
id="daily-check",
name="Daily Check",
entry_node="process-node",
trigger_type="timer",
trigger_config={"cron": "0 9 * * *"},
isolation_level="shared",
max_concurrent=1,
),
# Timer with fixed interval: every 20 minutes
AsyncEntryPointSpec(
id="scheduled-check",
name="Scheduled Check",
entry_node="process-node",
trigger_type="timer",
trigger_config={"interval_minutes": 20, "run_immediately": False},
isolation_level="shared",
max_concurrent=1,
),
# Event: reacts to webhook events
AsyncEntryPointSpec(
id="webhook-event",
name="Webhook Event Handler",
entry_node="process-node",
trigger_type="event",
trigger_config={"event_types": ["webhook_received"]},
isolation_level="shared",
max_concurrent=10,
),
```json
[
{
"id": "daily-check",
"name": "Daily Check",
"trigger_type": "timer",
"trigger_config": {"cron": "0 9 * * *"},
"task": "Run the daily check process"
},
{
"id": "scheduled-check",
"name": "Scheduled Check",
"trigger_type": "timer",
"trigger_config": {"interval_minutes": 20},
"task": "Run the scheduled check"
},
{
"id": "webhook-event",
"name": "Webhook Event Handler",
"trigger_type": "webhook",
"trigger_config": {"event_types": ["webhook_received"]},
"task": "Process incoming webhook event"
}
]
# Webhook server config (only needed if using webhooks)
runtime_config = AgentRuntimeConfig(
webhook_host="127.0.0.1",
webhook_port=8080,
webhook_routes=[
{
"source_id": "my-source",
"path": "/webhooks/my-source",
"methods": ["POST"],
},
],
)
```
**Key rules for async entry points:**
- `async_entry_points` is a list of `AsyncEntryPointSpec` (NOT `EntryPointSpec`)
- `runtime_config` is `AgentRuntimeConfig` (NOT `RuntimeConfig` from config.py)
- Valid trigger_types: `timer`, `event`, `webhook`, `manual`, `api`
- Valid isolation_levels: `isolated`, `shared`, `synchronized`
**Key rules for triggers.json:**
- Valid trigger_types: `timer`, `webhook`
- Timer trigger_config (cron): `{"cron": "0 9 * * *"}` — standard 5-field cron expression
- Timer trigger_config (interval): `{"interval_minutes": float, "run_immediately": bool}`
- Event trigger_config: `{"event_types": ["webhook_received"], "filter_stream": "...", "filter_node": "..."}`
- Use `isolation_level="shared"` for async entry points that need to read
the primary session's memory (e.g., user-configured rules)
- The `_build_graph()` method passes `async_entry_points` to GraphSpec
- Reference: `exports/gmail_inbox_guardian/agent.py`
- Timer trigger_config (interval): `{"interval_minutes": float}`
- Each trigger must have a unique `id`
- The `task` field describes what the worker should do when the trigger fires
- Triggers are persisted back to `triggers.json` when modified via queen tools
## __init__.py
@@ -453,21 +418,6 @@ __all__ = [
]
```
**If the agent uses async entry points**, also import and export:
```python
from .agent import (
...,
async_entry_points,
runtime_config, # Only if using webhooks
)
__all__ = [
...,
"async_entry_points",
"runtime_config",
]
```
## __main__.py
```python
@@ -31,8 +31,7 @@ module-level variables via `getattr()`:
| `conversation_mode` | no | not passed | Isolated mode (no context carryover) |
| `identity_prompt` | no | not passed | No agent-level identity |
| `loop_config` | no | `{}` | No iteration limits |
| `async_entry_points` | no | `[]` | No async triggers (timers, webhooks, events) |
| `runtime_config` | no | `None` | No webhook server |
| `triggers.json` (file) | no | not present | No triggers (timers, webhooks) |
**CRITICAL:** `__init__.py` MUST import and re-export ALL of these from
`agent.py`. Missing exports silently fall back to defaults, causing
@@ -257,44 +256,28 @@ Multiple ON_SUCCESS edges from same source → parallel execution via asyncio.ga
Judge is the SOLE acceptance mechanism — no ad-hoc framework gating.
## Async Entry Points (Webhooks, Timers, Events)
## Triggers (Timers, Webhooks)
For agents that react to external events, use `AsyncEntryPointSpec`:
For agents that react to external events, create a `triggers.json` file
in the agent's export directory:
```python
from framework.graph.edge import AsyncEntryPointSpec
from framework.runtime.agent_runtime import AgentRuntimeConfig
# Timer trigger (cron or interval)
async_entry_points = [
AsyncEntryPointSpec(
id="daily-check",
name="Daily Check",
entry_node="process",
trigger_type="timer",
trigger_config={"cron": "0 9 * * *"}, # daily at 9am
isolation_level="shared",
)
```json
[
{
"id": "daily-check",
"name": "Daily Check",
"trigger_type": "timer",
"trigger_config": {"cron": "0 9 * * *"},
"task": "Run the daily check process"
}
]
# Webhook server (optional)
runtime_config = AgentRuntimeConfig(
webhook_host="127.0.0.1",
webhook_port=8080,
webhook_routes=[{"source_id": "gmail", "path": "/webhooks/gmail", "methods": ["POST"]}],
)
```
### Key Fields
- `trigger_type`: `"timer"`, `"event"`, `"webhook"`, `"manual"`
- `trigger_type`: `"timer"` or `"webhook"`
- `trigger_config`: `{"cron": "0 9 * * *"}` or `{"interval_minutes": 20}`
- `isolation_level`: `"shared"` (recommended), `"isolated"`, `"synchronized"`
- `event_types`: For event triggers, e.g., `["webhook_received"]`
### Exports Required
Both `async_entry_points` and `runtime_config` must be exported from `__init__.py`.
See `exports/gmail_inbox_guardian/agent.py` for complete example.
- `task`: describes what the worker should do when the trigger fires
- Triggers can also be created/removed at runtime via `set_trigger` / `remove_trigger` queen tools
## Tool Discovery
+3 -80
View File
@@ -376,28 +376,8 @@ class GraphSpec(BaseModel):
edges=[...],
)
For multi-entry-point agents (concurrent streams):
GraphSpec(
id="support-agent-graph",
goal_id="support-001",
entry_node="process-webhook", # Default entry
async_entry_points=[
AsyncEntryPointSpec(
id="webhook",
name="Zendesk Webhook",
entry_node="process-webhook",
trigger_type="webhook",
),
AsyncEntryPointSpec(
id="api",
name="API Handler",
entry_node="process-request",
trigger_type="api",
),
],
nodes=[...],
edges=[...],
)
Triggers (timer, webhook, event) are now defined in ``triggers.json``
alongside the agent directory, not embedded in the graph spec.
"""
id: str
@@ -410,12 +390,6 @@ class GraphSpec(BaseModel):
default_factory=dict,
description="Named entry points for resuming execution. Format: {name: node_id}",
)
async_entry_points: list[AsyncEntryPointSpec] = Field(
default_factory=list,
description=(
"Asynchronous entry points for concurrent execution streams (used with AgentRuntime)"
),
)
terminal_nodes: list[str] = Field(
default_factory=list, description="IDs of nodes that end execution"
)
@@ -494,17 +468,6 @@ class GraphSpec(BaseModel):
return node
return None
def has_async_entry_points(self) -> bool:
"""Check if this graph uses async entry points (multi-stream execution)."""
return len(self.async_entry_points) > 0
def get_async_entry_point(self, entry_point_id: str) -> AsyncEntryPointSpec | None:
"""Get an async entry point by ID."""
for ep in self.async_entry_points:
if ep.id == entry_point_id:
return ep
return None
def get_outgoing_edges(self, node_id: str) -> list[EdgeSpec]:
"""Get all edges leaving a node, sorted by priority."""
edges = [e for e in self.edges if e.source == node_id]
@@ -595,37 +558,6 @@ class GraphSpec(BaseModel):
if not self.get_node(self.entry_node):
errors.append(f"Entry node '{self.entry_node}' not found")
# Check async entry points
seen_entry_ids = set()
for entry_point in self.async_entry_points:
# Check for duplicate IDs
if entry_point.id in seen_entry_ids:
errors.append(f"Duplicate async entry point ID: '{entry_point.id}'")
seen_entry_ids.add(entry_point.id)
# Check entry node exists
if not self.get_node(entry_point.entry_node):
errors.append(
f"Async entry point '{entry_point.id}' references "
f"missing node '{entry_point.entry_node}'"
)
# Validate isolation level
valid_isolation = {"isolated", "shared", "synchronized"}
if entry_point.isolation_level not in valid_isolation:
errors.append(
f"Async entry point '{entry_point.id}' has invalid isolation_level "
f"'{entry_point.isolation_level}'. Valid: {valid_isolation}"
)
# Validate trigger type
valid_triggers = {"webhook", "api", "timer", "event", "manual"}
if entry_point.trigger_type not in valid_triggers:
errors.append(
f"Async entry point '{entry_point.id}' has invalid trigger_type "
f"'{entry_point.trigger_type}'. Valid: {valid_triggers}"
)
# Check terminal nodes exist
for term in self.terminal_nodes:
if not self.get_node(term):
@@ -654,10 +586,6 @@ class GraphSpec(BaseModel):
for entry_point_node in self.entry_points.values():
to_visit.append(entry_point_node)
# Add all async entry points as valid starting points
for async_entry in self.async_entry_points:
to_visit.append(async_entry.entry_node)
# Traverse from all entry points
while to_visit:
current = to_visit.pop()
@@ -674,17 +602,12 @@ class GraphSpec(BaseModel):
for sub_agent_id in sub_agents:
reachable.add(sub_agent_id)
# Build set of async entry point nodes for quick lookup
async_entry_nodes = {ep.entry_node for ep in self.async_entry_points}
for node in self.nodes:
if node.id not in reachable:
# Skip if node is a pause node, entry point target, or async entry
# (pause/resume architecture and async entry points make reachable)
# Skip if node is a pause node or entry point target
if (
node.id in self.pause_nodes
or node.id in self.entry_points.values()
or node.id in async_entry_nodes
):
continue
errors.append(f"Node '{node.id}' is unreachable from entry")
-65
View File
@@ -16,7 +16,6 @@ from framework.credentials.validation import (
from framework.graph import Goal
from framework.graph.edge import (
DEFAULT_MAX_TOKENS,
AsyncEntryPointSpec,
EdgeCondition,
EdgeSpec,
GraphSpec,
@@ -570,9 +569,6 @@ class AgentInfo:
constraints: list[dict]
required_tools: list[str]
has_tools_module: bool
# Multi-entry-point support
async_entry_points: list[dict] = field(default_factory=list)
is_multi_entry_point: bool = False
@dataclass
@@ -630,22 +626,6 @@ def load_agent_export(data: str | dict) -> tuple[GraphSpec, Goal]:
)
edges.append(edge)
# Build AsyncEntryPointSpec objects for multi-entry-point support
async_entry_points = []
for aep_data in graph_data.get("async_entry_points", []):
async_entry_points.append(
AsyncEntryPointSpec(
id=aep_data["id"],
name=aep_data.get("name", aep_data["id"]),
entry_node=aep_data["entry_node"],
trigger_type=aep_data.get("trigger_type", "manual"),
trigger_config=aep_data.get("trigger_config", {}),
isolation_level=aep_data.get("isolation_level", "shared"),
priority=aep_data.get("priority", 0),
max_concurrent=aep_data.get("max_concurrent", 10),
)
)
# Build GraphSpec
graph = GraphSpec(
id=graph_data.get("id", "agent-graph"),
@@ -653,7 +633,6 @@ def load_agent_export(data: str | dict) -> tuple[GraphSpec, Goal]:
version=graph_data.get("version", "1.0.0"),
entry_node=graph_data.get("entry_node", ""),
entry_points=graph_data.get("entry_points", {}), # Support pause/resume architecture
async_entry_points=async_entry_points, # Support multi-entry-point agents
terminal_nodes=graph_data.get("terminal_nodes", []),
pause_nodes=graph_data.get("pause_nodes", []), # Support pause/resume architecture
nodes=nodes,
@@ -805,8 +784,6 @@ class AgentRunner:
# AgentRuntime — unified execution path for all agents
self._agent_runtime: AgentRuntime | None = None
self._uses_async_entry_points = self.graph.has_async_entry_points()
# Pre-load validation: structural checks + credentials.
# Fails fast with actionable guidance — no MCP noise on screen.
run_preload_validation(
@@ -943,7 +920,6 @@ class AgentRunner:
"version": "1.0.0",
"entry_node": getattr(agent_module, "entry_node", nodes[0].id),
"entry_points": getattr(agent_module, "entry_points", {}),
"async_entry_points": getattr(agent_module, "async_entry_points", []),
"terminal_nodes": getattr(agent_module, "terminal_nodes", []),
"pause_nodes": getattr(agent_module, "pause_nodes", []),
"nodes": nodes,
@@ -1429,21 +1405,7 @@ class AgentRunner:
event_bus=None,
) -> None:
"""Set up multi-entry-point execution using AgentRuntime."""
# Convert AsyncEntryPointSpec to EntryPointSpec for AgentRuntime
entry_points = []
for async_ep in self.graph.async_entry_points:
ep = EntryPointSpec(
id=async_ep.id,
name=async_ep.name,
entry_node=async_ep.entry_node,
trigger_type=async_ep.trigger_type,
trigger_config=async_ep.trigger_config,
isolation_level=async_ep.isolation_level,
priority=async_ep.priority,
max_concurrent=async_ep.max_concurrent,
max_resurrections=async_ep.max_resurrections,
)
entry_points.append(ep)
# Always create a primary entry point for the graph's entry node.
# For multi-entry-point agents this ensures the primary path (e.g.
@@ -1750,19 +1712,6 @@ class AgentRunner:
for edge in self.graph.edges
]
# Build async entry points info
async_entry_points_info = [
{
"id": ep.id,
"name": ep.name,
"entry_node": ep.entry_node,
"trigger_type": ep.trigger_type,
"isolation_level": ep.isolation_level,
"max_concurrent": ep.max_concurrent,
}
for ep in self.graph.async_entry_points
]
return AgentInfo(
name=self.graph.id,
description=self.graph.description,
@@ -1789,8 +1738,6 @@ class AgentRunner:
],
required_tools=sorted(required_tools),
has_tools_module=(self.agent_path / "tools.py").exists(),
async_entry_points=async_entry_points_info,
is_multi_entry_point=self._uses_async_entry_points,
)
def validate(self) -> ValidationResult:
@@ -2105,18 +2052,6 @@ Respond with JSON only:
trigger_type="manual",
isolation_level="shared",
)
for aep in runner.graph.async_entry_points:
entry_points[aep.id] = EntryPointSpec(
id=aep.id,
name=aep.name,
entry_node=aep.entry_node,
trigger_type=aep.trigger_type,
trigger_config=aep.trigger_config,
isolation_level=aep.isolation_level,
priority=aep.priority,
max_concurrent=aep.max_concurrent,
)
await runtime.add_graph(
graph_id=gid,
graph=runner.graph,
+2 -3
View File
@@ -1002,8 +1002,8 @@ class ExecutionStream:
def _create_modified_graph(self) -> "GraphSpec":
"""Create a graph with the entry point overridden.
Preserves the original graph's entry_points and async_entry_points
so that validation correctly considers ALL entry nodes reachable.
Preserves the original graph's entry_points so that validation
correctly considers ALL entry nodes reachable.
Each stream only executes from its own entry_node, but the full
graph must validate with all entry points accounted for.
"""
@@ -1028,7 +1028,6 @@ class ExecutionStream:
version=self.graph.version,
entry_node=self.entry_spec.entry_node, # Use our entry point
entry_points=merged_entry_points,
async_entry_points=self.graph.async_entry_points,
terminal_nodes=self.graph.terminal_nodes,
pause_nodes=self.graph.pause_nodes,
nodes=self.graph.nodes,
@@ -17,7 +17,7 @@ from pathlib import Path
import pytest
from framework.graph import Goal
from framework.graph.edge import AsyncEntryPointSpec, EdgeCondition, EdgeSpec, GraphSpec
from framework.graph.edge import EdgeCondition, EdgeSpec, GraphSpec
from framework.graph.goal import Constraint, SuccessCriterion
from framework.graph.node import NodeSpec
from framework.runtime.agent_runtime import AgentRuntime, create_agent_runtime
@@ -101,30 +101,12 @@ def sample_graph():
),
]
async_entry_points = [
AsyncEntryPointSpec(
id="webhook",
name="Webhook Handler",
entry_node="process-webhook",
trigger_type="webhook",
isolation_level="shared",
),
AsyncEntryPointSpec(
id="api",
name="API Handler",
entry_node="process-api",
trigger_type="api",
isolation_level="shared",
),
]
return GraphSpec(
id="test-graph",
goal_id="test-goal",
version="1.0.0",
entry_node="process-webhook",
entry_points={"start": "process-webhook"},
async_entry_points=async_entry_points,
terminal_nodes=["complete"],
pause_nodes=[],
nodes=nodes,
@@ -504,108 +486,6 @@ class TestAgentRuntime:
# === GraphSpec Validation Tests ===
class TestGraphSpecValidation:
"""Tests for GraphSpec with async_entry_points."""
def test_has_async_entry_points(self, sample_graph):
"""Test checking for async entry points."""
assert sample_graph.has_async_entry_points() is True
# Graph without async entry points
simple_graph = GraphSpec(
id="simple",
goal_id="goal",
entry_node="start",
nodes=[],
edges=[],
)
assert simple_graph.has_async_entry_points() is False
def test_get_async_entry_point(self, sample_graph):
"""Test getting async entry point by ID."""
ep = sample_graph.get_async_entry_point("webhook")
assert ep is not None
assert ep.id == "webhook"
assert ep.entry_node == "process-webhook"
ep_not_found = sample_graph.get_async_entry_point("nonexistent")
assert ep_not_found is None
def test_validate_async_entry_points(self):
"""Test validation catches async entry point errors."""
nodes = [
NodeSpec(
id="valid-node",
name="Valid Node",
description="A valid node",
node_type="event_loop",
input_keys=[],
output_keys=[],
),
]
# Invalid entry node
graph = GraphSpec(
id="test",
goal_id="goal",
entry_node="valid-node",
async_entry_points=[
AsyncEntryPointSpec(
id="invalid",
name="Invalid",
entry_node="nonexistent-node",
trigger_type="webhook",
),
],
nodes=nodes,
edges=[],
)
errors = graph.validate()["errors"]
assert any("nonexistent-node" in e for e in errors)
# Invalid isolation level
graph2 = GraphSpec(
id="test",
goal_id="goal",
entry_node="valid-node",
async_entry_points=[
AsyncEntryPointSpec(
id="bad-isolation",
name="Bad Isolation",
entry_node="valid-node",
trigger_type="webhook",
isolation_level="invalid",
),
],
nodes=nodes,
edges=[],
)
errors2 = graph2.validate()["errors"]
assert any("isolation_level" in e for e in errors2)
# Invalid trigger type
graph3 = GraphSpec(
id="test",
goal_id="goal",
entry_node="valid-node",
async_entry_points=[
AsyncEntryPointSpec(
id="bad-trigger",
name="Bad Trigger",
entry_node="valid-node",
trigger_type="invalid_trigger",
),
],
nodes=nodes,
edges=[],
)
errors3 = graph3.validate()["errors"]
assert any("trigger_type" in e for e in errors3)
# === Integration Tests ===
@@ -483,7 +483,6 @@ class TestEventDrivenEntryPoints:
version="1.0.0",
entry_node="process-event",
entry_points={"start": "process-event"},
async_entry_points=[],
terminal_nodes=[],
pause_nodes=[],
nodes=nodes,
+1 -1
View File
@@ -10,7 +10,7 @@ from typing import Any
class TriggerDefinition:
"""A registered trigger that can be activated on the queen runtime.
Trigger *definitions* come from the worker's ``async_entry_points``.
Trigger *definitions* come from the worker's ``triggers.json``.
Activation state is per-session (persisted in ``SessionState.active_triggers``).
"""
@@ -78,19 +78,6 @@ def register_graph_tools(registry: ToolRegistry, runtime: AgentRuntime) -> int:
isolation_level="shared",
)
# Async entry points
for aep in runner.graph.async_entry_points:
entry_points[aep.id] = EntryPointSpec(
id=aep.id,
name=aep.name,
entry_node=aep.entry_node,
trigger_type=aep.trigger_type,
trigger_config=aep.trigger_config,
isolation_level=aep.isolation_level,
priority=aep.priority,
max_concurrent=aep.max_concurrent,
)
await runtime.add_graph(
graph_id=graph_id,
graph=runner.graph,
-111
View File
@@ -1,111 +0,0 @@
# Local credential parity: aliases, identity, status, and credential tester integration
## Summary
Gives local API key credentials (Brave Search, GitHub, Exa, Stripe, etc.) the same feature set as Aden OAuth credentials: named aliases, identity metadata, status tracking, CRUD management, and full visibility in the credential tester.
Fixes a bug where credentials configured with the existing `store_credential` MCP tool were invisible in the credential tester account picker.
---
## Changes
### New: `core/framework/credentials/local/`
**`models.py`** — `LocalAccountInfo` dataclass mirroring `AdenIntegrationInfo`:
- Fields: `credential_id`, `alias`, `status` (`active` / `failed` / `unknown`), `identity`, `last_validated`, `created_at`
- `storage_id` property returns `"{credential_id}/{alias}"` (e.g. `brave_search/work`)
- `to_account_dict()` returns same shape as Aden account dicts — feeds account picker without changes
**`registry.py`** — `LocalCredentialRegistry`, the core engine:
- `save_account(credential_id, alias, api_key)` — runs health check, extracts identity, stores at `{credential_id}/{alias}` in `EncryptedFileStorage`
- `list_accounts(credential_id=None)` — reads all `{x}/{y}` entries from storage
- `get_key(credential_id, alias)` — returns raw secret
- `delete_account(credential_id, alias)` — removes entry
- `validate_account(credential_id, alias)` — re-runs health check, updates `_status` and `last_validated` in-place
- `default()` classmethod — uses `~/.hive/credentials`
Storage convention: `{credential_id}/{alias}` as `CredentialObject.id`. Legacy flat entries (`brave_search`, no slash) continue to work — env var fallback is unchanged.
---
### Modified: `tools/src/aden_tools/credentials/store_adapter.py`
- `get(name, account=None)` — added `account=` param for per-call routing to a named local account; mirrors Aden `account=` routing
- `activate_local_account(credential_id, alias)` — injects a named account's key into `os.environ[spec.env_var]` for session-level activation
- `list_local_accounts(credential_id=None)` — delegates to `LocalCredentialRegistry`
---
### Modified: `core/framework/credentials/__init__.py`
Exports `LocalAccountInfo` and `LocalCredentialRegistry`.
---
### Modified: `core/framework/agents/credential_tester/agent.py`
Full rewrite of account listing and configuration:
- `_list_aden_accounts()` — extracted from old `list_connected_accounts()`
- `_list_local_accounts()` — uses `LocalCredentialRegistry`
- `_list_env_fallback_accounts()` — detects credentials configured via env var **or** in old flat encrypted format; fixes the invisible-credential bug
- `list_connected_accounts()` — combines all three, deduplicates
- `configure_for_account()` — branches on `source` field:
- `"aden"` → adds `get_account_info` tool, prompts with `account="alias"`
- `"local"` → calls `_activate_local_account()`, prompt has no `account=` param
- `_activate_local_account()` — handles three cases: named registry entry, old flat encrypted entry, env var already set; also handles grouped credentials (e.g. `google_custom_search` sets both `GOOGLE_API_KEY` and `GOOGLE_CSE_ID`)
- `get_tools_for_provider()` — fixed to match both `credential_id` AND `credential_group`
---
### Modified: `core/framework/builder/package_generator.py`
- `store_credential(name, value, alias="default", ...)` — added `alias` param; now delegates to `LocalCredentialRegistry.save_account()` with auto health check; returns `status` and `identity`
- `list_stored_credentials()` — delegates to `LocalCredentialRegistry.list_accounts()`; returns `credential_id`, `alias`, `status`, `identity`, `last_validated`
- `delete_stored_credential(name, alias="default")` — added `alias` param
- `validate_credential(name, alias="default")` — **new tool** — re-runs health check via `LocalCredentialRegistry.validate_account()`, returns updated status and identity
---
### Modified: `core/framework/tui/screens/account_selection.py`
- Aden accounts rendered first, local accounts second
- Local accounts display a `[local]` badge
- Identity label shows email, username, or workspace when available
---
### New: `core/framework/tui/screens/add_local_credential.py`
Two-phase modal for adding a named local API key:
1. **Type selection** — filtered list of all `direct_api_key_supported=True` credentials
2. **Form** — alias input + password input → "Test & Save" runs health check inline, shows identity result, auto-dismisses on success
Exported from `core/framework/tui/screens/__init__.py`.
---
## Bug fix
**Credential tester not showing configured credentials** (e.g. Brave Search stored via `store_credential`):
- `_list_env_fallback_accounts()` previously used `CredentialStoreAdapter.with_env_storage()`, which only checked `os.environ`. Credentials stored in `EncryptedFileStorage` with the old flat format (`brave_search`, no slash) were invisible.
- `_activate_local_account()` early-returned when `alias == "default"`, assuming the env var was already set. Old flat encrypted credentials are not in `os.environ`.
**Fix**: `_list_env_fallback_accounts()` now also reads `EncryptedFileStorage.list_all()` and treats any flat entry (no `/`) as configured. `_activate_local_account()` now falls through to load from the flat encrypted entry when the env var is not set and the registry has no named entry.
---
## Test plan
- [ ] `store_credential("brave_search", "BSA-xxx", alias="work")` → health check runs, identity shown, stored as `brave_search/work`
- [ ] `list_stored_credentials()` → shows `credential_id`, `alias`, `status`, `identity`, `last_validated`
- [ ] `validate_credential("brave_search", "work")` → re-runs health check, updates status
- [ ] `delete_stored_credential("brave_search", alias="work")` → removes entry
- [ ] Credential tester account picker shows local accounts with `[local]` badge alongside Aden accounts
- [ ] Selecting a local account activates the key and tools work without `account=` param
- [ ] Selecting a legacy flat credential (stored before this PR) activates it correctly
- [ ] `AddLocalCredentialScreen` — select type, enter alias + key, health check runs inline, screen closes on success
- [ ] `CredentialStoreAdapter.get("brave_search", account="work")` returns key from registry
-56
View File
@@ -1,56 +0,0 @@
# feat(queen): Hive Queen Bee — native agent-building agent
## Summary
Introduces **Hive Coder** (codename "Queen Bee"), a framework-native coding agent that builds complete Hive agent packages from natural language descriptions. This is a single-node, forever-alive agent inspired by opencode's `while(true)` loop — one continuous conversation handles the full lifecycle: understand, qualify, design, implement, verify, and iterate.
The agent is deeply integrated with the framework: it can discover available MCP tools at runtime, inspect sessions and checkpoints of agents it builds, run their test suites, and self-verify its own output. It ships with a dedicated MCP tools server (`coder_tools_server.py`) providing rich file I/O, fuzzy-match editing, git snapshots, and shell execution — all scoped to a configurable project root.
## What's included
### New: `hive_coder` agent (`core/framework/agents/hive_coder/`)
- **`agent.py`** — Goal with 4 success criteria and 4 constraints, single-node graph, `HiveCoderAgent` class with full runtime lifecycle (start/stop/trigger_and_wait)
- **`nodes/__init__.py`** — Single `coder` EventLoopNode with a comprehensive system prompt covering coding mandates, tool discovery, meta-agent capabilities, node count rules, implementation templates, and a 6-phase workflow
- **`config.py`** — RuntimeConfig with auto-detection of preferred model from `~/.hive/configuration.json`
- **`__main__.py`** — Click CLI with `run`, `tui`, `info`, `validate`, and `shell` subcommands
- **`reference/`** — Framework guide, file templates, and anti-patterns docs embedded as agent reference material
### New: Coder Tools MCP Server (`tools/coder_tools_server.py`)
- 1500-line MCP server providing 13 tools: `read_file`, `write_file`, `edit_file` (with opencode-style 9-strategy fuzzy matching), `list_directory`, `search_files`, `run_command`, `undo_changes`, `discover_mcp_tools`, `list_agents`, `list_agent_sessions`, `list_agent_checkpoints`, `get_agent_checkpoint`, `run_agent_tests`
- Path-scoped security: all file operations sandboxed to project root
- Git-based undo: automatic snapshots before writes with `undo_changes` rollback
### Framework changes
- **`hive code` CLI command** — Direct launch shortcut for Hive Coder via `cmd_code` in `runner/cli.py`
- **`hive tui` updated** — Now discovers framework agents alongside exports/ and examples/
- **Cron timer support** — `AgentRuntime` now supports cron expressions (`croniter`) in addition to fixed-interval timers for async entry points
- **Datetime in system prompts** — `prompt_composer._with_datetime()` appends current datetime to all composed system prompts; EventLoopNode also applies it for isolated conversations
- **`max_node_visits` default → 0** — Changed from 1 to 0 (unbounded) across `NodeSpec` and executor, matching the forever-alive pattern as the standard default
- **TUI graph view** — Timer display updated to show cron expressions and hours in countdown
- **CredentialError handling** — `_setup()` calls in TUI launch paths now catch and display credential errors gracefully
### Tests
- New `test_agent_runtime.py` tests for cron-based timer scheduling
## Architecture
```
User ──▶ [coder] (EventLoopNode, client_facing, forever-alive)
│ Tools: coder_tools_server.py (file I/O, shell, git)
│ + meta-agent tools (discover, inspect, test)
└──▶ loops continuously until user exits
```
Single node. No edges. No terminal nodes. The agent stays alive and handles multiple build requests in one session — context accumulates across interactions.
## Test plan
- [ ] `hive code` launches Hive Coder TUI successfully
- [ ] `hive tui` shows "Framework Agents" as a source option
- [ ] Agent can discover tools via `discover_mcp_tools()`
- [ ] Agent generates a valid agent package from a natural language request
- [ ] Generated packages pass `AgentRunner.load()` validation
- [ ] Cron timer tests pass (`test_agent_runtime.py`)
- [ ] Existing tests unaffected by `max_node_visits` default change
@@ -12,7 +12,6 @@ from .agent import (
nodes,
edges,
loop_config,
async_entry_points,
entry_node,
entry_points,
pause_nodes,
@@ -31,7 +30,6 @@ __all__ = [
"nodes",
"edges",
"loop_config",
"async_entry_points",
"entry_node",
"entry_points",
"pause_nodes",
@@ -4,7 +4,7 @@ from pathlib import Path
from framework.graph import EdgeCondition, EdgeSpec, Goal, SuccessCriterion, Constraint
from framework.graph.checkpoint_config import CheckpointConfig
from framework.graph.edge import AsyncEntryPointSpec, GraphSpec
from framework.graph.edge import GraphSpec
from framework.graph.executor import ExecutionResult, GraphExecutor
from framework.llm import LiteLLMProvider
from framework.runner.tool_registry import ToolRegistry
@@ -152,17 +152,6 @@ edges = [
# Graph configuration
entry_node = "intake"
entry_points = {"start": "intake"}
async_entry_points = [
AsyncEntryPointSpec(
id="email-timer",
name="Scheduled Inbox Check",
entry_node="fetch-emails",
trigger_type="timer",
trigger_config={"interval_minutes": 5},
isolation_level="shared",
max_concurrent=1,
),
]
pause_nodes = []
terminal_nodes = []
loop_config = {
@@ -224,7 +213,6 @@ class EmailInboxManagementAgent:
loop_config=loop_config,
conversation_mode=conversation_mode,
identity_prompt=identity_prompt,
async_entry_points=async_entry_points,
)
def _setup(self, mock_mode=False) -> None:
@@ -275,16 +263,6 @@ class EmailInboxManagementAgent:
trigger_type="manual",
isolation_level="shared",
),
# Timer-driven entry point
EntryPointSpec(
id="email-timer",
name="Scheduled Inbox Check",
entry_node="fetch-emails",
trigger_type="timer",
trigger_config={"interval_minutes": 5},
isolation_level="shared",
max_concurrent=1,
),
]
self._agent_runtime = create_agent_runtime(
@@ -360,10 +338,6 @@ class EmailInboxManagementAgent:
"pause_nodes": self.pause_nodes,
"terminal_nodes": self.terminal_nodes,
"client_facing_nodes": [n.id for n in self.nodes if n.client_facing],
"async_entry_points": [
{"id": ep.id, "name": ep.name, "entry_node": ep.entry_node}
for ep in async_entry_points
],
}
def validate(self):
@@ -391,13 +365,6 @@ class EmailInboxManagementAgent:
f"Entry point '{ep_id}' references unknown node '{node_id}'"
)
# Validate async entry points
for ep in async_entry_points:
if ep.entry_node not in node_ids:
errors.append(
f"Async entry point '{ep.id}' references unknown node '{ep.entry_node}'"
)
return {
"valid": len(errors) == 0,
"errors": errors,
@@ -0,0 +1,11 @@
[
{
"id": "email-timer",
"name": "Scheduled Inbox Check",
"trigger_type": "timer",
"trigger_config": {
"interval_minutes": 5
},
"task": "Fetch and process inbox emails according to the user's rules"
}
]