Compare commits
105 Commits
| SHA1 |
|---|
| a57d58e8d4 |
| 85c204a442 |
| 56075a25a3 |
| 2b0a6779cc |
| b9ddce9d41 |
| 0c85406bc2 |
| 1051134594 |
| 653d24df9d |
| b687fa9e94 |
| c7f0ab0444 |
| 93bf373a5b |
| 2d87042a70 |
| 8a28abb7b8 |
| 0cdfbac5a1 |
| 29a3ae471f |
| 9c0f56f027 |
| 462e303a6e |
| a84b3c7867 |
| 606267d053 |
| 35791ae478 |
| 10f0002080 |
| 60bff4107d |
| be11fa4b29 |
| da8bc796d3 |
| 429619379e |
| 0fecedbbbf |
| a2244ada75 |
| 7608ba9290 |
| f5f3396d5c |
| ed80ae80f0 |
| c7a47c71f0 |
| b14b8f8c52 |
| df1a83d475 |
| 5b7727cfd1 |
| 93e270dafb |
| be675dbb17 |
| 1c24848db3 |
| 4b5ec796bc |
| 24df4729ca |
| 1e6538efac |
| f9e53f58af |
| 41388efc31 |
| fab5ce6fd0 |
| 207d6baee5 |
| fec72bb2b6 |
| c4c4c24c59 |
| 917c7706ea |
| 8fadcd5b21 |
| 2005ba2dca |
| 557d5fd6e5 |
| 79d2a15f95 |
| ab32e44128 |
| 047059f85f |
| e8364f616d |
| 9098c9b6c6 |
| 84fd9ebac8 |
| 23d5d76d56 |
| b0c86588b6 |
| 5aff1f9489 |
| 199cb3d8cc |
| a98a4ca0b6 |
| c4f49aadfa |
| ca5ac389cf |
| 7a658f7953 |
| e05fc99da7 |
| 787090667e |
| 80b36b4052 |
| 0b8ed521c0 |
| 1ec7c5545f |
| cc6b6760c3 |
| 26aed90ab2 |
| 1c58ccb0c1 |
| 79b80fe817 |
| c0f3841af7 |
| 2b7d9bc471 |
| 98dc493a39 |
| cfaa57b28d |
| 219e603de6 |
| 7663a5bce8 |
| f2841b945d |
| 704a0fd63a |
| bf4101ac38 |
| bc349e8fde |
| 2f0439dca8 |
| 8470c6a980 |
| 43092ba1d7 |
| 61487db481 |
| f56feaf821 |
| c42c8ba505 |
| c313ea7ee2 |
| e3ea9212dd |
| 99d41d8cc6 |
| 8988c1e760 |
| b1a5f8e730 |
| 06a9adb051 |
| 9ce753055c |
| 0ce87b5155 |
| 730370a007 |
| f87909109c |
| d6a6d8b5ef |
| 57563abfa7 |
| 7c7b60a5e9 |
| 3f0b8bff5b |
| 91190cf82d |
| e1db3a4af9 |
@@ -0,0 +1,89 @@
name: Integration Bounty
description: A bounty task for the integration contribution program
title: "[Bounty]: "
labels: []
body:
  - type: markdown
    attributes:
      value: |
        ## Integration Bounty

        This issue is part of the [Integration Bounty Program](../../docs/bounty-program/README.md).
        **Claim this bounty** by commenting below — a maintainer will assign you within 24 hours.

  - type: dropdown
    id: bounty-type
    attributes:
      label: Bounty Type
      options:
        - "Test a Tool (20 pts)"
        - "Write Docs (20 pts)"
        - "Code Contribution (30 pts)"
        - "New Integration (75 pts)"
    validations:
      required: true

  - type: dropdown
    id: difficulty
    attributes:
      label: Difficulty
      options:
        - Easy
        - Medium
        - Hard
    validations:
      required: true

  - type: input
    id: tool-name
    attributes:
      label: Tool Name
      description: The integration this bounty targets (e.g., `airtable`, `salesforce`)
      placeholder: e.g., airtable
    validations:
      required: true

  - type: textarea
    id: description
    attributes:
      label: Description
      description: What needs to be done to complete this bounty.
      placeholder: |
        Describe the specific task, including:
        - What the contributor needs to do
        - Links to relevant files in the repo
        - Any setup requirements (API keys, accounts, etc.)
    validations:
      required: true

  - type: textarea
    id: acceptance-criteria
    attributes:
      label: Acceptance Criteria
      description: What "done" looks like. The PR or report must meet all criteria.
      placeholder: |
        - [ ] Criterion 1
        - [ ] Criterion 2
        - [ ] CI passes
    validations:
      required: true

  - type: textarea
    id: relevant-files
    attributes:
      label: Relevant Files
      description: Links to tool directory, credential spec, health check file, etc.
      placeholder: |
        - Tool: `tools/src/aden_tools/tools/{tool_name}/`
        - Credential spec: `tools/src/aden_tools/credentials/{category}.py`
        - Health checks: `tools/src/aden_tools/credentials/health_check.py`

  - type: textarea
    id: resources
    attributes:
      label: Resources
      description: Links to API docs, examples, or guides that will help the contributor.
      placeholder: |
        - [Building Tools Guide](../../tools/BUILDING_TOOLS.md)
        - [Tool README Template](../../docs/bounty-program/templates/tool-readme-template.md)
        - API docs: https://...
@@ -0,0 +1,31 @@
name: Link Discord Account
description: Connect your GitHub and Discord for the bounty program
title: "link: @{{ github.actor }}"
labels: ["link-discord"]
body:
  - type: markdown
    attributes:
      value: |
        Link your Discord account to receive XP and role rewards when your bounty PRs are merged.

        **How to find your Discord ID:**
        1. Open Discord Settings > Advanced > Enable **Developer Mode**
        2. Right-click your username > **Copy User ID**

  - type: input
    id: discord_id
    attributes:
      label: Discord User ID
      description: "Your numeric Discord ID (not your username). Example: 123456789012345678"
      placeholder: "123456789012345678"
    validations:
      required: true

  - type: input
    id: display_name
    attributes:
      label: Display Name (optional)
      description: How you'd like to be credited
      placeholder: "Jane Doe"
    validations:
      required: false
@@ -0,0 +1,37 @@
name: Bounty completed
description: Awards points and notifies Discord when a bounty PR is merged

on:
  pull_request:
    types: [closed]

jobs:
  bounty-notify:
    if: >
      github.event.pull_request.merged == true &&
      contains(join(github.event.pull_request.labels.*.name, ','), 'bounty:')
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read
      pull-requests: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Bun
        uses: oven-sh/setup-bun@v2
        with:
          bun-version: latest

      - name: Award XP and notify Discord
        run: bun run scripts/bounty-tracker.ts notify
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
          GITHUB_REPOSITORY_NAME: ${{ github.event.repository.name }}
          DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_BOUNTY_WEBHOOK_URL }}
          LURKR_API_KEY: ${{ secrets.LURKR_API_KEY }}
          LURKR_GUILD_ID: ${{ secrets.LURKR_GUILD_ID }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
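GitHub's expression language cannot prefix-match across an array, so the `if:` gate above first flattens the label names with `join` and then substring-matches with `contains`. A minimal Python sketch of the same check (the label names are hypothetical):

```python
# Mirror of: contains(join(github.event.pull_request.labels.*.name, ','), 'bounty:')
labels = ["bounty:new-integration", "good first issue"]  # hypothetical labels

joined = ",".join(labels)   # "bounty:new-integration,good first issue"
print("bounty:" in joined)  # True: this PR carries a bounty label
```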
@@ -62,8 +62,11 @@ jobs:
       uv run pytest tests/ -v

   test-tools:
-    name: Test Tools
-    runs-on: ubuntu-latest
+    name: Test Tools (${{ matrix.os }})
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest]
     steps:
       - uses: actions/checkout@v4
@@ -0,0 +1,126 @@
name: Link Discord account
description: Auto-creates a PR to add contributor to contributors.yml when a link-discord issue is opened

on:
  issues:
    types: [opened]

jobs:
  link-discord:
    if: contains(github.event.issue.labels.*.name, 'link-discord')
    runs-on: ubuntu-latest
    timeout-minutes: 2
    permissions:
      contents: write
      issues: write
      pull-requests: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Parse issue and update contributors.yml
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');

            const issue = context.payload.issue;
            const githubUsername = issue.user.login;

            // Parse the issue body for form fields
            const body = issue.body || '';

            // Extract Discord ID — look for the numeric value after the "Discord User ID" heading
            const discordMatch = body.match(/### Discord User ID\s*\n\s*(\d{17,20})/);
            if (!discordMatch) {
              await github.rest.issues.createComment({
                ...context.repo,
                issue_number: issue.number,
                body: `Could not find a valid Discord ID in the issue body. Please make sure you entered a numeric ID (17-20 digits), not a username.\n\nExample: \`123456789012345678\``
              });
              await github.rest.issues.update({
                ...context.repo,
                issue_number: issue.number,
                state: 'closed',
                state_reason: 'not_planned'
              });
              return;
            }
            const discordId = discordMatch[1];

            // Extract display name (optional)
            const nameMatch = body.match(/### Display Name \(optional\)\s*\n\s*(.+)/);
            const displayName = nameMatch ? nameMatch[1].trim() : '';

            // Check if user already exists
            const yml = fs.readFileSync('contributors.yml', 'utf-8');
            if (yml.includes(`github: ${githubUsername}`)) {
              await github.rest.issues.createComment({
                ...context.repo,
                issue_number: issue.number,
                body: `@${githubUsername} is already in \`contributors.yml\`. If you need to update your Discord ID, please edit the file directly via PR.`
              });
              await github.rest.issues.update({
                ...context.repo,
                issue_number: issue.number,
                state: 'closed',
                state_reason: 'completed'
              });
              return;
            }

            // Append entry to contributors.yml
            let entry = `  - github: ${githubUsername}\n    discord: "${discordId}"`;
            if (displayName && displayName !== '_No response_') {
              entry += `\n    name: ${displayName}`;
            }
            entry += '\n';

            const updated = yml.trimEnd() + '\n' + entry;
            fs.writeFileSync('contributors.yml', updated);

            // Set outputs for commit step
            core.exportVariable('GITHUB_USERNAME', githubUsername);
            core.exportVariable('DISCORD_ID', discordId);
            core.exportVariable('ISSUE_NUMBER', issue.number.toString());

      - name: Create PR
        run: |
          # Check if there are changes
          if git diff --quiet contributors.yml; then
            echo "No changes to contributors.yml"
            exit 0
          fi

          BRANCH="docs/link-discord-${GITHUB_USERNAME}"
          git config user.name "github-actions[bot]"
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git checkout -b "$BRANCH"
          git add contributors.yml
          git commit -m "docs: link @${GITHUB_USERNAME} to Discord"
          git push origin "$BRANCH"

          gh pr create \
            --title "docs: link @${GITHUB_USERNAME} to Discord" \
            --body "Adds @${GITHUB_USERNAME} (Discord \`${DISCORD_ID}\`) to \`contributors.yml\` for bounty XP tracking.

          Closes #${ISSUE_NUMBER}" \
            --base main \
            --head "$BRANCH" \
            --label "link-discord"
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Notify on issue
        uses: actions/github-script@v7
        with:
          script: |
            const username = process.env.GITHUB_USERNAME;
            const issueNumber = parseInt(process.env.ISSUE_NUMBER);

            await github.rest.issues.createComment({
              ...context.repo,
              issue_number: issueNumber,
              body: `A PR has been created to link your account. A maintainer will merge it shortly — once merged, you'll receive XP and Discord pings when your bounty PRs are merged.`
            });
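GitHub issue forms render each submitted field as a `### <label>` heading followed by the value, which is what the `body.match(...)` calls above rely on. A minimal Python sketch of the same extraction, run against a hypothetical rendered issue body:

```python
import re

# Hypothetical issue body as GitHub renders a submitted issue form.
body = """### Discord User ID

123456789012345678

### Display Name (optional)

Jane Doe
"""

# Same patterns as the workflow's JavaScript above.
discord = re.search(r"### Discord User ID\s*\n\s*(\d{17,20})", body)
name = re.search(r"### Display Name \(optional\)\s*\n\s*(.+)", body)

print(discord.group(1))       # 123456789012345678
print(name.group(1).strip())  # Jane Doe
```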
@@ -0,0 +1,40 @@
name: Weekly bounty leaderboard
description: Posts the integration bounty leaderboard to Discord every Monday

on:
  schedule:
    # Every Monday at 9:00 UTC
    - cron: "0 9 * * 1"
  workflow_dispatch:
    inputs:
      since_date:
        description: "Only count PRs merged after this date (YYYY-MM-DD). Leave empty for all-time."
        required: false

jobs:
  leaderboard:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read
      pull-requests: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Bun
        uses: oven-sh/setup-bun@v2
        with:
          bun-version: latest

      - name: Post leaderboard to Discord
        run: bun run scripts/bounty-tracker.ts leaderboard
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
          GITHUB_REPOSITORY_NAME: ${{ github.event.repository.name }}
          DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_BOUNTY_WEBHOOK_URL }}
          LURKR_API_KEY: ${{ secrets.LURKR_API_KEY }}
          LURKR_GUILD_ID: ${{ secrets.LURKR_GUILD_ID }}
          SINCE_DATE: ${{ github.event.inputs.since_date || '' }}
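How `scripts/bounty-tracker.ts` applies `SINCE_DATE` is not shown in this diff; a plausible sketch of the cutoff, assuming merged-PR records with ISO timestamps (all data hypothetical):

```python
from datetime import datetime, timezone

merged_prs = [  # hypothetical merged bounty PRs
    {"number": 101, "merged_at": "2026-01-05T12:00:00Z"},
    {"number": 102, "merged_at": "2026-02-10T09:30:00Z"},
]

# SINCE_DATE arrives as "YYYY-MM-DD"; empty means all-time.
since = datetime.strptime("2026-02-01", "%Y-%m-%d").replace(tzinfo=timezone.utc)

counted = [
    pr for pr in merged_prs
    if datetime.fromisoformat(pr["merged_at"].replace("Z", "+00:00")) >= since
]
print([pr["number"] for pr in counted])  # [102]
```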
@@ -20,8 +20,20 @@ check: ## Run all checks without modifying files (CI-safe)
 	cd core && ruff format --check .
 	cd tools && ruff format --check .

-test: ## Run all tests
+test: ## Run all tests (core + tools, excludes live)
 	cd core && uv run python -m pytest tests/ -v
 	cd tools && uv run python -m pytest -v

+test-tools: ## Run tool tests only (mocked, no credentials needed)
+	cd tools && uv run python -m pytest -v
+
+test-live: ## Run live integration tests (requires real API credentials)
+	cd tools && uv run python -m pytest -m live -s -o "addopts=" --log-cli-level=INFO
+
+test-all: ## Run everything including live tests
+	cd core && uv run python -m pytest tests/ -v
+	cd tools && uv run python -m pytest -v
+	cd tools && uv run python -m pytest -m live -s -o "addopts=" --log-cli-level=INFO
+
 install-hooks: ## Install pre-commit hooks
 	uv pip install pre-commit
@@ -82,6 +82,7 @@ Use Hive when you need:

 - Python 3.11+ for agent development
 - An LLM provider that powers the agents
+- **ripgrep (optional, recommended on Windows):** The `search_files` tool uses ripgrep for faster file search. If not installed, a Python fallback is used. On Windows: `winget install BurntSushi.ripgrep` or `scoop install ripgrep`

 > **Note for Windows Users:** It is strongly recommended to use **WSL (Windows Subsystem for Linux)** or **Git Bash** to run this framework. Some core automation scripts may not execute correctly in standard Command Prompt or PowerShell.
@@ -0,0 +1,27 @@
# Identity mapping: GitHub username -> Discord ID
#
# This file links GitHub accounts to Discord accounts for the
# Integration Bounty Program. When a bounty PR is merged, the
# GitHub Action uses this file to ping the contributor on Discord.
#
# HOW TO ADD YOURSELF:
#   Open a "Link Discord Account" issue:
#   https://github.com/aden-hive/hive/issues/new?template=link-discord.yml
#   A GitHub Action will automatically add your entry here.
#
# To find your Discord ID:
#   1. Open Discord Settings > Advanced > Enable Developer Mode
#   2. Right-click your name > Copy User ID
#
# Format:
#   - github: your-github-username
#     discord: "your-discord-id"   # quotes required (it's a number)
#     name: Your Display Name      # optional

contributors:
  # - github: example-user
  #   discord: "123456789012345678"
  #   name: Example User
  - github: TimothyZhang7
    discord: "408460790061072384"
    name: Timothy@Aden
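A minimal sketch of how a consumer of this file can resolve a GitHub username to a Discord ID. This is illustrative only (the real consumer is `scripts/bounty-tracker.ts`), and it assumes PyYAML is available:

```python
import yaml  # PyYAML, assumed installed

with open("contributors.yml", encoding="utf-8") as f:
    data = yaml.safe_load(f)

# Build a github -> discord lookup from the documented format.
mapping = {c["github"]: c["discord"] for c in data.get("contributors") or []}
print(mapping.get("TimothyZhang7"))  # 408460790061072384
```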
@@ -46,6 +46,7 @@ _SHARED_TOOLS = [
     "read_file",
     "write_file",
     "edit_file",
+    "hashline_edit",
     "list_directory",
     "search_files",
     "run_command",
@@ -55,8 +56,6 @@ _SHARED_TOOLS = [
     "validate_agent_tools",
     "list_agents",
     "list_agent_sessions",
-    "get_agent_session_state",
-    "get_agent_session_memory",
     "list_agent_checkpoints",
     "get_agent_checkpoint",
     "run_agent_tests",
@@ -131,12 +130,23 @@ errors yourself. Don't declare success until validation passes.

 # Tools

+## Paths (MANDATORY)
+**Always use RELATIVE paths**
+(e.g. `exports/agent_name/config.py`, `exports/agent_name/nodes/__init__.py`).
+**Never use absolute paths** like `/mnt/data/...` or `/workspace/...` — they fail.
+The project root is implicit.
+
 ## File I/O
-- read_file(path, offset?, limit?) — read with line numbers
+- read_file(path, offset?, limit?, hashline?) — read with line numbers; \
+  hashline=True for N:hhhh|content anchors (use with hashline_edit)
 - write_file(path, content) — create/overwrite, auto-mkdir
 - edit_file(path, old_text, new_text, replace_all?) — fuzzy-match edit
+- hashline_edit(path, edits, auto_cleanup?, encoding?) — anchor-based \
+  editing using N:hhhh refs from read_file(hashline=True). Ops: set_line, \
+  replace_lines, insert_after, insert_before, replace, append
 - list_directory(path, recursive?) — list contents
-- search_files(pattern, path?, include?) — regex search
+- search_files(pattern, path?, include?, hashline?) — regex search; \
+  hashline=True for anchors in results
 - run_command(command, cwd?, timeout?) — shell execution
 - undo_changes(path?) — restore from git snapshot
@@ -149,8 +159,6 @@ available tools grouped by category. output_schema: "simple" (default) or \
 in an agent's nodes actually exist. Call after building.
 - list_agents() — list all agent packages in exports/ with session counts
 - list_agent_sessions(agent_name, status?, limit?) — list sessions
-- get_agent_session_state(agent_name, session_id) — full session state
-- get_agent_session_memory(agent_name, session_id, key?) — memory data
 - list_agent_checkpoints(agent_name, session_id) — list checkpoints
 - get_agent_checkpoint(agent_name, session_id, checkpoint_id?) — load checkpoint
 - run_agent_tests(agent_name, test_types?, fail_fast?) — run pytest with parsing
@@ -185,8 +193,7 @@ After writing agent code, validate structurally AND run tests:
 ## Debugging Built Agents
 When a user says "my agent is failing" or "debug this agent":
 1. list_agent_sessions("{agent_name}") — find the session
-2. get_agent_session_state("{agent_name}", "{session_id}") — see status
-3. get_agent_session_memory("{agent_name}", "{session_id}") — inspect data
+2. get_worker_status
 4. list_agent_checkpoints / get_agent_checkpoint — trace execution

 # Agent Building Workflow
@@ -608,7 +615,7 @@ You have full coding tools for building and modifying agents:
 - File I/O: read_file, write_file, edit_file, list_directory, search_files, \
   run_command, undo_changes
 - Meta-agent: list_agent_tools, validate_agent_tools, \
-  list_agents, list_agent_sessions, get_agent_session_state, get_agent_session_memory, \
+  list_agents, list_agent_sessions, \
   list_agent_checkpoints, get_agent_checkpoint, run_agent_tests
 - load_built_agent(agent_path) — Load the agent and switch to STAGING mode
 - list_credentials(credential_id?) — List authorized credentials
@@ -6,6 +6,7 @@ helper functions.
 """

 import json
+import logging
 import os
 from dataclasses import dataclass, field
 from pathlib import Path
@@ -18,6 +19,7 @@ from framework.graph.edge import DEFAULT_MAX_TOKENS
 # ---------------------------------------------------------------------------

 HIVE_CONFIG_FILE = Path.home() / ".hive" / "configuration.json"
+logger = logging.getLogger(__name__)


 def get_hive_config() -> dict[str, Any]:
@@ -27,7 +29,12 @@ def get_hive_config() -> dict[str, Any]:
     try:
         with open(HIVE_CONFIG_FILE, encoding="utf-8-sig") as f:
             return json.load(f)
-    except (json.JSONDecodeError, OSError):
+    except (json.JSONDecodeError, OSError) as e:
+        logger.warning(
+            "Failed to load Hive config %s: %s",
+            HIVE_CONFIG_FILE,
+            e,
+        )
         return {}
@@ -37,6 +37,8 @@ from dataclasses import dataclass, field
 from datetime import datetime
 from typing import Any

+import json as _json
+
 import httpx

 logger = logging.getLogger(__name__)
@@ -260,6 +262,11 @@ class AdenCredentialClient:
         self.config = config
         self._client: httpx.Client | None = None

+    @staticmethod
+    def _parse_json(response: httpx.Response) -> Any:
+        """Parse JSON from response, tolerating UTF-8 BOM."""
+        return _json.loads(response.content.decode("utf-8-sig"))
+
     def _get_client(self) -> httpx.Client:
         if self._client is None:
             headers = {
@@ -295,7 +302,7 @@ class AdenCredentialClient:
             raise AdenAuthenticationError("Agent API key is invalid or revoked")

         if response.status_code == 403:
-            data = response.json()
+            data = self._parse_json(response)
             raise AdenClientError(data.get("message", "Forbidden"))

         if response.status_code == 404:
@@ -309,7 +316,7 @@ class AdenCredentialClient:
             )

         if response.status_code == 400:
-            data = response.json()
+            data = self._parse_json(response)
             msg = data.get("message", "Bad request")
             if data.get("error") == "refresh_failed" or "refresh" in msg.lower():
                 raise AdenRefreshError(
@@ -356,7 +363,7 @@ class AdenCredentialClient:
             alias, status, email, expires_at.
         """
         response = self._request_with_retry("GET", "/v1/credentials")
-        data = response.json()
+        data = self._parse_json(response)
         return [AdenIntegrationInfo.from_dict(item) for item in data.get("integrations", [])]

     # Alias
@@ -376,7 +383,7 @@ class AdenCredentialClient:
         """
         try:
             response = self._request_with_retry("GET", f"/v1/credentials/{integration_id}")
-            data = response.json()
+            data = self._parse_json(response)
             return AdenCredentialResponse.from_dict(data, integration_id=integration_id)
         except AdenNotFoundError:
             return None
@@ -394,7 +401,7 @@ class AdenCredentialClient:
             AdenCredentialResponse with new access_token.
         """
         response = self._request_with_retry("POST", f"/v1/credentials/{integration_id}/refresh")
-        data = response.json()
+        data = self._parse_json(response)
         return AdenCredentialResponse.from_dict(data, integration_id=integration_id)

     def validate_token(self, integration_id: str) -> dict[str, Any]:
@@ -407,7 +414,7 @@ class AdenCredentialClient:
             {"valid": bool, "status": str, "expires_at": str, "error": str|null}
         """
         response = self._request_with_retry("GET", f"/v1/credentials/{integration_id}/validate")
-        return response.json()
+        return self._parse_json(response)

     def health_check(self) -> dict[str, Any]:
         """Check Aden server health."""
@@ -415,7 +422,7 @@ class AdenCredentialClient:
         client = self._get_client()
         response = client.get("/health")
         if response.status_code == 200:
-            data = response.json()
+            data = self._parse_json(response)
             data["latency_ms"] = response.elapsed.total_seconds() * 1000
             return data
         return {"status": "degraded", "error": f"HTTP {response.status_code}"}
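The `utf-8-sig` decoding matters because some Windows tooling emits JSON with a UTF-8 byte-order mark, which the `json` parser rejects once the BOM survives into the string. A minimal sketch of the failure and the fix:

```python
import json

raw = b"\xef\xbb\xbf" + b'{"status": "ok"}'  # hypothetical BOM-prefixed payload

try:
    json.loads(raw.decode("utf-8"))  # BOM survives as \ufeff -> JSONDecodeError
except json.JSONDecodeError as e:
    print("plain utf-8 fails:", e)

print(json.loads(raw.decode("utf-8-sig")))  # BOM stripped -> {'status': 'ok'}
```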
@@ -568,7 +568,7 @@ def _load_nodes_from_python_agent(agent_path: Path) -> list:
 def _load_nodes_from_json_agent(agent_json: Path) -> list:
     """Load nodes from a JSON-based agent."""
     try:
-        with open(agent_json, encoding="utf-8") as f:
+        with open(agent_json, encoding="utf-8-sig") as f:
             data = json.load(f)

         from framework.graph import NodeSpec
@@ -203,7 +203,7 @@ class EncryptedFileStorage(CredentialStorage):
         # Decrypt
         try:
             json_bytes = self._fernet.decrypt(encrypted)
-            data = json.loads(json_bytes.decode())
+            data = json.loads(json_bytes.decode("utf-8-sig"))
         except Exception as e:
             raise CredentialDecryptionError(
                 f"Failed to decrypt credential '{credential_id}': {e}"
@@ -227,7 +227,7 @@ class EncryptedFileStorage(CredentialStorage):
         index_path = self.base_path / "metadata" / "index.json"
         if not index_path.exists():
             return []
-        with open(index_path, encoding="utf-8") as f:
+        with open(index_path, encoding="utf-8-sig") as f:
             index = json.load(f)
         return list(index.get("credentials", {}).keys())

@@ -268,7 +268,7 @@ class EncryptedFileStorage(CredentialStorage):
         index_path = self.base_path / "metadata" / "index.json"

         if index_path.exists():
-            with open(index_path, encoding="utf-8") as f:
+            with open(index_path, encoding="utf-8-sig") as f:
                 index = json.load(f)
         else:
             index = {"credentials": {}, "version": "1.0"}
@@ -431,8 +431,7 @@ class GraphSpec(BaseModel):
     max_tokens: int = Field(default=None)  # resolved by _resolve_max_tokens validator

     # Cleanup LLM for JSON extraction fallback (fast/cheap model preferred)
-    # If not set, uses CEREBRAS_API_KEY -> cerebras/llama-3.3-70b or
-    # ANTHROPIC_API_KEY -> claude-haiku-4-5 as fallback
+    # If not set, uses CEREBRAS_API_KEY -> cerebras/llama-3.3-70b
     cleanup_llm_model: str | None = None

     # Execution limits
@@ -4058,329 +4058,3 @@ class EventLoopNode(NodeProtocol):
                 content=json.dumps(result_json, indent=2),
                 is_error=True,
             )

    # -------------------------------------------------------------------
    # Subagent Execution
    # -------------------------------------------------------------------

    async def _execute_subagent(
        self,
        ctx: NodeContext,
        agent_id: str,
        task: str,
        *,
        accumulator: OutputAccumulator | None = None,
    ) -> ToolResult:
        """Execute a subagent and return the result as a ToolResult.

        The subagent:
        - Gets a fresh conversation with just the task
        - Has read-only access to the parent's readable memory
        - Cannot delegate to its own subagents (prevents recursion)
        - Returns its output in structured JSON format

        Args:
            ctx: Parent node's context (for memory, tools, LLM access).
            agent_id: The node ID of the subagent to invoke.
            task: The task description to give the subagent.
            accumulator: Parent's OutputAccumulator — provides outputs that
                have been set via ``set_output`` but not yet written to
                shared memory (which only happens after the node completes).

        Returns:
            ToolResult with structured JSON output containing:
            - message: Human-readable summary
            - data: Subagent's output (free-form JSON)
            - metadata: Execution metadata (success, tokens, latency)
        """
        from framework.graph.node import NodeContext, SharedMemory

        # Log subagent invocation start
        logger.info(
            "\n" + "=" * 60 + "\n"
            "🤖 SUBAGENT INVOCATION\n"
            "=" * 60 + "\n"
            "Parent Node: %s\n"
            "Subagent ID: %s\n"
            "Task: %s\n" + "=" * 60,
            ctx.node_id,
            agent_id,
            task[:500] + "..." if len(task) > 500 else task,
        )

        # 1. Validate agent exists in registry
        if agent_id not in ctx.node_registry:
            return ToolResult(
                tool_use_id="",
                content=json.dumps(
                    {
                        "message": f"Sub-agent '{agent_id}' not found in registry",
                        "data": None,
                        "metadata": {"agent_id": agent_id, "success": False, "error": "not_found"},
                    }
                ),
                is_error=True,
            )

        subagent_spec = ctx.node_registry[agent_id]

        # 2. Create read-only memory snapshot
        # Start with everything the parent can read from shared memory.
        parent_data = ctx.memory.read_all()

        # Merge in-flight outputs from the parent's accumulator.
        # set_output() writes to the accumulator but shared memory is only
        # updated after the parent node completes — so the subagent would
        # otherwise miss any keys the parent set before delegating.
        if accumulator:
            for key, value in accumulator.to_dict().items():
                if key not in parent_data:
                    parent_data[key] = value

        subagent_memory = SharedMemory()
        for key, value in parent_data.items():
            subagent_memory.write(key, value, validate=False)

        # Allow reads for parent data AND the subagent's declared input_keys
        # (input_keys may reference keys that exist but weren't in read_all,
        # or keys that were just written by the accumulator).
        read_keys = set(parent_data.keys()) | set(subagent_spec.input_keys or [])
        scoped_memory = subagent_memory.with_permissions(
            read_keys=list(read_keys),
            write_keys=[],  # Read-only!
        )

        # 2b. Set up report callback (one-way channel to parent / event bus)
        subagent_reports: list[dict] = []

        async def _report_callback(
            message: str,
            data: dict | None = None,
            *,
            wait_for_response: bool = False,
        ) -> str | None:
            subagent_reports.append({"message": message, "data": data, "timestamp": time.time()})
            if self._event_bus:
                await self._event_bus.emit_subagent_report(
                    stream_id=ctx.node_id,
                    node_id=f"{ctx.node_id}:subagent:{agent_id}",
                    subagent_id=agent_id,
                    message=message,
                    data=data,
                    execution_id=ctx.execution_id,
                )

            if not wait_for_response:
                return None

            if not self._event_bus:
                logger.warning(
                    "Subagent '%s' requested user response but no event_bus available",
                    agent_id,
                )
                return None

            # Create isolated receiver and register for input routing
            import uuid

            escalation_id = f"{ctx.node_id}:escalation:{uuid.uuid4().hex[:8]}"
            receiver = _EscalationReceiver()
            registry = ctx.shared_node_registry

            registry[escalation_id] = receiver
            try:
                # Stream message to user (parent's node_id so TUI shows parent talking)
                await self._event_bus.emit_client_output_delta(
                    stream_id=ctx.node_id,
                    node_id=ctx.node_id,
                    content=message,
                    snapshot=message,
                    execution_id=ctx.execution_id,
                )
                # Request input (escalation_id for routing response back)
                await self._event_bus.emit_client_input_requested(
                    stream_id=ctx.node_id,
                    node_id=escalation_id,
                    prompt=message,
                    execution_id=ctx.execution_id,
                )
                # Block until user responds
                return await receiver.wait()
            finally:
                registry.pop(escalation_id, None)

        # 3. Filter tools for subagent
        # Use the full tool catalog (ctx.all_tools) so subagents can access tools
        # that aren't in the parent node's filtered set (e.g. browser tools for a
        # GCU subagent when the parent only has web_scrape/save_data).
        # Falls back to ctx.available_tools if all_tools is empty (e.g. in tests).
        subagent_tool_names = set(subagent_spec.tools or [])
        tool_source = ctx.all_tools if ctx.all_tools else ctx.available_tools

        subagent_tools = [
            t
            for t in tool_source
            if t.name in subagent_tool_names and t.name != "delegate_to_sub_agent"
        ]

        missing = subagent_tool_names - {t.name for t in subagent_tools}
        if missing:
            logger.warning(
                "Subagent '%s' requested tools not found in catalog: %s",
                agent_id,
                sorted(missing),
            )

        logger.info(
            "📦 Subagent '%s' configuration:\n"
            "  - System prompt: %s\n"
            "  - Tools available (%d): %s\n"
            "  - Memory keys inherited: %s",
            agent_id,
            (subagent_spec.system_prompt[:200] + "...")
            if subagent_spec.system_prompt and len(subagent_spec.system_prompt) > 200
            else subagent_spec.system_prompt,
            len(subagent_tools),
            [t.name for t in subagent_tools],
            list(parent_data.keys()),
        )

        # 4. Build subagent context
        max_iter = min(self._config.max_iterations, 10)
        subagent_ctx = NodeContext(
            runtime=ctx.runtime,
            node_id=f"{ctx.node_id}:subagent:{agent_id}",
            node_spec=subagent_spec,
            memory=scoped_memory,
            input_data={"task": task, **parent_data},
            llm=ctx.llm,
            available_tools=subagent_tools,
            goal_context=(
                f"Your specific task: {task}\n\n"
                f"COMPLETION REQUIREMENTS:\n"
                f"When your task is done, you MUST call set_output() "
                f"for each required key: {subagent_spec.output_keys}\n"
                f"Alternatively, call report_to_parent(mark_complete=true) "
                f"with your findings in message/data.\n"
                f"You have a maximum of {max_iter} turns to complete this task."
            ),
            goal=ctx.goal,
            max_tokens=ctx.max_tokens,
            runtime_logger=ctx.runtime_logger,
            is_subagent_mode=True,  # Prevents nested delegation
            report_callback=_report_callback,
            node_registry={},  # Empty - no nested subagents
            shared_node_registry=ctx.shared_node_registry,  # For escalation routing
        )

        # 5. Create and execute subagent EventLoopNode
        # Derive a conversation store for the subagent from the parent's store.
        # Each invocation gets a unique path so that repeated delegate calls
        # (e.g. one per profile) don't restore a stale completed conversation.
        self._subagent_instance_counter.setdefault(agent_id, 0)
        self._subagent_instance_counter[agent_id] += 1
        subagent_instance = str(self._subagent_instance_counter[agent_id])

        subagent_conv_store = None
        if self._conversation_store is not None:
            from framework.storage.conversation_store import FileConversationStore

            parent_base = getattr(self._conversation_store, "_base", None)
            if parent_base is not None:
                # Store subagent conversations parallel to the parent node,
                # not nested inside it. e.g. conversations/{node}:subagent:{agent_id}:{instance}/
                conversations_dir = parent_base.parent  # e.g. conversations/
                subagent_dir_name = f"{agent_id}-{subagent_instance}"
                subagent_store_path = conversations_dir / subagent_dir_name
                subagent_conv_store = FileConversationStore(base_path=subagent_store_path)

        # Derive a subagent-scoped spillover dir so large tool results
        # (e.g. browser_snapshot) get written to disk instead of being
        # silently truncated. Each instance gets its own directory to
        # avoid file collisions between concurrent subagents.
        subagent_spillover = None
        if self._config.spillover_dir:
            subagent_spillover = str(
                Path(self._config.spillover_dir) / agent_id / subagent_instance
            )

        subagent_node = EventLoopNode(
            event_bus=None,  # Subagents don't emit events to parent's bus
            judge=SubagentJudge(task=task, max_iterations=max_iter),
            config=LoopConfig(
                max_iterations=max_iter,  # Tighter budget
                max_tool_calls_per_turn=self._config.max_tool_calls_per_turn,
                tool_call_overflow_margin=self._config.tool_call_overflow_margin,
                max_history_tokens=self._config.max_history_tokens,
                stall_detection_threshold=self._config.stall_detection_threshold,
                max_tool_result_chars=self._config.max_tool_result_chars,
                spillover_dir=subagent_spillover,
            ),
            tool_executor=self._tool_executor,
            conversation_store=subagent_conv_store,
        )

        try:
            logger.info("🚀 Starting subagent '%s' execution...", agent_id)
            start_time = time.time()
            result = await subagent_node.execute(subagent_ctx)
            latency_ms = int((time.time() - start_time) * 1000)

            logger.info(
                "\n" + "-" * 60 + "\n"
                "✅ SUBAGENT '%s' COMPLETED\n"
                "-" * 60 + "\n"
                "Success: %s\n"
                "Latency: %dms\n"
                "Tokens used: %s\n"
                "Output keys: %s\n" + "-" * 60,
                agent_id,
                result.success,
                latency_ms,
                result.tokens_used,
                list(result.output.keys()) if result.output else [],
            )

            result_json = {
                "message": (
                    f"Sub-agent '{agent_id}' completed successfully"
                    if result.success
                    else f"Sub-agent '{agent_id}' failed: {result.error}"
                ),
                "data": result.output,
                "reports": subagent_reports if subagent_reports else None,
                "metadata": {
                    "agent_id": agent_id,
                    "success": result.success,
                    "tokens_used": result.tokens_used,
                    "latency_ms": latency_ms,
                    "report_count": len(subagent_reports),
                },
            }

            return ToolResult(
                tool_use_id="",
                content=json.dumps(result_json, indent=2, default=str),
                is_error=not result.success,
            )

        except Exception as e:
            logger.exception(
                "\n" + "!" * 60 + "\n❌ SUBAGENT '%s' FAILED\nError: %s\n" + "!" * 60,
                agent_id,
                str(e),
            )
            result_json = {
                "message": f"Sub-agent '{agent_id}' raised exception: {e}",
                "data": None,
                "metadata": {
                    "agent_id": agent_id,
                    "success": False,
                    "error": str(e),
                },
            }
            return ToolResult(
                tool_use_id="",
                content=json.dumps(result_json, indent=2),
                is_error=True,
            )
@@ -183,11 +183,12 @@ class GraphExecutor:
         self.tool_provider_map = tool_provider_map
         self.dynamic_tools_provider = dynamic_tools_provider

-        # Initialize output cleaner
+        # Initialize output cleaner — uses its own dedicated fast model (CEREBRAS_API_KEY),
+        # never the main agent LLM. Passing the main LLM here would cause expensive
+        # Anthropic calls for output cleaning whenever ANTHROPIC_API_KEY is set.
         self.cleansing_config = cleansing_config or CleansingConfig()
         self.output_cleaner = OutputCleaner(
             config=self.cleansing_config,
             llm_provider=llm,
         )

         # Parallel execution settings
@@ -620,11 +621,14 @@ class GraphExecutor:
         # node doesn't restore a filled OutputAccumulator from the previous
         # webhook run (which would cause the judge to accept immediately).
         # The conversation history is preserved (continuous memory).
+        # Exclude cold restores — those need to continue the conversation
+        # naturally without a "start fresh" marker.
         _is_fresh_shared = bool(
             session_state
             and session_state.get("resume_session_id")
             and not session_state.get("paused_at")
             and not session_state.get("resume_from_checkpoint")
+            and not session_state.get("cold_restore")
         )
         if _is_fresh_shared and is_continuous and self._storage_path:
             try:
@@ -154,69 +154,17 @@ class HITLProtocol:
         """
         Parse human's raw input into structured response.

-        Uses Haiku to intelligently extract answers for each question.
+        Maps the raw input to the first question. For multi-question HITL,
+        the caller should present one question at a time.
         """
-        import os
-
         response = HITLResponse(request_id=request.request_id, raw_input=raw_input)

         # If no questions, just return raw input
         if not request.questions:
             return response

-        # Try to use Haiku for intelligent parsing
-        api_key = os.environ.get("ANTHROPIC_API_KEY")
-        if not use_haiku or not api_key:
-            # Simple fallback: treat as answer to first question
-            if request.questions:
-                response.answers[request.questions[0].id] = raw_input
-            return response
-
-        # Use Haiku to extract answers
-        try:
-            import json
-
-            import anthropic
-
-            questions_str = "\n".join(
-                [f"{i + 1}. {q.question} (id: {q.id})" for i, q in enumerate(request.questions)]
-            )
-
-            prompt = f"""Parse the user's response and extract answers for each question.
-
-Questions asked:
-{questions_str}
-
-User's response:
-{raw_input}
-
-Extract the answer for each question. Output JSON with question IDs as keys.
-
-Example format:
-{{"question-1": "answer here", "question-2": "answer here"}}"""
-
-            client = anthropic.Anthropic(api_key=api_key)
-            message = client.messages.create(
-                model="claude-haiku-4-5-20251001",
-                max_tokens=500,
-                messages=[{"role": "user", "content": prompt}],
-            )
-
-            # Parse Haiku's response
-            import re
-
-            response_text = message.content[0].text.strip()
-            json_match = re.search(r"\{[^{}]*\}", response_text, re.DOTALL)
-
-            if json_match:
-                parsed = json.loads(json_match.group())
-                response.answers = parsed
-
-        except Exception:
-            # Fallback: use raw input for first question
-            if request.questions:
-                response.answers[request.questions[0].id] = raw_input
-
+        # Map raw input to first question
+        response.answers[request.questions[0].id] = raw_input
         return response

     @staticmethod
@@ -585,7 +585,6 @@ class NodeResult:
         Generate a human-readable summary of this node's execution and output.

         This is like toString() - it describes what the node produced in its current state.
-        Uses Haiku to intelligently summarize complex outputs.
         """
         if not self.success:
             return f"❌ Failed: {self.error}"
@@ -593,59 +592,13 @@ class NodeResult:
         if not self.output:
             return "✓ Completed (no output)"

-        # Use Haiku to generate intelligent summary
-        import os
-
-        api_key = os.environ.get("ANTHROPIC_API_KEY")
-
-        if not api_key:
-            # Fallback: simple key-value listing
-            parts = [f"✓ Completed with {len(self.output)} outputs:"]
-            for key, value in list(self.output.items())[:5]:  # Limit to 5 keys
-                value_str = str(value)[:100]
-                if len(str(value)) > 100:
-                    value_str += "..."
-                parts.append(f"  • {key}: {value_str}")
-            return "\n".join(parts)
-
-        # Use Haiku to generate intelligent summary
-        try:
-            import json
-
-            import anthropic
-
-            node_context = ""
-            if node_spec:
-                node_context = f"\nNode: {node_spec.name}\nPurpose: {node_spec.description}"
-
-            output_json = json.dumps(self.output, indent=2, default=str)[:2000]
-            prompt = (
-                f"Generate a 1-2 sentence human-readable summary of "
-                f"what this node produced.{node_context}\n\n"
-                f"Node output:\n{output_json}\n\n"
-                "Provide a concise, clear summary that a human can quickly "
-                "understand. Focus on the key information produced."
-            )
-
-            client = anthropic.Anthropic(api_key=api_key)
-            message = client.messages.create(
-                model="claude-haiku-4-5-20251001",
-                max_tokens=200,
-                messages=[{"role": "user", "content": prompt}],
-            )
-
-            summary = message.content[0].text.strip()
-            return f"✓ {summary}"
-
-        except Exception:
-            # Fallback on error
-            parts = [f"✓ Completed with {len(self.output)} outputs:"]
-            for key, value in list(self.output.items())[:3]:
-                value_str = str(value)[:80]
-                if len(str(value)) > 80:
-                    value_str += "..."
-                parts.append(f"  • {key}: {value_str}")
-            return "\n".join(parts)
+        parts = [f"✓ Completed with {len(self.output)} outputs:"]
+        for key, value in list(self.output.items())[:5]:  # Limit to 5 keys
+            value_str = str(value)[:100]
+            if len(str(value)) > 100:
+                value_str += "..."
+            parts.append(f"  • {key}: {value_str}")
+        return "\n".join(parts)


 class NodeProtocol(ABC):
@@ -2899,6 +2899,7 @@ def run_tests(
             text=True,
             timeout=600,  # 10 minute timeout
             env=env,
+            stdin=subprocess.DEVNULL,
         )
     except subprocess.TimeoutExpired:
         return json.dumps(
@@ -3091,6 +3092,7 @@ def debug_test(
             text=True,
             timeout=120,  # 2 minute timeout for single test
             env=env,
+            stdin=subprocess.DEVNULL,
        )
    except subprocess.TimeoutExpired:
        return json.dumps(
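The added `stdin=subprocess.DEVNULL` keeps the spawned pytest process from hanging if anything in the run tries to read standard input: the child sees EOF immediately. A minimal sketch of the effect:

```python
import subprocess

# input() in the child fails fast with EOFError instead of blocking forever.
result = subprocess.run(
    ["python", "-c", "input()"],
    capture_output=True,
    text=True,
    stdin=subprocess.DEVNULL,
)
print(result.returncode)  # non-zero exit, no hang
```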
@@ -3714,82 +3716,6 @@ def list_agent_sessions(
     )


-@mcp.tool()
-def get_agent_session_state(
-    agent_work_dir: Annotated[str, "Path to the agent's working directory"],
-    session_id: Annotated[str, "The session ID (e.g., 'session_20260208_143022_abc12345')"],
-) -> str:
-    """
-    Load full session state for a specific session.
-
-    Returns complete session data including status, progress, result,
-    metrics, and checkpoint info. Memory values are excluded to prevent
-    context bloat -- use get_agent_session_memory to retrieve memory contents.
-    """
-    state_path = Path(agent_work_dir) / "sessions" / session_id / "state.json"
-    data = _read_session_json(state_path)
-    if data is None:
-        return json.dumps({"error": f"Session not found: {session_id}"})
-
-    memory = data.get("memory", {})
-    data["memory_keys"] = list(memory.keys()) if isinstance(memory, dict) else []
-    data["memory_size"] = len(memory) if isinstance(memory, dict) else 0
-    data.pop("memory", None)
-
-    return json.dumps(data, indent=2, default=str)
-
-
-@mcp.tool()
-def get_agent_session_memory(
-    agent_work_dir: Annotated[str, "Path to the agent's working directory"],
-    session_id: Annotated[str, "The session ID"],
-    key: Annotated[str, "Specific memory key to retrieve. Empty for all."] = "",
-) -> str:
-    """
-    Get memory contents from a session.
-
-    Memory stores intermediate results passed between nodes. Use this
-    to inspect what data was produced during execution.
-
-    If key is provided, returns only that memory key's value.
-    If key is empty, returns all memory keys and their values.
-    """
-    state_path = Path(agent_work_dir) / "sessions" / session_id / "state.json"
-    data = _read_session_json(state_path)
-    if data is None:
-        return json.dumps({"error": f"Session not found: {session_id}"})
-
-    memory = data.get("memory", {})
-    if not isinstance(memory, dict):
-        memory = {}
-
-    if key:
-        if key not in memory:
-            return json.dumps(
-                {
-                    "error": f"Memory key not found: '{key}'",
-                    "available_keys": list(memory.keys()),
-                }
-            )
-        value = memory[key]
-        return json.dumps(
-            {
-                "session_id": session_id,
-                "key": key,
-                "value": value,
-                "value_type": type(value).__name__,
-            },
-            indent=2,
-            default=str,
-        )
-
-    return json.dumps(
-        {"session_id": session_id, "memory": memory, "total_keys": len(memory)},
-        indent=2,
-        default=str,
-    )
-
-
 @mcp.tool()
 def list_agent_checkpoints(
     agent_work_dir: Annotated[str, "Path to the agent's working directory"],
@@ -559,6 +559,23 @@ def cmd_run(args: argparse.Namespace) -> int:
     except (FileNotFoundError, json.JSONDecodeError) as e:
         print(f"Error reading input file: {e}", file=sys.stderr)
         return 1
+    # Validate --output path before execution begins (fail fast, before agent loads)
+    if args.output:
+        import os
+
+        output_parent = Path(args.output).parent
+        if not output_parent.exists():
+            print(
+                f"Error: output directory does not exist: {output_parent}/",
+                file=sys.stderr,
+            )
+            return 1
+        if not os.access(output_parent, os.W_OK):
+            print(
+                f"Error: output directory is not writable: {output_parent}/",
+                file=sys.stderr,
+            )
+            return 1

     # Run the agent (with TUI or standard)
     if getattr(args, "tui", False):
@@ -1090,62 +1107,19 @@ def _interactive_approval(request):
 def _format_natural_language_to_json(
     user_input: str, input_keys: list[str], agent_description: str, session_context: dict = None
 ) -> dict:
-    """Use Haiku to convert natural language input to JSON based on agent's input schema."""
-    import os
+    """Convert natural language input to JSON based on agent's input schema.

-    import anthropic
+    Maps user input to the primary input field. For follow-up inputs,
+    appends to the existing value.
+    """
+    main_field = input_keys[0] if input_keys else "objective"

-    client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
-
-    # Build prompt for Haiku
-    session_info = ""
     if session_context:
-        # Extract the main field (usually 'objective') that we'll append to
-        main_field = input_keys[0] if input_keys else "objective"
         existing_value = session_context.get(main_field, "")
         if existing_value:
             return {main_field: f"{existing_value}\n\n{user_input}"}

-        session_info = (
-            f'\n\nExisting {main_field}: "{existing_value}"\n\n'
-            f"The user is providing ADDITIONAL information. Append this new "
-            f"information to the existing {main_field} to create an enriched, "
-            "more detailed version."
-        )
-
-    prompt = f"""You are formatting user input for an agent that requires specific input fields.
-
-Agent: {agent_description}
-
-Required input fields: {", ".join(input_keys)}{session_info}
-
-User input: {user_input}
-
-{"If this is a follow-up, APPEND new info to the existing field value." if session_context else ""}
-
-Output ONLY valid JSON, no explanation:"""
-
-    try:
-        message = client.messages.create(
-            model="claude-haiku-4-5-20251001",  # Fast and cheap
-            max_tokens=500,
-            messages=[{"role": "user", "content": prompt}],
-        )
-
-        json_str = message.content[0].text.strip()
-        # Remove markdown code blocks if present
-        if json_str.startswith("```"):
-            json_str = json_str.split("```")[1]
-            if json_str.startswith("json"):
-                json_str = json_str[4:]
-            json_str = json_str.strip()
-
-        return json.loads(json_str)
-    except Exception:
-        # Fallback: try to infer the main field
-        if len(input_keys) == 1:
-            return {input_keys[0]: user_input}
-        else:
-            # Put it in the first field as fallback
-            return {input_keys[0]: user_input}
+    return {main_field: user_input}


 def cmd_shell(args: argparse.Namespace) -> int:
@@ -1965,7 +1939,6 @@ def cmd_setup_credentials(args: argparse.Namespace) -> int:
 def _open_browser(url: str) -> None:
     """Open URL in the default browser (best-effort, non-blocking)."""
     import subprocess
     import sys

     try:
         if sys.platform == "darwin":
@@ -1975,6 +1948,12 @@ def _open_browser(url: str) -> None:
                 stderr=subprocess.DEVNULL,
                 encoding="utf-8",
             )
+        elif sys.platform == "win32":
+            subprocess.Popen(
+                ["cmd", "/c", "start", "", url],
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+            )
         elif sys.platform == "linux":
             subprocess.Popen(
                 ["xdg-open", url],
@@ -7,6 +7,8 @@ Supports both STDIO and HTTP transports using the official MCP Python SDK.
 import asyncio
 import logging
 import os
+import sys
+import threading
 from dataclasses import dataclass, field
 from typing import Any, Literal

@@ -73,6 +75,8 @@ class MCPClient:
         # Background event loop for persistent STDIO connection
         self._loop = None
         self._loop_thread = None
+        # Serialize STDIO tool calls (avoids races, helps on Windows)
+        self._stdio_call_lock = threading.Lock()

     def _run_async(self, coro):
         """
@@ -156,11 +160,19 @@ class MCPClient:
         # Create server parameters
         # Always inherit parent environment and merge with any custom env vars
         merged_env = {**os.environ, **(self.config.env or {})}
+        # On Windows, passing cwd can cause WinError 267 ("invalid directory name").
+        # tool_registry passes cwd=None and uses absolute script paths when applicable.
+        cwd = self.config.cwd
+        if os.name == "nt" and cwd is not None:
+            # Avoid passing cwd on Windows; tool_registry should have set cwd=None
+            # and absolute script paths for tools-dir servers. If cwd is still set,
+            # pass None to prevent WinError 267 (caller should use absolute paths).
+            cwd = None
         server_params = StdioServerParameters(
             command=self.config.command,
             args=self.config.args,
             env=merged_env,
-            cwd=self.config.cwd,
+            cwd=cwd,
         )

         # Store for later use
@@ -184,10 +196,12 @@ class MCPClient:
             from mcp.client.stdio import stdio_client

             # Create persistent stdio client context.
-            # Redirect server stderr to devnull to prevent raw
-            # output from leaking behind the TUI.
-            devnull = open(os.devnull, "w")  # noqa: SIM115
-            self._stdio_context = stdio_client(server_params, errlog=devnull)
+            # On Windows, use stderr so subprocess startup errors are visible.
+            if os.name == "nt":
+                errlog = sys.stderr
+            else:
+                errlog = open(os.devnull, "w")  # noqa: SIM115
+            self._stdio_context = stdio_client(server_params, errlog=errlog)
             (
                 self._read_stream,
                 self._write_stream,
@@ -353,7 +367,8 @@ class MCPClient:
             raise ValueError(f"Unknown tool: {tool_name}")

         if self.config.transport == "stdio":
-            return self._run_async(self._call_tool_stdio_async(tool_name, arguments))
+            with self._stdio_call_lock:
+                return self._run_async(self._call_tool_stdio_async(tool_name, arguments))
         else:
             return self._call_tool_http(tool_name, arguments)

@@ -448,11 +463,15 @@ class MCPClient:
                 if self._stdio_context:
                     await self._stdio_context.__aexit__(None, None, None)
             except asyncio.CancelledError:
-                logger.warning(
+                logger.debug(
                     "STDIO context cleanup was cancelled; proceeding with best-effort shutdown"
                 )
             except Exception as e:
-                logger.warning(f"Error closing STDIO context: {e}")
+                msg = str(e).lower()
+                if "cancel scope" in msg or "different task" in msg:
+                    logger.debug("STDIO context teardown (known anyio quirk): %s", e)
+                else:
+                    logger.warning(f"Error closing STDIO context: {e}")
             finally:
                 self._stdio_context = None
@@ -956,11 +956,14 @@ class AgentRunner:

         # Fallback: load from agent.json (legacy JSON-based agents)
         agent_json_path = agent_path / "agent.json"
-        if not agent_json_path.exists():
+        if not agent_json_path.is_file():
             raise FileNotFoundError(f"No agent.py or agent.json found in {agent_path}")

-        with open(agent_json_path, encoding="utf-8") as f:
-            graph, goal = load_agent_export(f.read())
+        content = agent_json_path.read_text(encoding="utf-8").strip()
+        if not content:
+            raise FileNotFoundError(f"agent.json is empty: {agent_json_path}")
+
+        graph, goal = load_agent_export(content)

         return cls(
             agent_path=agent_path,
@@ -326,6 +326,103 @@ class ToolRegistry:
        """Restore execution context to its previous state."""
        _execution_context.reset(token)

+   @staticmethod
+   def resolve_mcp_stdio_config(server_config: dict[str, Any], base_dir: Path) -> dict[str, Any]:
+       """Resolve cwd and script paths for MCP stdio config (Windows compatibility).
+
+       Use this when building MCPServerConfig from a config file (e.g. in
+       list_agent_tools, discover_mcp_tools) so hive-tools and other servers
+       work on Windows. Call with base_dir = directory containing the config.
+       """
+       registry = ToolRegistry()
+       return registry._resolve_mcp_server_config(server_config, base_dir)
+
+   def _resolve_mcp_server_config(
+       self, server_config: dict[str, Any], base_dir: Path
+   ) -> dict[str, Any]:
+       """Resolve cwd and script paths for MCP stdio servers (Windows compatibility).
+
+       On Windows, passing cwd to subprocess can cause WinError 267. We use cwd=None
+       and absolute script paths when the server runs a .py script from the tools dir.
+       If the resolved cwd doesn't exist (e.g. config from ~/.hive/agents/), fall back
+       to Path.cwd() / "tools".
+       """
+       config = dict(server_config)
+       if config.get("transport") != "stdio":
+           return config
+
+       cwd = config.get("cwd")
+       args = list(config.get("args", []))
+       if not cwd and not args:
+           return config
+
+       # Resolve cwd relative to base_dir
+       resolved_cwd: Path | None = None
+       if cwd:
+           if Path(cwd).is_absolute():
+               resolved_cwd = Path(cwd)
+           else:
+               resolved_cwd = (base_dir / cwd).resolve()
+
+       # Find .py script in args (e.g. coder_tools_server.py, files_server.py)
+       script_name = None
+       for i, arg in enumerate(args):
+           if isinstance(arg, str) and arg.endswith(".py"):
+               script_name = arg
+               script_idx = i
+               break
+
+       if resolved_cwd is None:
+           return config
+
+       # If resolved cwd doesn't exist or (when we have a script) doesn't contain it,
+       # try fallback
+       tools_fallback = Path.cwd() / "tools"
+       need_fallback = not resolved_cwd.is_dir()
+       if script_name and not need_fallback:
+           need_fallback = not (resolved_cwd / script_name).exists()
+       if need_fallback:
+           fallback_ok = tools_fallback.is_dir()
+           if script_name:
+               fallback_ok = fallback_ok and (tools_fallback / script_name).exists()
+           else:
+               # No script (e.g. GCU); just need tools dir to exist
+               pass
+           if fallback_ok:
+               resolved_cwd = tools_fallback
+               logger.debug(
+                   "MCP server '%s': using fallback tools dir %s",
+                   config.get("name", "?"),
+                   resolved_cwd,
+               )
+           else:
+               config["cwd"] = str(resolved_cwd)
+               return config
+
+       if not script_name:
+           # No .py script (e.g. GCU uses -m gcu.server); just set cwd
+           config["cwd"] = str(resolved_cwd)
+           return config
+
+       # For coder_tools_server, inject --project-root so writes go to the expected workspace
+       if script_name and "coder_tools" in script_name:
+           project_root = str(resolved_cwd.parent.resolve())
+           args = list(args)
+           if "--project-root" not in args:
+               args.extend(["--project-root", project_root])
+           config["args"] = args
+
+       if os.name == "nt":
+           # Windows: cwd=None avoids WinError 267; use absolute script path
+           config["cwd"] = None
+           abs_script = str((resolved_cwd / script_name).resolve())
+           args = list(config["args"])
+           args[script_idx] = abs_script
+           config["args"] = args
+       else:
+           config["cwd"] = str(resolved_cwd)
+       return config
+
    def load_mcp_config(self, config_path: Path) -> None:
        """
        Load and register MCP servers from a config file.
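A condensed, standalone re-statement of the resolution rules above, applied to a sample config. The paths and server command are hypothetical, and this sketch omits the tools-dir fallback and --project-root injection that the real method also performs:

import os
from pathlib import Path

def resolve(config: dict, base_dir: Path) -> dict:
    cfg = dict(config)
    cwd = cfg.get("cwd")
    if cfg.get("transport") != "stdio" or not cwd:
        return cfg
    resolved = Path(cwd) if Path(cwd).is_absolute() else (base_dir / cwd).resolve()
    script = next((a for a in cfg.get("args", []) if str(a).endswith(".py")), None)
    if os.name == "nt" and script:
        cfg["cwd"] = None                                   # avoid WinError 267
        cfg["args"] = [str((resolved / script).resolve())]  # absolute script path
    else:
        cfg["cwd"] = str(resolved)
    return cfg

print(resolve(
    {"transport": "stdio", "command": "python", "args": ["files_server.py"], "cwd": "tools"},
    Path("/home/user/agents/my_agent"),
))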
@@ -357,9 +454,7 @@ class ToolRegistry:
        server_list = [{"name": name, **cfg} for name, cfg in config.items()]

        for server_config in server_list:
-           cwd = server_config.get("cwd")
-           if cwd and not Path(cwd).is_absolute():
-               server_config["cwd"] = str((base_dir / cwd).resolve())
+           server_config = self._resolve_mcp_server_config(server_config, base_dir)
            try:
                self.register_mcp_server(server_config)
            except Exception as e:
@@ -480,6 +575,11 @@ class ToolRegistry:

        except Exception as e:
            logger.error(f"Failed to register MCP server: {e}")
+           if "Connection closed" in str(e) and os.name == "nt":
+               logger.debug(
+                   "On Windows, check that the MCP subprocess starts (e.g. uv in PATH, "
+                   "script path correct). Worker config uses base_dir = mcp_servers.json parent."
+               )
            return 0

    def _convert_mcp_tool_to_framework_tool(self, mcp_tool: Any) -> Tool:

@@ -4,6 +4,7 @@ import asyncio
import logging

from aiohttp import web
+from aiohttp.client_exceptions import ClientConnectionResetError as _AiohttpConnReset

from framework.runtime.event_bus import EventType
from framework.server.app import resolve_session
@@ -168,8 +169,15 @@ async def handle_events(request: web.Request) -> web.StreamResponse:
                    "SSE first event: session='%s', type='%s'", session.id, data.get("type")
                )
            except TimeoutError:
-               await sse.send_keepalive()
-           except (ConnectionResetError, ConnectionError):
+               try:
+                   await sse.send_keepalive()
+               except (ConnectionResetError, ConnectionError, _AiohttpConnReset):
+                   close_reason = "client_disconnected"
+                   break
+               except Exception as exc:
+                   close_reason = f"keepalive_error: {exc}"
+                   break
+           except (ConnectionResetError, ConnectionError, _AiohttpConnReset):
                close_reason = "client_disconnected"
                break
            except Exception as exc:

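The change wraps the keepalive itself, since a write to a vanished client raises from inside the TimeoutError handler. The guarded-keepalive pattern in isolation, as a sketch with stand-in callables:

def keepalive_loop(send, is_timeout):
    """Drive keepalives until the client disappears; return the close reason."""
    while True:
        if is_timeout():
            try:
                send()
            except ConnectionError:
                return "client_disconnected"
            except Exception as exc:
                return f"keepalive_error: {exc}"
        else:
            return None  # a real event arrived; keep streaming (elided)

def send():
    raise ConnectionError("peer gone")

print(keepalive_loop(send, lambda: True))  # -> client_disconnected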
@@ -288,6 +288,60 @@ async def handle_resume(request: web.Request) -> web.Response:
    )


+async def handle_pause(request: web.Request) -> web.Response:
+    """POST /api/sessions/{session_id}/pause — pause the worker (queen stays alive).
+
+    Mirrors the queen's stop_worker() tool: cancels all active worker
+    executions, pauses timers so nothing auto-restarts, but does NOT
+    touch the queen so she can observe and react to the pause.
+    """
+    session, err = resolve_session(request)
+    if err:
+        return err
+
+    if not session.worker_runtime:
+        return web.json_response({"error": "No worker loaded in this session"}, status=503)
+
+    runtime = session.worker_runtime
+    cancelled = []
+
+    for graph_id in runtime.list_graphs():
+        reg = runtime.get_graph_registration(graph_id)
+        if reg is None:
+            continue
+        for _ep_id, stream in reg.streams.items():
+            # Signal shutdown on active nodes to abort in-flight LLM streams
+            for executor in stream._active_executors.values():
+                for node in executor.node_registry.values():
+                    if hasattr(node, "signal_shutdown"):
+                        node.signal_shutdown()
+                    if hasattr(node, "cancel_current_turn"):
+                        node.cancel_current_turn()
+
+            for exec_id in list(stream.active_execution_ids):
+                try:
+                    ok = await stream.cancel_execution(exec_id)
+                    if ok:
+                        cancelled.append(exec_id)
+                except Exception:
+                    pass
+
+    # Pause timers so the next tick doesn't restart execution
+    runtime.pause_timers()
+
+    # Switch to staging (agent still loaded, ready to re-run)
+    if session.mode_state is not None:
+        await session.mode_state.switch_to_staging(source="frontend")
+
+    return web.json_response(
+        {
+            "stopped": bool(cancelled),
+            "cancelled": cancelled,
+            "timers_paused": True,
+        }
+    )


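Against a running server the new endpoint can be exercised with a plain HTTP client; the aiohttp test-client version appears in the test changes further down. Host, port, and session ID here are hypothetical:

import json
import urllib.request

req = urllib.request.Request(
    "http://localhost:8080/api/sessions/session_20250101_120000_abc/pause",
    data=b"{}",
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    body = json.load(resp)
print(body["stopped"], body["cancelled"], body["timers_paused"])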
async def handle_stop(request: web.Request) -> web.Response:
    """POST /api/sessions/{session_id}/stop — cancel a running execution.

@@ -416,7 +470,7 @@ def register_routes(app: web.Application) -> None:
    app.router.add_post("/api/sessions/{session_id}/chat", handle_chat)
    app.router.add_post("/api/sessions/{session_id}/queen-context", handle_queen_context)
    app.router.add_post("/api/sessions/{session_id}/worker-input", handle_worker_input)
-   app.router.add_post("/api/sessions/{session_id}/pause", handle_stop)
+   app.router.add_post("/api/sessions/{session_id}/pause", handle_pause)
    app.router.add_post("/api/sessions/{session_id}/resume", handle_resume)
    app.router.add_post("/api/sessions/{session_id}/stop", handle_stop)
    app.router.add_post("/api/sessions/{session_id}/cancel-queen", handle_cancel_queen)

@@ -124,6 +124,9 @@ async def handle_create_session(request: web.Request) -> web.Response:
    session_id = body.get("session_id")
    model = body.get("model")
    initial_prompt = body.get("initial_prompt")
+   # When set, the queen writes conversations to this existing session's directory
+   # so the full history accumulates in one place across server restarts.
+   queen_resume_from = body.get("queen_resume_from")

    if agent_path:
        try:
@@ -139,6 +142,7 @@ async def handle_create_session(request: web.Request) -> web.Response:
            agent_id=agent_id,
            model=model,
            initial_prompt=initial_prompt,
+           queen_resume_from=queen_resume_from,
        )
    else:
        # Queen-only session
@@ -146,6 +150,7 @@ async def handle_create_session(request: web.Request) -> web.Response:
            session_id=session_id,
            model=model,
            initial_prompt=initial_prompt,
+           queen_resume_from=queen_resume_from,
        )
    except ValueError as e:
        msg = str(e)
@@ -179,7 +184,12 @@ async def handle_list_live_sessions(request: web.Request) -> web.Response:


async def handle_get_live_session(request: web.Request) -> web.Response:
-   """GET /api/sessions/{session_id} — get session detail."""
+   """GET /api/sessions/{session_id} — get session detail.
+
+   Falls back to cold session metadata (HTTP 200 with ``cold: true``) when the
+   session is not alive in memory but queen conversation files exist on disk.
+   This lets the frontend detect a server restart and restore message history.
+   """
    manager = _get_manager(request)
    session_id = request.match_info["session_id"]
    session = manager.get_session(session_id)
@@ -190,6 +200,10 @@ async def handle_get_live_session(request: web.Request) -> web.Response:
            {"session_id": session_id, "loading": True},
            status=202,
        )
+   # Check if conversation files survived on disk (post-restart scenario)
+   cold_info = SessionManager.get_cold_session_info(session_id)
+   if cold_info is not None:
+       return web.json_response(cold_info)
    return web.json_response(
        {"error": f"Session '{session_id}' not found"},
        status=404,
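A client now has three cases to distinguish: 202 means still loading, 200 with "cold": true means restorable from disk, and 404 means nothing survived. The shape of the cold payload, taken from get_cold_session_info further down (values are illustrative):

cold_info = {
    "session_id": "session_20250101_120000_abc",
    "cold": True,
    "live": False,
    "has_messages": True,
    "created_at": 1735732800.0,
    "agent_name": "Deep Research",
    "agent_path": "exports/deep_research",
}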
@@ -613,15 +627,17 @@ async def handle_messages(request: web.Request) -> web.Response:


async def handle_queen_messages(request: web.Request) -> web.Response:
-   """GET /api/sessions/{session_id}/queen-messages — get queen conversation."""
-   session, err = resolve_session(request)
-   if err:
-       return err
-
-   queen_dir = Path.home() / ".hive" / "queen" / "session" / session.id
+   """GET /api/sessions/{session_id}/queen-messages — get queen conversation.
+
+   Reads directly from disk so it works for both live sessions and cold
+   (post-server-restart) sessions — no live session required.
+   """
+   session_id = request.match_info["session_id"]
+
+   queen_dir = Path.home() / ".hive" / "queen" / "session" / session_id
    convs_dir = queen_dir / "conversations"
    if not convs_dir.exists():
-       return web.json_response({"messages": []})
+       return web.json_response({"messages": [], "session_id": session_id})

    all_messages: list[dict] = []
    for node_dir in convs_dir.iterdir():
@@ -654,7 +670,58 @@ async def handle_queen_messages(request: web.Request) -> web.Response:
        and not (m["role"] == "assistant" and m.get("tool_calls"))
    ]

-   return web.json_response({"messages": all_messages})
+   return web.json_response({"messages": all_messages, "session_id": session_id})

+async def handle_session_history(request: web.Request) -> web.Response:
+    """GET /api/sessions/history — all queen sessions on disk (live + cold).
+
+    Returns every session directory under ~/.hive/queen/session/, newest first.
+    Live sessions have ``live: true, cold: false``; sessions that survived a
+    server restart have ``live: false, cold: true``.
+    """
+    manager = _get_manager(request)
+    live_sessions = {s.id: s for s in manager.list_sessions()}
+
+    disk_sessions = SessionManager.list_cold_sessions()
+    for s in disk_sessions:
+        if s["session_id"] in live_sessions:
+            live = live_sessions[s["session_id"]]
+            s["cold"] = False
+            s["live"] = True
+            # Fill in agent_name from live memory if meta.json wasn't written yet
+            if not s.get("agent_name") and live.worker_info:
+                s["agent_name"] = live.worker_info.name
+            if not s.get("agent_path") and live.worker_path:
+                s["agent_path"] = str(live.worker_path)
+
+    return web.json_response({"sessions": disk_sessions})

+async def handle_delete_history_session(request: web.Request) -> web.Response:
+    """DELETE /api/sessions/history/{session_id} — permanently remove a session.
+
+    Stops the live session (if still running) and deletes the queen session
+    directory from disk at ~/.hive/queen/session/{session_id}/.
+    This is the frontend 'delete from history' action.
+    """
+    manager = _get_manager(request)
+    session_id = request.match_info["session_id"]
+
+    # Stop the live session if it exists (best-effort)
+    if manager.get_session(session_id):
+        await manager.stop_session(session_id)
+
+    # Delete the queen session directory from disk
+    queen_session_dir = Path.home() / ".hive" / "queen" / "session" / session_id
+    if queen_session_dir.exists() and queen_session_dir.is_dir():
+        try:
+            shutil.rmtree(queen_session_dir)
+        except OSError as e:
+            logger.warning("Failed to delete session directory %s: %s", queen_session_dir, e)
+            return web.json_response({"error": f"Failed to delete session: {e}"}, status=500)
+
+    return web.json_response({"deleted": session_id})


# ------------------------------------------------------------------
@@ -703,6 +770,9 @@ def register_routes(app: web.Application) -> None:
    # Session lifecycle
    app.router.add_post("/api/sessions", handle_create_session)
    app.router.add_get("/api/sessions", handle_list_live_sessions)
+   # history must be registered before {session_id} so it takes priority
+   app.router.add_get("/api/sessions/history", handle_session_history)
+   app.router.add_delete("/api/sessions/history/{session_id}", handle_delete_history_session)
    app.router.add_get("/api/sessions/{session_id}", handle_get_live_session)
    app.router.add_delete("/api/sessions/{session_id}", handle_stop_session)


@@ -45,6 +45,11 @@ class Session:
    # Judge (active when worker is loaded)
    judge_task: asyncio.Task | None = None
    escalation_sub: str | None = None
+   # Session directory resumption:
+   # When set, _start_queen writes queen conversations to this existing session's
+   # directory instead of creating a new one. This lets cold-restores accumulate
+   # all messages in the original session folder so history is never fragmented.
+   queen_resume_from: str | None = None


class SessionManager:
@@ -114,18 +119,25 @@ class SessionManager:
        session_id: str | None = None,
        model: str | None = None,
        initial_prompt: str | None = None,
+       queen_resume_from: str | None = None,
    ) -> Session:
        """Create a new session with a queen but no worker.

        The queen starts immediately with MCP coding tools.
        A worker can be loaded later via load_worker().
+       When ``queen_resume_from`` is set the queen writes conversation messages
+       to that existing session's directory instead of creating a new one.
+       This preserves full conversation history across server restarts.
        """
        session = await self._create_session_core(session_id=session_id, model=model)
+       session.queen_resume_from = queen_resume_from

        # Start queen immediately (queen-only, no worker tools yet)
        await self._start_queen(session, worker_identity=None, initial_prompt=initial_prompt)

-       logger.info("Session '%s' created (queen-only)", session.id)
+       logger.info(
+           "Session '%s' created (queen-only, resume_from=%s)",
+           session.id,
+           queen_resume_from,
+       )
        return session

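Resuming a cold session end to end is then a single request: create a new live session but point queen storage at the old directory, so history keeps accumulating under the original session folder. A sketch (IDs, port, and agent path are hypothetical):

import json
import urllib.request

payload = json.dumps({
    "agent_path": "exports/deep_research",        # optional; omit for queen-only
    "queen_resume_from": "session_20250101_120000_abc",
}).encode()
req = urllib.request.Request(
    "http://localhost:8080/api/sessions",
    data=payload,
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))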
    async def create_session_with_worker(
@@ -134,15 +146,12 @@ class SessionManager:
        agent_id: str | None = None,
        model: str | None = None,
        initial_prompt: str | None = None,
+       queen_resume_from: str | None = None,
    ) -> Session:
        """Create a session and load a worker in one step.

        Backward-compatible with the old POST /api/agents flow.
        Loads the worker FIRST so the queen starts with full lifecycle
        and monitoring tools available.

-       The session gets an auto-generated unique ID. The agent name
-       becomes the worker_id (used by the frontend as backendAgentId).
+       When ``queen_resume_from`` is set the queen writes conversation messages
+       to that existing session's directory instead of creating a new one.
        """
        from framework.tools.queen_lifecycle_tools import build_worker_profile

@@ -151,6 +160,7 @@ class SessionManager:

        # Auto-generate session ID (not the agent name)
        session = await self._create_session_core(model=model)
+       session.queen_resume_from = queen_resume_from
        try:
            # Load worker FIRST (before queen) so queen gets full tools
            await self._load_worker_core(
@@ -170,10 +180,6 @@ class SessionManager:
                session, worker_identity=worker_identity, initial_prompt=initial_prompt
            )

-           # Health judge disabled for simplicity.
-           # if agent_path.name != "hive_coder" and session.worker_runtime:
-           #     await self._start_judge(session, session.runner._storage_path)
-
        except Exception:
            # If anything fails, tear down the session
            await self.stop_session(session.id)
@@ -401,7 +407,12 @@ class SessionManager:
        worker_identity: str | None,
        initial_prompt: str | None = None,
    ) -> None:
-       """Start the queen executor for a session."""
+       """Start the queen executor for a session.
+
+       When ``session.queen_resume_from`` is set, queen conversation messages
+       are written to the ORIGINAL session's directory so the full conversation
+       history accumulates in one place across server restarts.
+       """
        from framework.agents.hive_coder.agent import (
            queen_goal,
            queen_graph as _queen_graph,
@@ -411,9 +422,41 @@ class SessionManager:
        from framework.runtime.core import Runtime

        hive_home = Path.home() / ".hive"
-       queen_dir = hive_home / "queen" / "session" / session.id
+
+       # Determine which session directory to use for queen storage.
+       # When queen_resume_from is set we write to the ORIGINAL session's
+       # directory so that all messages accumulate in one place.
+       storage_session_id = session.queen_resume_from or session.id
+       queen_dir = hive_home / "queen" / "session" / storage_session_id
        queen_dir.mkdir(parents=True, exist_ok=True)

+       # Always write/update session metadata so history sidebar has correct
+       # agent name, path, and last-active timestamp (important so the original
+       # session directory sorts as "most recent" after a cold-restore resume).
+       _meta_path = queen_dir / "meta.json"
+       try:
+           _agent_name = (
+               session.worker_info.name
+               if session.worker_info
+               else (
+                   str(session.worker_path.name).replace("_", " ").title()
+                   if session.worker_path
+                   else None
+               )
+           )
+           _meta_path.write_text(
+               json.dumps(
+                   {
+                       "agent_name": _agent_name,
+                       "agent_path": str(session.worker_path) if session.worker_path else None,
+                       "created_at": time.time(),
+                   }
+               ),
+               encoding="utf-8",
+           )
+       except OSError:
+           pass
+
        # Register MCP coding tools
        queen_registry = ToolRegistry()
        import framework.agents.hive_coder as _hive_coder_pkg
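A sketch of the meta.json round-trip that the history endpoints rely on. The real location is ~/.hive/queen/session/{session_id}/meta.json; a temp dir is used here so the snippet has no side effects, and the values are illustrative:

import json
import tempfile
import time
from pathlib import Path

queen_dir = Path(tempfile.mkdtemp()) / "session_demo"
queen_dir.mkdir(parents=True, exist_ok=True)
meta_path = queen_dir / "meta.json"
meta_path.write_text(
    json.dumps({
        "agent_name": "Deep Research",
        "agent_path": "exports/deep_research",
        "created_at": time.time(),
    }),
    encoding="utf-8",
)
print(json.loads(meta_path.read_text(encoding="utf-8"))["agent_name"])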
@@ -774,6 +817,166 @@ class SessionManager:
    def list_sessions(self) -> list[Session]:
        return list(self._sessions.values())

+   # ------------------------------------------------------------------
+   # Cold session helpers (disk-only, no live runtime required)
+   # ------------------------------------------------------------------
+
+   @staticmethod
+   def get_cold_session_info(session_id: str) -> dict | None:
+       """Return disk metadata for a session that is no longer live in memory.
+
+       Checks whether queen conversation files exist at
+       ~/.hive/queen/session/{session_id}/conversations/. Returns None when
+       no data is found so callers can fall through to a 404.
+       """
+       queen_dir = Path.home() / ".hive" / "queen" / "session" / session_id
+       convs_dir = queen_dir / "conversations"
+       if not convs_dir.exists():
+           return None
+
+       # Check whether any message part files are actually present
+       has_messages = False
+       try:
+           for node_dir in convs_dir.iterdir():
+               if not node_dir.is_dir():
+                   continue
+               parts_dir = node_dir / "parts"
+               if parts_dir.exists() and any(f.suffix == ".json" for f in parts_dir.iterdir()):
+                   has_messages = True
+                   break
+       except OSError:
+           pass
+
+       try:
+           created_at = queen_dir.stat().st_ctime
+       except OSError:
+           created_at = 0.0
+
+       # Read extra metadata written at session start
+       agent_name: str | None = None
+       agent_path: str | None = None
+       meta_path = queen_dir / "meta.json"
+       if meta_path.exists():
+           try:
+               meta = json.loads(meta_path.read_text(encoding="utf-8"))
+               agent_name = meta.get("agent_name")
+               agent_path = meta.get("agent_path")
+               created_at = meta.get("created_at") or created_at
+           except (json.JSONDecodeError, OSError):
+               pass
+
+       return {
+           "session_id": session_id,
+           "cold": True,
+           "live": False,
+           "has_messages": has_messages,
+           "created_at": created_at,
+           "agent_name": agent_name,
+           "agent_path": agent_path,
+       }

+   @staticmethod
+   def list_cold_sessions() -> list[dict]:
+       """Return metadata for every queen session directory on disk, newest first."""
+       queen_sessions_dir = Path.home() / ".hive" / "queen" / "session"
+       if not queen_sessions_dir.exists():
+           return []
+
+       results: list[dict] = []
+       try:
+           entries = sorted(
+               queen_sessions_dir.iterdir(),
+               key=lambda p: p.stat().st_mtime,
+               reverse=True,
+           )
+       except OSError:
+           return []
+
+       for d in entries:
+           if not d.is_dir():
+               continue
+           try:
+               created_at = d.stat().st_ctime
+           except OSError:
+               created_at = 0.0
+           agent_name: str | None = None
+           agent_path: str | None = None
+           meta_path = d / "meta.json"
+           if meta_path.exists():
+               try:
+                   meta = json.loads(meta_path.read_text(encoding="utf-8"))
+                   agent_name = meta.get("agent_name")
+                   agent_path = meta.get("agent_path")
+                   created_at = meta.get("created_at") or created_at
+               except (json.JSONDecodeError, OSError):
+                   pass
+
+           # Build a quick preview of the last human/assistant exchange.
+           # We read all conversation parts, filter to client-facing messages,
+           # and return the last assistant message content as a snippet.
+           last_message: str | None = None
+           message_count: int = 0
+           convs_dir = d / "conversations"
+           if convs_dir.exists():
+               try:
+                   all_parts: list[dict] = []
+                   for node_dir in convs_dir.iterdir():
+                       if not node_dir.is_dir():
+                           continue
+                       parts_dir = node_dir / "parts"
+                       if not parts_dir.exists():
+                           continue
+                       for part_file in sorted(parts_dir.iterdir()):
+                           if part_file.suffix != ".json":
+                               continue
+                           try:
+                               part = json.loads(part_file.read_text(encoding="utf-8"))
+                               part.setdefault("created_at", part_file.stat().st_mtime)
+                               all_parts.append(part)
+                           except (json.JSONDecodeError, OSError):
+                               continue
+                   # Filter to client-facing messages only
+                   client_msgs = [
+                       p
+                       for p in all_parts
+                       if not p.get("is_transition_marker")
+                       and p.get("role") != "tool"
+                       and not (p.get("role") == "assistant" and p.get("tool_calls"))
+                   ]
+                   client_msgs.sort(key=lambda m: m.get("created_at", m.get("seq", 0)))
+                   message_count = len(client_msgs)
+                   # Last assistant message as preview snippet
+                   for msg in reversed(client_msgs):
+                       content = msg.get("content") or ""
+                       if isinstance(content, list):
+                           # Anthropic-style content blocks
+                           content = " ".join(
+                               b.get("text", "")
+                               for b in content
+                               if isinstance(b, dict) and b.get("type") == "text"
+                           )
+                       if content and msg.get("role") == "assistant":
+                           last_message = content[:120].strip()
+                           break
+               except OSError:
+                   pass
+
+           results.append(
+               {
+                   "session_id": d.name,
+                   "cold": True,  # caller overrides for live sessions
+                   "live": False,
+                   "has_messages": convs_dir.exists() and message_count > 0,
+                   "created_at": created_at,
+                   "agent_name": agent_name,
+                   "agent_path": agent_path,
+                   "last_message": last_message,
+                   "message_count": message_count,
+               }
+           )
+
+       return results

    async def shutdown_all(self) -> None:
        """Gracefully stop all sessions. Called on server shutdown."""
        session_ids = list(self._sessions.keys())

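The preview step above must handle both plain-string message content and Anthropic-style content blocks. The flattening logic in isolation, as a runnable sketch grounded in the code above:

def flatten(content) -> str:
    if isinstance(content, list):
        # Anthropic-style content blocks: join only the text blocks
        return " ".join(
            b.get("text", "")
            for b in content
            if isinstance(b, dict) and b.get("type") == "text"
        )
    return content or ""

print(flatten("plain string"))
print(flatten([{"type": "text", "text": "hello"}, {"type": "tool_use"}]))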
@@ -74,6 +74,7 @@ class MockStream:
    is_awaiting_input: bool = False
    _execution_tasks: dict = field(default_factory=dict)
    _active_executors: dict = field(default_factory=dict)
+   active_execution_ids: set = field(default_factory=set)

    async def cancel_execution(self, execution_id: str) -> bool:
        return execution_id in self._execution_tasks
@@ -117,6 +118,9 @@ class MockRuntime:
    async def inject_input(self, node_id, content, graph_id=None, *, is_client_input=False):
        return True

+   def pause_timers(self):
+       pass
+
    async def get_goal_progress(self):
        return {"progress": 0.5, "criteria": []}

@@ -537,18 +541,8 @@ class TestExecution:
        assert resp.status == 400

    @pytest.mark.asyncio
-   async def test_pause_not_found(self):
-       session = _make_session()
-       app = _make_app_with_session(session)
-       async with TestClient(TestServer(app)) as client:
-           resp = await client.post(
-               "/api/sessions/test_agent/pause",
-               json={"execution_id": "nonexistent"},
-           )
-           assert resp.status == 404
-
-   @pytest.mark.asyncio
-   async def test_pause_missing_execution_id(self):
+   async def test_pause_no_active_executions(self):
+       """Pause with no active executions returns stopped=False."""
        session = _make_session()
        app = _make_app_with_session(session)
        async with TestClient(TestServer(app)) as client:
@@ -556,7 +550,26 @@ class TestExecution:
                "/api/sessions/test_agent/pause",
                json={},
            )
-           assert resp.status == 400
+           assert resp.status == 200
+           data = await resp.json()
+           assert data["stopped"] is False
+           assert data["cancelled"] == []
+           assert data["timers_paused"] is True
+
+   @pytest.mark.asyncio
+   async def test_pause_does_not_cancel_queen(self):
+       """Pause should stop the worker but leave the queen running."""
+       session = _make_session()
+       app = _make_app_with_session(session)
+       async with TestClient(TestServer(app)) as client:
+           resp = await client.post(
+               "/api/sessions/test_agent/pause",
+               json={},
+           )
+           assert resp.status == 200
+           # Queen's cancel_current_turn should NOT have been called
+           queen_node = session.queen_executor.node_registry["queen"]
+           queen_node.cancel_current_turn.assert_not_called()

    @pytest.mark.asyncio
    async def test_goal_progress(self):

@@ -13,12 +13,13 @@ export const sessionsApi = {
  // --- Session lifecycle ---

  /** Create a session. If agentPath is provided, loads worker in one step. */
- create: (agentPath?: string, agentId?: string, model?: string, initialPrompt?: string) =>
+ create: (agentPath?: string, agentId?: string, model?: string, initialPrompt?: string, queenResumeFrom?: string) =>
    api.post<LiveSession>("/sessions", {
      agent_path: agentPath,
      agent_id: agentId,
      model,
      initial_prompt: initialPrompt,
+     queen_resume_from: queenResumeFrom || undefined,
    }),

  /** List all active sessions. */
@@ -66,9 +67,17 @@ export const sessionsApi = {
  graphs: (sessionId: string) =>
    api.get<{ graphs: string[] }>(`/sessions/${sessionId}/graphs`),

- /** Get queen conversation history for a session. */
+ /** Get queen conversation history for a session (works for cold/post-restart sessions too). */
  queenMessages: (sessionId: string) =>
-   api.get<{ messages: Message[] }>(`/sessions/${sessionId}/queen-messages`),
+   api.get<{ messages: Message[]; session_id: string }>(`/sessions/${sessionId}/queen-messages`),

+ /** List all queen sessions on disk — live + cold (post-restart). */
+ history: () =>
+   api.get<{ sessions: Array<{ session_id: string; cold: boolean; live: boolean; has_messages: boolean; created_at: number; agent_name?: string | null; agent_path?: string | null }> }>("/sessions/history"),
+
+ /** Permanently delete a history session (stops live session + removes disk files). */
+ deleteHistory: (sessionId: string) =>
+   api.delete<{ deleted: string }>(`/sessions/history/${sessionId}`),

  // --- Worker session browsing (persisted execution runs) ---


@@ -21,6 +21,8 @@ export interface LiveSession {
export interface LiveSessionDetail extends LiveSession {
  entry_points?: EntryPoint[];
  graphs?: string[];
+ /** True when the session exists on disk but is not live (server restarted). */
+ cold?: boolean;
}

export interface EntryPoint {

@@ -0,0 +1,431 @@
/**
 * HistorySidebar — persistent ChatGPT-style session history sidebar.
 *
 * Shown on both the Home page and the Workspace. Clicking a session fires
 * `onOpen(sessionId, agentPath)` so the caller decides what to do (navigate
 * to workspace on Home, open/switch tab on Workspace).
 *
 * Labels (user-visible names) are stored purely in localStorage — backend
 * session IDs are never touched.
 *
 * Session deduplication: the backend may have multiple session directories
 * for the same agent (cold restarts create new directories). We deduplicate
 * by agent_path and show only the most-recent session per agent so the
 * history list stays clean.
 */

import { useState, useEffect, useRef, useCallback } from "react";
import { ChevronLeft, ChevronRight, Clock, Bot, Loader2, MoreHorizontal, Pencil, Trash2, Check, X } from "lucide-react";
import { sessionsApi } from "@/api/sessions";

// ── Types ─────────────────────────────────────────────────────────────────────

export type HistorySession = {
  session_id: string;
  cold: boolean;
  live: boolean;
  has_messages: boolean;
  created_at: number;
  agent_name?: string | null;
  agent_path?: string | null;
  /** Snippet of the last assistant message — for sidebar preview. */
  last_message?: string | null;
  /** Total number of client-facing messages in this session. */
  message_count?: number;
};

const LABEL_STORE_KEY = "hive:history-labels";

function loadLabelStore(): Record<string, string> {
  try {
    const raw = localStorage.getItem(LABEL_STORE_KEY);
    return raw ? (JSON.parse(raw) as Record<string, string>) : {};
  } catch {
    return {};
  }
}

function saveLabelStore(store: Record<string, string>) {
  try {
    localStorage.setItem(LABEL_STORE_KEY, JSON.stringify(store));
  } catch { }
}

// ── Helpers ───────────────────────────────────────────────────────────────────

function defaultLabel(s: HistorySession, index: number): string {
  if (s.agent_name) return s.agent_name;
  if (s.agent_path) {
    const base = s.agent_path.replace(/\/$/, "").split("/").pop() || s.agent_path;
    return base
      .split("_")
      .map((w) => w.charAt(0).toUpperCase() + w.slice(1))
      .join(" ");
  }
  return `New Agent${index > 0 ? ` #${index + 1}` : ""}`;
}

function formatDateTime(createdAt: number, sessionId: string): string {
  // Prefer timestamp embedded in session_id: session_YYYYMMDD_HHMMSS_xxx
  const match = sessionId.match(/^session_(\d{4})(\d{2})(\d{2})_(\d{2})(\d{2})(\d{2})/);
  const d = match
    ? new Date(+match[1], +match[2] - 1, +match[3], +match[4], +match[5], +match[6])
    : new Date(createdAt * 1000);
  return d.toLocaleString(undefined, {
    month: "short",
    day: "numeric",
    hour: "2-digit",
    minute: "2-digit",
  });
}

/**
 * Deduplicate sessions by agent_path — keep only the most recent session
 * per agent. Sessions are already sorted newest-first by the backend.
 * Sessions without an agent_path (new-agent / queen-only) are kept individually.
 */
function deduplicateByAgent(sessions: HistorySession[]): HistorySession[] {
  const seen = new Set<string>();
  const result: HistorySession[] = [];
  for (const s of sessions) {
    // Group key: use agent_path when present, otherwise use session_id (unique)
    const key = s.agent_path ? s.agent_path.replace(/\/$/, "") : `__no_agent__${s.session_id}`;
    if (!seen.has(key)) {
      seen.add(key);
      result.push(s);
    }
    // Additional sessions for the same agent are silently skipped
  }
  return result;
}

function groupByDate(sessions: HistorySession[]): { label: string; items: HistorySession[] }[] {
  const now = new Date();
  const today = new Date(now.getFullYear(), now.getMonth(), now.getDate()).getTime();
  const yesterday = today - 86_400_000;
  const weekAgo = today - 7 * 86_400_000;
  const groups: { label: string; items: HistorySession[] }[] = [
    { label: "Today", items: [] },
    { label: "Yesterday", items: [] },
    { label: "Last 7 days", items: [] },
    { label: "Older", items: [] },
  ];
  for (const s of sessions) {
    const d = new Date(s.created_at * 1000);
    const dayTs = new Date(d.getFullYear(), d.getMonth(), d.getDate()).getTime();
    if (dayTs >= today) groups[0].items.push(s);
    else if (dayTs >= yesterday) groups[1].items.push(s);
    else if (dayTs >= weekAgo) groups[2].items.push(s);
    else groups[3].items.push(s);
  }
  return groups.filter((g) => g.items.length > 0);
}

// ── Row component ─────────────────────────────────────────────────────────────

interface RowProps {
  session: HistorySession;
  label: string;
  index: number;
  isActive: boolean;
  isLive: boolean;
  onOpen: () => void;
  onRename: (newLabel: string) => void;
  onDelete: () => void;
}

function HistoryRow({ session: s, label, isActive, isLive, onOpen, onRename, onDelete }: RowProps) {
  const [menuOpen, setMenuOpen] = useState(false);
  const [renaming, setRenaming] = useState(false);
  const [draftLabel, setDraftLabel] = useState(label);
  const menuRef = useRef<HTMLDivElement>(null);
  const inputRef = useRef<HTMLInputElement>(null);

  useEffect(() => {
    if (!menuOpen) return;
    const handler = (e: MouseEvent) => {
      if (menuRef.current && !menuRef.current.contains(e.target as Node)) setMenuOpen(false);
    };
    document.addEventListener("mousedown", handler);
    return () => document.removeEventListener("mousedown", handler);
  }, [menuOpen]);

  useEffect(() => {
    if (renaming) {
      setDraftLabel(label);
      requestAnimationFrame(() => inputRef.current?.select());
    }
  }, [renaming, label]);

  const commitRename = () => {
    const trimmed = draftLabel.trim();
    if (trimmed) onRename(trimmed);
    setRenaming(false);
  };

  const dateStr = formatDateTime(s.created_at, s.session_id);

  return (
    <div
      className={`group relative flex items-start gap-2 px-3 py-2 cursor-pointer transition-colors ${isActive
        ? "bg-primary/10 border-l-2 border-primary"
        : "border-l-2 border-transparent hover:bg-muted/40"
        }`}
      onClick={() => { if (!renaming) onOpen(); }}
    >
      <Bot className="w-3.5 h-3.5 flex-shrink-0 mt-[3px] text-muted-foreground/40 group-hover:text-muted-foreground/70 transition-colors" />

      <div className="min-w-0 flex-1">
        {renaming ? (
          <div className="flex items-center gap-1" onClick={(e) => e.stopPropagation()}>
            <input
              ref={inputRef}
              value={draftLabel}
              onChange={(e) => setDraftLabel(e.target.value)}
              onKeyDown={(e) => {
                if (e.key === "Enter") commitRename();
                if (e.key === "Escape") setRenaming(false);
              }}
              className="flex-1 min-w-0 text-[11px] bg-muted/60 border border-border/50 rounded px-1.5 py-0.5 text-foreground focus:outline-none focus:ring-1 focus:ring-primary/40"
            />
            <button onClick={commitRename} className="p-0.5 text-primary hover:text-primary/80">
              <Check className="w-3 h-3" />
            </button>
            <button onClick={() => setRenaming(false)} className="p-0.5 text-muted-foreground hover:text-foreground">
              <X className="w-3 h-3" />
            </button>
          </div>
        ) : (
          <>
            <div className={`text-[11px] font-medium truncate leading-tight ${isActive ? "text-foreground" : "text-foreground/80"}`}>
              {label}
            </div>
            {/* Message preview — most recent assistant message */}
            {s.last_message && (
              <div className="text-[10px] text-muted-foreground/50 mt-0.5 leading-tight line-clamp-2 break-words">
                {s.last_message}
              </div>
            )}
            <div className="flex items-center gap-1.5 mt-0.5">
              <div className="text-[10px] text-muted-foreground/40">{dateStr}</div>
              {(s.message_count ?? 0) > 0 && (
                <span className="text-[9px] text-muted-foreground/30">· {s.message_count} msgs</span>
              )}
            </div>
            {isLive && (
              <span className="text-[9px] text-emerald-500/80 font-semibold uppercase tracking-wide">live</span>
            )}
          </>
        )}
      </div>

      {/* 3-dot button — visible on row hover */}
      {!renaming && (
        <div className="relative flex-shrink-0" ref={menuRef} onClick={(e) => e.stopPropagation()}>
          <button
            onClick={() => setMenuOpen((o) => !o)}
            className={`p-0.5 rounded transition-colors text-muted-foreground/40 hover:text-foreground hover:bg-muted/60 ${menuOpen ? "opacity-100" : "opacity-0 group-hover:opacity-100"
              }`}
            title="More options"
          >
            <MoreHorizontal className="w-3.5 h-3.5" />
          </button>

          {menuOpen && (
            <div className="absolute right-0 top-5 z-50 w-36 rounded-lg border border-border/60 bg-card shadow-xl shadow-black/30 overflow-hidden py-1">
              <button
                onClick={() => { setMenuOpen(false); setRenaming(true); }}
                className="flex items-center gap-2 w-full px-3 py-1.5 text-xs text-foreground hover:bg-muted/60 transition-colors"
              >
                <Pencil className="w-3 h-3 text-muted-foreground" />
                Rename
              </button>
              <button
                onClick={() => { setMenuOpen(false); onDelete(); }}
                className="flex items-center gap-2 w-full px-3 py-1.5 text-xs text-destructive hover:bg-destructive/10 transition-colors"
              >
                <Trash2 className="w-3 h-3" />
                Delete
              </button>
            </div>
          )}
        </div>
      )}
    </div>
  );
}

// ── Main sidebar component ────────────────────────────────────────────────────

interface HistorySidebarProps {
  /** Called when a history session is clicked. */
  onOpen: (sessionId: string, agentPath?: string | null, agentName?: string | null) => void;
  /** session_ids of tabs already open (for highlighting). */
  openSessionIds?: string[];
  /** session_id of the currently active/viewed session (live backend ID). */
  activeSessionId?: string | null;
  /** historySourceId of the active session — the original cold session ID before revive,
   * stays stable even after the backend creates a new live session on cold-restore. */
  activeHistorySourceId?: string | null;
  /** Increment this to force a refresh of the session list. */
  refreshKey?: number;
}

export default function HistorySidebar({ onOpen, openSessionIds = [], activeSessionId, activeHistorySourceId, refreshKey }: HistorySidebarProps) {
  const [collapsed, setCollapsed] = useState(false);
  // Raw sessions from the backend (may contain duplicates per agent)
  const [rawSessions, setRawSessions] = useState<HistorySession[]>([]);
  const [loading, setLoading] = useState(false);
  const [labels, setLabels] = useState<Record<string, string>>(loadLabelStore);

  const refresh = useCallback(() => {
    setLoading(true);
    sessionsApi
      .history()
      .then((r) => setRawSessions(r.sessions))
      .catch(() => { })
      .finally(() => setLoading(false));
  }, []);

  // Refresh on mount and whenever the parent forces a refresh
  useEffect(() => {
    refresh();
  }, [refresh, refreshKey]);

  // Refresh when the browser tab regains visibility
  useEffect(() => {
    const handleVisibility = () => {
      if (document.visibilityState === "visible") refresh();
    };
    document.addEventListener("visibilitychange", handleVisibility);
    return () => document.removeEventListener("visibilitychange", handleVisibility);
  }, [refresh]);

  const handleRename = (sessionId: string, newLabel: string) => {
    const next = { ...labels, [sessionId]: newLabel };
    setLabels(next);
    saveLabelStore(next);
  };

  const handleDelete = (sessionId: string) => {
    // Optimistically remove from in-memory list immediately
    setRawSessions((prev) => prev.filter((s) => s.session_id !== sessionId));
    const next = { ...labels };
    delete next[sessionId];
    setLabels(next);
    saveLabelStore(next);

    // Permanently delete session files from disk (fire-and-forget)
    sessionsApi.deleteHistory(sessionId).catch(() => {
      // Soft failure — the entry is already removed from the UI.
      // The file may linger on disk, but won't appear in the next refresh
      // because it's been removed from rawSessions.
    });
  };

  // ── Deduplicate & render ────────────────────────────────────────────────────

  // Deduplicate: show only the most-recent session per agent_path.
  // rawSessions is already sorted newest-first by the backend.
  const sessions = deduplicateByAgent(rawSessions);
  const groups = groupByDate(sessions);

  return (
    <div
      className={`flex-shrink-0 flex flex-col bg-card/20 border-r border-border/30 transition-[width] duration-200 overflow-hidden ${collapsed ? "w-[44px]" : "w-[220px]"
        }`}
    >
      {/* Header */}
      <div
        className={`flex items-center border-b border-border/20 flex-shrink-0 h-10 ${collapsed ? "justify-center" : "px-3 gap-2"
          }`}
      >
        {!collapsed && (
          <span className="text-[11px] font-semibold text-muted-foreground/60 uppercase tracking-wider flex-1">
            History
          </span>
        )}
        <button
          onClick={() => setCollapsed((o) => !o)}
          className="p-1 rounded-md text-muted-foreground hover:text-foreground hover:bg-muted/50 transition-colors flex-shrink-0"
          title={collapsed ? "Expand history" : "Collapse history"}
        >
          {collapsed ? (
            <ChevronRight className="w-3.5 h-3.5" />
          ) : (
            <ChevronLeft className="w-3.5 h-3.5" />
          )}
        </button>
      </div>

      {/* Expanded list */}
      {!collapsed && (
        <div className="flex-1 overflow-y-auto min-h-0">
          {loading ? (
            <div className="flex items-center justify-center py-8">
              <Loader2 className="w-4 h-4 animate-spin text-muted-foreground/40" />
            </div>
          ) : sessions.length === 0 ? (
            <div className="px-4 py-12 text-center text-[11px] text-muted-foreground/40 leading-relaxed">
              No previous
              <br />
              sessions yet
            </div>
          ) : (
            groups.map(({ label: groupLabel, items }) => (
              <div key={groupLabel}>
                <p className="px-3 pt-4 pb-1 text-[10px] font-semibold text-muted-foreground/35 uppercase tracking-wider">
                  {groupLabel}
                </p>
                {items.map((s, idx) => {
                  const customLabel = labels[s.session_id];
                  const computedLabel = customLabel || defaultLabel(s, idx);
                  const isActive =
                    s.session_id === activeSessionId ||
                    s.session_id === activeHistorySourceId;
                  // Mark as live if the backend flagged it OR if it's currently open in a tab
                  const isLive = s.live || openSessionIds.includes(s.session_id);
                  return (
                    <HistoryRow
                      key={s.session_id}
                      session={s}
                      label={computedLabel}
                      index={idx}
                      isActive={isActive}
                      isLive={isLive}
                      onOpen={() => onOpen(s.session_id, s.agent_path, s.agent_name)}
                      onRename={(nl) => handleRename(s.session_id, nl)}
                      onDelete={() => handleDelete(s.session_id)}
                    />
                  );
                })}
              </div>
            ))
          )}
        </div>
      )}

      {/* Collapsed icon strip */}
      {collapsed && (
        <div className="flex-1 overflow-y-auto min-h-0 flex flex-col items-center py-2 gap-0.5">
          {sessions.slice(0, 30).map((s) => {
            const isLive = s.live || openSessionIds.includes(s.session_id);
            return (
              <button
                key={s.session_id}
                onClick={() => { setCollapsed(false); onOpen(s.session_id, s.agent_path, s.agent_name); }}
                className="w-7 h-7 rounded-md flex items-center justify-center text-muted-foreground/40 hover:text-foreground hover:bg-muted/50 transition-colors relative"
                title={labels[s.session_id] || defaultLabel(s, 0)}
              >
                <Clock className="w-3 h-3" />
                {isLive && (
                  <span className="absolute top-0.5 right-0.5 w-1.5 h-1.5 rounded-full bg-emerald-500" />
                )}
              </button>
            );
          })}
        </div>
      )}
    </div>
  );
}
@@ -9,7 +9,7 @@ import type { GraphNode } from "@/components/AgentGraph";
export const TAB_STORAGE_KEY = "hive:workspace-tabs";

export interface PersistedTabState {
- tabs: Array<{ id: string; agentType: string; label: string; backendSessionId?: string }>;
+ tabs: Array<{ id: string; agentType: string; tabKey?: string; label: string; backendSessionId?: string; historySourceId?: string }>;
  activeSessionByAgent: Record<string, string>;
  activeWorker: string;
  sessions?: Record<string, { messages: ChatMessage[]; graphNodes: GraphNode[] }>;

@@ -1,6 +1,6 @@
import { useState, useEffect, useRef } from "react";
import { useNavigate } from "react-router-dom";
-import { Crown, Mail, Briefcase, Shield, Search, Newspaper, ArrowRight, Hexagon, Send, Bot } from "lucide-react";
+import { Crown, Mail, Briefcase, Shield, Search, Newspaper, ArrowRight, Hexagon, Send, Bot, Radar, Reply, DollarSign, MapPin, Calendar, UserPlus, Twitter } from "lucide-react";
import TopBar from "@/components/TopBar";
import type { LucideIcon } from "lucide-react";
import { agentsApi } from "@/api/agents";
@@ -14,6 +14,13 @@ const AGENT_ICONS: Record<string, LucideIcon> = {
  vulnerability_assessment: Shield,
  deep_research_agent: Search,
  tech_news_reporter: Newspaper,
+ competitive_intel_agent: Radar,
+ email_reply_agent: Reply,
+ hubspot_revenue_leak_detector: DollarSign,
+ local_business_extractor: MapPin,
+ meeting_scheduler: Calendar,
+ sdr_agent: UserPlus,
+ twitter_news_agent: Twitter,
};

const AGENT_COLORS: Record<string, string> = {
@@ -22,6 +29,13 @@ const AGENT_COLORS: Record<string, string> = {
  vulnerability_assessment: "hsl(15,70%,52%)",
  deep_research_agent: "hsl(210,70%,55%)",
  tech_news_reporter: "hsl(270,60%,55%)",
+ competitive_intel_agent: "hsl(190,70%,45%)",
+ email_reply_agent: "hsl(45,80%,55%)",
+ hubspot_revenue_leak_detector: "hsl(145,60%,42%)",
+ local_business_extractor: "hsl(350,65%,55%)",
+ meeting_scheduler: "hsl(220,65%,55%)",
+ sdr_agent: "hsl(165,55%,45%)",
+ twitter_news_agent: "hsl(200,85%,55%)",
};

function agentSlug(path: string): string {

@@ -41,11 +41,11 @@ export default function MyAgents() {
  const idleCount = agents.length - activeCount;

  return (
-   <div className="min-h-screen bg-background flex flex-col">
+   <div className="h-screen bg-background flex flex-col overflow-hidden">
      <TopBar />

      {/* Content */}
-     <div className="flex-1 p-6 md:p-10 max-w-5xl mx-auto w-full">
+     <div className="flex-1 p-6 md:p-10 max-w-5xl mx-auto w-full overflow-y-auto">
        <div className="flex items-center justify-between mb-8">
          <div>
            <h1 className="text-xl font-semibold text-foreground">My Agents</h1>

@@ -8,7 +8,6 @@ import TopBar from "@/components/TopBar";
|
||||
import { TAB_STORAGE_KEY, loadPersistedTabs, savePersistedTabs, type PersistedTabState } from "@/lib/tab-persistence";
|
||||
import NodeDetailPanel from "@/components/NodeDetailPanel";
|
||||
import CredentialsModal, { type Credential, createFreshCredentials, cloneCredentials, allRequiredCredentialsMet, clearCredentialCache } from "@/components/CredentialsModal";
|
||||
|
||||
import { agentsApi } from "@/api/agents";
|
||||
import { executionApi } from "@/api/execution";
|
||||
import { graphsApi } from "@/api/graphs";
|
||||
@@ -21,6 +20,13 @@ import { ApiError } from "@/api/client";
|
||||
|
||||
const makeId = () => Math.random().toString(36).slice(2, 9);
|
||||
|
||||
/**
|
||||
* Strip the instance suffix added when multiple tabs share the same agentType.
|
||||
* e.g. "exports/deep_research::abc123" → "exports/deep_research"
|
||||
* First-instance keys (no "::") are returned unchanged.
|
||||
*/
|
||||
const baseAgentType = (key: string): string => key.split("::")[0];
|
||||
|
||||
/** Format seconds into a compact countdown string. */
|
||||
function formatCountdown(totalSecs: number): string {
|
||||
const h = Math.floor(totalSecs / 3600);
|
||||
@@ -56,11 +62,19 @@ function TimerCountdown({ initialSeconds }: { initialSeconds: number }) {
|
||||
interface Session {
|
||||
id: string;
|
||||
agentType: string;
|
||||
/** The key used in sessionsByAgent / agentStates for this specific tab instance.
|
||||
 * Equals agentType for the first tab; equals "agentType::frontendSessionId" for
 * additional tabs opened for the same agent so each gets its own isolated slot. */
tabKey?: string;
label: string;
messages: ChatMessage[];
graphNodes: GraphNode[];
credentials: Credential[];
backendSessionId?: string;
/** The cold history session ID this tab was originally opened from (if any).
 * Used to detect "already open" even after backendSessionId is updated to a
 * new live session ID when the cold session is revived. */
historySourceId?: string;
}

function createSession(agentType: string, label: string, existingCredentials?: Credential[]): Session {
@@ -301,6 +315,9 @@ export default function Workspace() {
const rawAgent = searchParams.get("agent") || "new-agent";
const hasExplicitAgent = searchParams.has("agent");
const initialPrompt = searchParams.get("prompt") || "";
// ?session= param: when navigating from the home history sidebar, this
// carries the backendSessionId to open as a tab on mount.
const initialSessionId = searchParams.get("session") || "";

// When submitting a new prompt from home for "new-agent", use a unique key
// so each prompt gets its own tab instead of overwriting the previous one.
@@ -317,10 +334,15 @@ export default function Workspace() {

if (persisted) {
for (const tab of persisted.tabs) {
if (!initial[tab.agentType]) initial[tab.agentType] = [];
// tabKey is the actual key used in sessionsByAgent (may contain "::" suffix).
// Fall back to agentType for tabs persisted before this field was added.
const tabKey = tab.tabKey || tab.agentType;
if (!initial[tabKey]) initial[tabKey] = [];
const session = createSession(tab.agentType, tab.label);
session.id = tab.id;
session.backendSessionId = tab.backendSessionId;
session.tabKey = tab.tabKey; // restore so future persistence uses correct key
session.historySourceId = tab.historySourceId;
// Restore messages and graph from localStorage (up to 50 messages).
// If the backend session is still alive, loadAgentForType may
// append additional messages fetched from the server.
@@ -329,7 +351,7 @@ export default function Workspace() {
session.messages = cached.messages || [];
session.graphNodes = cached.graphNodes || [];
}
initial[tab.agentType].push(session);
initial[tabKey].push(session);
}
}

@@ -339,6 +361,13 @@ export default function Workspace() {
return initial;
}

// If there are already persisted tabs for this agent type, don't create
// a new one — the post-mount effect will call handleHistoryOpen if needed
// (for ?session= params coming from the home page sidebar).
if (initial[initialAgent]?.length) {
return initial;
}

// If the user submitted a new prompt from the home page, always create
// a fresh session so the prompt isn't lost into an existing session.
// initialAgent is already a unique key (e.g. "new-agent-abc123") when
@@ -352,15 +381,16 @@ export default function Workspace() {
return initial;
}

if (initial[initialAgent]?.length) {
return initial;
}

// Only create a fresh default tab when there are no persisted tabs at all.
// If ?session= was passed we intentionally do NOT create a tab here —
// handleHistoryOpen is called post-mount and does proper dedup.
if (initialAgent === "new-agent") {
initial["new-agent"] = [...(initial["new-agent"] || []), createSession("new-agent", "New Agent")];
} else {
initial[initialAgent] = [...(initial[initialAgent] || []),
createSession(initialAgent, formatAgentDisplayName(initialAgent))];
const s = createSession("new-agent", "New Agent");
initial["new-agent"] = [...(initial["new-agent"] || []), s];
} else if (!initialSessionId) {
// Only auto-create an agent tab if there's no session to restore
const s = createSession(initialAgent, formatAgentDisplayName(initialAgent));
initial[initialAgent] = [...(initial[initialAgent] || []), s];
}

return initial;
@@ -368,6 +398,17 @@ export default function Workspace() {

const [activeSessionByAgent, setActiveSessionByAgent] = useState<Record<string, string>>(() => {
const persisted = loadPersistedTabs();
// If initialSessionId maps to an already-restored tab, activate that tab
if (initialSessionId) {
for (const [tabKey, sessions] of Object.entries(sessionsByAgent)) {
const match = sessions.find(
s => s.backendSessionId === initialSessionId || s.historySourceId === initialSessionId,
);
if (match) {
return { ...(persisted?.activeSessionByAgent ?? {}), [tabKey]: match.id };
}
}
}
if (persisted) {
const restored = { ...persisted.activeSessionByAgent };
const urlSessions = sessionsByAgent[initialAgent];
@@ -387,6 +428,14 @@ export default function Workspace() {
});

const [activeWorker, setActiveWorker] = useState(() => {
// If initialSessionId maps to an already-restored tab, activate that key
if (initialSessionId) {
for (const [tabKey, sessions] of Object.entries(sessionsByAgent)) {
if (sessions.some(
s => s.backendSessionId === initialSessionId || s.historySourceId === initialSessionId,
)) return tabKey;
}
}
if (!hasExplicitAgent) {
const persisted = loadPersistedTabs();
if (persisted?.activeWorker) return persisted.activeWorker;
@@ -400,6 +449,16 @@ export default function Workspace() {
navigate("/workspace", { replace: true });
}, []);

// Post-mount: if the URL carried a ?session= param (from the home page history
// sidebar), open it via handleHistoryOpen instead of creating a tab in init state.
// This is the single canonical path — it has robust dedup (checks backendSessionId
// AND historySourceId across all in-memory tabs) and is safe to call after persisted
// state has been hydrated.
// We capture initialSessionId and related URL params in stable refs so the effect
// only fires once on mount, regardless of re-renders.
const initialSessionIdRef = useRef(initialSessionId);
const initialAgentRef = useRef(initialAgent);
const mountedRef = useRef(false);
const [credentialsOpen, setCredentialsOpen] = useState(false);
// Explicit agent path for the credentials modal — set from 424 responses
// when activeWorker doesn't match the actual agent (e.g. "new-agent" tab).
@@ -425,6 +484,12 @@ export default function Workspace() {
// arrive in the same React batch.
const turnCounterRef = useRef<Record<string, number>>({});

// Synchronous ref to suppress the queen's auto-intro SSE messages
// after a cold-restore (where we already restored the conversation from disk).
// Using a ref avoids the race condition where sessionId is set in agentState
// (opening SSE) before the suppressQueenIntro flag can be committed.
const suppressIntroRef = useRef(new Set<string>());

// --- Consolidated per-agent backend state ---
const [agentStates, setAgentStates] = useState<Record<string, AgentBackendState>>({});

@@ -448,11 +513,15 @@ export default function Workspace() {
const sessions: Record<string, { messages: ChatMessage[]; graphNodes: GraphNode[] }> = {};
for (const agentSessions of Object.values(sessionsByAgent)) {
for (const s of agentSessions) {
const tKey = s.tabKey || s.agentType;
tabs.push({
id: s.id,
agentType: s.agentType,
tabKey: s.tabKey,
label: s.label,
backendSessionId: s.backendSessionId || agentStates[s.agentType]?.sessionId || undefined,
// agentStates is keyed by tabKey (unique per tab), not by base agentType
backendSessionId: s.backendSessionId || agentStates[tKey]?.sessionId || undefined,
...(s.historySourceId ? { historySourceId: s.historySourceId } : {}),
});
sessions[s.id] = { messages: s.messages, graphNodes: s.graphNodes };
}
@@ -512,13 +581,16 @@ export default function Workspace() {
const { Framework: _fw, ...userFacing } = result;
const all = Object.values(userFacing).flat();
setDiscoverAgents(all);
}).catch(() => {});
}).catch(() => { });
}, []);

// --- Agent loading: loadAgentForType ---
const loadingRef = useRef(new Set<string>());
const loadAgentForType = useCallback(async (agentType: string) => {
if (agentType === "new-agent" || agentType.startsWith("new-agent-")) {
// agentType may be a unique composite key ("exports/foo::sessionId") for additional
// tabs — extract the real agent path for selector checks and API calls.
const agentPath = baseAgentType(agentType);
if (agentPath === "new-agent" || agentType.startsWith("new-agent-")) {
// Create a queen-only session (no worker) for agent building
updateAgentState(agentType, { loading: true, error: null, ready: false, sessionId: null });
try {
@@ -532,17 +604,68 @@ export default function Workspace() {

// Try to reconnect to stored backend session (e.g., after browser refresh)
const storedId = activeSess?.backendSessionId;
// When the server restarts the session is "cold" — conversation files
// survive on disk but there is no live runtime. Track the old ID so
// we can restore message history after creating a new session.
let coldRestoreId: string | undefined;

if (storedId) {
try {
liveSession = await sessionsApi.get(storedId);
const sessionData = await sessionsApi.get(storedId);
if (sessionData.cold) {
// Server restarted — files on disk, no live runtime
coldRestoreId = storedId;
} else {
liveSession = sessionData;
}
} catch {
// Session gone — fall through to create new
// Session gone entirely (no disk files either)
}
}

let restoredMessageCount = 0;

if (!liveSession) {
// Reconnect failed — clear stale cached messages from localStorage restore
if (storedId && activeId) {
// Fetch conversation history from disk BEFORE creating the new session.
// SKIP if messages were already pre-populated by handleHistoryOpen.
const restoreFrom = coldRestoreId ?? storedId;
const preRestoredMsgs: ChatMessage[] = [];
const alreadyHasMessages = (activeSess?.messages?.length ?? 0) > 0;
if (restoreFrom && !alreadyHasMessages) {
try {
const { messages: queenMsgs } = await sessionsApi.queenMessages(restoreFrom);
for (const m of queenMsgs as Message[]) {
const msg = backendMessageToChatMessage(m, agentType, "Queen Bee");
msg.role = "queen";
preRestoredMsgs.push(msg);
}
} catch {
// Not available — will start fresh
}
}

// Suppress the queen's intro cycle whenever we are about to restore a
// previous conversation, or whenever we have a stored session ID.
const willRestore = !!(restoreFrom);
if (willRestore || preRestoredMsgs.length > 0) suppressIntroRef.current.add(agentType);

// Pass coldRestoreId as queenResumeFrom so the backend writes queen
// messages into the ORIGINAL session's directory.
liveSession = await sessionsApi.create(undefined, undefined, undefined, prompt, coldRestoreId ?? undefined);

if (preRestoredMsgs.length > 0) {
preRestoredMsgs.sort((a, b) => (a.createdAt ?? 0) - (b.createdAt ?? 0));
if (activeId) {
setSessionsByAgent(prev => ({
...prev,
[agentType]: (prev[agentType] || []).map(s =>
s.id === activeId ? { ...s, messages: preRestoredMsgs, graphNodes: [] } : s,
),
}));
}
restoredMessageCount = preRestoredMsgs.length;
} else if (restoreFrom && activeId) {
// We had a stored session but no messages on disk — wipe stale localStorage cache
setSessionsByAgent(prev => ({
...prev,
[agentType]: (prev[agentType] || []).map(s =>
@@ -551,10 +674,8 @@ export default function Workspace() {
}));
}

liveSession = await sessionsApi.create(undefined, undefined, undefined, prompt);

// Show the initial prompt as a user message in chat (only on fresh create)
if (prompt && activeId) {
// Show the initial prompt as a user message only on a truly fresh session
if (prompt && restoredMessageCount === 0 && activeId) {
const userMsg: ChatMessage = {
id: makeId(), agent: "You", agentColor: "",
content: prompt, timestamp: "", type: "user", thread: agentType, createdAt: Date.now(),
@@ -568,16 +689,25 @@ export default function Workspace() {
}
}

// Store backendSessionId on the active Session object for persistence
// Store backendSessionId on the Session object for persistence.
// Also set historySourceId so the sidebar "already-open" check works
// even after cold-revive changes backendSessionId to a new live session ID.
if (activeId) {
setSessionsByAgent(prev => ({
...prev,
[agentType]: (prev[agentType] || []).map(s =>
s.id === activeId ? { ...s, backendSessionId: liveSession!.session_id } : s,
s.id === activeId ? {
...s,
backendSessionId: liveSession!.session_id,
historySourceId: s.historySourceId || coldRestoreId || undefined,
} : s,
),
}));
}

// If no messages were actually restored, lift the intro suppression
if (restoredMessageCount === 0) suppressIntroRef.current.delete(agentType);

updateAgentState(agentType, {
sessionId: liveSession.session_id,
displayName: "Queen Bee",
@@ -601,22 +731,42 @@ export default function Workspace() {
try {
let liveSession: LiveSession | undefined;
let isResumedSession = false;
// Set when the stored session is cold (server restarted) so we can restore
// messages from the old session files after creating a new live session.
let coldRestoreId: string | undefined;

// Try to reconnect to an existing backend session (e.g., after browser refresh).
// The backendSessionId is persisted in localStorage per tab.
const storedSessionId = sessionsRef.current[agentType]?.[0]?.backendSessionId;
// Also check historySourceId — handleHistoryOpen populates this with the
// original session ID from the sidebar. Use it as a fallback for stored ID.
const historySourceId = sessionsRef.current[agentType]?.[0]?.historySourceId;
const storedSessionId = sessionsRef.current[agentType]?.[0]?.backendSessionId
|| historySourceId;
if (storedSessionId) {
try {
liveSession = await sessionsApi.get(storedSessionId);
isResumedSession = true;
const sessionData = await sessionsApi.get(storedSessionId);
if (sessionData.cold) {
// Server restarted — conversation files survive on disk, no live runtime.
coldRestoreId = storedSessionId;
} else {
liveSession = sessionData;
isResumedSession = true;
}
} catch {
// Session gone (server restarted, etc.) — fall through to create new
// 404: session was explicitly stopped (via closeAgentTab) but conversation
// files likely still exist on disk. Treat it as cold so we can restore.
// Verify files exist before assuming cold — if queenMessages succeeds with
// content, files are there.
coldRestoreId = historySourceId || storedSessionId;
}
}

if (!liveSession) {
// Reconnect failed — clear stale cached messages from localStorage restore
if (storedSessionId) {
// Reconnect failed — clear stale cached messages from localStorage restore.
// NEVER wipe when: (a) doing a cold restore (we'll restore from disk) or
// (b) handleHistoryOpen already pre-populated messages (alreadyHasMessages).
const alreadyHasMessages = (sessionsRef.current[agentType] || [])[0]?.messages?.length > 0;
if (storedSessionId && !coldRestoreId && !alreadyHasMessages) {
setSessionsByAgent(prev => ({
...prev,
[agentType]: (prev[agentType] || []).map((s, i) =>
@@ -625,8 +775,48 @@ export default function Workspace() {
}));
}

// CRITICAL: Pre-fetch queen messages from the old session directory BEFORE
// creating the new session. When queen_resume_from is set the new session writes
// to the SAME directory, so if we fetch after creation we risk capturing the
// new queen's greeting in the restored history.
// SKIP if messages were already pre-populated by handleHistoryOpen (avoids
// double-fetch and greeting leakage).
let preQueenMsgs: ChatMessage[] = [];
if (coldRestoreId && !alreadyHasMessages) {
try {
const { messages: queenMsgs } = await sessionsApi.queenMessages(coldRestoreId);
// Also pre-fetch worker messages from the old session if a resumable worker exists
const displayNameTemp = formatAgentDisplayName(agentPath);
for (const m of queenMsgs as Message[]) {
const msg = backendMessageToChatMessage(m, agentType, "Queen Bee");
msg.role = "queen";
preQueenMsgs.push(msg);
}
// Also try to grab worker messages while we're here
try {
const { sessions: workerSessions } = await sessionsApi.workerSessions(coldRestoreId);
const resumable = workerSessions.find(s => s.status === "active" || s.status === "paused");
if (resumable) {
const { messages: wMsgs } = await sessionsApi.messages(coldRestoreId, resumable.session_id);
for (const m of wMsgs as Message[]) {
preQueenMsgs.push(backendMessageToChatMessage(m, agentType, displayNameTemp));
}
}
} catch { /* not critical */ }
} catch {
// Not available — will start fresh
}
}

// Suppress intro whenever we are about to restore a previous conversation.
// The user never expects a greeting when reopening a session.
if (coldRestoreId) suppressIntroRef.current.add(agentType);

try {
liveSession = await sessionsApi.create(agentType);
// Pass coldRestoreId as queenResumeFrom so the backend writes queen
// messages into the ORIGINAL session's directory — all conversation
// history accumulates in one place across server restarts.
liveSession = await sessionsApi.create(agentPath, undefined, undefined, undefined, coldRestoreId ?? undefined);
} catch (loadErr: unknown) {
// 424 = credentials required — open the credentials modal
if (loadErr instanceof ApiError && loadErr.status === 424) {
@@ -671,6 +861,18 @@ export default function Workspace() {
liveSession = body as unknown as LiveSession;
}
}

// If we pre-fetched messages for a cold restore, populate the UI immediately.
// This happens before the SSE connection opens so no greeting can slip through.
if (preQueenMsgs.length > 0) {
preQueenMsgs.sort((a, b) => (a.createdAt ?? 0) - (b.createdAt ?? 0));
setSessionsByAgent(prev => ({
...prev,
[agentType]: (prev[agentType] || []).map((s, i) =>
i === 0 ? { ...s, messages: preQueenMsgs, graphNodes: [] } : s,
),
}));
}
}

// At this point liveSession is guaranteed set — if both reconnect and create
@@ -685,43 +887,58 @@ export default function Workspace() {
queenBuilding: initialMode === "building",
});

// Update the session label
// Update the session label + backendSessionId. Also set historySourceId
// so the sidebar "already-open" check works even after cold-revive changes
// backendSessionId to a new live session ID.
setSessionsByAgent((prev) => {
const sessions = prev[agentType] || [];
if (!sessions.length) return prev;
return {
...prev,
[agentType]: sessions.map((s, i) =>
i === 0 ? { ...s, label: sessions.length === 1 ? displayName : `${displayName} #${i + 1}`, backendSessionId: session.session_id } : s,
i === 0 ? {
...s,
// Preserve existing label if it was already set with a #N suffix by
// addAgentSession/handleHistoryOpen. Only overwrite with the bare
// displayName when the label doesn't match the resolved display name.
label: s.label.startsWith(displayName) ? s.label : displayName,
backendSessionId: session.session_id,
// Preserve existing historySourceId; set it from coldRestoreId if missing
historySourceId: s.historySourceId || coldRestoreId || undefined,
} : s,
),
};
});

// Check worker session status (detects running worker).
// Only restore messages when rejoining an existing backend session.
// Restore messages when rejoining an existing session OR cold-restoring from disk.
let isWorkerRunning = false;
const restoredMsgs: ChatMessage[] = [];
try {
const { sessions: workerSessions } = await sessionsApi.workerSessions(session.session_id);
const resumable = workerSessions.find(
(s) => s.status === "active" || s.status === "paused",
);
isWorkerRunning = resumable?.status === "active";
// For cold-restore, use the old session ID. For live resume, use current session.
const historyId = coldRestoreId ?? (isResumedSession ? session.session_id : undefined);

if (isResumedSession && resumable) {
const { messages } = await sessionsApi.messages(session.session_id, resumable.session_id);
for (const m of messages as Message[]) {
restoredMsgs.push(backendMessageToChatMessage(m, agentType, displayName));
}
}
} catch {
// Worker session listing failed — not critical
}

// Restore queen conversation when rejoining an existing session
if (isResumedSession) {
// For LIVE resume (not cold restore), fetch worker + queen messages now.
// For cold restore they were already pre-fetched above (before create) so we skip to avoid
// double-restoring and to avoid capturing the new greeting.
if (historyId && !coldRestoreId) {
try {
const { messages: queenMsgs } = await sessionsApi.queenMessages(session.session_id);
const { sessions: workerSessions } = await sessionsApi.workerSessions(historyId);
const resumable = workerSessions.find(
(s) => s.status === "active" || s.status === "paused",
);
isWorkerRunning = resumable?.status === "active";

if (resumable) {
const { messages } = await sessionsApi.messages(historyId, resumable.session_id);
for (const m of messages as Message[]) {
restoredMsgs.push(backendMessageToChatMessage(m, agentType, displayName));
}
}
} catch {
// Worker session listing failed — not critical
}

try {
const { messages: queenMsgs } = await sessionsApi.queenMessages(historyId);
for (const m of queenMsgs as Message[]) {
const msg = backendMessageToChatMessage(m, agentType, "Queen Bee");
msg.role = "queen";
@@ -732,7 +949,8 @@ export default function Workspace() {
}
}

// Merge queen + worker messages in chronological order
// Merge messages in chronological order (only for live resume; cold restore
// was already applied above before create).
if (restoredMsgs.length > 0) {
restoredMsgs.sort((a, b) => (a.createdAt ?? 0) - (b.createdAt ?? 0));
setSessionsByAgent((prev) => ({
@@ -743,7 +961,12 @@ export default function Workspace() {
}));
}

// If no messages were actually restored, lift the intro suppression gate
if (restoredMsgs.length === 0 && !coldRestoreId) suppressIntroRef.current.delete(agentType);

updateAgentState(agentType, {
sessionId: session.session_id,
displayName,
ready: true,
loading: false,
queenReady: true,
@@ -1017,6 +1240,9 @@ export default function Workspace() {

const isQueen = streamId === "queen";
if (isQueen) console.log('[QUEEN] handleSSEEvent:', event.type, 'agentType:', agentType);
// Drop queen message content while suppressing the auto-intro after a cold-restore.
// Uses a synchronous ref to avoid race conditions with React state batching.
const suppressQueenMessages = isQueen && suppressIntroRef.current.has(agentType);
const agentDisplayName = agentStates[agentType]?.displayName;
const displayName = isQueen ? "Queen Bee" : (agentDisplayName || undefined);
const role = isQueen ? "queen" as const : "worker" as const;
@@ -1068,6 +1294,7 @@ export default function Workspace() {

case "execution_completed":
if (isQueen) {
suppressIntroRef.current.delete(agentType);
updateAgentState(agentType, { isTyping: false, queenIsTyping: false });
} else {
// Flush any remaining LLM snapshots before clearing state
@@ -1106,7 +1333,7 @@ export default function Workspace() {
case "llm_text_delta": {
const chatMsg = sseEventToChatMessage(event, agentType, displayName, currentTurn);
if (isQueen) console.log('[QUEEN] chatMsg:', chatMsg?.id, chatMsg?.content?.slice(0, 50), 'turn:', currentTurn);
if (chatMsg) {
if (chatMsg && !suppressQueenMessages) {
if (isQueen) chatMsg.role = role;
upsertChatMessage(agentType, chatMsg);
}
@@ -1149,26 +1376,30 @@ export default function Workspace() {
const cur = prev[agentType] || defaultAgentState();
const workerQuestionActive = cur.pendingQuestionSource === "worker";
if (isAutoBlock && workerQuestionActive) {
return { ...prev, [agentType]: {
return {
...prev, [agentType]: {
...cur,
awaitingInput: true,
isTyping: false,
isStreaming: false,
queenIsTyping: false,
queenBuilding: false,
}
};
}
return {
...prev, [agentType]: {
...cur,
awaitingInput: true,
isTyping: false,
isStreaming: false,
queenIsTyping: false,
queenBuilding: false,
}};
}
return { ...prev, [agentType]: {
...cur,
awaitingInput: true,
isTyping: false,
isStreaming: false,
queenIsTyping: false,
queenBuilding: false,
pendingQuestion: prompt || null,
pendingOptions: options,
pendingQuestionSource: "queen",
}};
pendingQuestion: prompt || null,
pendingOptions: options,
pendingQuestionSource: "queen",
}
};
});
} else {
// Worker input request.
@@ -1200,7 +1431,7 @@ export default function Workspace() {
queenIsTyping: false,
pendingQuestion: prompt || null,
pendingOptions: options,
pendingQuestionSource: options ? "worker" : null,
pendingQuestionSource: "worker",
});
}
}
@@ -1547,13 +1778,13 @@ export default function Workspace() {
case "worker_loaded": {
const workerName = event.data?.worker_name as string | undefined;
const agentPathFromEvent = event.data?.agent_path as string | undefined;
const displayName = formatAgentDisplayName(workerName || agentType);
const displayName = formatAgentDisplayName(workerName || baseAgentType(agentType));

// Invalidate cached credential requirements so the modal fetches
// fresh data the next time it opens (the new agent may have
// different credential needs than the previous one).
clearCredentialCache(agentPathFromEvent);
clearCredentialCache(agentType);
clearCredentialCache(baseAgentType(agentType));

// Update agent state: new display name, reset graph so topology refetch triggers
updateAgentState(agentType, {
@@ -1564,15 +1795,17 @@ export default function Workspace() {
nodeSpecs: [],
});

// Update session label (tab name) and clear graph nodes for fresh fetch
// Update ONLY the active session's label + graph nodes — never touch
// sessions belonging to a different tab sharing the same agentType key.
// Also clear worker messages so the fresh worker starts with a clean slate.
const activeId = activeSessionRef.current[agentType];
setSessionsByAgent(prev => ({
...prev,
[agentType]: (prev[agentType] || []).map(s => ({
...s,
label: displayName,
graphNodes: [],
messages: s.messages.filter(m => m.role !== "worker"),
})),
[agentType]: (prev[agentType] || []).map(s =>
s.id === activeId || (!activeId && prev[agentType]?.[0]?.id === s.id)
? { ...s, label: displayName, graphNodes: [], messages: s.messages.filter(m => m.role !== "worker") }
: s
),
}));

// Explicitly fetch graph topology for the newly loaded worker
@@ -1610,7 +1843,7 @@ export default function Workspace() {
const activeSession = currentSessions.find(s => s.id === activeSessionId) || currentSessions[0];

const currentGraph = activeSession
? { nodes: activeSession.graphNodes, title: activeAgentState?.displayName || formatAgentDisplayName(activeWorker) }
? { nodes: activeSession.graphNodes, title: activeAgentState?.displayName || formatAgentDisplayName(baseAgentType(activeWorker)) }
: { nodes: [] as GraphNode[], title: "" };

// Build a flat list of all agent-type tabs for the tab bar
@@ -1652,6 +1885,41 @@ export default function Workspace() {
return;
}

// If worker is awaiting free-text input (no options / no QuestionWidget),
// route the message directly to the worker instead of the queen.
if (agentStates[activeWorker]?.awaitingInput && agentStates[activeWorker]?.pendingQuestionSource === "worker" && !agentStates[activeWorker]?.pendingOptions) {
const state = agentStates[activeWorker];
if (state?.sessionId && state?.ready) {
const userMsg: ChatMessage = {
id: makeId(), agent: "You", agentColor: "",
content: text, timestamp: "", type: "user", thread, createdAt: Date.now(),
};
setSessionsByAgent(prev => ({
...prev,
[activeWorker]: prev[activeWorker].map(s =>
s.id === activeSession.id ? { ...s, messages: [...s.messages, userMsg] } : s
),
}));
updateAgentState(activeWorker, { awaitingInput: false, workerInputMessageId: null, isTyping: true, pendingQuestion: null, pendingOptions: null, pendingQuestionSource: null });
executionApi.workerInput(state.sessionId, text).catch((err: unknown) => {
const errMsg = err instanceof Error ? err.message : String(err);
const errorChatMsg: ChatMessage = {
id: makeId(), agent: "System", agentColor: "",
content: `Failed to send to worker: ${errMsg}`,
timestamp: "", type: "system", thread, createdAt: Date.now(),
};
setSessionsByAgent(prev => ({
...prev,
[activeWorker]: prev[activeWorker].map(s =>
s.id === activeSession.id ? { ...s, messages: [...s.messages, errorChatMsg] } : s
),
}));
updateAgentState(activeWorker, { isTyping: false, isStreaming: false });
});
}
return;
}

// If queen has a pending question widget, dismiss it when user types directly
if (agentStates[activeWorker]?.pendingQuestionSource === "queen") {
updateAgentState(activeWorker, { pendingQuestion: null, pendingOptions: null, pendingQuestionSource: null });
@@ -1667,6 +1935,7 @@ export default function Workspace() {
s.id === activeSession.id ? { ...s, messages: [...s.messages, userMsg] } : s
),
}));
suppressIntroRef.current.delete(activeWorker);
updateAgentState(activeWorker, { isTyping: true, queenIsTyping: true });

if (state?.sessionId && state?.ready) {
@@ -1786,7 +2055,7 @@ export default function Workspace() {
// Queue context for queen (fire-and-forget, no LLM response triggered)
if (question && state?.sessionId && state?.ready) {
const notification = `[Worker asked: "${question}" | User selected: "${answer}"]`;
executionApi.queenContext(state.sessionId, notification).catch(() => {});
executionApi.queenContext(state.sessionId, notification).catch(() => { });
}
}
}, [activeWorker, activeSession, agentStates, handleWorkerReply, handleSend, updateAgentState, setSessionsByAgent]);
@@ -1817,9 +2086,9 @@ export default function Workspace() {
// Unblock the waiting node with a dismiss signal
const dismissMsg = `[User dismissed the question: "${question}"]`;
if (source === "worker") {
executionApi.workerInput(state.sessionId, dismissMsg).catch(() => {});
executionApi.workerInput(state.sessionId, dismissMsg).catch(() => { });
} else {
executionApi.chat(state.sessionId, dismissMsg).catch(() => {});
executionApi.chat(state.sessionId, dismissMsg).catch(() => { });
}
}, [agentStates, activeWorker, updateAgentState]);

@@ -1867,9 +2136,9 @@ export default function Workspace() {
: Promise.resolve();

pausePromise
.catch(() => {}) // pause failure shouldn't block kill
.catch(() => { }) // pause failure shouldn't block kill
.then(() => sessionsApi.stop(state.sessionId!))
.catch(() => {}); // fire-and-forget
.catch(() => { }); // fire-and-forget
}

const allTypes = Object.keys(sessionsByAgent).filter(k => (sessionsByAgent[k] || []).length > 0);
@@ -1901,22 +2170,128 @@ export default function Workspace() {

// Create a new session for any agent type (used by NewTabPopover)
const addAgentSession = useCallback((agentType: string, agentLabel?: string) => {
const sessions = sessionsByAgent[agentType] || [];
const newIndex = sessions.length + 1;
const existingCreds = sessions.length > 0 ? sessions[0].credentials : undefined;
// Count all existing open tabs for this base agent type (first tab uses agentType
// as key; subsequent tabs use "agentType::frontendSessionId" as unique keys).
const existingTabCount = Object.keys(sessionsByAgent).filter(
k => baseAgentType(k) === agentType && (sessionsByAgent[k] || []).length > 0,
).length;

const newIndex = existingTabCount + 1;
const existingCreds = sessionsByAgent[agentType]?.[0]?.credentials;
const displayLabel = agentLabel || formatAgentDisplayName(agentType);
const label = newIndex === 1 ? displayLabel : `${displayLabel} #${newIndex}`;
const newSession = createSession(agentType, label, existingCreds);

// First tab keeps agentType as its key (backward-compatible with all existing
// logic). Additional tabs get a unique key so each has its own isolated
// agentStates slot, its own backend session, and its own tab-bar entry.
const tabKey = existingTabCount === 0 ? agentType : `${agentType}::${newSession.id}`;
if (tabKey !== agentType) {
newSession.tabKey = tabKey;
}

setSessionsByAgent(prev => ({
...prev,
[agentType]: [...(prev[agentType] || []), newSession],
[tabKey]: [newSession],
}));
setActiveSessionByAgent(prev => ({ ...prev, [agentType]: newSession.id }));
setActiveWorker(agentType);
setActiveSessionByAgent(prev => ({ ...prev, [tabKey]: newSession.id }));
setActiveWorker(tabKey);
}, [sessionsByAgent]);

const activeWorkerLabel = activeAgentState?.displayName || formatAgentDisplayName(activeWorker);
// Open a history session: switch to its existing tab, or open a new tab.
// Async so we can pre-fetch messages before creating the tab — this gives
// instant visual feedback without waiting for loadAgentForType.
const handleHistoryOpen = useCallback(async (sessionId: string, agentPath?: string | null, agentName?: string | null) => {
// Already open as a tab — just switch to it.
for (const [type, sessions] of Object.entries(sessionsByAgent)) {
for (const s of sessions) {
if (s.backendSessionId === sessionId || s.historySourceId === sessionId) {
setActiveWorker(type);
setActiveSessionByAgent(prev => ({ ...prev, [type]: s.id }));
if (s.messages.length > 0) {
suppressIntroRef.current.add(type);
}
return;
}
}
}

// Pre-fetch messages from disk so the tab opens with conversation already shown.
// This happens BEFORE creating the tab so no "new session" empty state is visible.
let prefetchedMessages: ChatMessage[] = [];
try {
const { messages: queenMsgs } = await sessionsApi.queenMessages(sessionId);
for (const m of queenMsgs as Message[]) {
const resolvedType = agentPath || "new-agent";
const msg = backendMessageToChatMessage(m, resolvedType, "Queen Bee");
msg.role = "queen";
prefetchedMessages.push(msg);
}
if (prefetchedMessages.length > 0) {
prefetchedMessages.sort((a, b) => (a.createdAt ?? 0) - (b.createdAt ?? 0));
}
} catch {
// Not available — session will open empty and loadAgentForType will try again
}

const resolvedAgentType = agentPath || "new-agent";
const existingTabCount = Object.keys(sessionsByAgent).filter(
k => baseAgentType(k) === resolvedAgentType && (sessionsByAgent[k] || []).length > 0
).length;
const rawLabel = agentName ||
(agentPath ? agentPath.replace(/\/$/, "").split("/").pop()?.replace(/_/g, " ").replace(/\b\w/g, c => c.toUpperCase()) || agentPath : null) ||
"New Agent";
const label = existingTabCount === 0 ? rawLabel : `${rawLabel} #${existingTabCount + 1}`;
const newSession = createSession(resolvedAgentType, label);
newSession.backendSessionId = sessionId;
newSession.historySourceId = sessionId;
// Pre-populate messages so the chat panel immediately shows the conversation
if (prefetchedMessages.length > 0) {
newSession.messages = prefetchedMessages;
}
const tabKey = existingTabCount === 0 ? resolvedAgentType : `${resolvedAgentType}::${newSession.id}`;
if (tabKey !== resolvedAgentType) newSession.tabKey = tabKey;

// Suppress queen intro BEFORE the tab is created so loadAgentForType
// never sees an unsuppressed window — the user never expects a greeting on reopen.
if (prefetchedMessages.length > 0 || sessionId) {
suppressIntroRef.current.add(tabKey);
}

setSessionsByAgent(prev => ({ ...prev, [tabKey]: [newSession] }));
setActiveSessionByAgent(prev => ({ ...prev, [tabKey]: newSession.id }));
setActiveWorker(tabKey);
}, [sessionsByAgent]);

// Post-mount: open the session from the URL ?session= param via handleHistoryOpen.
// This runs AFTER persisted tabs are hydrated, so dedup works correctly.
// Use a ref guard so it fires exactly once even in React StrictMode.
useEffect(() => {
if (mountedRef.current) return;
mountedRef.current = true;
const sid = initialSessionIdRef.current;
if (!sid) return;
// Fetch agent metadata from the backend so handleHistoryOpen gets the right
// agentPath and agentName (needed to label the tab correctly).
sessionsApi.history().then(r => {
const match = r.sessions.find((s: { session_id: string }) => s.session_id === sid);
handleHistoryOpen(
sid,
match?.agent_path ?? (initialAgentRef.current !== "new-agent" ? initialAgentRef.current : null),
match?.agent_name ?? null,
);
}).catch(() => {
// History fetch failed — still open the session with what we know.
handleHistoryOpen(
sid,
initialAgentRef.current !== "new-agent" ? initialAgentRef.current : null,
null,
);
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);

const activeWorkerLabel = activeAgentState?.displayName || formatAgentDisplayName(baseAgentType(activeWorker));


return (
@@ -1965,9 +2340,11 @@ export default function Workspace() {

{/* Main content area */}
<div className="flex flex-1 min-h-0">
<div className="w-[340px] min-w-[280px] bg-card/30 flex flex-col border-r border-border/30">

{/* ── Pipeline graph + chat ──────────────────────────────────── */}
<div className="w-[300px] min-w-[240px] bg-card/30 flex flex-col border-r border-border/30">
<div className="flex-1 min-h-0">
<AgentGraph
<AgentGraph
nodes={currentGraph.nodes}
title={currentGraph.title}
onNodeClick={(node) => setSelectedNode(prev => prev?.id === node.id ? null : node)}
@@ -2065,7 +2442,7 @@ export default function Workspace() {
<div className="flex items-start gap-3 min-w-0">
<div className="w-8 h-8 rounded-lg flex items-center justify-center flex-shrink-0 mt-0.5 bg-[hsl(210,40%,55%)]/15 border border-[hsl(210,40%,55%)]/25">
<span className="text-sm" style={{ color: "hsl(210,40%,55%)" }}>
{{"webhook": "\u26A1", "timer": "\u23F1", "api": "\u2192", "event": "\u223F"}[selectedNode.triggerType || ""] || "\u26A1"}
{{ "webhook": "\u26A1", "timer": "\u23F1", "api": "\u2192", "event": "\u223F" }[selectedNode.triggerType || ""] || "\u26A1"}
</span>
</div>
<div className="min-w-0">

@@ -0,0 +1,23 @@
"""Tests for framework/config.py - Hive configuration loading."""
|
||||
|
||||
import logging
|
||||
|
||||
from framework.config import get_hive_config
|
||||
|
||||
|
||||
class TestGetHiveConfig:
|
||||
"""Test get_hive_config() logs warnings on parse errors."""
|
||||
|
||||
def test_logs_warning_on_malformed_json(self, tmp_path, monkeypatch, caplog):
|
||||
"""Test that malformed JSON logs warning and returns empty dict."""
|
||||
config_file = tmp_path / "configuration.json"
|
||||
config_file.write_text('{"broken": }')
|
||||
|
||||
monkeypatch.setattr("framework.config.HIVE_CONFIG_FILE", config_file)
|
||||
|
||||
with caplog.at_level(logging.WARNING):
|
||||
result = get_hive_config()
|
||||
|
||||
assert result == {}
|
||||
assert "Failed to load Hive config" in caplog.text
|
||||
assert str(config_file) in caplog.text
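
For context, a minimal sketch of the behavior these tests pin down, i.e. what `get_hive_config` in `framework/config.py` plausibly looks like. Only what the assertions check is grounded (the warning text, the file path in the message, the `{}` fallback); the config path and everything else here is an assumption.

```python
# framework/config.py - illustrative sketch inferred from the test above.
import json
import logging
from pathlib import Path

logger = logging.getLogger(__name__)

# Actual location is an assumption; the test monkeypatches this constant.
HIVE_CONFIG_FILE = Path.home() / ".hive" / "configuration.json"


def get_hive_config() -> dict:
    """Load the Hive config; warn and return {} on any read/parse error."""
    try:
        return json.loads(HIVE_CONFIG_FILE.read_text())
    except (OSError, json.JSONDecodeError) as exc:
        # The message must contain "Failed to load Hive config" and the
        # file path, per the assertions in the test above.
        logger.warning("Failed to load Hive config at %s: %s", HIVE_CONFIG_FILE, exc)
        return {}
```
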
@@ -0,0 +1,145 @@
# Integration Bounty Program

Earn XP, Discord roles, and money by testing, documenting, and building integrations for the Aden agent framework.

## Why Contribute?

**Your name in the product.** When you promote a tool to verified, your GitHub handle goes in the tool's README under `Contributed by`. Every agent that uses that integration carries your name — permanent credit in a production codebase.

**Visible status.** Your Discord tier role is earned, not bought. When you answer a question in `#integrations-help` with a Core Contributor badge, people listen.

**Weekly races.** Every Monday the bot posts the leaderboard. Top 3 get medal emojis. The best work gets highlighted in announcements.

**The path to paid.** Core Contributor unlocks real money. It takes sustained quality work across testing, docs, and code — the scarcity makes it matter.

## How It Works

1. Pick a bounty from the [GitHub issues board](https://github.com/adenhq/hive/issues?q=is%3Aissue+is%3Aopen+label%3A%22bounty%3A*%22)
2. Claim it by commenting on the issue
3. Do the work and submit a PR (or test report)
4. A maintainer reviews and merges
5. You automatically get XP in Discord via Lurkr
6. At certain levels, you unlock roles. At the top tier, you unlock paid bounties.

## Tiers

| Tier | How to Reach | Rewards |
| --------------------------- | -------------------------- | ------------------------------------------------------------- |
| **Agent Builder** | ~500 XP (Lurkr level 5) | Discord role, bounty board access |
| **Open Source Contributor** | ~2,000 XP (Lurkr level 15) | Discord role, name in CONTRIBUTORS.md and tool READMEs |
| **Core Contributor** | Maintainer-approved | Monetary payout per bounty, private `#bounty-payouts` channel |

Lurkr auto-assigns the first two roles. Core Contributor requires sustained, quality contributions across multiple bounty types and a maintainer vouching for you.

## Bounty Types

| Type | Label | Points | What You Do |
| --------------------- | ----------------- | ------ | -------------------------------------------------------------------------- |
| **Test a tool** | `bounty:test` | 20 | Test with a real API key, submit a report with logs |
| **Write docs** | `bounty:docs` | 20 | Write a README following the [template](templates/tool-readme-template.md) |
| **Code contribution** | `bounty:code` | 30 | Add health checker, fix a bug, or improve an integration |
| **New integration** | `bounty:new-tool` | 75 | Build a complete integration from scratch |

Promoting a tool from unverified to verified is the final step — submit a PR moving it from `_register_unverified()` to `_register_verified()` after the [promotion checklist](promotion-checklist.md) is complete.

## Quality Gates

- **PRs** must be merged by a maintainer (not self-merged)
- **Test reports** must follow the [test report template](templates/agent-test-report-template.md) with logs or session ID
- **READMEs** must follow the [tool README template](templates/tool-readme-template.md)
- **Claim before you start** — comment on the issue, wait for assignment
- No self-review, no splitting one change across multiple PRs, no AI-only submissions without verification

## Labels

| Label | Color | Meaning |
| ------------------- | ------------------ | --------------------------------------- |
| `bounty:test` | `#1D76DB` (blue) | Test a tool with a real API key |
| `bounty:docs` | `#FBCA04` (yellow) | Write or improve documentation |
| `bounty:code` | `#D93F0B` (orange) | Health checker, bug fix, or improvement |
| `bounty:new-tool` | `#6F42C1` (purple) | Build a new integration from scratch |
| `difficulty:easy` | `#BFD4F2` | Good first contribution |
| `difficulty:medium` | `#D4C5F9` | Requires some familiarity |
| `difficulty:hard` | `#F9D0C4` | Significant effort or expertise needed |

## Discord

```
#integrations-announcements — Bounties, leaderboard, tool promotions (bot + admin only)
#integrations-help — Questions, testing coordination, showcases
#bounty-payouts — Dollar values and payout tracking (Core Contributors only)
```

## Leaderboard

Weekly leaderboard auto-posts to `#integrations-announcements` every Monday. Top 3 get medal emojis. Check your rank anytime with `/rank` in Discord.

XP comes from two sources: GitHub bounties (auto-pushed on PR merge) and Discord activity in `#integrations-help`.

## Launch Plan: The 55-Tool Blitz

A 2-week sprint to get all 55 unverified tools tested, documented, and health-checked.

### Day 1: Post Everything

- **41 `bounty:docs` issues** — tools missing READMEs, `difficulty:easy`, 20 pts each
- **40 `bounty:code` issues** — tools missing health checkers, `difficulty:medium`, 30 pts each
- **55 `bounty:test` issues** — one per unverified tool, `difficulty:medium`, 20 pts each

### Week 1-2

All bounty types open in parallel. Contributors self-select. Daily progress updates in `#integrations-announcements`. Day 14 wrap-up with final leaderboard and shoutouts.

## Automation

```
PR merged with bounty:* label
→ GitHub Action runs bounty-tracker.ts
→ Calculates points from label
→ Resolves GitHub → Discord ID via contributors.yml
→ Pushes XP to Lurkr API
→ Posts notification to #integrations-announcements
```
```
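
The heavy lifting in that chain lives in `scripts/bounty-tracker.ts`. As a rough sketch of its logic, shown in Python purely for illustration: the point table comes from Bounty Types above and the endpoint from the table below, while the payload shape, base URL, auth scheme, and `contributors.yml` layout are assumptions.

```python
# Illustrative only - the real tracker is scripts/bounty-tracker.ts (TypeScript).
import os

import requests  # assumed HTTP dependency
import yaml      # assumed: PyYAML, to read contributors.yml

POINTS = {"bounty:test": 20, "bounty:docs": 20, "bounty:code": 30, "bounty:new-tool": 75}


def award_xp(pr_labels: list[str], github_user: str, guild_id: str) -> None:
    points = sum(POINTS[label] for label in pr_labels if label in POINTS)
    if points == 0:
        return  # not a bounty PR

    # Resolve GitHub -> Discord via contributors.yml (file layout assumed).
    with open("contributors.yml") as f:
        mapping = yaml.safe_load(f) or {}
    discord_id = mapping.get(github_user)
    if discord_id is None:
        return  # unlinked: bounty still tracked on GitHub, no XP push

    # Push XP to Lurkr: PATCH /levels/{guild}/users/{user} (see table below).
    requests.patch(
        f"https://api.lurkr.gg/levels/{guild_id}/users/{discord_id}",  # base URL assumed
        json={"xp": points},  # payload shape assumed; check the Lurkr API docs
        headers={"Authorization": os.environ["LURKR_API_KEY"]},  # auth scheme assumed
        timeout=10,
    ).raise_for_status()
```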

See the [Setup Guide](setup-guide.md) for full configuration (Lurkr, webhooks, secrets, labels).

### Identity Linking

Contributors link GitHub ↔ Discord by opening a [Link Discord Account](https://github.com/aden-hive/hive/issues/new?template=link-discord.yml) issue. A GitHub Action auto-adds them to `contributors.yml` and closes the issue.

Without this link, bounties are still tracked but Lurkr can't push XP to your Discord account.

### What Handles What

| Concern | Handled By | How |
| ------------------------ | -------------------------- | ----------------------------------------------- |
| Bounty point calculation | GitHub Actions | `bounty-completed.yml` reads PR labels |
| XP push to Discord | GitHub Actions → Lurkr API | `PATCH /levels/{guild}/users/{user}` |
| Discord engagement XP | Lurkr bot | Native message XP (configurable per-channel) |
| Leaderboard | Lurkr bot + GitHub Actions | `/leaderboard` in Discord + weekly webhook post |
| Agent Builder role | Lurkr bot | Auto-assigned at level 5 |
| OSS Contributor role | Lurkr bot | Auto-assigned at level 15 |
| Core Contributor role | Maintainer | Manual (involves money) |
| Identity linking | contributors.yml | PR-based, reviewed by maintainers |

## Guides

- **[Setup Guide](setup-guide.md)** — Admin setup from zero to running
- **[Game Master Manual](game-master-manual.md)** — Maintainer operations
- **[Contributor Guide](contributor-guide.md)** — Everything a contributor needs to start

## Reference

- [Promotion Checklist](promotion-checklist.md) — Criteria for unverified → verified
- [Tool README Template](templates/tool-readme-template.md)
- [Agent Test Report Template](templates/agent-test-report-template.md)
- [Building Tools Guide](../tools/BUILDING_TOOLS.md)
- [Lurkr API Docs](https://lurkr.gg/docs/api)

### Automation Files

- `.github/workflows/bounty-completed.yml` — PR merge → XP push + Discord notification
- `.github/workflows/weekly-leaderboard.yml` — Monday leaderboard post
- `scripts/bounty-tracker.ts` — Point calculation, Lurkr API, Discord formatting
- `scripts/setup-bounty-labels.sh` — One-time label setup
- `contributors.yml` — GitHub ↔ Discord identity mapping
@@ -0,0 +1,109 @@
# Contributor Guide — Integration Bounty Program

Earn XP, Discord roles, and eventually real money by testing and building integrations for the Aden agent framework.

## Getting Started

### 1. Link your GitHub and Discord

Open a [Link Discord Account](https://github.com/aden-hive/hive/issues/new?template=link-discord.yml) issue — just paste your Discord ID and submit. A GitHub Action will automatically add you to `contributors.yml` and close the issue.

To find your Discord ID: Discord Settings > Advanced > Enable **Developer Mode**, then right-click your name > **Copy User ID**.

Without this link, Lurkr can't push XP to your Discord account.

### 2. Pick your first bounty

Browse [GitHub Issues with bounty labels](https://github.com/adenhq/hive/issues?q=is%3Aissue+is%3Aopen+label%3A%22bounty%3A*%22). Start with `bounty:docs` or `difficulty:easy`.

Comment "I'd like to work on this" and wait for a maintainer to assign you.

## Tiers

| Tier | How to Reach | What You Get |
|------|-------------|--------------|
| **Agent Builder** | ~500 XP (Lurkr level 5) | Discord role, bounty board access |
| **Open Source Contributor** | ~2,000 XP (Lurkr level 15) | Discord role, name in CONTRIBUTORS.md and tool READMEs |
| **Core Contributor** | Maintainer nomination | Dollar values on bounties, paid per completion |

XP comes from GitHub bounties (auto-pushed on PR merge) and Discord activity in `#integrations-help`.

## Bounty Types

### Test a Tool (20 pts)

Test an unverified tool with a real API key and report what happens.

1. Get an API key for the service (the bounty issue links to where)
2. Run the tool functions with real data
3. Fill out the [test report template](templates/agent-test-report-template.md)
4. Submit as a comment on the issue or a file in a PR

Report both successes and failures. Finding bugs is valuable.

### Write Docs (20 pts)

Write a README for a tool that's missing one.

1. Read the tool's source code in `tools/src/aden_tools/tools/{tool_name}/`
2. Read the credential spec in `tools/src/aden_tools/credentials/`
3. Fill in the [tool README template](templates/tool-readme-template.md)
4. Submit a PR adding `README.md` to the tool directory

Function names and API URLs must match reality — no AI hallucinations.

### Code Contribution (30 pts)

Add a health checker, fix a bug, or improve an integration.

**Health checker** (a sketch follows these steps):
1. Find a lightweight API endpoint that validates the credential (GET, no writes)
2. Add `health_check_endpoint` to the tool's CredentialSpec
3. Implement a HealthChecker class in `tools/src/aden_tools/credentials/health_check.py`
4. Register in `HEALTH_CHECKERS`, run `uv run pytest tools/tests/test_credential_registry.py`
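
A minimal sketch of steps 2 through 4, assuming a `requests`-style checker; the class name, method signature, and endpoint are illustrative, so mirror an existing checker in `health_check.py` rather than this:

```python
# tools/src/aden_tools/credentials/health_check.py - illustrative shape only.
import requests

HEALTH_CHECKERS = {}  # the real module already defines this registry


class ExampleHealthChecker:  # hypothetical service
    """Validate a credential with one cheap, read-only request."""

    def check(self, api_key: str) -> bool:
        resp = requests.get(
            "https://api.example.com/v1/me",  # the spec's health_check_endpoint
            headers={"Authorization": f"Bearer {api_key}"},  # auth scheme varies per service
            timeout=10,
        )
        if resp.status_code == 429:
            return True   # rate limited, but the key itself is valid
        if resp.status_code == 401:
            return False  # invalid or expired credential
        return resp.status_code == 200


# Step 4: register the checker (dict entry shape assumed).
HEALTH_CHECKERS["example"] = ExampleHealthChecker()
```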

**Bug fix:**
1. Find a bug during testing, file an issue
2. Fix it in a PR with a test covering the bug

### New Integration (75 pts)

Build a complete integration from scratch.

1. Follow the [BUILDING_TOOLS.md](../tools/BUILDING_TOOLS.md) guide
2. Create: tool + credential spec + health checker + tests + README
3. Register in `_register_unverified()` in `tools/__init__.py`
4. Run `make check && make test`

Expect multiple review rounds.

## Rules

1. **Claim before you start** — comment on the issue, wait for assignment
2. **7-day window** — no PR within 7 days = bounty gets re-opened
3. **Max 3 active claims** — don't hoard bounties
4. **Quality matters** — PRs must pass CI and follow templates
5. **No self-review** and no AI-only submissions without verification

## FAQ

**Q: Do I need an API key for every tool I test?**
A: Yes. Most services have free tiers. The bounty issue links to where you get the key.

**Q: How do I become a Core Contributor?**
A: Contribute consistently across different bounty types for 4+ weeks. Maintainers will nominate you.

**Q: What if I haven't linked my Discord yet?**
A: You'll still get credit in GitHub, but no Lurkr XP or Discord roles. Add yourself to `contributors.yml`.

## Quick Reference

| What | Where |
|------|-------|
| Bounty board | [GitHub Issues](https://github.com/adenhq/hive/issues?q=is%3Aissue+is%3Aopen+label%3A%22bounty%3A*%22) |
| README template | [templates/tool-readme-template.md](templates/tool-readme-template.md) |
| Test report template | [templates/agent-test-report-template.md](templates/agent-test-report-template.md) |
| Promotion checklist | [promotion-checklist.md](promotion-checklist.md) |
| Building tools | [BUILDING_TOOLS.md](../tools/BUILDING_TOOLS.md) |
| Discord | [Join](https://discord.com/invite/MXE49hrKDk) |
| Your rank | `/rank` in Discord |
@@ -0,0 +1,107 @@
# Game Master Manual
|
||||
|
||||
Operations guide for maintainers running the Integration Bounty Program.
|
||||
|
||||
## Your Role
|
||||
|
||||
- Post bounty issues and set dollar values for Core Contributors
|
||||
- Assign claimed bounties to contributors
|
||||
- Review and merge bounty PRs (auto-triggers XP awards)
|
||||
- Manage the Core Contributor role
|
||||
- Monitor for gaming and low-quality submissions
|
||||
|
||||
## Handling Bounty Claims
|
||||
|
||||
When someone comments "I'd like to work on this":
|
||||
|
||||
1. For `difficulty:easy`, assign immediately
|
||||
2. For `difficulty:medium`/`difficulty:hard`, check if they've done easier bounties first
|
||||
3. Assign via GitHub. If no PR within 7 days, unassign and re-open
|
||||
|
||||
## Reviewing Bounty PRs
|
||||
|
||||
1. Verify the PR matches the bounty issue
|
||||
2. Check quality gates (below)
|
||||
3. A **different maintainer** must approve than the one who created the bounty
|
||||
4. Apply the correct `bounty:*` label to the PR before merging
|
||||
5. Merge — the GitHub Action auto-awards XP and posts to Discord
|
||||
6. Close the linked bounty issue
|
||||
|
||||
### Quality Gates
|
||||
|
||||
**`bounty:docs`:**
|
||||
- [ ] Follows the [tool README template](templates/tool-readme-template.md)
|
||||
- [ ] Setup instructions are accurate (API key URL works)
|
||||
- [ ] Function names match the actual code
|
||||
- [ ] Not AI-generated without verification
|
||||
|
||||
**`bounty:test`:**
|
||||
- [ ] Test report follows the [template](templates/agent-test-report-template.md)
|
||||
- [ ] Includes logs, session ID, or screenshots
|
||||
- [ ] Done with a real API key, not mocked
|
||||
- [ ] Reports failures honestly
|
||||
|
||||
**`bounty:code`:**
|
||||
- [ ] CI passes (`uv run pytest tools/tests/test_credential_registry.py` for health checks)
|
||||
- [ ] Fix addresses root cause, not symptom
|
||||
- [ ] New test added for bug fixes
|
||||
|
||||
**`bounty:new-tool`:**
|
||||
- [ ] Full implementation: tool + credential spec + tests + README
|
||||
- [ ] `make check && make test` passes
|
||||
- [ ] Registered in `_register_unverified()` (not verified)
|
||||
|
||||
### Rejecting Submissions
|
||||
|
||||
1. Leave specific, constructive feedback
|
||||
2. Request changes (don't close the PR)
|
||||
3. 7 days to address. No response → close PR, unassign bounty
|
||||
|
||||
Never merge low-quality work just to be nice.
|
||||
|
||||
## Core Contributor Promotion
|
||||
|
||||
Core Contributor unlocks monetary rewards. The bar must be high.
|
||||
|
||||
**Promote when:**
|
||||
- Active for **4+ weeks** with contributions across **3+ bounty types**
|
||||
- PRs are consistently clean
|
||||
- At least one maintainer vouches for them
|
||||
|
||||
**How:** Discuss with maintainers → assign role in Discord → announce in `#integrations-announcements` → add to `#bounty-payouts`
|
||||
|
||||
**Don't promote** if they only do easy bounties, have been active < 4 weeks, or show signs of gaming.
|
||||
|
||||
If a Core Contributor is inactive 8+ weeks, reach out privately first, then remove the role if no response.
|
||||
|
||||
## Dollar Values
|
||||
|
||||
Post dollar values in `#bounty-payouts` (Core Contributors only):
|
||||
|
||||
| Bounty Type | Dollar Range |
|
||||
|-------------|-------------|
|
||||
| `bounty:test` | $10–30 |
|
||||
| `bounty:docs` | $10–20 |
|
||||
| `bounty:code` | $20–50 |
|
||||
| `bounty:new-tool` | $50–150 |
|
||||
|
||||
**Payout:** PR merged → verify quality → record in `#bounty-payouts` → process payment.
|
||||
|
||||
XP is always awarded regardless of budget. Money is a bonus layer.
|
||||
|
||||
## Anti-Gaming
|
||||
|
||||
| Pattern | Response |
|
||||
|---------|----------|
|
||||
| Splitting one change across multiple PRs | Reject extras, warn |
|
||||
| AI-generated without verification | Reject, explain why |
|
||||
| Claiming many bounties, completing few | Unassign after 7 days |
|
||||
|
||||
**First offense:** warning. **Second:** 2-week cooldown. **Third:** permanent removal.
|
||||
|
||||
## Keeping It Fresh
|
||||
|
||||
- Aim for 10+ unclaimed bounties at all times
|
||||
- Unassign stale claims (>7 days)
|
||||
- Shoutout exceptional contributions in announcements
|
||||
- Post milestones ("10th tool promoted to verified!")
|
||||
@@ -0,0 +1,99 @@
# Integration Promotion Checklist

Formal criteria for promoting a tool from **unverified** to **verified**. A tool must satisfy every required item before a maintainer moves it from `_register_unverified()` to `_register_verified()` in [tools/__init__.py](../tools/src/aden_tools/tools/__init__.py).

## Checklist

### Code Quality (Required)

- [ ] **`register_tools` function** follows the standard signature pattern from [BUILDING_TOOLS.md](../tools/BUILDING_TOOLS.md)
- [ ] **Error handling** — all tools return `{"error": ...}` dicts instead of raising exceptions (see the sketch after this list)
- [ ] **Credential handling** — graceful fallback when credentials are missing, with an actionable `"help"` message
- [ ] **Input validation** — parameters are validated before making API calls
- [ ] **No hardcoded secrets** — API keys come from the credentials adapter or environment variables only
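
A minimal sketch of the error-handling, credential-fallback, and input-validation conventions above. The tool name, endpoint, and env var here are hypothetical; the real signature pattern is defined in [BUILDING_TOOLS.md](../tools/BUILDING_TOOLS.md).

```python
import httpx  # assumed HTTP client; use whatever the tool already depends on


def example_search(query: str, api_key: str | None) -> dict:
    """Hypothetical tool function illustrating the checklist conventions."""
    if not api_key:
        # Graceful fallback with an actionable "help" message
        return {
            "error": "Missing credential: EXAMPLE_API_KEY is not set.",
            "help": "Create a key at https://example.com/settings/api and export EXAMPLE_API_KEY.",
        }
    if not query.strip():
        # Validate inputs before making any API call
        return {"error": "query must be a non-empty string"}
    try:
        resp = httpx.get(
            "https://api.example.com/search",
            params={"q": query},
            headers={"Authorization": f"Bearer {api_key}"},
        )
        resp.raise_for_status()
        return {"results": resp.json()}
    except httpx.HTTPError as exc:
        # Return errors as dicts; never raise into the agent loop
        return {"error": f"example API request failed: {exc}"}
```
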
### Credential Spec (Required)

- [ ] **CredentialSpec exists** in `tools/src/aden_tools/credentials/{category}.py`
- [ ] **`env_var`** is set and unique (no collisions with other specs)
- [ ] **`tools`** list includes every tool function name registered by this module
- [ ] **`help_url`** points to the page where users get their API key
- [ ] **`description`** is a clear one-liner
- [ ] **`credential_id`** and **`credential_key`** are set for credential store mapping
- [ ] **Spec is merged** into `CREDENTIAL_SPECS` in `credentials/__init__.py`

### Health Check (Required)

- [ ] **`health_check_endpoint`** is set in the CredentialSpec
- [ ] **HealthChecker class** is implemented in `tools/src/aden_tools/credentials/health_check.py`
- [ ] **Checker is registered** in the `HEALTH_CHECKERS` dict
- [ ] **Handles 200** (valid), **401** (invalid/expired), and **429** (rate limited but valid) responses (see the sketch after this list)
- [ ] **Registry tests pass** — `uv run pytest tools/tests/test_credential_registry.py -v`
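
A sketch of the 200/401/429 handling a checker needs. The class shape, return format, and endpoint are illustrative assumptions; the real base class and registration mechanics live in `tools/src/aden_tools/credentials/health_check.py`.

```python
import httpx


class ExampleHealthChecker:
    """Hypothetical checker; mirror the existing classes in health_check.py."""

    endpoint = "https://api.example.com/v1/me"  # the spec's health_check_endpoint

    def check(self, api_key: str) -> dict:
        resp = httpx.get(self.endpoint, headers={"Authorization": f"Bearer {api_key}"})
        if resp.status_code == 200:
            return {"valid": True}
        if resp.status_code == 401:
            return {"valid": False, "reason": "invalid or expired key"}
        if resp.status_code == 429:
            # 429 means the key was accepted but rate limited, so still valid
            return {"valid": True, "reason": "rate limited"}
        return {"valid": False, "reason": f"unexpected status {resp.status_code}"}


# Registration, per the checklist item above (key name is illustrative):
# HEALTH_CHECKERS["example"] = ExampleHealthChecker()
```
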
### Documentation (Required)

- [ ] **README.md** exists in the tool directory, following the [tool README template](templates/tool-readme-template.md)
- [ ] **Setup instructions** — how to get and configure the API key
- [ ] **Tool table** — lists all tool functions with descriptions
- [ ] **Usage examples** — at least one example per tool function
- [ ] **API reference link** — link to the service's API docs

### Testing (Required)

- [ ] **Unit tests exist** in `tools/tests/tools/test_{tool_name}.py`
- [ ] **Tests mock external APIs** — no live API calls in unit tests (see the sketch after this list)
- [ ] **Tests cover happy path** for each tool function
- [ ] **Tests cover error cases** — missing credentials, invalid input, API errors
- [ ] **CI passes** — `make check && make test`
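
A sketch of what "mock external APIs" can look like, reusing the hypothetical `example_search` tool from the Code Quality sketch above; real tests should target the actual tool module.

```python
import httpx

from example_tool import example_search  # hypothetical module from the sketch above


def test_example_search_happy_path(monkeypatch):
    def fake_get(url, **kwargs):
        # Stand-in response: no network traffic in unit tests
        request = httpx.Request("GET", url)
        return httpx.Response(200, json={"hits": []}, request=request)

    monkeypatch.setattr(httpx, "get", fake_get)
    result = example_search("bakeries", api_key="test-key")
    assert result == {"results": {"hits": []}}


def test_example_search_missing_credentials():
    # Error case: a missing credential returns an error dict with a help message
    result = example_search("bakeries", api_key=None)
    assert "error" in result and "help" in result
```
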
### Community Testing (Required)

- [ ] **At least 1 community member** has tested with a real API key
- [ ] **Agent test report submitted** following the [test report template](templates/agent-test-report-template.md)
- [ ] **Tool works in a real agent workflow** (not just isolated function calls)
- [ ] **No blocking issues** reported in the test report

### Optional (Bonus)

- [ ] Multiple community test reports from different testers
- [ ] Rate limit documentation
- [ ] Integration tests with sandboxed API accounts
- [ ] Pagination support for list endpoints
- [ ] Webhook support (if applicable to the service)

## Promotion Process

1. **Contributor opens a PR** that checks off all required items above
2. **PR description** includes links to: the tool README, the health checker, the test report(s)
3. **Maintainer reviews** the checklist — every required item must be verified
4. **Maintainer moves** the tool registration from `_register_unverified()` to `_register_verified()` in `tools/__init__.py`
5. **Maintainer adds the `bounty:code` label** to the PR — this triggers the GitHub Action to award XP via Lurkr and post a Discord notification
6. **Announcement** auto-posted in `#integrations-announcements` on Discord

## Current Status

### Tools Ready for Promotion Testing

The following 55 unverified tools have implementations, credential specs, and unit tests. They need documentation, health checks, and community testing to be promoted:

<details>
<summary>Full list of unverified tools</summary>

airtable, apify, asana, attio, aws_s3, azure_sql, calendly, cloudinary, confluence,
databricks, docker_hub, duckduckgo, gitlab, google_analytics, google_search_console,
google_sheets, greenhouse, huggingface, jira, kafka, langfuse, linear, lusha,
microsoft_graph, mongodb, n8n, notion, obsidian, pagerduty, pinecone, pipedrive,
plaid, powerbi, pushover, quickbooks, reddit, redis, redshift, salesforce, sap,
shopify, snowflake, supabase, terraform, tines, trello, twilio, twitter, vercel,
yahoo_finance, youtube, youtube_transcript, zendesk, zoho_crm, zoom

</details>

### Gap Summary

| Gap | Count | Bounty Type |
|-----|-------|-------------|
| Missing README | ~41 | `bounty:docs` |
| Missing health_check_endpoint | ~40 | `bounty:code` |
| Missing HealthChecker class | ~40 | `bounty:code` |
| No community test report | 55 | `bounty:test` |
@@ -0,0 +1,157 @@
# Integration Bounty Program — Setup Guide

Complete setup from zero to running. Estimated time: 30 minutes.

## Prerequisites

- Admin access to the GitHub repo
- Admin access to the Discord server
- `gh` CLI installed and authenticated

## Step 1: Create GitHub Labels (2 min)

```bash
./scripts/setup-bounty-labels.sh
```

This creates 7 labels: 4 bounty types (`bounty:test`, `bounty:docs`, `bounty:code`, `bounty:new-tool`) and 3 difficulty levels (`difficulty:easy`, `difficulty:medium`, `difficulty:hard`).
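
If you'd rather create them by hand, the script boils down to something like this sketch (colors and descriptions omitted; the script itself is authoritative):

```bash
for label in bounty:test bounty:docs bounty:code bounty:new-tool \
             difficulty:easy difficulty:medium difficulty:hard; do
  gh label create "$label" --force   # --force updates the label if it already exists
done
```
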
## Step 2: Create Discord Channels (3 min)

```
Category: Integrations
  #integrations-announcements (read-only for non-admins)
  #integrations-help

Category: Private
  #bounty-payouts (visible only to Core Contributor role)
```

**Permissions:**

- `#integrations-announcements`: Everyone reads, only bots + admins post
- `#bounty-payouts`: Core Contributor role only

## Step 3: Create Discord Roles (2 min)

Order matters — higher = more prestigious:

| Role | Color | Hoisted | Mentionable |
| ----------------------- | ---------------- | ------- | ----------- |
| Core Contributor | Gold `#F1C40F` | Yes | Yes |
| Open Source Contributor | Purple `#9B59B6` | Yes | No |
| Agent Builder | Green `#2ECC71` | Yes | No |

## Step 4: Install and Configure Lurkr (10 min)

### 4a. Invite Lurkr

Go to https://lurkr.gg/ and invite the bot. Grant the requested permissions.

### 4b. Enable Leveling

In Discord, run:

```
/config toggle option:Leveling System
```

### 4c. Configure XP and Cooldown (Dashboard)

Lurkr configures XP range and cooldown through the web dashboard, not slash commands.

1. Go to https://lurkr.gg/dashboard and select your server
2. Open the **Leveling** category
3. Set **XP range** to min 15, max 25
4. Set **Cooldown** to 60 seconds

### 4d. Configure Channel Settings

Set `#integrations-help` as a leveling channel with a 2x multiplier, and exclude the announcement/payout channels:

1. In the Lurkr dashboard **Leveling** settings, add `#integrations-help` as a leveling channel
2. Set a **channel multiplier** of 2x for `#integrations-help` using `/config set` (channel multiplier option)
3. Do NOT add `#integrations-announcements` or `#bounty-payouts` as leveling channels

### 4e. Configure Role Rewards

Use `/config set` to add role rewards:

1. Set `@Agent Builder` as a role reward at **level 5**
2. Set `@Open Source Contributor` as a role reward at **level 15**

Do NOT auto-assign Core Contributor — that's maintainer-only.

### 4f. Generate Lurkr API Key

1. Go to https://lurkr.gg/ and log in
2. Profile > API settings > Create API Key
3. Select **Read/Write** (not read-only)
4. Copy the key

## Step 5: Create Discord Webhook (2 min)

1. Server Settings > Integrations > Webhooks > New Webhook
2. Name: `Bounty Tracker`, channel: `#integrations-announcements`
3. Copy the webhook URL (you can smoke-test it as shown below)
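
Discord webhooks accept a plain JSON POST, so you can verify yours before wiring it into CI (replace the placeholder URL with the one you copied):

```bash
curl -H "Content-Type: application/json" \
     -d '{"content": "Bounty tracker webhook test"}' \
     "https://discord.com/api/webhooks/<id>/<token>"
```
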
## Step 6: Add GitHub Secrets (3 min)

Repo Settings > Secrets and variables > Actions:

| Secret | Value |
| ---------------------------- | -------------------------- |
| `DISCORD_BOUNTY_WEBHOOK_URL` | Webhook URL from Step 5 |
| `LURKR_API_KEY` | Lurkr API key from Step 4f |
| `LURKR_GUILD_ID` | Your Discord server ID\* |

\*Enable Developer Mode in Discord, right-click the server name > Copy Server ID.

## Step 7: Test the Pipeline (5 min)

```bash
GITHUB_TOKEN=$(gh auth token) \
GITHUB_REPOSITORY_OWNER=aden-hive \
GITHUB_REPOSITORY_NAME=hive \
bun run scripts/bounty-tracker.ts leaderboard
```

Then create a test PR with the `bounty:docs` label, merge it, and verify the Discord notification appears.

## Step 8: Seed the 55-Tool Blitz

Post all bounties at once on launch day (a batch-posting sketch follows the list):

**Documentation (41 issues):** `bounty:docs`, `difficulty:easy`, 20 pts
**Health checks (40 issues):** `bounty:code`, `difficulty:medium`, 30 pts
**Testing (55 issues):** `bounty:test`, `difficulty:medium`, 20 pts
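
One way to batch-post the documentation bounties with `gh`; the title, labels, and body below are illustrative, so adapt them to the issue form and pull the tool names from the list that follows:

```bash
for tool in azure_sql cloudinary confluence; do   # ...extend with the full list below
  gh issue create \
    --title "[Bounty]: Write README for ${tool} (20 pts)" \
    --label "bounty:docs" \
    --label "difficulty:easy" \
    --body "See docs/bounty-program/README.md for how to claim this bounty."
done
```
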
### Tools missing READMEs

```
azure_sql, cloudinary, confluence, databricks, docker_hub, duckduckgo,
google_search_console, google_sheets, greenhouse, jira, kafka, lusha,
mongodb, notion, obsidian, pagerduty, pinecone, pipedrive, plaid,
pushover, quickbooks, redshift, sap, salesforce, shopify, snowflake,
supabase, terraform, tines, trello, twilio, twitter, vercel,
yahoo_finance, zoom, huggingface, langfuse, microsoft_graph, n8n,
powerbi, redis
```

## Verification Checklist

- [ ] Labels exist (`bounty:*` and `difficulty:*`)
- [ ] Discord channels and roles created
- [ ] Lurkr installed, leveling enabled, XP/cooldown configured in dashboard, role rewards set
- [ ] All 3 GitHub secrets added
- [ ] Both workflows enabled (`bounty-completed.yml`, `weekly-leaderboard.yml`)
- [ ] Test PR + merge triggers Discord notification
- [ ] `contributors.yml` exists at repo root

## Troubleshooting

**No Discord message:** Check the `DISCORD_BOUNTY_WEBHOOK_URL` secret and the Action logs.

**Lurkr XP not awarded:** Confirm the API key is Read/Write, the contributor is in `contributors.yml`, and check the Action logs for `Lurkr XP push failed`.

**Role not assigned:** Verify role rewards in the Lurkr dashboard or via `/config set`. Lurkr's role must sit above the roles it assigns in the server hierarchy.
@@ -0,0 +1,90 @@
# Agent Test Report: {tool_name}

<!-- Submit this report as a comment on the bounty issue, or as a file in a PR. -->

## Summary

- **Tool tested:** `{tool_name}`
- **Tester:** @{github_handle}
- **Date:** {YYYY-MM-DD}
- **Verdict:** Pass / Partial / Fail

## Environment

- **OS:** {e.g., macOS 15.2, Ubuntu 24.04}
- **Python:** {e.g., 3.12.1}
- **Hive version:** {commit hash or version}
- **API tier:** {e.g., Free, Pro — relevant for rate limits}

## Credential Setup

- **Auth method:** {API key / OAuth / Bearer token}
- **Health check result:** {Pass / Fail / No health checker available}
- **Setup difficulty:** {Easy / Medium / Hard}
- **Setup notes:** {Any friction, confusing docs, extra steps not documented}

## Agent Configuration

<!-- Describe the agent you built or used to test this tool. -->

```
Agent name: {name}
Tools used: {tool_name}, {any other tools}
Goal: {What the agent was supposed to accomplish}
```

## Test Results

### Tool Functions Tested

| Function | Input | Expected | Actual | Status |
|----------|-------|----------|--------|--------|
| `{function_name}` | {brief input description} | {expected behavior} | {what happened} | Pass/Fail |
| `{function_name}` | {brief input description} | {expected behavior} | {what happened} | Pass/Fail |

### Agent Workflow Test

<!-- Did the agent successfully use this tool to accomplish a task? -->

**Goal:** {What you asked the agent to do}

**Result:** {What actually happened}

**Session ID:** `{session_id if available}`

### Edge Cases Found

<!-- Document any unexpected behavior, errors, or limitations. -->

| Edge Case | Behavior | Severity |
|-----------|----------|----------|
| {e.g., empty query} | {what happened} | Low/Medium/High |
| {e.g., rate limit hit} | {what happened} | Low/Medium/High |

## Issues Found

<!-- List any bugs or problems. Link to new issues if you filed them. -->

- [ ] {Issue description} — {filed as #XXXX / not yet filed}
- [ ] {Issue description}

## Recommendations

<!-- Suggestions for the tool maintainer. -->

- {e.g., "Error message for missing API key should include the help URL"}
- {e.g., "Rate limit handling should retry with backoff"}
- {e.g., "Ready for promotion after health checker is added"}

## Evidence

<!-- Attach or link to logs, screenshots, or recordings. At minimum, include the session ID or key log output. -->

<details>
<summary>Logs</summary>

```
{Paste relevant log output here}
```

</details>
@@ -0,0 +1,71 @@
# {Tool Name} Tool

<!-- One-liner: what this tool does and what it enables agents to do. -->

{Brief description of what the tool does and its primary use case.}

## Setup

```bash
# Required
export {ENV_VAR}=your-api-key
```

**Get your key:**
1. Go to {help_url}
2. {Step to create/generate a key}
3. {Step to copy the key}
4. Set the `{ENV_VAR}` environment variable

Alternatively, configure via the credential store (`CredentialStoreAdapter`).

<!-- If OAuth is supported, add: -->
<!-- **OAuth:** This integration also supports OAuth2 via Aden. -->

## Tools ({count})

| Tool | Description |
|------|-------------|
| `{tool_function_name}` | {What it does} |
| `{tool_function_name}` | {What it does} |

## Usage

### {Action name}

```python
result = {tool_function_name}(
    param="value",
)
# Returns: {brief description of return value}
```

### {Action name}

```python
result = {tool_function_name}(
    param="value",
)
# Returns: {brief description of return value}
```

## Scope

<!-- What this integration covers in its current form. -->

- {Capability 1}
- {Capability 2}
- {Capability 3}

## Rate Limits

<!-- Document known rate limits if applicable. Remove this section if not relevant. -->

| Tier | Limit |
|------|-------|
| Free | {X requests/minute} |
| Paid | {Y requests/minute} |

## API Reference

- [{Service} API Docs]({url})
@@ -37,8 +37,6 @@ Ported from `agent_builder_server.py` lines 3484-3856. Pure filesystem reads —
| Tool | Purpose |
|------|---------|
| `list_agent_sessions(agent_name, status?, limit?)` | List sessions, filterable by status |
| `get_agent_session_state(agent_name, session_id)` | Full session state (memory excluded to prevent context bloat) |
| `get_agent_session_memory(agent_name, session_id, key?)` | Read memory contents from a session |
| `list_agent_checkpoints(agent_name, session_id)` | List checkpoints for debugging |
| `get_agent_checkpoint(agent_name, session_id, checkpoint_id?)` | Load a checkpoint's full state |
@@ -67,7 +65,7 @@ Add all 8 tools after the existing `undo_changes` tool:

# ── Meta-agent: Session & checkpoint inspection ───────────────
# _resolve_hive_agent_path(), _read_session_json(), _scan_agent_sessions(), _truncate_value()
# list_agent_sessions(), get_agent_session_state(), get_agent_session_memory()
# list_agent_sessions(), list_agent_checkpoints(), get_agent_checkpoint()
# list_agent_checkpoints(), get_agent_checkpoint()

# ── Meta-agent: Test execution ────────────────────────────────
@@ -43,7 +43,7 @@ Dedicated tool server providing:
- **File I/O**: `read_file` (with line numbers, offset/limit), `write_file` (auto-mkdir), `edit_file` (9-strategy fuzzy matching ported from opencode), `list_directory`, `search_files` (regex)
- **Shell**: `run_command` (timeout, cwd, output truncation)
- **Git**: `undo_changes` (snapshot-based rollback)
- **Meta-agent**: `discover_mcp_tools`, `list_agents`, `list_agent_sessions`, `get_agent_session_state`, `get_agent_session_memory`, `list_agent_checkpoints`, `get_agent_checkpoint`, `run_agent_tests`
- **Meta-agent**: `discover_mcp_tools`, `list_agents`, `list_agent_sessions`, `list_agent_checkpoints`, `get_agent_checkpoint`, `run_agent_tests`

All file operations sandboxed to a configurable project root.
@@ -16,7 +16,7 @@ The agent is deeply integrated with the framework: it can discover available MCP
- **`reference/`** — Framework guide, file templates, and anti-patterns docs embedded as agent reference material

### New: Coder Tools MCP Server (`tools/coder_tools_server.py`)
- 1500-line MCP server providing 15 tools: `read_file`, `write_file`, `edit_file` (with opencode-style 9-strategy fuzzy matching), `list_directory`, `search_files`, `run_command`, `undo_changes`, `discover_mcp_tools`, `list_agents`, `list_agent_sessions`, `get_agent_session_state`, `get_agent_session_memory`, `list_agent_checkpoints`, `get_agent_checkpoint`, `run_agent_tests`
- 1500-line MCP server providing 13 tools: `read_file`, `write_file`, `edit_file` (with opencode-style 9-strategy fuzzy matching), `list_directory`, `search_files`, `run_command`, `undo_changes`, `discover_mcp_tools`, `list_agents`, `list_agent_sessions`, `list_agent_checkpoints`, `get_agent_checkpoint`, `run_agent_tests`
- Path-scoped security: all file operations sandboxed to project root
- Git-based undo: automatic snapshots before writes with `undo_changes` rollback
@@ -145,7 +145,7 @@ Implement the core execution engine where every Agent operates as an isolated, a
- [x] SharedState manager (runtime/shared_state.py)
- [x] Session-based storage (storage/session_store.py)
- [x] Isolation levels: ISOLATED, SHARED, SYNCHRONIZED
- [ ] **Default Monitoring Hooks**
- [x] **Default Monitoring Hooks**
  - [ ] Performance metrics collection
  - [ ] Resource usage tracking
  - [ ] Health check endpoints
@@ -590,7 +590,7 @@ Write the Quick Start guide, detailed tool usage documentation, and set up the M
- [x] README with examples
- [x] Contributing guidelines
- [x] GitHub Page setup
- [ ] **Tool Usage Documentation**
- [x] **Tool Usage Documentation**
  - [ ] Comprehensive tool documentation
  - [ ] Tool integration examples
  - [ ] Best practices guide
@@ -643,7 +643,7 @@ Expose basic REST/WebSocket endpoints for external control (Start, Stop, Pause,
- [x] Load/unload/start/restart in AgentRuntime
- [x] State persistence
- [x] Recovery mechanisms
- [ ] **REST API Endpoints**
- [x] **REST API Endpoints**
  - [ ] Start endpoint for agent execution
  - [ ] Stop endpoint for graceful shutdown
  - [ ] Pause endpoint for execution suspension
@@ -661,7 +661,7 @@ Implement automated test execution, agent version control, and mandatory test-pa
- [x] Test framework with pytest integration (testing/)
- [x] Test result reporting
- [x] Test CLI commands (test-run, test-debug, etc.)
- [ ] **Automated Testing Pipeline**
- [x] **Automated Testing Pipeline**
  - [ ] CI integration (GitHub Actions, etc.)
  - [ ] Mandatory test-passing gates
  - [ ] Coverage reporting
@@ -873,7 +873,7 @@ Build native frontend configurations to easily connect Open Hive's backend to lo
- [ ] Node.js runtime support
- [ ] Browser runtime support
- [ ] **Platform Compatibility**
  - [ ] Windows support improvements
  - [x] Windows support improvements
  - [ ] macOS optimization
  - [ ] Linux distribution support
@@ -0,0 +1,78 @@
# Tools

Hive agents interact with external services through **tools** — functions exposed via MCP (Model Context Protocol) servers. The main tool server lives at `tools/mcp_server.py` and registers integrations from the `aden_tools` package.

## Verified vs Unverified

Tools are split into two tiers:

| Tier | Description | Default |
|------|-------------|---------|
| **Verified** | Stable integrations tested on main. Always loaded. | On |
| **Unverified** | New or community integrations pending full review. | Off |

Verified tools include core capabilities like web search, GitHub, email, file system operations, and security scanners. Unverified tools cover newer integrations like Jira, Notion, Salesforce, Snowflake, and others that are functional but haven't completed the full review process.

## Enabling Unverified Tools

Set the `INCLUDE_UNVERIFIED_TOOLS` environment variable to opt in:

```bash
# Shell
INCLUDE_UNVERIFIED_TOOLS=true uv run python tools/mcp_server.py --stdio
```

### In `mcp_servers.json`

When configuring an agent's MCP server, pass the env var in the server config:

```json
{
  "servers": [
    {
      "name": "tools",
      "transport": "stdio",
      "command": "uv",
      "args": ["run", "python", "tools/mcp_server.py", "--stdio"],
      "env": {
        "INCLUDE_UNVERIFIED_TOOLS": "true"
      }
    }
  ]
}
```

### In Docker

```bash
docker run -e INCLUDE_UNVERIFIED_TOOLS=true ...
```

### In Python

If calling `register_all_tools` directly (e.g., in a custom server):

```python
from aden_tools.tools import register_all_tools

register_all_tools(mcp, credentials=credentials, include_unverified=True)
```

Accepted values: `true`, `1`, `yes` (case-insensitive). Any other value, or leaving it unset, means off.
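
In other words, one plausible reading of that rule (the actual parsing lives inside `aden_tools` and may differ in detail):

```python
import os


def include_unverified() -> bool:
    # true/1/yes enable, case-insensitively; anything else (or unset) is off
    return os.environ.get("INCLUDE_UNVERIFIED_TOOLS", "").strip().lower() in {"true", "1", "yes"}
```
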
## Listing Available Tools

The MCP server logs registered tools at startup (HTTP mode):

```bash
uv run python tools/mcp_server.py
# [MCP] Registered 47 tools: [...]
```

In STDIO mode, logs go to stderr to keep stdout clean for JSON-RPC.

## Adding a New Tool

New tool integrations are added to `tools/src/aden_tools/tools/` and registered in `_register_unverified()` in `tools/src/aden_tools/tools/__init__.py`. Once reviewed and stabilized, they graduate to `_register_verified()`, as sketched below.
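
A hypothetical picture of that split: the module name below is illustrative, and the concrete `register_tools` signature is defined in the developer guide.

```python
def _register_unverified(mcp, credentials):
    from . import my_new_tool  # new integrations start here
    my_new_tool.register_tools(mcp, credentials=credentials)


def _register_verified(mcp, credentials):
    ...  # reviewed, stabilized integrations are moved here on promotion
```
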
See the [developer guide](developer-guide.md) for the full contribution workflow.
@@ -1,59 +0,0 @@
# TUI Dashboard Guide

## Launching the TUI

There are two ways to launch the TUI dashboard:

```bash
# Browse and select an agent interactively
hive tui

# Launch the TUI for a specific agent
hive run exports/my_agent --tui
```

`hive tui` scans both `exports/` and `examples/templates/` for available agents, then presents a selection menu.

## Dashboard Panels

The TUI dashboard is divided into four areas:

- **Status Bar** - Shows the current agent name, execution state, and model in use
- **Graph Overview** - Live visualization of the agent's node graph with the active node highlighted
- **Log Pane** - Scrollable event log streaming node transitions, LLM calls, and tool outputs
- **Chat REPL** - Input area for interacting with client-facing nodes (`ask_user()` prompts appear here)

## Keybindings

| Key | Action |
|---------------|-----------------------|
| `Tab` | Next panel |
| `Shift+Tab` | Previous panel |
| `Ctrl+S` | Save SVG screenshot |
| `Ctrl+O` | Command palette |
| `Q` | Quit |

## Panel Cycle Order

`Tab` cycles: **Log Pane → Graph View → Chat Input**

## Text Selection

Textual apps capture the mouse, so normal click-drag selection won't work by default. To select and copy text from any pane:

1. **Hold `Shift`** while clicking and dragging — this bypasses Textual's mouse capture and lets your terminal handle selection natively.
2. Copy with your terminal's shortcut (`Cmd+C` on macOS, `Ctrl+Shift+C` on most Linux terminals).

## Log Pane Scrolling

The log pane uses `auto_scroll=False`. New output only scrolls to the bottom when you are already at the bottom of the log. If you've scrolled up to read earlier output, it stays in place.

## Screenshots

`Ctrl+S` saves an SVG screenshot to the `screenshots/` directory with a timestamped filename. Open the SVG in any browser to view it.

## Tips

- Use `--mock` mode to explore agent execution without spending API credits: `hive run exports/my_agent --tui --mock`
- Override the default model with `--model`: `hive run exports/my_agent --model gpt-4o`
- Screenshots are saved as SVG files to `screenshots/` and can be opened in any browser
@@ -191,7 +191,7 @@ Both events are handled in the cross-graph filter (events from non-active graphs

## Known Gaps

**Gap 1 — Resolved.** The queen is now the full `HiveCoderAgent` graph (not a minimal hand-assembled subset). `_load_judge_and_queen` calls `HiveCoderAgent._setup(mock_mode=True)` to load hive-tools MCP, then merges those tools into the worker runtime alongside monitoring tools. When the operator connects via Ctrl+Q, they get `coder_node` with `read_file`, `write_file`, `run_command`, `restart_agent`, `get_agent_session_state`, and all other hive-tools. The `ticket_triage_node` still handles auto-triage on ticket events. `self._queen_agent` is held on the TUI instance to keep the MCP process alive.
**Gap 1 — Resolved.** The queen is now the full `HiveCoderAgent` graph (not a minimal hand-assembled subset). `_load_judge_and_queen` calls `HiveCoderAgent._setup(mock_mode=True)` to load hive-tools MCP, then merges those tools into the worker runtime alongside monitoring tools. When the operator connects via Ctrl+Q, they get `coder_node` with `read_file`, `write_file`, `run_command`, `restart_agent`, and all other hive-tools. The `ticket_triage_node` still handles auto-triage on ticket events. `self._queen_agent` is held on the TUI instance to keep the MCP process alive.

**Gap 2 — LLM-hang detection latency.**
If the worker's LLM call hangs (API never returns), no new log entries are written. The judge detects this on its next timer tick (≤2 min). Bounded latency, not zero.
@@ -43,4 +43,5 @@ uv run python -m exports.my_research_agent --input '{"topic": "..."}'
| Template | Description |
|----------|-------------|
| [deep_research_agent](deep_research_agent/) | Interactive research agent that searches diverse sources, evaluates findings with user checkpoints, and produces a cited HTML report |
| [local_business_extractor](local_business_extractor/) | Finds local businesses on Google Maps, scrapes contact details, and syncs to Google Sheets |
| [tech_news_reporter](tech_news_reporter/) | Researches the latest technology and AI news from the web and produces a well-organized report |
@@ -0,0 +1,31 @@
# Local Business Extractor

Finds local businesses on Google Maps, scrapes their websites for contact details, and syncs everything to a Google Sheets spreadsheet.

## Nodes

| Node | Type | Description |
|------|------|-------------|
| `map-search-worker` | `gcu` (browser) | Searches Google Maps and extracts business names + website URLs |
| `extract-contacts` | `event_loop` | Scrapes business websites for emails, phone, hours, reviews, address |
| `sheets-sync` | `event_loop` | Appends extracted data to a Google Sheets spreadsheet |

## Flow

```
extract-contacts → sheets-sync → (loop back to extract-contacts)
        ↓
map-search-worker (sub-agent)
```

## Tools used

- **Exa** — `exa_search`, `exa_get_contents` for web scraping
- **Google Sheets** — `google_sheets_create_spreadsheet`, `google_sheets_update_values`, `google_sheets_append_values`, `google_sheets_get_values`
- **Browser (GCU)** — automated Google Maps browsing

## Running

```bash
uv run python -m examples.templates.local_business_extractor run --query "bakeries in San Francisco"
```
@@ -0,0 +1,34 @@
"""Local Business Extractor package."""

from .agent import (
    LocalBusinessExtractor,
    default_agent,
    goal,
    nodes,
    edges,
    entry_node,
    entry_points,
    pause_nodes,
    terminal_nodes,
    conversation_mode,
    identity_prompt,
    loop_config,
)
from .config import default_config, metadata

__all__ = [
    "LocalBusinessExtractor",
    "default_agent",
    "goal",
    "nodes",
    "edges",
    "entry_node",
    "entry_points",
    "pause_nodes",
    "terminal_nodes",
    "conversation_mode",
    "identity_prompt",
    "loop_config",
    "default_config",
    "metadata",
]
@@ -0,0 +1,146 @@
"""
CLI entry point for Local Business Extractor.
"""

import asyncio
import json
import logging
import sys

import click

from .agent import default_agent, LocalBusinessExtractor


def setup_logging(verbose=False, debug=False):
    """Configure logging for execution visibility."""
    if debug:
        level, fmt = logging.DEBUG, "%(asctime)s %(name)s: %(message)s"
    elif verbose:
        level, fmt = logging.INFO, "%(message)s"
    else:
        level, fmt = logging.WARNING, "%(levelname)s: %(message)s"
    logging.basicConfig(level=level, format=fmt, stream=sys.stderr)
    logging.getLogger("framework").setLevel(level)


@click.group()
@click.version_option(version="1.0.0")
def cli():
    """Local Business Extractor - Find businesses, extract contacts, sync to Sheets."""
    pass


@cli.command()
@click.option(
    "--query",
    "-q",
    type=str,
    required=True,
    help="Search query (e.g. 'bakeries in San Francisco')",
)
@click.option("--quiet", is_flag=True, help="Only output result JSON")
@click.option("--verbose", "-v", is_flag=True, help="Show execution details")
@click.option("--debug", is_flag=True, help="Show debug logging")
def run(query, quiet, verbose, debug):
    """Extract businesses matching a search query."""
    if not quiet:
        setup_logging(verbose=verbose, debug=debug)

    context = {"user_request": query}

    result = asyncio.run(default_agent.run(context))

    output_data = {
        "success": result.success,
        "steps_executed": result.steps_executed,
        "output": result.output,
    }
    if result.error:
        output_data["error"] = result.error

    click.echo(json.dumps(output_data, indent=2, default=str))
    sys.exit(0 if result.success else 1)


@cli.command()
@click.option("--json", "output_json", is_flag=True)
def info(output_json):
    """Show agent information."""
    info_data = default_agent.info()
    if output_json:
        click.echo(json.dumps(info_data, indent=2))
    else:
        click.echo(f"Agent: {info_data['name']}")
        click.echo(f"Version: {info_data['version']}")
        click.echo(f"Description: {info_data['description']}")
        click.echo(f"\nNodes: {', '.join(info_data['nodes'])}")
        click.echo(f"Entry: {info_data['entry_node']}")
        click.echo(f"Terminal: {', '.join(info_data['terminal_nodes'])}")


@cli.command()
def validate():
    """Validate agent structure."""
    validation = default_agent.validate()
    if validation["valid"]:
        click.echo("Agent is valid")
        if validation["warnings"]:
            for warning in validation["warnings"]:
                click.echo(f"  WARNING: {warning}")
    else:
        click.echo("Agent has errors:")
        for error in validation["errors"]:
            click.echo(f"  ERROR: {error}")
    sys.exit(0 if validation["valid"] else 1)


@cli.command()
@click.option("--verbose", "-v", is_flag=True)
def shell(verbose):
    """Interactive session (CLI)."""
    asyncio.run(_interactive_shell(verbose))


async def _interactive_shell(verbose=False):
    """Async interactive shell."""
    setup_logging(verbose=verbose)

    click.echo("=== Local Business Extractor ===")
    click.echo("Enter a search query (or 'quit' to exit):\n")

    agent = LocalBusinessExtractor()
    await agent.start()

    try:
        while True:
            try:
                query = await asyncio.get_event_loop().run_in_executor(
                    None, input, "Query> "
                )
                if query.lower() in ["quit", "exit", "q"]:
                    click.echo("Goodbye!")
                    break

                if not query.strip():
                    continue

                click.echo("\nExtracting...\n")

                result = await agent.run({"user_request": query})

                if result.success:
                    click.echo("\nExtraction complete\n")
                else:
                    click.echo(f"\nExtraction failed: {result.error}\n")

            except KeyboardInterrupt:
                click.echo("\nGoodbye!")
                break
            except Exception as e:
                click.echo(f"Error: {e}", err=True)
    finally:
        await agent.stop()


if __name__ == "__main__":
    cli()
@@ -0,0 +1,205 @@
"""Agent graph construction for Local Business Extractor."""

from pathlib import Path

from framework.graph import EdgeSpec, EdgeCondition, Goal, SuccessCriterion, Constraint
from framework.graph.edge import GraphSpec
from framework.graph.executor import ExecutionResult
from framework.graph.checkpoint_config import CheckpointConfig
from framework.llm import LiteLLMProvider
from framework.runner.tool_registry import ToolRegistry
from framework.runtime.agent_runtime import create_agent_runtime
from framework.runtime.execution_stream import EntryPointSpec

from .config import default_config, metadata
from .nodes import map_search_gcu, extract_contacts_node, sheets_sync_node

goal = Goal(
    id="local-business-extraction",
    name="Local Business Extraction",
    description="Find local businesses on Maps, extract contacts, and sync to Google Sheets.",
    success_criteria=[
        SuccessCriterion(
            id="sc-1",
            description="Extract business details from Maps",
            metric="count",
            target="5",
            weight=0.5,
        ),
        SuccessCriterion(
            id="sc-2",
            description="Sync data to Google Sheets",
            metric="success_rate",
            target="1.0",
            weight=0.5,
        ),
    ],
    constraints=[
        Constraint(
            id="c-1",
            description="Must verify website presence before scraping",
            constraint_type="hard",
            category="quality",
        ),
    ],
)

nodes = [map_search_gcu, extract_contacts_node, sheets_sync_node]

edges = [
    EdgeSpec(
        id="extract-to-sheets",
        source="extract-contacts",
        target="sheets-sync",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    # Loop back for new tasks
    EdgeSpec(
        id="sheets-to-extract",
        source="sheets-sync",
        target="extract-contacts",
        condition=EdgeCondition.ALWAYS,
        priority=1,
    ),
]

entry_node = "extract-contacts"
entry_points = {"start": "extract-contacts"}
pause_nodes = []
terminal_nodes = []

conversation_mode = "continuous"
identity_prompt = "You are a lead generation specialist focused on local businesses."
loop_config = {
    "max_iterations": 100,
    "max_tool_calls_per_turn": 30,
    "max_history_tokens": 32000,
}


class LocalBusinessExtractor:
    def __init__(self, config=None):
        self.config = config or default_config
        self.goal = goal
        self.nodes = nodes
        self.edges = edges
        self.entry_node = entry_node
        self.entry_points = entry_points
        self.pause_nodes = pause_nodes
        self.terminal_nodes = terminal_nodes
        self._graph = None
        self._agent_runtime = None
        self._tool_registry = None
        self._storage_path = None

    def _build_graph(self):
        return GraphSpec(
            id="local-business-extractor-graph",
            goal_id=self.goal.id,
            version="1.0.0",
            entry_node=self.entry_node,
            entry_points=self.entry_points,
            terminal_nodes=self.terminal_nodes,
            pause_nodes=self.pause_nodes,
            nodes=self.nodes,
            edges=self.edges,
            default_model=self.config.model,
            max_tokens=self.config.max_tokens,
            loop_config=loop_config,
            conversation_mode=conversation_mode,
            identity_prompt=identity_prompt,
        )

    def _setup(self):
        self._storage_path = (
            Path.home() / ".hive" / "agents" / "local_business_extractor"
        )
        self._storage_path.mkdir(parents=True, exist_ok=True)
        self._tool_registry = ToolRegistry()
        mcp_config = Path(__file__).parent / "mcp_servers.json"
        if mcp_config.exists():
            self._tool_registry.load_mcp_config(mcp_config)
        llm = LiteLLMProvider(
            model=self.config.model,
            api_key=self.config.api_key,
            api_base=self.config.api_base,
        )
        tools = list(self._tool_registry.get_tools().values())
        tool_executor = self._tool_registry.get_executor()
        self._graph = self._build_graph()
        self._agent_runtime = create_agent_runtime(
            graph=self._graph,
            goal=self.goal,
            storage_path=self._storage_path,
            entry_points=[
                EntryPointSpec(
                    id="default",
                    name="Default",
                    entry_node=self.entry_node,
                    trigger_type="manual",
                    isolation_level="shared",
                )
            ],
            llm=llm,
            tools=tools,
            tool_executor=tool_executor,
            checkpoint_config=CheckpointConfig(
                enabled=True, checkpoint_on_node_complete=True
            ),
        )

    async def start(self):
        if self._agent_runtime is None:
            self._setup()
        if not self._agent_runtime.is_running:
            await self._agent_runtime.start()

    async def stop(self):
        if self._agent_runtime and self._agent_runtime.is_running:
            await self._agent_runtime.stop()
        self._agent_runtime = None

    async def run(self, context, session_state=None):
        await self.start()
        try:
            result = await self._agent_runtime.trigger_and_wait(
                "default", context, session_state=session_state
            )
            return result or ExecutionResult(success=False, error="Execution timeout")
        finally:
            await self.stop()

    def info(self):
        """Get agent information."""
        return {
            "name": metadata.name,
            "version": metadata.version,
            "description": metadata.description,
            "goal": {
                "name": self.goal.name,
                "description": self.goal.description,
            },
            "nodes": [n.id for n in self.nodes],
            "edges": [e.id for e in self.edges],
            "entry_node": self.entry_node,
            "entry_points": self.entry_points,
            "pause_nodes": self.pause_nodes,
            "terminal_nodes": self.terminal_nodes,
        }

    def validate(self):
        """Validate agent structure."""
        errors = []
        warnings = []
        node_ids = {n.id for n in self.nodes}
        for edge in self.edges:
            if edge.source not in node_ids:
                errors.append(f"Edge {edge.id}: source '{edge.source}' not found")
            if edge.target not in node_ids:
                errors.append(f"Edge {edge.id}: target '{edge.target}' not found")
        if self.entry_node not in node_ids:
            errors.append(f"Entry node '{self.entry_node}' not found")
        return {"valid": len(errors) == 0, "errors": errors, "warnings": warnings}


default_agent = LocalBusinessExtractor()
@@ -0,0 +1,21 @@
"""Runtime configuration."""

from dataclasses import dataclass

from framework.config import RuntimeConfig

default_config = RuntimeConfig()


@dataclass
class AgentMetadata:
    name: str = "Local Business Extractor"
    version: str = "1.0.0"
    description: str = (
        "Extracts local businesses from Google Maps, scrapes contact details, "
        "and syncs the results to Google Sheets."
    )
    intro_message: str = "I'm ready to extract business data. What should I search for?"


metadata = AgentMetadata()
@@ -0,0 +1,14 @@
{
  "hive-tools": {
    "transport": "stdio",
    "command": "uv",
    "args": ["run", "python", "mcp_server.py", "--stdio"],
    "cwd": "../../../tools"
  },
  "gcu-tools": {
    "transport": "stdio",
    "command": "uv",
    "args": ["run", "python", "-m", "gcu.server", "--stdio"],
    "cwd": "../../../tools"
  }
}
@@ -0,0 +1,86 @@
"""Node definitions for Local Business Extractor."""

from framework.graph import NodeSpec

# GCU Subagent for Google Maps
map_search_gcu = NodeSpec(
    id="map-search-worker",
    name="Maps Browser Worker",
    description="Browser subagent that searches Google Maps and extracts business links.",
    node_type="gcu",
    client_facing=False,
    max_node_visits=1,
    input_keys=["query"],
    output_keys=["business_list"],
    tools=[],  # Auto-populated with browser tools
    system_prompt="""\
You are a browser agent. Your job: Search Google Maps for the provided query and extract business names and website URLs.

## Workflow
1. browser_start
2. browser_open(url="https://www.google.com/maps")
3. Use browser_type or browser_click to search for the "query" in memory.
4. browser_wait(seconds=3)
5. browser_snapshot to find the list of results.
6. For each relevant result, extract:
   - Name of the business
   - Website URL (look for the website icon/link)
7. set_output("business_list", [{"name": "...", "website": "..."}, ...])

## Constraints
- Extract at least 5-10 businesses if possible.
- If you see a "Website" button, extract that URL specifically.
""",
)

# Processing Node: Scrape & Prepare
extract_contacts_node = NodeSpec(
    id="extract-contacts",
    name="Extract Business Details",
    description="Scrapes business websites and Maps for comprehensive business details.",
    node_type="event_loop",
    sub_agents=["map-search-worker"],
    input_keys=["user_request"],
    output_keys=["business_data"],
    success_criteria="Comprehensive business details (reviews, hours, contacts) extracted.",
    system_prompt="""\
1. Call delegate_to_sub_agent(agent_id="map-search-worker", task=user_request)
2. Receive "business_list" from memory.
3. For each business in the list:
   - Use exa_get_contents or exa_search to find:
     - Contact emails and phone numbers.
     - Business hours.
     - Customer reviews or ratings summary.
     - Physical address.
4. Format the data into a comprehensive report for each business.
5. set_output("business_data", enriched_business_list)
""",
    tools=["exa_get_contents", "exa_search"],
)

# Google Sheets Sync Node
sheets_sync_node = NodeSpec(
    id="sheets-sync",
    name="Google Sheets Sync",
    description="Appends the extracted business data to a Google Sheets spreadsheet.",
    node_type="event_loop",
    input_keys=["business_data"],
    output_keys=["spreadsheet_id"],
    success_criteria="Data successfully synced to Google Sheets.",
    system_prompt="""\
1. Check memory for "spreadsheet_id". If not set, create a new spreadsheet:
   - Use google_sheets_create_spreadsheet(title="Comprehensive Business Leads")
   - Save the spreadsheet ID with set_output("spreadsheet_id", id)
2. If the spreadsheet is new, write the header row:
   - Use google_sheets_update_values(spreadsheet_id=id, range_name="Sheet1!A1:G1", values=[["Name", "Website", "Email", "Phone", "Address", "Hours", "Reviews"]])
3. For each business in "business_data", append a row:
   - Use google_sheets_append_values(spreadsheet_id=id, range_name="Sheet1!A:G", values=[[name, website, email, phone, address, hours, reviews]])
4. set_output("spreadsheet_id", id)
""",
    tools=[
        "google_sheets_create_spreadsheet",
        "google_sheets_update_values",
        "google_sheets_append_values",
        "google_sheets_get_values",
    ],
)
@@ -0,0 +1,34 @@
"""Meeting Scheduler — Find available times on your calendar and book meetings."""

from .agent import (
    MeetingScheduler,
    default_agent,
    goal,
    nodes,
    edges,
    entry_node,
    entry_points,
    pause_nodes,
    terminal_nodes,
    conversation_mode,
    identity_prompt,
    loop_config,
)
from .config import default_config, metadata

__all__ = [
    "MeetingScheduler",
    "default_agent",
    "goal",
    "nodes",
    "edges",
    "entry_node",
    "entry_points",
    "pause_nodes",
    "terminal_nodes",
    "conversation_mode",
    "identity_prompt",
    "loop_config",
    "default_config",
    "metadata",
]
@@ -0,0 +1,131 @@
"""CLI entry point for Meeting Scheduler."""

import asyncio
import json
import logging
import sys

import click

from .agent import default_agent, MeetingScheduler


def setup_logging(verbose=False, debug=False):
    if debug:
        level, fmt = logging.DEBUG, "%(asctime)s %(name)s: %(message)s"
    elif verbose:
        level, fmt = logging.INFO, "%(message)s"
    else:
        level, fmt = logging.WARNING, "%(levelname)s: %(message)s"
    logging.basicConfig(level=level, format=fmt, stream=sys.stderr)


@click.group()
@click.version_option(version="1.0.0")
def cli():
    """Meeting Scheduler — Find available times on your calendar and book meetings."""
    pass


@cli.command()
@click.option("--attendee", "-a", required=True, help="Attendee email address")
@click.option(
    "--duration", "-d", type=int, required=True, help="Meeting duration in minutes"
)
@click.option("--title", "-t", required=True, help="Meeting title")
@click.option("--verbose", "-v", is_flag=True)
def run(attendee, duration, title, verbose):
    """Execute the scheduler."""
    setup_logging(verbose=verbose)
    result = asyncio.run(
        default_agent.run(
            {
                "attendee_email": attendee,
                "meeting_duration_minutes": str(duration),
                "meeting_title": title,
            }
        )
    )
    click.echo(
        json.dumps(
            {"success": result.success, "output": result.output}, indent=2, default=str
        )
    )
    sys.exit(0 if result.success else 1)


@cli.command()
def tui():
    """Launch TUI dashboard."""
    from pathlib import Path

    from framework.tui.app import AdenTUI
    from framework.llm import LiteLLMProvider
    from framework.runner.tool_registry import ToolRegistry
    from framework.runtime.agent_runtime import create_agent_runtime
    from framework.runtime.execution_stream import EntryPointSpec

    async def run_tui():
        agent = MeetingScheduler()
        agent._tool_registry = ToolRegistry()
        storage = Path.home() / ".hive" / "agents" / "meeting_scheduler"
        storage.mkdir(parents=True, exist_ok=True)
        mcp_cfg = Path(__file__).parent / "mcp_servers.json"
        if mcp_cfg.exists():
            agent._tool_registry.load_mcp_config(mcp_cfg)
        llm = LiteLLMProvider(
            model=agent.config.model,
            api_key=agent.config.api_key,
            api_base=agent.config.api_base,
        )
        runtime = create_agent_runtime(
            graph=agent._build_graph(),
            goal=agent.goal,
            storage_path=storage,
            entry_points=[
                EntryPointSpec(
                    id="start",
                    name="Start",
                    entry_node="intake",
                    trigger_type="manual",
                    isolation_level="isolated",
                )
            ],
            llm=llm,
            tools=list(agent._tool_registry.get_tools().values()),
            tool_executor=agent._tool_registry.get_executor(),
        )
        await runtime.start()
        try:
            app = AdenTUI(runtime)
            await app.run_async()
        finally:
            await runtime.stop()

    asyncio.run(run_tui())


@cli.command()
def info():
    """Show agent info."""
    data = default_agent.info()
    click.echo(
        f"Agent: {data['name']}\nVersion: {data['version']}\nDescription: {data['description']}"
    )
    click.echo(
        f"Nodes: {', '.join(data['nodes'])}\nClient-facing: {', '.join(data['client_facing_nodes'])}"
    )


@cli.command()
def validate():
    """Validate agent structure."""
    v = default_agent.validate()
    if v["valid"]:
        click.echo("Agent is valid")
    else:
        click.echo("Errors:")
        for e in v["errors"]:
            click.echo(f"  {e}")
    sys.exit(0 if v["valid"] else 1)


if __name__ == "__main__":
    cli()
@@ -0,0 +1,257 @@
"""Agent graph construction for Meeting Scheduler."""

from pathlib import Path

from framework.graph import EdgeSpec, EdgeCondition, Goal, SuccessCriterion, Constraint
from framework.graph.edge import GraphSpec
from framework.graph.executor import ExecutionResult
from framework.graph.checkpoint_config import CheckpointConfig
from framework.llm import LiteLLMProvider
from framework.runner.tool_registry import ToolRegistry
from framework.runtime.agent_runtime import create_agent_runtime
from framework.runtime.execution_stream import EntryPointSpec

from .config import default_config, metadata
from .nodes import intake_node, schedule_node, confirm_node

# Goal definition
goal = Goal(
    id="meeting-scheduler-goal",
    name="Schedule Meetings",
    description="Check calendar availability, find optimal meeting times, record meetings, and send reminders.",
    success_criteria=[
        SuccessCriterion(
            id="sc-1",
            description="Meeting time found within requested duration",
            metric="calendar_availability",
            target="success",
            weight=0.35,
        ),
        SuccessCriterion(
            id="sc-2",
            description="Meeting recorded in spreadsheet accurately",
            metric="data_persistence",
            target="recorded",
            weight=0.30,
        ),
        SuccessCriterion(
            id="sc-3",
            description="Attendee email reminder sent",
            metric="communication",
            target="sent",
            weight=0.25,
        ),
        SuccessCriterion(
            id="sc-4",
            description="User confirms meeting details",
            metric="user_acknowledgment",
            target="confirmed",
            weight=0.10,
        ),
    ],
    constraints=[
        Constraint(
            id="c-1",
            description="Must use Google Calendar API for availability check",
            constraint_type="hard",
            category="functional",
        ),
        Constraint(
            id="c-2",
            description="Meeting duration must match requested time",
            constraint_type="hard",
            category="accuracy",
        ),
        Constraint(
            id="c-3",
            description="Spreadsheet record must include date, time, attendee, title",
            constraint_type="hard",
            category="quality",
        ),
    ],
)

# Node list
nodes = [intake_node, schedule_node, confirm_node]

# Edge definitions
edges = [
    EdgeSpec(
        id="intake-to-schedule",
        source="intake",
        target="schedule",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    EdgeSpec(
        id="schedule-to-confirm",
        source="schedule",
        target="confirm",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    # Loop back for another booking
    EdgeSpec(
        id="confirm-to-intake",
        source="confirm",
        target="intake",
        condition=EdgeCondition.CONDITIONAL,
        condition_expr="str(next_action).lower() == 'another'",
        priority=1,
    ),
]

# Graph configuration
entry_node = "intake"
entry_points = {"start": "intake"}
pause_nodes = []
terminal_nodes = []  # Forever-alive

# Module-level vars read by AgentRunner.load()
conversation_mode = "continuous"
identity_prompt = "You are a helpful meeting scheduler assistant that manages calendar availability and sends confirmations."
loop_config = {
    "max_iterations": 100,
    "max_tool_calls_per_turn": 20,
    "max_history_tokens": 32000,
}


class MeetingScheduler:
    def __init__(self, config=None):
        self.config = config or default_config
        self.goal = goal
        self.nodes = nodes
        self.edges = edges
        self.entry_node = entry_node
        self.entry_points = entry_points
        self.pause_nodes = pause_nodes
        self.terminal_nodes = terminal_nodes
        self._graph = None
        self._agent_runtime = None
        self._tool_registry = None
        self._storage_path = None

    def _build_graph(self):
        return GraphSpec(
            id="meeting-scheduler-graph",
            goal_id=self.goal.id,
            version="1.0.0",
            entry_node=self.entry_node,
            entry_points=self.entry_points,
            terminal_nodes=self.terminal_nodes,
            pause_nodes=self.pause_nodes,
            nodes=self.nodes,
            edges=self.edges,
            default_model=self.config.model,
            max_tokens=self.config.max_tokens,
            loop_config=loop_config,
            conversation_mode=conversation_mode,
            identity_prompt=identity_prompt,
        )

    def _setup(self):
        self._storage_path = Path.home() / ".hive" / "agents" / "meeting_scheduler"
        self._storage_path.mkdir(parents=True, exist_ok=True)
        self._tool_registry = ToolRegistry()
        mcp_config = Path(__file__).parent / "mcp_servers.json"
        if mcp_config.exists():
            self._tool_registry.load_mcp_config(mcp_config)
        llm = LiteLLMProvider(
            model=self.config.model,
            api_key=self.config.api_key,
            api_base=self.config.api_base,
        )
        tools = list(self._tool_registry.get_tools().values())
        tool_executor = self._tool_registry.get_executor()
        self._graph = self._build_graph()
        self._agent_runtime = create_agent_runtime(
            graph=self._graph,
            goal=self.goal,
            storage_path=self._storage_path,
            entry_points=[
                EntryPointSpec(
                    id="default",
                    name="Default",
                    entry_node=self.entry_node,
                    trigger_type="manual",
                    isolation_level="shared",
                )
            ],
            llm=llm,
            tools=tools,
            tool_executor=tool_executor,
            checkpoint_config=CheckpointConfig(
                enabled=True,
                checkpoint_on_node_complete=True,
                checkpoint_max_age_days=7,
                async_checkpoint=True,
            ),
        )

    async def start(self):
        if self._agent_runtime is None:
            self._setup()
        if not self._agent_runtime.is_running:
            await self._agent_runtime.start()

    async def stop(self):
        if self._agent_runtime and self._agent_runtime.is_running:
            await self._agent_runtime.stop()
        self._agent_runtime = None

    async def trigger_and_wait(
        self, entry_point="default", input_data=None, timeout=None, session_state=None
    ):
        if self._agent_runtime is None:
            raise RuntimeError("Agent not started. Call start() first.")
        return await self._agent_runtime.trigger_and_wait(
            entry_point_id=entry_point,
            input_data=input_data or {},
            session_state=session_state,
        )

    async def run(self, context, session_state=None):
        await self.start()
        try:
            result = await self.trigger_and_wait(
                "default", context, session_state=session_state
            )
            return result or ExecutionResult(success=False, error="Execution timeout")
        finally:
            await self.stop()

    def info(self):
        return {
            "name": metadata.name,
            "version": metadata.version,
            "description": metadata.description,
            "goal": {"name": self.goal.name, "description": self.goal.description},
            "nodes": [n.id for n in self.nodes],
            "edges": [e.id for e in self.edges],
            "entry_node": self.entry_node,
            "entry_points": self.entry_points,
            "terminal_nodes": self.terminal_nodes,
            "client_facing_nodes": [n.id for n in self.nodes if n.client_facing],
        }

    def validate(self):
        errors, warnings = [], []
        node_ids = {n.id for n in self.nodes}
        for e in self.edges:
            if e.source not in node_ids:
                errors.append(f"Edge {e.id}: source '{e.source}' not found")
            if e.target not in node_ids:
                errors.append(f"Edge {e.id}: target '{e.target}' not found")
        if self.entry_node not in node_ids:
            errors.append(f"Entry node '{self.entry_node}' not found")
        for t in self.terminal_nodes:
            if t not in node_ids:
                errors.append(f"Terminal node '{t}' not found")
        for ep_id, nid in self.entry_points.items():
            if nid not in node_ids:
                errors.append(f"Entry point '{ep_id}' references unknown node '{nid}'")
        return {"valid": len(errors) == 0, "errors": errors, "warnings": warnings}


default_agent = MeetingScheduler()
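A note on the `confirm-to-intake` edge above: `condition_expr` looks like a plain Python expression over the source node's outputs, so the loop-back only fires when the confirm node sets `next_action` to `another`. A minimal sketch of how such an expression could be evaluated (the `edge_fires` helper and the `eval`-based plumbing are illustrative assumptions, not the framework's documented evaluator):

```python
# Hypothetical evaluator for EdgeCondition.CONDITIONAL expressions.
# Assumes the framework binds node outputs as local names when evaluating
# condition_expr; the real implementation may differ.
def edge_fires(condition_expr: str, outputs: dict) -> bool:
    safe_globals = {"__builtins__": {}, "str": str}  # str is called by the specs above
    return bool(eval(condition_expr, safe_globals, dict(outputs)))

assert edge_fires("str(next_action).lower() == 'another'", {"next_action": "Another"})
assert not edge_fires("str(next_action).lower() == 'another'", {"next_action": "done"})
```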
@@ -0,0 +1,28 @@
"""Runtime configuration."""

from dataclasses import dataclass

from framework.config import RuntimeConfig

default_config = RuntimeConfig()


@dataclass
class AgentMetadata:
    name: str = "Meeting Scheduler"
    version: str = "1.0.0"
    description: str = (
        "Schedule meetings by checking Google Calendar availability, booking "
        "optimal time slots, recording details in Google Sheets, and sending "
        "email confirmations with Google Meet links to attendees."
    )
    intro_message: str = (
        "Hi! I'm your meeting scheduler. Tell me who you'd like to meet with, "
        "how long the meeting should be, and what it's about — I'll check "
        "calendar availability, book a time slot, log it to your spreadsheet, "
        "and send a confirmation email with a Google Meet link. "
        "Who would you like to schedule a meeting with?"
    )


metadata = AgentMetadata()
@@ -0,0 +1,9 @@
{
  "hive-tools": {
    "transport": "stdio",
    "command": "uv",
    "args": ["run", "python", "mcp_server.py", "--stdio"],
    "cwd": "../../../tools",
    "description": "Hive tools MCP server"
  }
}
@@ -0,0 +1,140 @@
"""Node definitions for Meeting Scheduler."""

from framework.graph import NodeSpec

# Node 1: Intake (client-facing)
intake_node = NodeSpec(
    id="intake",
    name="Intake",
    description="Gather meeting details from the user",
    node_type="event_loop",
    client_facing=True,
    max_node_visits=0,
    input_keys=["attendee_email", "meeting_duration_minutes"],
    output_keys=["attendee_email", "meeting_duration_minutes", "meeting_title"],
    nullable_output_keys=[
        "attendee_email",
        "meeting_duration_minutes",
        "meeting_title",
    ],
    success_criteria="User has provided attendee email, meeting duration, and title.",
    system_prompt="""\
You are a meeting scheduler assistant.

**STEP 1 — Use ask_user to collect meeting details:**
1. Call ask_user to ask for: attendee email, meeting duration (minutes), and meeting title
2. Wait for the user's response before proceeding

**STEP 2 — After user provides all details, call set_output:**
- set_output("attendee_email", "user's email address")
- set_output("meeting_duration_minutes", meeting duration as string)
- set_output("meeting_title", "title of the meeting")
""",
    tools=[],
)

# Node 2: Schedule (autonomous)
schedule_node = NodeSpec(
    id="schedule",
    name="Schedule",
    description="Find available time on calendar, book meeting with Google Meet, and log to Google Sheet",
    node_type="event_loop",
    max_node_visits=0,
    input_keys=["attendee_email", "meeting_duration_minutes", "meeting_title"],
    output_keys=[
        "meeting_time",
        "booking_confirmed",
        "spreadsheet_recorded",
        "email_sent",
        "meet_link",
    ],
    nullable_output_keys=[],
    success_criteria="Meeting time found, Google Meet created, Google Sheet 'Meeting Scheduler' updated with date/time/attendee/title/meet_link, and confirmation email sent.",
    system_prompt="""\
You are a meeting booking agent that creates Google Calendar events with Google Meet and logs to Google Sheets.

## STEP 1 — Calendar Operations (tool calls in this turn):

1. **Find availability and verify conflicts:**
   - Use calendar_check_availability to find potential time slots.
   - **CRITICAL:** Always search a broad window (at least 8 hours) for the target day to see the full context of the user's schedule.
   - **SECONDARY CHECK:** Before finalizing a slot, use calendar_list_events for that specific day. This ensures you catch "soft" conflicts or events not marked as 'busy' that might still be important.

2. **Create the event WITH GOOGLE MEET (AUTOMATIC):**
   - Use calendar_create_event with these parameters:
     - summary: the meeting title
     - start_time: the start datetime in ISO format (e.g., "2024-01-15T09:00:00")
     - end_time: the end datetime in ISO format
     - attendees: list with the attendee email address (e.g., ["user@example.com"])
     - timezone: user's timezone (e.g., "America/Los_Angeles")
   - IMPORTANT: The tool automatically generates a Google Meet link when attendees are provided.
     You do NOT need to pass conferenceData - it is handled automatically.
   - The response will include conferenceData.entryPoints with the Google Meet link
   - Extract the meet_link from conferenceData.entryPoints[0].uri in the response

3. **Log to Google Sheets:**
   - First, use google_sheets_get_spreadsheet with spreadsheet_id="Meeting Scheduler" to check if it exists
   - If it doesn't exist, use google_sheets_create_spreadsheet with title="Meeting Scheduler"
   - Then use google_sheets_append_values to add a row with:
     - Date, Time, Attendee Email, Meeting Title, Google Meet Link
   - The spreadsheet_id should be "Meeting Scheduler" (by name) or the ID returned from create

4. **Send confirmation email:**
   - Use send_email to send the attendee a confirmation with:
     - to: attendee email address
     - subject: "Meeting Confirmation: {meeting_title}"
     - body: Include meeting title, date/time, and Google Meet link

## STEP 2 — set_output (SEPARATE turn, no other tool calls):

After all tools complete successfully, call set_output:
- set_output("meeting_time", "YYYY-MM-DD HH:MM")
- set_output("meet_link", "https://meet.google.com/xxx/yyy")
- set_output("booking_confirmed", "true")
- set_output("spreadsheet_recorded", "true")
- set_output("email_sent", "true")

## CRITICAL: Google Meet creation
Google Meet links are AUTOMATICALLY created by calendar_create_event when attendees are provided.
Simply pass the attendees list and the tool will generate the Meet link.
""",
    tools=[
        "calendar_check_availability",
        "calendar_create_event",
        "calendar_list_events",
        "google_sheets_create_spreadsheet",
        "google_sheets_get_spreadsheet",
        "google_sheets_append_values",
        "send_email",
    ],
)

# Node 3: Confirm (client-facing)
confirm_node = NodeSpec(
    id="confirm",
    name="Confirm",
    description="Present booking confirmation to user with Google Meet link",
    node_type="event_loop",
    client_facing=True,
    max_node_visits=0,
    input_keys=["meeting_time", "booking_confirmed", "meet_link"],
    output_keys=["next_action"],
    nullable_output_keys=["next_action"],
    success_criteria="User has acknowledged the booking and received the Google Meet link.",
    system_prompt="""\
You are a confirmation assistant.

**STEP 1 — Present confirmation and ask user:**
1. Show the meeting details (date, time, attendee, title)
2. Display the Google Meet link prominently
3. Confirm the booking is complete and logged to Google Sheets
4. Call ask_user to ask if they want to schedule another meeting or finish

**STEP 2 — After user responds, call set_output:**
- set_output("next_action", "another") — if booking another meeting
- set_output("next_action", "done") — if finished
""",
    tools=[],
)

__all__ = ["intake_node", "schedule_node", "confirm_node"]
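Note how intake's `output_keys` line up with schedule's `input_keys`: values published by one node become the inputs of the next node along the edge. A toy illustration of that handoff, assuming dict-like state plumbing (the key names come from the NodeSpecs above; the flat dict is a hypothetical stand-in for the framework's actual state store):

```python
# Toy illustration only; not the framework's real state management.
state = {}

# intake publishes its declared output_keys via set_output(...)
state["attendee_email"] = "alex@example.com"
state["meeting_duration_minutes"] = "30"
state["meeting_title"] = "Design review"

# schedule then receives exactly its declared input_keys
schedule_inputs = {k: state[k] for k in schedule_node.input_keys}
```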
@@ -0,0 +1,34 @@
"""Test fixtures."""

import sys
from pathlib import Path

import pytest

_repo_root = Path(__file__).resolve().parents[4]
for _p in ["examples/templates", "core"]:
    _path = str(_repo_root / _p)
    if _path not in sys.path:
        sys.path.insert(0, _path)

AGENT_PATH = str(Path(__file__).resolve().parents[1])


@pytest.fixture(scope="session")
def agent_module():
    """Import the agent package for structural validation."""
    import importlib

    return importlib.import_module(Path(AGENT_PATH).name)


@pytest.fixture(scope="session")
def runner_loaded():
    """Load the agent through AgentRunner (structural only, no LLM needed)."""
    from framework.runner.runner import AgentRunner
    from framework.credentials.models import CredentialError

    try:
        return AgentRunner.load(AGENT_PATH)
    except CredentialError:
        pytest.skip("Google OAuth credentials not configured")
@@ -0,0 +1,103 @@
"""Structural tests for Meeting Scheduler."""

from meeting_scheduler import (
    default_agent,
    goal,
    nodes,
    edges,
    entry_node,
    entry_points,
    terminal_nodes,
    conversation_mode,
    loop_config,
)


class TestGoalDefinition:
    def test_goal_exists(self):
        assert goal is not None
        assert goal.id == "meeting-scheduler-goal"
        assert len(goal.success_criteria) == 4
        assert len(goal.constraints) == 3

    def test_success_criteria_weights_sum_to_one(self):
        total = sum(sc.weight for sc in goal.success_criteria)
        assert abs(total - 1.0) < 0.01


class TestNodeStructure:
    def test_three_nodes(self):
        assert len(nodes) == 3
        assert nodes[0].id == "intake"
        assert nodes[1].id == "schedule"
        assert nodes[2].id == "confirm"

    def test_intake_is_client_facing(self):
        assert nodes[0].client_facing is True

    def test_schedule_has_required_tools(self):
        required = {
            "calendar_check_availability",
            "calendar_create_event",
            "google_sheets_append_values",
            "send_email",
        }
        actual = set(nodes[1].tools)
        assert required.issubset(actual)

    def test_confirm_is_client_facing(self):
        assert nodes[2].client_facing is True


class TestEdgeStructure:
    def test_three_edges(self):
        assert len(edges) == 3

    def test_linear_path(self):
        assert edges[0].source == "intake"
        assert edges[0].target == "schedule"
        assert edges[1].source == "schedule"
        assert edges[1].target == "confirm"

    def test_loop_back(self):
        assert edges[2].source == "confirm"
        assert edges[2].target == "intake"


class TestGraphConfiguration:
    def test_entry_node(self):
        assert entry_node == "intake"

    def test_entry_points(self):
        assert entry_points == {"start": "intake"}

    def test_forever_alive(self):
        assert terminal_nodes == []

    def test_conversation_mode(self):
        assert conversation_mode == "continuous"

    def test_loop_config_valid(self):
        assert "max_iterations" in loop_config
        assert "max_tool_calls_per_turn" in loop_config
        assert "max_history_tokens" in loop_config


class TestAgentClass:
    def test_default_agent_created(self):
        assert default_agent is not None

    def test_validate_passes(self):
        result = default_agent.validate()
        assert result["valid"] is True
        assert len(result["errors"]) == 0

    def test_agent_info(self):
        info = default_agent.info()
        assert info["name"] == "Meeting Scheduler"
        assert "schedule" in info["nodes"]


class TestRunnerLoad:
    def test_agent_runner_load_succeeds(self, runner_loaded):
        assert runner_loaded is not None
@@ -0,0 +1,32 @@
# Twitter News Digest

Monitors tech Twitter profiles, extracts the latest tweets, and compiles a daily tech news digest with user review.

## Nodes

| Node | Type | Description |
|------|------|-------------|
| `fetch-tweets` | `gcu` (browser) | Navigates to Twitter profiles and extracts latest tweets |
| `process-news` | `event_loop` | Analyzes and summarizes tweets into a tech digest |
| `review-digest` | `event_loop` (client-facing) | Presents digest for user review and feedback |

## Flow

```
process-news → review-digest → (loop back to process-news)
     ↓                ↑
fetch-tweets    feedback loop (if revisions needed)
 (sub-agent)
```

## Tools used

- **save_data / load_data** — persist daily reports
- **Browser (GCU)** — automated Twitter browsing and tweet extraction

## Running

```bash
uv run python -m examples.templates.twitter_news_agent run
uv run python -m examples.templates.twitter_news_agent run --handles "@TechCrunch,@verge,@WIRED"
```
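The agent can also be driven programmatically; `default_agent.run()` (defined in `agent.py` below) starts the runtime, waits for the result, and shuts it down again, mirroring what the `run` CLI command does:

```python
import asyncio

from examples.templates.twitter_news_agent import default_agent

# Same context shape the CLI builds from --handles
result = asyncio.run(
    default_agent.run(
        {
            "user_request": "Fetch the latest tech news digest from Twitter",
            "twitter_handles": ["@TechCrunch", "@verge", "@WIRED"],
        }
    )
)
print(result.success, result.output)
```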
@@ -0,0 +1,34 @@
"""Twitter News Digest — monitors Twitter for news."""

from .agent import (
    TwitterNewsAgent,
    default_agent,
    goal,
    nodes,
    edges,
    entry_node,
    entry_points,
    pause_nodes,
    terminal_nodes,
    conversation_mode,
    identity_prompt,
    loop_config,
)
from .config import default_config, metadata

__all__ = [
    "TwitterNewsAgent",
    "default_agent",
    "goal",
    "nodes",
    "edges",
    "entry_node",
    "entry_points",
    "pause_nodes",
    "terminal_nodes",
    "conversation_mode",
    "identity_prompt",
    "loop_config",
    "default_config",
    "metadata",
]
@@ -0,0 +1,148 @@
"""
CLI entry point for Twitter News Digest.
"""

import asyncio
import json
import logging
import sys
import click

from .agent import default_agent, TwitterNewsAgent


def setup_logging(verbose=False, debug=False):
    """Configure logging for execution visibility."""
    if debug:
        level, fmt = logging.DEBUG, "%(asctime)s %(name)s: %(message)s"
    elif verbose:
        level, fmt = logging.INFO, "%(message)s"
    else:
        level, fmt = logging.WARNING, "%(levelname)s: %(message)s"
    logging.basicConfig(level=level, format=fmt, stream=sys.stderr)
    logging.getLogger("framework").setLevel(level)


@click.group()
@click.version_option(version="1.1.0")
def cli():
    """Twitter News Digest - Monitor Twitter feeds for tech news."""
    pass


@cli.command()
@click.option(
    "--handles",
    "-h",
    type=str,
    default=None,
    help="Comma-separated Twitter handles to monitor",
)
@click.option("--quiet", is_flag=True, help="Only output result JSON")
@click.option("--verbose", "-v", is_flag=True, help="Show execution details")
@click.option("--debug", is_flag=True, help="Show debug logging")
def run(handles, quiet, verbose, debug):
    """Fetch and summarize tech news from Twitter."""
    if not quiet:
        setup_logging(verbose=verbose, debug=debug)

    context = {"user_request": "Fetch the latest tech news digest from Twitter"}
    if handles:
        context["twitter_handles"] = [h.strip() for h in handles.split(",")]

    result = asyncio.run(default_agent.run(context))

    output_data = {
        "success": result.success,
        "steps_executed": result.steps_executed,
        "output": result.output,
    }
    if result.error:
        output_data["error"] = result.error

    click.echo(json.dumps(output_data, indent=2, default=str))
    sys.exit(0 if result.success else 1)


@cli.command()
@click.option("--json", "output_json", is_flag=True)
def info(output_json):
    """Show agent information."""
    info_data = default_agent.info()
    if output_json:
        click.echo(json.dumps(info_data, indent=2))
    else:
        click.echo(f"Agent: {info_data['name']}")
        click.echo(f"Version: {info_data['version']}")
        click.echo(f"Description: {info_data['description']}")
        click.echo(f"\nNodes: {', '.join(info_data['nodes'])}")
        click.echo(f"Entry: {info_data['entry_node']}")
        click.echo(f"Terminal: {', '.join(info_data['terminal_nodes'])}")


@cli.command()
def validate():
    """Validate agent structure."""
    validation = default_agent.validate()
    if validation["valid"]:
        click.echo("Agent is valid")
        if validation["warnings"]:
            for warning in validation["warnings"]:
                click.echo(f"  WARNING: {warning}")
    else:
        click.echo("Agent has errors:")
        for error in validation["errors"]:
            click.echo(f"  ERROR: {error}")
    sys.exit(0 if validation["valid"] else 1)


@cli.command()
@click.option("--verbose", "-v", is_flag=True)
def shell(verbose):
    """Interactive session (CLI)."""
    asyncio.run(_interactive_shell(verbose))


async def _interactive_shell(verbose=False):
    """Async interactive shell."""
    setup_logging(verbose=verbose)

    click.echo("=== Twitter News Digest ===")
    click.echo("Enter a request (or 'quit' to exit):\n")

    agent = TwitterNewsAgent()
    await agent.start()

    try:
        while True:
            try:
                query = await asyncio.get_event_loop().run_in_executor(
                    None, input, "News> "
                )
                if query.lower() in ["quit", "exit", "q"]:
                    click.echo("Goodbye!")
                    break

                if not query.strip():
                    continue

                click.echo("\nFetching news...\n")

                result = await agent.run({"user_request": query})

                if result.success:
                    click.echo("\nDigest complete\n")
                else:
                    click.echo(f"\nDigest failed: {result.error}\n")

            except KeyboardInterrupt:
                click.echo("\nGoodbye!")
                break
            except Exception as e:
                click.echo(f"Error: {e}", err=True)
    finally:
        await agent.stop()


if __name__ == "__main__":
    cli()
@@ -0,0 +1,241 @@
"""Agent graph construction for Twitter News Digest."""

from pathlib import Path

from framework.graph import EdgeSpec, EdgeCondition, Goal, SuccessCriterion, Constraint
from framework.graph.edge import GraphSpec
from framework.graph.executor import ExecutionResult
from framework.graph.checkpoint_config import CheckpointConfig
from framework.llm import LiteLLMProvider
from framework.runner.tool_registry import ToolRegistry
from framework.runtime.agent_runtime import create_agent_runtime
from framework.runtime.execution_stream import EntryPointSpec

from .config import default_config, metadata
from .nodes import fetch_node, process_node, review_node

# Goal definition
goal = Goal(
    id="twitter-news-goal",
    name="Twitter News Digest",
    description="Achieve an accurate and concise daily news digest based on Twitter feed monitoring.",
    success_criteria=[
        SuccessCriterion(
            id="sc-1",
            description="Navigate and extract tweets from at least 3 handles.",
            metric="handle_count",
            target=">=3",
            weight=0.4,
        ),
        SuccessCriterion(
            id="sc-2",
            description="Provide a summary of the most important stories.",
            metric="summary_quality",
            target="high",
            weight=0.4,
        ),
        SuccessCriterion(
            id="sc-3",
            description="Maintain a persistent log of daily digests.",
            metric="file_exists",
            target="true",
            weight=0.2,
        ),
    ],
    constraints=[
        Constraint(
            id="c-1",
            description="Respect rate limits and ethical web usage.",
            constraint_type="hard",
            category="functional",
        ),
    ],
)

# Node list
nodes = [fetch_node, process_node, review_node]

# Edge definitions
edges = [
    # Process tweets then review
    EdgeSpec(
        id="process-to-review",
        source="process-news",
        target="review-digest",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    # Feedback loop if revisions needed
    EdgeSpec(
        id="review-to-process",
        source="review-digest",
        target="process-news",
        condition=EdgeCondition.CONDITIONAL,
        condition_expr="str(status).lower() == 'revise'",
        priority=2,
    ),
    # Loop back for next run (forever-alive)
    EdgeSpec(
        id="review-done",
        source="review-digest",
        target="process-news",
        condition=EdgeCondition.CONDITIONAL,
        condition_expr="str(status).lower() == 'approved'",
        priority=1,
    ),
]

# Entry point is the autonomous processing node (queen handles intake)
entry_node = "process-news"
entry_points = {"start": "process-news"}
pause_nodes = []
terminal_nodes = []  # Forever-alive

# Module-level vars read by AgentRunner.load()
conversation_mode = "continuous"
identity_prompt = "You are a professional news analyst and researcher."
loop_config = {
    "max_iterations": 100,
    "max_tool_calls_per_turn": 20,
    "max_history_tokens": 32000,
}


class TwitterNewsAgent:
    def __init__(self, config=None):
        self.config = config or default_config
        self.goal = goal
        self.nodes = nodes
        self.edges = edges
        self.entry_node = entry_node
        self.entry_points = entry_points
        self.pause_nodes = pause_nodes
        self.terminal_nodes = terminal_nodes
        self._graph = None
        self._agent_runtime = None
        self._tool_registry = None
        self._storage_path = None

    def _build_graph(self):
        return GraphSpec(
            id="twitter-news-graph",
            goal_id=self.goal.id,
            version="1.0.0",
            entry_node=self.entry_node,
            entry_points=self.entry_points,
            terminal_nodes=self.terminal_nodes,
            pause_nodes=self.pause_nodes,
            nodes=self.nodes,
            edges=self.edges,
            default_model=self.config.model,
            max_tokens=self.config.max_tokens,
            loop_config=loop_config,
            conversation_mode=conversation_mode,
            identity_prompt=identity_prompt,
        )

    def _setup(self):
        self._storage_path = Path.home() / ".hive" / "agents" / "twitter_news_agent"
        self._storage_path.mkdir(parents=True, exist_ok=True)
        self._tool_registry = ToolRegistry()
        mcp_config = Path(__file__).parent / "mcp_servers.json"
        if mcp_config.exists():
            self._tool_registry.load_mcp_config(mcp_config)
        llm = LiteLLMProvider(
            model=self.config.model,
            api_key=self.config.api_key,
            api_base=self.config.api_base,
        )
        tools = list(self._tool_registry.get_tools().values())
        tool_executor = self._tool_registry.get_executor()
        self._graph = self._build_graph()
        self._agent_runtime = create_agent_runtime(
            graph=self._graph,
            goal=self.goal,
            storage_path=self._storage_path,
            entry_points=[
                EntryPointSpec(
                    id="default",
                    name="Default",
                    entry_node=self.entry_node,
                    trigger_type="manual",
                    isolation_level="shared",
                )
            ],
            llm=llm,
            tools=tools,
            tool_executor=tool_executor,
            checkpoint_config=CheckpointConfig(
                enabled=True,
                checkpoint_on_node_complete=True,
                checkpoint_max_age_days=7,
                async_checkpoint=True,
            ),
        )

    async def start(self):
        if self._agent_runtime is None:
            self._setup()
        if not self._agent_runtime.is_running:
            await self._agent_runtime.start()

    async def stop(self):
        if self._agent_runtime and self._agent_runtime.is_running:
            await self._agent_runtime.stop()
        self._agent_runtime = None

    async def trigger_and_wait(
        self, entry_point="default", input_data=None, timeout=None, session_state=None
    ):
        if self._agent_runtime is None:
            raise RuntimeError("Agent not started. Call start() first.")
        return await self._agent_runtime.trigger_and_wait(
            entry_point_id=entry_point,
            input_data=input_data or {},
            session_state=session_state,
        )

    async def run(self, context, session_state=None):
        await self.start()
        try:
            result = await self.trigger_and_wait(
                "default", context, session_state=session_state
            )
            return result or ExecutionResult(success=False, error="Execution timeout")
        finally:
            await self.stop()

    def info(self):
        return {
            "name": metadata.name,
            "version": metadata.version,
            "description": metadata.description,
            "goal": {"name": self.goal.name, "description": self.goal.description},
            "nodes": [n.id for n in self.nodes],
            "edges": [e.id for e in self.edges],
            "entry_node": self.entry_node,
            "entry_points": self.entry_points,
            "terminal_nodes": self.terminal_nodes,
            "client_facing_nodes": [n.id for n in self.nodes if n.client_facing],
        }

    def validate(self):
        errors, warnings = [], []
        node_ids = {n.id for n in self.nodes}
        for e in self.edges:
            if e.source not in node_ids:
                errors.append(f"Edge {e.id}: source '{e.source}' not found")
            if e.target not in node_ids:
                errors.append(f"Edge {e.id}: target '{e.target}' not found")
        if self.entry_node not in node_ids:
            errors.append(f"Entry node '{self.entry_node}' not found")
        for t in self.terminal_nodes:
            if t not in node_ids:
                errors.append(f"Terminal node '{t}' not found")
        for ep_id, nid in self.entry_points.items():
            if nid not in node_ids:
                errors.append(f"Entry point '{ep_id}' references unknown node '{nid}'")
        return {"valid": len(errors) == 0, "errors": errors, "warnings": warnings}


default_agent = TwitterNewsAgent()
@@ -0,0 +1,20 @@
"""Runtime configuration."""

from dataclasses import dataclass

from framework.config import RuntimeConfig

default_config = RuntimeConfig()


@dataclass
class AgentMetadata:
    name: str = "Twitter News Digest"
    version: str = "1.1.0"
    description: str = (
        "Monitors Twitter feeds and provides a daily news digest, focused on tech news."
    )
    intro_message: str = "I'm ready to fetch the latest tech news from Twitter. Which tech handles should I check?"


metadata = AgentMetadata()
@@ -0,0 +1,16 @@
{
  "hive-tools": {
    "transport": "stdio",
    "command": "uv",
    "args": ["run", "python", "mcp_server.py", "--stdio"],
    "cwd": "../../../tools",
    "description": "Hive tools MCP server"
  },
  "gcu-tools": {
    "transport": "stdio",
    "command": "uv",
    "args": ["run", "python", "-m", "gcu.server", "--stdio"],
    "cwd": "../../../tools",
    "description": "GCU tools for browser automation"
  }
}
@@ -0,0 +1,86 @@
"""Node definitions for Twitter News Digest."""

from framework.graph import NodeSpec

# Node 1: Browser subagent (GCU) to fetch tweets
fetch_node = NodeSpec(
    id="fetch-tweets",
    name="Fetch Tech Tweets",
    description="Browser subagent to navigate to tech news Twitter profiles and extract latest tweets.",
    node_type="gcu",
    client_facing=False,
    max_node_visits=1,
    input_keys=["twitter_handles"],
    output_keys=["raw_tweets"],
    tools=[],  # Auto-populated with browser tools
    system_prompt="""\
You are a specialized tech news researcher.
Your task is to navigate to the provided tech Twitter profiles and extract the latest 10 tweets from each.

## Target Content
Focus on:
- Major software/AI releases
- Tech company earnings/acquisitions
- Hardware/Silicon breakthroughs

## Instructions
1. browser_start
2. For each handle:
   a. browser_open(url=f"https://x.com/{handle}")
   b. browser_wait(seconds=5)
   c. browser_snapshot
   d. Parse relevant tech news text
3. set_output("raw_tweets", consolidated_json)
""",
)

# Node 2: Process and summarize (autonomous)
process_node = NodeSpec(
    id="process-news",
    name="Process Tech News",
    description="Analyze and summarize the raw tweets into a daily tech digest.",
    node_type="event_loop",
    sub_agents=["fetch-tweets"],
    input_keys=["user_request", "feedback", "raw_tweets"],
    output_keys=["daily_digest"],
    nullable_output_keys=["feedback", "raw_tweets"],
    success_criteria="A high-quality, tech-focused news summary.",
    system_prompt="""\
You are a senior technology editor.
If "raw_tweets" is missing, call delegate_to_sub_agent(agent_id="fetch-tweets", task="Fetch tech news from @TechCrunch, @verge, @WIRED, @CNET, @engadget, @Gizmodo, @TheRegister, @ArsTechnica, @ZDNet, @venturebeat, @AndrewYNg, @ylecun, @geoffreyhinton, @goodfellow_ian, @drfeifei, @hardmaru, @tegmark, @GaryMarcus, @schmidhuberAI, @fastdotai").

Once tech tweets are available:
1. Synthesize a "Daily Tech Report" highlighting major breakthroughs.
2. Save the report using save_data(filename="daily_tech_report.txt", data=summary).
3. set_output("daily_digest", summary)
""",
    tools=["save_data", "load_data"],
)

# Node 3: Review (client-facing)
review_node = NodeSpec(
    id="review-digest",
    name="Review Digest",
    description="Present the news digest for user review and approval.",
    node_type="event_loop",
    client_facing=True,
    input_keys=["daily_digest"],
    output_keys=["status", "feedback"],
    nullable_output_keys=["feedback"],
    success_criteria="User has reviewed the digest and provided feedback or approval.",
    system_prompt="""\
Present the daily news digest to the user.

**STEP 1 — Present (text only, NO tool calls):**
Display the summary and ask:
1. Is this summary helpful?
2. Are there specific handles or topics you'd like to focus on for tomorrow?

**STEP 2 — After user responds, call set_output:**
- set_output("status", "approved") if satisfied.
- set_output("status", "revise") and set_output("feedback", "...") if changes are needed.
""",
    tools=[],
)

__all__ = ["fetch_node", "process_node", "review_node"]
@@ -78,5 +78,6 @@ if (-not $env:HIVE_CREDENTIAL_KEY) {
}

# ── Run the Hive CLI ────────────────────────────────────────────────

# PYTHONUTF8=1: use UTF-8 for default encoding (fixes charmap decode errors on Windows)
$env:PYTHONUTF8 = "1"
& uv run hive @args
+15
-6
@@ -130,8 +130,8 @@ function Test-DefenderExclusions {
 # Normalize and filter null/empty values
 $safePrefixes = $safePrefixes | Where-Object { $_ } | ForEach-Object {
-    [System.IO.Path]::GetFullPath($_)
-}
+    try { [System.IO.Path]::GetFullPath($_) } catch { $null }
+} | Where-Object { $_ }
 
 try {
     # Check if Defender cmdlets are available (may not exist on older Windows)
@@ -157,15 +157,20 @@ function Test-DefenderExclusions {
     $existing = $prefs.ExclusionPath
     if (-not $existing) { $existing = @() }
 
-    # Normalize existing paths for comparison
+    # Normalize existing paths for comparison (some may contain wildcards
+    # or env vars that GetFullPath rejects — skip those gracefully)
     $existing = $existing | Where-Object { $_ } | ForEach-Object {
-        [System.IO.Path]::GetFullPath($_)
+        try { [System.IO.Path]::GetFullPath($_) } catch { $_ }
     }
 
     # Normalize paths and find missing exclusions
     $missing = @()
     foreach ($path in $Paths) {
-        $normalized = [System.IO.Path]::GetFullPath($path)
+        try {
+            $normalized = [System.IO.Path]::GetFullPath($path)
+        } catch {
+            continue  # Skip paths with unsupported format
+        }
 
         # Security: Ensure path is within safe boundaries
         $isSafe = $false
@@ -250,7 +255,11 @@ function Add-DefenderExclusions {
 
     foreach ($path in $Paths) {
         try {
-            $normalized = [System.IO.Path]::GetFullPath($path)
+            try {
+                $normalized = [System.IO.Path]::GetFullPath($path)
+            } catch {
+                $normalized = $path  # Use raw path if normalization fails
+            }
             Add-MpPreference -ExclusionPath $normalized -ErrorAction Stop
             $added += $normalized
         } catch {
+23
-2
@@ -927,7 +927,8 @@ PROVIDER_MENU_ENVS=(ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY GROQ_API_KEY
 PROVIDER_MENU_NAMES=("Anthropic (Claude) - Recommended" "OpenAI (GPT)" "Google Gemini - Free tier available" "Groq - Fast, free tier" "Cerebras - Fast, free tier")
 for idx in 0 1 2 3 4; do
     num=$((idx + 4))
-    if [ -n "${!PROVIDER_MENU_ENVS[$idx]}" ]; then
+    env_var="${PROVIDER_MENU_ENVS[$idx]}"
+    if [ -n "${!env_var}" ]; then
         echo -e "  ${CYAN}$num)${NC} ${PROVIDER_MENU_NAMES[$idx]} ${GREEN}(credential detected)${NC}"
     else
         echo -e "  ${CYAN}$num)${NC} ${PROVIDER_MENU_NAMES[$idx]}"
@@ -1477,6 +1478,26 @@ if [ -n "$HIVE_CREDENTIAL_KEY" ]; then
     echo ""
 fi
 
+# Show tool summary
+TOOL_COUNTS=$(uv run python -c "
+from fastmcp import FastMCP
+from aden_tools.tools import register_all_tools
+mv = FastMCP('v')
+v = register_all_tools(mv, include_unverified=False)
+ma = FastMCP('a')
+a = register_all_tools(ma, include_unverified=True)
+print(f'{len(v)}|{len(a) - len(v)}')
+" 2>/dev/null)
+if [ -n "$TOOL_COUNTS" ]; then
+    VERIFIED=$(echo "$TOOL_COUNTS" | cut -d'|' -f1)
+    UNVERIFIED=$(echo "$TOOL_COUNTS" | cut -d'|' -f2)
+    echo -e "${BOLD}Tools:${NC}"
+    echo -e "  ${GREEN}⬢${NC} ${VERIFIED} verified ${DIM}${UNVERIFIED} unverified available${NC}"
+    echo -e "  ${DIM}Enable unverified: INCLUDE_UNVERIFIED_TOOLS=true${NC}"
+    echo -e "  ${DIM}Learn more: docs/tools.md${NC}"
+    echo ""
+fi
+
 # Show Codex instructions if available
 if [ "$CODEX_AVAILABLE" = true ]; then
     echo -e "${BOLD}Build a New Agent (Codex):${NC}"
@@ -1520,7 +1541,7 @@ else
     echo ""
-    echo -e "  Launch the interactive dashboard to browse and run agents:"
+    echo -e "  You can start an example agent or an agent built by yourself:"
-    echo -e "  ${CYAN}hive tui${NC}"
+    echo -e "  ${CYAN}hive open${NC}"
     echo ""
     echo -e "${DIM}Run ./quickstart.sh again to reconfigure.${NC}"
     echo ""
@@ -0,0 +1,550 @@
#!/usr/bin/env bun

/**
 * Bounty Tracker — calculates points from merged PRs and generates leaderboards.
 *
 * Modes:
 *   notify      — Post a Discord message for a single completed bounty (called by bounty-completed.yml)
 *   leaderboard — Generate and post the weekly leaderboard (called by weekly-leaderboard.yml)
 *
 * Environment:
 *   GITHUB_TOKEN            — GitHub API token
 *   GITHUB_REPOSITORY_OWNER — e.g. "adenhq"
 *   GITHUB_REPOSITORY_NAME  — e.g. "hive"
 *   DISCORD_WEBHOOK_URL     — Discord webhook for #integrations-announcements
 *   LURKR_API_KEY           — Lurkr Read/Write API key (for XP push)
 *   LURKR_GUILD_ID          — Discord server ID where Lurkr is installed
 *   PR_NUMBER               — (notify mode) The merged PR number
 */

import { readFileSync } from "fs";
import { join } from "path";

// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------

interface Contributor {
  github: string;
  discord: string;
  name?: string;
}

interface GitHubLabel {
  name: string;
}

interface GitHubUser {
  login: string;
}

interface GitHubPR {
  number: number;
  title: string;
  merged_at: string | null;
  labels: GitHubLabel[];
  user: GitHubUser;
  html_url: string;
}

interface BountyResult {
  pr: GitHubPR;
  bountyType: string;
  points: number;
  difficulty: string;
  contributor: string;
  discordId: string | null;
}

interface LeaderboardEntry {
  github: string;
  discordId: string | null;
  points: number;
  bounties: number;
}

// ---------------------------------------------------------------------------
// Constants
// ---------------------------------------------------------------------------

const POINTS: Record<string, number> = {
  "bounty:test": 20,
  "bounty:docs": 20,
  "bounty:code": 30,
  "bounty:new-tool": 75,
};

// ---------------------------------------------------------------------------
// GitHub API
// ---------------------------------------------------------------------------

async function githubRequest<T>(
  endpoint: string,
  token: string,
  method: string = "GET",
  body?: unknown
): Promise<T> {
  const headers: Record<string, string> = {
    Authorization: `Bearer ${token}`,
    Accept: "application/vnd.github.v3+json",
    "User-Agent": "bounty-tracker",
  };

  if (body) {
    headers["Content-Type"] = "application/json";
  }

  const options: RequestInit = { method, headers };
  if (body) {
    options.body = JSON.stringify(body);
  }

  const response = await fetch(`https://api.github.com${endpoint}`, options);

  if (!response.ok) {
    throw new Error(
      `GitHub API request failed: ${response.status} ${response.statusText}`
    );
  }

  return response.json();
}

async function getPR(
  owner: string,
  repo: string,
  prNumber: number,
  token: string
): Promise<GitHubPR> {
  return githubRequest<GitHubPR>(
    `/repos/${owner}/${repo}/pulls/${prNumber}`,
    token
  );
}

async function getMergedBountyPRs(
  owner: string,
  repo: string,
  token: string,
  since?: string
): Promise<GitHubPR[]> {
  // GitHub search treats repeated label: qualifiers as AND, so quoting each
  // bounty label as its own qualifier would only match PRs carrying all of
  // them at once. Comma-separating the quoted values inside a single label:
  // qualifier ORs them instead.
  const bountyLabels = `label:${Object.keys(POINTS)
    .map((l) => `"${l}"`)
    .join(",")}`;

  const query = `repo:${owner}/${repo} is:pr is:merged ${bountyLabels}${since ? ` merged:>=${since}` : ""}`;

  const result = await githubRequest<{ items: GitHubPR[] }>(
    `/search/issues?q=${encodeURIComponent(query)}&per_page=100&sort=updated&order=desc`,
    token
  );

  return result.items;
}

// ---------------------------------------------------------------------------
// Identity resolution
// ---------------------------------------------------------------------------

// Parse contributors.yml without a YAML dependency.
// The format is simple enough to parse with regex:
//   contributors:
//     - github: jane-doe
//       discord: "123456789012345678"
//       name: Jane Doe
function parseContributorsYaml(raw: string): Contributor[] {
  const contributors: Contributor[] = [];
  let current: Partial<Contributor> | null = null;

  for (const line of raw.split("\n")) {
    const trimmed = line.trim();

    if (trimmed.startsWith("- github:")) {
      if (current?.github && current?.discord) {
        contributors.push(current as Contributor);
      }
      current = { github: trimmed.replace("- github:", "").trim() };
    } else if (trimmed.startsWith("discord:") && current) {
      current.discord = trimmed.replace("discord:", "").trim().replace(/^["']|["']$/g, "");
    } else if (trimmed.startsWith("name:") && current) {
      current.name = trimmed.replace("name:", "").trim();
    }
  }

  // Don't forget the last entry
  if (current?.github && current?.discord) {
    contributors.push(current as Contributor);
  }

  return contributors;
}

function loadContributors(): Map<string, Contributor> {
  const map = new Map<string, Contributor>();

  try {
    // Resolve path relative to the script location (scripts/ dir → repo root)
    const scriptDir = new URL(".", import.meta.url).pathname;
    const raw = readFileSync(
      join(scriptDir, "..", "contributors.yml"),
      "utf-8"
    );
    const entries = parseContributorsYaml(raw);

    for (const c of entries) {
      map.set(c.github.toLowerCase(), c);
    }
  } catch {
    console.warn("Warning: could not load contributors.yml");
  }

  return map;
}

function resolveDiscord(
  githubUsername: string,
  contributors: Map<string, Contributor>
): string | null {
  const entry = contributors.get(githubUsername.toLowerCase());
  return entry?.discord ?? null;
}

// ---------------------------------------------------------------------------
// Bounty extraction
// ---------------------------------------------------------------------------

function extractBounty(
  pr: GitHubPR,
  contributors: Map<string, Contributor>
): BountyResult | null {
  const labels = pr.labels.map((l) => l.name);

  const bountyLabel = labels.find((l) => l.startsWith("bounty:"));
  if (!bountyLabel) return null;

  const points = POINTS[bountyLabel];
  if (points === undefined) return null;

  const difficulty =
    labels.find((l) => l.startsWith("difficulty:"))?.replace("difficulty:", "") ??
    "unknown";

  return {
    pr,
    bountyType: bountyLabel.replace("bounty:", ""),
    points,
    difficulty,
    contributor: pr.user.login,
    discordId: resolveDiscord(pr.user.login, contributors),
  };
}

// ---------------------------------------------------------------------------
// Discord notifications
// ---------------------------------------------------------------------------

async function postToDiscord(
  webhookUrl: string,
  content: string,
  embeds?: unknown[]
): Promise<void> {
  const body: Record<string, unknown> = { content };
  if (embeds) body.embeds = embeds;

  const response = await fetch(webhookUrl, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });

  if (!response.ok) {
    throw new Error(
      `Discord webhook failed: ${response.status} ${response.statusText}`
    );
  }
}

function formatBountyNotification(bounty: BountyResult): string {
  const userMention = bounty.discordId
    ? `<@${bounty.discordId}>`
    : `**${bounty.contributor}**`;

  const typeEmoji: Record<string, string> = {
    test: "\u{1F9EA}",
    docs: "\u{1F4DD}",
    code: "\u{1F527}",
    "new-tool": "\u{2B50}",
  };

  const emoji = typeEmoji[bounty.bountyType] ?? "\u{1F3AF}";

  let msg = `${emoji} **Bounty Completed!**\n\n`;
  msg += `${userMention} completed a **${bounty.bountyType}** bounty (+${bounty.points} pts)\n`;
  msg += `PR: ${bounty.pr.html_url}\n`;

  if (!bounty.discordId) {
    msg += `\n_\u{1F517} @${bounty.contributor}: link your Discord in \`contributors.yml\` to get pinged!_`;
  }

  return msg;
}

function formatLeaderboard(entries: LeaderboardEntry[]): string {
  if (entries.length === 0) {
    return "No bounty completions this period.";
  }

  const sorted = [...entries].sort((a, b) => b.points - a.points);
  const top10 = sorted.slice(0, 10);

  const medals = ["\u{1F947}", "\u{1F948}", "\u{1F949}"];

  let msg = "**\u{1F3C6} Integration Bounty Leaderboard**\n\n";

  for (let i = 0; i < top10.length; i++) {
    const entry = top10[i];
    const rank = medals[i] ?? `**${i + 1}.**`;
    const name = entry.discordId
      ? `<@${entry.discordId}>`
      : `**${entry.github}**`;
    msg += `${rank} ${name} — ${entry.points} pts (${entry.bounties} bounties)\n`;
  }

  msg += `\n_${sorted.length} contributors total_`;

  return msg;
}

// ---------------------------------------------------------------------------
// Lurkr API — push XP to Discord leveling system
// ---------------------------------------------------------------------------

const LURKR_BASE_URL = "https://api.lurkr.gg/v2";

interface LurkrLevelResponse {
  level: {
    level: number;
    xp: number;
    messageCount: number;
  };
}

async function lurkrAddXP(
  guildId: string,
  userId: string,
  xp: number,
  apiKey: string
): Promise<LurkrLevelResponse> {
  const response = await fetch(
    `${LURKR_BASE_URL}/levels/${guildId}/users/${userId}`,
    {
      method: "PATCH",
      headers: {
        "Content-Type": "application/json",
        "X-API-Key": apiKey,
      },
      body: JSON.stringify({ xp: { increment: xp } }),
    }
  );

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`Lurkr API failed: ${response.status} ${text}`);
  }

  return response.json();
}

async function lurkrGetUser(
  guildId: string,
  userId: string,
  apiKey: string
): Promise<LurkrLevelResponse | null> {
  const response = await fetch(
    `${LURKR_BASE_URL}/levels/${guildId}/users/${userId}`,
    {
      method: "GET",
      headers: { "X-API-Key": apiKey },
    }
  );

  if (response.status === 404) return null;

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`Lurkr API failed: ${response.status} ${text}`);
  }

  return response.json();
}

async function awardLurkrXP(bounty: BountyResult): Promise<string | null> {
  const apiKey = process.env.LURKR_API_KEY;
  const guildId = process.env.LURKR_GUILD_ID;

  if (!apiKey || !guildId) {
    console.log("Lurkr not configured (missing LURKR_API_KEY or LURKR_GUILD_ID), skipping XP push");
    return null;
  }

  if (!bounty.discordId) {
    console.log(`No Discord ID for @${bounty.contributor}, cannot push Lurkr XP`);
    return null;
  }

  try {
    const result = await lurkrAddXP(guildId, bounty.discordId, bounty.points, apiKey);
    const msg = `Lurkr: +${bounty.points} XP \u2192 <@${bounty.discordId}> (now level ${result.level.level}, ${result.level.xp} XP)`;
    console.log(msg);
    return msg;
  } catch (err) {
    // Lurkr failure should not prevent the Discord notification from being sent
    console.error(`Lurkr XP push failed (non-fatal): ${err}`);
    return null;
  }
}

// ---------------------------------------------------------------------------
// Leaderboard calculation
// ---------------------------------------------------------------------------

function buildLeaderboard(
  bounties: BountyResult[]
): LeaderboardEntry[] {
  const map = new Map<string, LeaderboardEntry>();

  for (const b of bounties) {
    const key = b.contributor.toLowerCase();
    const existing = map.get(key);

    if (existing) {
      existing.points += b.points;
      existing.bounties += 1;
    } else {
      map.set(key, {
        github: b.contributor,
        discordId: b.discordId,
        points: b.points,
        bounties: 1,
      });
    }
  }

  return Array.from(map.values());
}

// ---------------------------------------------------------------------------
// CLI
// ---------------------------------------------------------------------------

async function main() {
  const mode = process.argv[2];

  const token = process.env.GITHUB_TOKEN;
  const owner = process.env.GITHUB_REPOSITORY_OWNER;
  const repo = process.env.GITHUB_REPOSITORY_NAME;
  const webhookUrl = process.env.DISCORD_WEBHOOK_URL;

  if (!token || !owner || !repo) {
    console.error(
      "Missing required env: GITHUB_TOKEN, GITHUB_REPOSITORY_OWNER, GITHUB_REPOSITORY_NAME"
    );
    process.exit(1);
  }

  const contributors = loadContributors();

  if (mode === "notify") {
    // Single bounty notification
    const prNumber = parseInt(process.env.PR_NUMBER ?? "", 10);
    if (!prNumber) {
      console.error("Missing PR_NUMBER env var");
      process.exit(1);
    }

    const pr = await getPR(owner, repo, prNumber, token);
    if (!pr.merged_at) {
      console.log("PR not merged, skipping");
      return;
    }

    const bounty = extractBounty(pr, contributors);
    if (!bounty) {
      console.log("No bounty label found, skipping");
      return;
    }

    console.log(
      `Bounty: ${bounty.bountyType} | ${bounty.points} pts | @${bounty.contributor}`
    );

    // Push XP to Lurkr (before Discord notification so we can include level info)
    const lurkrMsg = await awardLurkrXP(bounty);

    if (webhookUrl) {
      let msg = formatBountyNotification(bounty);
      if (lurkrMsg) {
        msg += `\n${lurkrMsg}`;
      }
      await postToDiscord(webhookUrl, msg);
      console.log("Discord notification sent");
    } else {
      console.log("No DISCORD_WEBHOOK_URL set, skipping Discord notification");
      console.log(formatBountyNotification(bounty));
    }
  } else if (mode === "leaderboard") {
    // Weekly leaderboard
    const since = process.env.SINCE_DATE;
    const prs = await getMergedBountyPRs(owner, repo, token, since);

    console.log(`Found ${prs.length} merged bounty PRs`);

    const bounties = prs
      .map((pr) => extractBounty(pr, contributors))
      .filter((b): b is BountyResult => b !== null);

    const entries = buildLeaderboard(bounties);
    const msg = formatLeaderboard(entries);

    console.log(msg);

    if (webhookUrl) {
      await postToDiscord(webhookUrl, msg);
      console.log("Leaderboard posted to Discord");
    }
  } else {
    console.error("Usage: bounty-tracker.ts <notify|leaderboard>");
    console.error("  notify      — Post Discord notification for a merged bounty PR");
    console.error("  leaderboard — Generate and post the leaderboard");
    process.exit(1);
  }
}

// Run if invoked directly
main().catch((err) => {
  console.error(err);
  process.exit(1);
});

// Export for testing
export {
  extractBounty,
  buildLeaderboard,
  formatBountyNotification,
  formatLeaderboard,
  loadContributors,
  resolveDiscord,
  awardLurkrXP,
  lurkrAddXP,
  lurkrGetUser,
  POINTS,
};
export type {
  BountyResult,
  LeaderboardEntry,
  Contributor,
  GitHubPR,
  LurkrLevelResponse,
};
@@ -87,7 +87,14 @@ PROVIDERS = {

def main() -> None:
    if len(sys.argv) < 3:
        print(json.dumps({"valid": False, "message": "Usage: check_llm_key.py <provider> <key> [api_base]"}))
        print(
            json.dumps(
                {
                    "valid": False,
                    "message": "Usage: check_llm_key.py <provider> <key> [api_base]",
                }
            )
        )
        sys.exit(2)

    provider_id = sys.argv[1]
Executable +23
@@ -0,0 +1,23 @@
#!/usr/bin/env bash
# Creates GitHub labels for the Integration Bounty Program.
# Usage: ./scripts/setup-bounty-labels.sh [owner/repo]
# Requires: gh CLI authenticated

set -euo pipefail

REPO="${1:-adenhq/hive}"

echo "Setting up bounty labels for $REPO..."

# Bounty type labels
gh label create "bounty:test" --repo "$REPO" --color "1D76DB" --description "Bounty: test a tool with real API key (20 pts)" --force
gh label create "bounty:docs" --repo "$REPO" --color "FBCA04" --description "Bounty: write or improve documentation (20 pts)" --force
gh label create "bounty:code" --repo "$REPO" --color "D93F0B" --description "Bounty: health checker, bug fix, or improvement (30 pts)" --force
gh label create "bounty:new-tool" --repo "$REPO" --color "6F42C1" --description "Bounty: build a new integration from scratch (75 pts)" --force

# Difficulty labels
gh label create "difficulty:easy" --repo "$REPO" --color "BFD4F2" --description "Good first contribution" --force
gh label create "difficulty:medium" --repo "$REPO" --color "D4C5F9" --description "Requires some familiarity" --force
gh label create "difficulty:hard" --repo "$REPO" --color "F9D0C4" --description "Significant effort or expertise needed" --force

echo "Done. Labels created for $REPO."
+12 -8
@@ -72,6 +72,7 @@ python mcp_server.py
| `apply_diff` | Apply diff patches to files |
| `apply_patch` | Apply unified patches to files |
| `grep_search` | Search file contents with regex |
| `hashline_edit` | Anchor-based file editing with hash-validated line references |
| `execute_command_tool` | Execute shell commands |
| `save_data` / `load_data` | Persist and retrieve structured data across steps |
| `serve_file_to_user` | Serve a file for the user to download |
@@ -175,14 +176,17 @@ tools/
│   └── tools/                    # Tool implementations
│       ├── example_tool/
│       ├── file_system_toolkits/ # File operation tools
│       │   ├── view_file.py
│       │   ├── write_to_file.py
│       │   ├── list_dir.py
│       │   ├── replace_file_content.py
│       │   ├── apply_diff.py
│       │   ├── apply_patch.py
│       │   ├── grep_search.py
│       │   └── execute_command_tool.py
│       │   ├── security.py
│       │   ├── hashline.py
│       │   ├── view_file/
│       │   ├── write_to_file/
│       │   ├── list_dir/
│       │   ├── replace_file_content/
│       │   ├── apply_diff/
│       │   ├── apply_patch/
│       │   ├── grep_search/
│       │   ├── hashline_edit/
│       │   └── execute_command_tool/
│       ├── web_search_tool/
│       ├── web_scrape_tool/
│       ├── pdf_read_tool/
+124 -125
@@ -71,8 +71,49 @@ def _find_project_root() -> str:

def _resolve_path(path: str) -> str:
    """Resolve path relative to PROJECT_ROOT. Raises ValueError if outside."""
    # Normalize slashes for cross-platform (e.g. exports/hi_agent from LLM)
    path = path.replace("/", os.sep)
    if os.path.isabs(path):
        resolved = os.path.abspath(path)
        try:
            common = os.path.commonpath([resolved, PROJECT_ROOT])
        except ValueError:
            common = ""
        if common != PROJECT_ROOT:
            # LLM may emit wrong-root paths (/mnt/data, /workspace, etc.).
            # Strip known prefixes and treat the remainder as relative to PROJECT_ROOT.
            path_norm = path.replace("\\", "/")
            for prefix in (
                "/mnt/data/",
                "/mnt/data",
                "/workspace/",
                "/workspace",
                "/repo/",
                "/repo",
            ):
                p = prefix.rstrip("/") + "/"
                prefix_stripped = prefix.rstrip("/")
                if path_norm.startswith(p) or (
                    path_norm.startswith(prefix_stripped) and len(path_norm) > len(prefix)
                ):
                    suffix = path_norm[len(prefix_stripped) :].lstrip("/")
                    if suffix:
                        path = suffix.replace("/", os.sep)
                        resolved = os.path.abspath(os.path.join(PROJECT_ROOT, path))
                        break
            else:
                # Try extracting exports/ or core/ subpath from the absolute path
                parts = path.split(os.sep)
                if "exports" in parts:
                    idx = parts.index("exports")
                    path = os.sep.join(parts[idx:])
                    resolved = os.path.abspath(os.path.join(PROJECT_ROOT, path))
                elif "core" in parts:
                    idx = parts.index("core")
                    path = os.sep.join(parts[idx:])
                    resolved = os.path.abspath(os.path.join(PROJECT_ROOT, path))
                else:
                    raise ValueError(f"Access denied: '{path}' is outside the project root.")
    else:
        resolved = os.path.abspath(os.path.join(PROJECT_ROOT, path))
    try:
@@ -91,11 +132,7 @@ def _snapshot_git(*args: str) -> str:
    """Run a git command with the snapshot GIT_DIR and PROJECT_ROOT worktree."""
    cmd = ["git", "--git-dir", SNAPSHOT_DIR, "--work-tree", PROJECT_ROOT, *args]
    result = subprocess.run(
        cmd,
        capture_output=True,
        text=True,
        timeout=30,
        encoding="utf-8",
        cmd, capture_output=True, text=True, timeout=30, encoding="utf-8", stdin=subprocess.DEVNULL
    )
    return result.stdout.strip()

@@ -110,6 +147,7 @@ def _ensure_snapshot_repo():
        ["git", "init", "--bare", SNAPSHOT_DIR],
        capture_output=True,
        timeout=10,
        stdin=subprocess.DEVNULL,
        encoding="utf-8",
    )
    _snapshot_git("config", "core.autocrlf", "false")
@@ -132,6 +170,37 @@ def _take_snapshot() -> str:
MAX_COMMAND_OUTPUT = 30_000  # chars before truncation


def _translate_command_for_windows(command: str) -> str:
    """Translate common Unix commands to Windows equivalents."""
    if os.name != "nt":
        return command
    cmd = command.strip()

    # mkdir -p: Unix creates parents; Windows mkdir already does; -p becomes a dir name
    if cmd.startswith("mkdir -p ") or cmd.startswith("mkdir -p\t"):
        rest = cmd[9:].lstrip().replace("/", os.sep)
        return "mkdir " + rest

    # ls / pwd: cmd.exe uses dir and cd
    # Order matters: replace longer patterns first
    for unix, win in [
        ("ls -la", "dir /a"),
        ("ls -al", "dir /a"),
        ("ls -l", "dir"),
        ("ls -a", "dir /a"),
        ("ls ", "dir "),
        ("pwd", "cd"),
    ]:
        cmd = cmd.replace(unix, win)
    # Standalone "ls" at end (e.g. "cd x && ls")
    if cmd.endswith(" ls"):
        cmd = cmd[:-3] + " dir"
    elif cmd == "ls":
        cmd = "dir"

    return cmd


@mcp.tool()
def run_command(command: str, cwd: str = "", timeout: int = 120) -> str:
    """Execute a shell command in the project context.
@@ -151,6 +220,7 @@ def run_command(command: str, cwd: str = "", timeout: int = 120) -> str:
    work_dir = _resolve_path(cwd) if cwd else PROJECT_ROOT

    try:
        command = _translate_command_for_windows(command)
        start = time.monotonic()
        result = subprocess.run(
            command,
@@ -159,12 +229,16 @@ def run_command(command: str, cwd: str = "", timeout: int = 120) -> str:
            capture_output=True,
            text=True,
            timeout=timeout,
            stdin=subprocess.DEVNULL,
            encoding="utf-8",
            env={
                **os.environ,
                "PYTHONPATH": (
                    f"{PROJECT_ROOT}/core:{PROJECT_ROOT}/exports"
                    f":{PROJECT_ROOT}/core/framework/agents"
                "PYTHONPATH": os.pathsep.join(
                    [
                        os.path.join(PROJECT_ROOT, "core"),
                        os.path.join(PROJECT_ROOT, "exports"),
                        os.path.join(PROJECT_ROOT, "core", "framework", "agents"),
                    ]
                ),
            },
        )
@@ -236,6 +310,7 @@ def undo_changes(path: str = "") -> str:
            capture_output=True,
            text=True,
            timeout=10,
            stdin=subprocess.DEVNULL,
            encoding="utf-8",
        )
        return f"Restored: {path}"
@@ -309,28 +384,31 @@ def list_agent_tools(
        return json.dumps({"error": f"Failed to read config: {e}"})

    try:
        from pathlib import Path

        from framework.runner.mcp_client import MCPClient, MCPServerConfig
        from framework.runner.tool_registry import ToolRegistry
    except ImportError:
        return json.dumps({"error": "Cannot import MCPClient"})

    all_tools: list[dict] = []
    errors = []
    config_dir = os.path.dirname(config_path)
    config_dir = Path(config_path).parent

    for server_name, server_conf in servers_config.items():
        cwd = server_conf.get("cwd", "")
        if cwd and not os.path.isabs(cwd):
            cwd = os.path.abspath(os.path.join(config_dir, cwd))
        resolved = ToolRegistry.resolve_mcp_stdio_config(
            {"name": server_name, **server_conf}, config_dir
        )
        try:
            config = MCPServerConfig(
                name=server_name,
                transport=server_conf.get("transport", "stdio"),
                command=server_conf.get("command"),
                args=server_conf.get("args", []),
                env=server_conf.get("env", {}),
                cwd=cwd or None,
                url=server_conf.get("url"),
                headers=server_conf.get("headers", {}),
                transport=resolved.get("transport", "stdio"),
                command=resolved.get("command"),
                args=resolved.get("args", []),
                env=resolved.get("env", {}),
                cwd=resolved.get("cwd"),
                url=resolved.get("url"),
                headers=resolved.get("headers", {}),
            )
            client = MCPClient(config)
            client.connect()
@@ -419,19 +497,24 @@ def validate_agent_tools(agent_path: str) -> str:
    if not os.path.isdir(resolved):
        return json.dumps({"error": f"Agent directory not found: {agent_path}"})

    agent_dir = resolved  # Keep path; 'resolved' is reused for MCP config in loop

    # --- Discover available tools from agent's MCP servers ---
    mcp_config_path = os.path.join(resolved, "mcp_servers.json")
    mcp_config_path = os.path.join(agent_dir, "mcp_servers.json")
    if not os.path.isfile(mcp_config_path):
        return json.dumps({"error": f"No mcp_servers.json found in {agent_path}"})

    try:
        from pathlib import Path

        from framework.runner.mcp_client import MCPClient, MCPServerConfig
        from framework.runner.tool_registry import ToolRegistry
    except ImportError:
        return json.dumps({"error": "Cannot import MCPClient"})

    available_tools: set[str] = set()
    discovery_errors = []
    config_dir = os.path.dirname(mcp_config_path)
    config_dir = Path(mcp_config_path).parent

    try:
        with open(mcp_config_path, encoding="utf-8") as f:
@@ -440,19 +523,19 @@ def validate_agent_tools(agent_path: str) -> str:
        return json.dumps({"error": f"Failed to read mcp_servers.json: {e}"})

    for server_name, server_conf in servers_config.items():
        cwd = server_conf.get("cwd", "")
        if cwd and not os.path.isabs(cwd):
            cwd = os.path.abspath(os.path.join(config_dir, cwd))
        resolved = ToolRegistry.resolve_mcp_stdio_config(
            {"name": server_name, **server_conf}, config_dir
        )
        try:
            config = MCPServerConfig(
                name=server_name,
                transport=server_conf.get("transport", "stdio"),
                command=server_conf.get("command"),
                args=server_conf.get("args", []),
                env=server_conf.get("env", {}),
                cwd=cwd or None,
                url=server_conf.get("url"),
                headers=server_conf.get("headers", {}),
                transport=resolved.get("transport", "stdio"),
                command=resolved.get("command"),
                args=resolved.get("args", []),
                env=resolved.get("env", {}),
                cwd=resolved.get("cwd"),
                url=resolved.get("url"),
                headers=resolved.get("headers", {}),
            )
            client = MCPClient(config)
            client.connect()
@@ -463,7 +546,7 @@ def validate_agent_tools(agent_path: str) -> str:
            discovery_errors.append({"server": server_name, "error": str(e)})

    # --- Load agent nodes and extract declared tools ---
    agent_py = os.path.join(resolved, "agent.py")
    agent_py = os.path.join(agent_dir, "agent.py")
    if not os.path.isfile(agent_py):
        return json.dumps({"error": f"No agent.py found in {agent_path}"})

@@ -471,8 +554,8 @@ def validate_agent_tools(agent_path: str) -> str:
    import importlib.util
    import sys

    package_name = os.path.basename(resolved)
    parent_dir = os.path.dirname(os.path.abspath(resolved))
    package_name = os.path.basename(agent_dir)
    parent_dir = os.path.dirname(os.path.abspath(agent_dir))
    if parent_dir not in sys.path:
        sys.path.insert(0, parent_dir)

@@ -726,94 +809,6 @@ def list_agent_sessions(
    )


@mcp.tool()
def get_agent_session_state(agent_name: str, session_id: str) -> str:
    """Load full session state (excluding memory to prevent context bloat).

    Returns status, progress, result, metrics, and checkpoint info.
    Use get_agent_session_memory to read memory contents separately.

    Args:
        agent_name: Agent package name (e.g. 'deep_research_agent')
        session_id: Session ID (e.g. 'session_20260208_143022_abc12345')

    Returns:
        JSON with full session state
    """
    agent_dir = _resolve_hive_agent_path(agent_name)
    state_path = agent_dir / "sessions" / session_id / "state.json"
    data = _read_session_json(state_path)
    if data is None:
        return json.dumps({"error": f"Session not found: {session_id}"})

    # Exclude memory values but show keys
    memory = data.get("memory", {})
    data["memory_keys"] = list(memory.keys()) if isinstance(memory, dict) else []
    data["memory_size"] = len(memory) if isinstance(memory, dict) else 0
    data.pop("memory", None)

    return json.dumps(data, indent=2, default=str)


@mcp.tool()
def get_agent_session_memory(
    agent_name: str,
    session_id: str,
    key: str = "",
) -> str:
    """Read memory contents from a session.

    Memory stores intermediate results passed between nodes. Use this
    to inspect what data was produced during execution.

    Args:
        agent_name: Agent package name
        session_id: Session ID
        key: Specific memory key to retrieve. Empty for all keys.

    Returns:
        JSON with memory contents
    """
    agent_dir = _resolve_hive_agent_path(agent_name)
    state_path = agent_dir / "sessions" / session_id / "state.json"
    data = _read_session_json(state_path)
    if data is None:
        return json.dumps({"error": f"Session not found: {session_id}"})

    memory = data.get("memory", {})
    if not isinstance(memory, dict):
        memory = {}

    if key:
        if key not in memory:
            return json.dumps(
                {
                    "error": f"Memory key not found: '{key}'",
                    "available_keys": list(memory.keys()),
                }
            )
        return json.dumps(
            {
                "session_id": session_id,
                "key": key,
                "value": memory[key],
                "value_type": type(memory[key]).__name__,
            },
            indent=2,
            default=str,
        )

    return json.dumps(
        {
            "session_id": session_id,
            "memory": memory,
            "total_keys": len(memory),
        },
        indent=2,
        default=str,
    )


@mcp.tool()
def list_agent_checkpoints(
    agent_name: str,
@@ -1015,13 +1010,16 @@ def run_agent_tests(
        cmd.append("-x")
    cmd.append("--tb=short")

    # Set PYTHONPATH
    # Set PYTHONPATH (use pathsep for Windows)
    env = os.environ.copy()
    pythonpath = env.get("PYTHONPATH", "")
    core_path = os.path.join(PROJECT_ROOT, "core")
    exports_path = os.path.join(PROJECT_ROOT, "exports")
    fw_agents_path = os.path.join(PROJECT_ROOT, "core", "framework", "agents")
    env["PYTHONPATH"] = f"{core_path}:{exports_path}:{fw_agents_path}:{PROJECT_ROOT}:{pythonpath}"
    path_parts = [core_path, exports_path, fw_agents_path, PROJECT_ROOT]
    if pythonpath:
        path_parts.append(pythonpath)
    env["PYTHONPATH"] = os.pathsep.join(path_parts)

    try:
        result = subprocess.run(
@@ -1030,6 +1028,7 @@ def run_agent_tests(
            text=True,
            timeout=120,
            env=env,
            stdin=subprocess.DEVNULL,
            encoding="utf-8",
        )
    except subprocess.TimeoutExpired:
@@ -1154,7 +1153,7 @@ def main() -> None:
    register_file_tools(
        mcp,
        resolve_path=_resolve_path,
        before_write=_take_snapshot,
        before_write=None,  # Git snapshot causes stdio deadlock on Windows; undo_changes limited
        project_root=PROJECT_ROOT,
    )
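For illustration, a few spot-checks of the `_translate_command_for_windows` helper added above — a minimal sketch assuming `os.name == "nt"`; the inputs and expected outputs are hypothetical, derived from the replacement table in the diff, not from the project's test suite:

```python
# Hypothetical spot-checks for _translate_command_for_windows (assumes os.name == "nt").
# Expected outputs follow the replacement table in the diff above.
cases = [
    ("mkdir -p exports/new_agent", "mkdir exports\\new_agent"),  # -p dropped; parents implicit
    ("ls -la", "dir /a"),
    ("cd exports && ls", "cd exports && dir"),  # standalone trailing "ls"
    ("pwd", "cd"),  # bare "cd" prints the current directory in cmd.exe
]
for cmd, expected in cases:
    print(f"{cmd!r} -> {expected!r}")
```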
+7 -4
@@ -15,9 +15,11 @@ Usage:
    python mcp_server.py --stdio

Environment Variables:
    MCP_PORT - Server port (default: 4001)
    ANTHROPIC_API_KEY - Required at startup for testing/LLM nodes
    BRAVE_SEARCH_API_KEY - Required for web_search tool (validated at agent load time)
    MCP_PORT - Server port (default: 4001)
    INCLUDE_UNVERIFIED_TOOLS - Set to "true", "1", or "yes" to also load
        unverified/community tool integrations (default: off)
    ANTHROPIC_API_KEY - Required at startup for testing/LLM nodes
    BRAVE_SEARCH_API_KEY - Required for web_search tool (validated at agent load time)

Note:
    Two-tier credential validation:
@@ -81,7 +83,8 @@ except CredentialError as e:
mcp = FastMCP("tools")

# Register all tools with the MCP server, passing credential store
tools = register_all_tools(mcp, credentials=credentials)
include_unverified = os.getenv("INCLUDE_UNVERIFIED_TOOLS", "").lower() in ("true", "1", "yes")
tools = register_all_tools(mcp, credentials=credentials, include_unverified=include_unverified)
# Only print to stdout in HTTP mode (STDIO mode requires clean stdout for JSON-RPC)
if "--stdio" not in sys.argv:
    logger.info(f"Registered {len(tools)} tools: {tools}")
@@ -114,6 +114,10 @@ lint.isort.section-order = [
[tool.pytest.ini_options]
testpaths = ["tests"]
asyncio_mode = "auto"
addopts = "-m 'not live'"
markers = [
    "live: Tests that call real external APIs (require credentials, never run in CI)",
]

[dependency-groups]
dev = [
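A minimal sketch of how the `live` marker registered above would be used — the test name and `EXAMPLE_API_KEY` variable are hypothetical; by default `addopts = "-m 'not live'"` keeps such tests out of CI, and `pytest -m live` opts in:

```python
import os

import pytest


@pytest.mark.live  # excluded by default via addopts = "-m 'not live'"
def test_example_live_roundtrip():  # hypothetical test name
    api_key = os.environ.get("EXAMPLE_API_KEY")  # hypothetical credential
    if not api_key:
        pytest.skip("EXAMPLE_API_KEY not set")
    assert api_key.strip()
```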
@@ -33,17 +33,20 @@ Usage:
    })

Credential categories:
- llm.py: LLM provider credentials (anthropic, openai, etc.)
- search.py: Search tool credentials (brave_search, google_search, etc.)
- email.py: Email provider credentials (resend, google/gmail)
- apollo.py: Apollo.io API credentials
- brevo.py: Brevo (Sendinblue) transactional email/SMS credentials
- discord.py: Discord bot credentials
- github.py: GitHub API credentials
- google_analytics.py: Google Analytics 4 Data API credentials
- google_docs.py: Google Docs API credentials
- google_maps.py: Google Maps Platform credentials
- hubspot.py: HubSpot CRM credentials
- intercom.py: Intercom customer messaging credentials
- postgres.py: PostgreSQL database credentials
- slack.py: Slack workspace credentials
- google_analytics.py: Google Analytics credentials
- google_maps.py: Google Maps Platform credentials
- stripe.py: Stripe payments API credentials
- calcom.py: Cal.com scheduling API credentials

Note: Tools that don't need credentials simply omit the 'credentials' parameter
@@ -78,17 +81,13 @@ from .gcp_vision import GCP_VISION_CREDENTIALS
from .github import GITHUB_CREDENTIALS
from .gitlab import GITLAB_CREDENTIALS
from .google_analytics import GOOGLE_ANALYTICS_CREDENTIALS
from .google_calendar import GOOGLE_CALENDAR_CREDENTIALS
from .google_docs import GOOGLE_DOCS_CREDENTIALS
from .google_maps import GOOGLE_MAPS_CREDENTIALS
from .google_search_console import GOOGLE_SEARCH_CONSOLE_CREDENTIALS
from .google_sheets import GOOGLE_SHEETS_CREDENTIALS
from .greenhouse import GREENHOUSE_CREDENTIALS
from .health_check import (
    BaseHttpHealthChecker,
    HealthCheckResult,
    check_credential_health,
    validate_integration_wiring,
)
from .hubspot import HUBSPOT_CREDENTIALS
from .huggingface import HUGGINGFACE_CREDENTIALS
@@ -97,7 +96,6 @@ from .jira import JIRA_CREDENTIALS
from .kafka import KAFKA_CREDENTIALS
from .langfuse import LANGFUSE_CREDENTIALS
from .linear import LINEAR_CREDENTIALS
from .llm import LLM_CREDENTIALS
from .lusha import LUSHA_CREDENTIALS
from .microsoft_graph import MICROSOFT_GRAPH_CREDENTIALS
from .mongodb import MONGODB_CREDENTIALS
@@ -148,79 +146,80 @@ from .zoom import ZOOM_CREDENTIALS
# Merged registry of all credentials
CREDENTIAL_SPECS = {
    **AIRTABLE_CREDENTIALS,
    **LLM_CREDENTIALS,
    **NEWS_CREDENTIALS,
    **SEARCH_CREDENTIALS,
    **EMAIL_CREDENTIALS,
    **GCP_VISION_CREDENTIALS,
    **APIFY_CREDENTIALS,
    **AWS_S3_CREDENTIALS,
    **ASANA_CREDENTIALS,
    **APOLLO_CREDENTIALS,
    **ASANA_CREDENTIALS,
    **ATTIO_CREDENTIALS,
    **AWS_S3_CREDENTIALS,
    **AZURE_SQL_CREDENTIALS,
    **BIGQUERY_CREDENTIALS,
    **BREVO_CREDENTIALS,
    **CALCOM_CREDENTIALS,
    **CALENDLY_CREDENTIALS,
    **CLOUDINARY_CREDENTIALS,
    **CONFLUENCE_CREDENTIALS,
    **DATABRICKS_CREDENTIALS,
    **DISCORD_CREDENTIALS,
    **DOCKER_HUB_CREDENTIALS,
    **EMAIL_CREDENTIALS,
    **GCP_VISION_CREDENTIALS,
    **GITHUB_CREDENTIALS,
    **GREENHOUSE_CREDENTIALS,
    **GITLAB_CREDENTIALS,
    **GOOGLE_ANALYTICS_CREDENTIALS,
    **GOOGLE_DOCS_CREDENTIALS,
    **GOOGLE_MAPS_CREDENTIALS,
    **GOOGLE_SEARCH_CONSOLE_CREDENTIALS,
    **HUGGINGFACE_CREDENTIALS,
    **HUBSPOT_CREDENTIALS,
    **HUGGINGFACE_CREDENTIALS,
    **INTERCOM_CREDENTIALS,
    **JIRA_CREDENTIALS,
    **KAFKA_CREDENTIALS,
    **LANGFUSE_CREDENTIALS,
    **LINEAR_CREDENTIALS,
    **MONGODB_CREDENTIALS,
    **PAGERDUTY_CREDENTIALS,
    **GOOGLE_CALENDAR_CREDENTIALS,
    **SLACK_CREDENTIALS,
    **SERPAPI_CREDENTIALS,
    **RAZORPAY_CREDENTIALS,
    **TELEGRAM_CREDENTIALS,
    **BIGQUERY_CREDENTIALS,
    **CALCOM_CREDENTIALS,
    **CALENDLY_CREDENTIALS,
    **DATABRICKS_CREDENTIALS,
    **DOCKER_HUB_CREDENTIALS,
    **PIPEDRIVE_CREDENTIALS,
    **STRIPE_CREDENTIALS,
    **BREVO_CREDENTIALS,
    **POSTGRES_CREDENTIALS,
    **QUICKBOOKS_CREDENTIALS,
    **LUSHA_CREDENTIALS,
    **MICROSOFT_GRAPH_CREDENTIALS,
    **MONGODB_CREDENTIALS,
    **N8N_CREDENTIALS,
    **NEWS_CREDENTIALS,
    **NOTION_CREDENTIALS,
    **OBSIDIAN_CREDENTIALS,
    **PAGERDUTY_CREDENTIALS,
    **PINECONE_CREDENTIALS,
    **PIPEDRIVE_CREDENTIALS,
    **PLAID_CREDENTIALS,
    **POSTGRES_CREDENTIALS,
    **POWERBI_CREDENTIALS,
    **PUSHOVER_CREDENTIALS,
    **QUICKBOOKS_CREDENTIALS,
    **RAZORPAY_CREDENTIALS,
    **REDDIT_CREDENTIALS,
    **REDIS_CREDENTIALS,
    **REDSHIFT_CREDENTIALS,
    **SALESFORCE_CREDENTIALS,
    **SAP_CREDENTIALS,
    **SEARCH_CREDENTIALS,
    **SERPAPI_CREDENTIALS,
    **SHOPIFY_CREDENTIALS,
    **SLACK_CREDENTIALS,
    **SNOWFLAKE_CREDENTIALS,
    **STRIPE_CREDENTIALS,
    **SUPABASE_CREDENTIALS,
    **TELEGRAM_CREDENTIALS,
    **TERRAFORM_CREDENTIALS,
    **TINES_CREDENTIALS,
    **TRELLO_CREDENTIALS,
    **TWILIO_CREDENTIALS,
    **TWITTER_CREDENTIALS,
    **VERCEL_CREDENTIALS,
    **YOUTUBE_CREDENTIALS,
    **PINECONE_CREDENTIALS,
    **PLAID_CREDENTIALS,
    **TRELLO_CREDENTIALS,
    **CONFLUENCE_CREDENTIALS,
    **CLOUDINARY_CREDENTIALS,
    **GITLAB_CREDENTIALS,
    **GOOGLE_SHEETS_CREDENTIALS,
    **GREENHOUSE_CREDENTIALS,
    **JIRA_CREDENTIALS,
    **NOTION_CREDENTIALS,
    **REDDIT_CREDENTIALS,
    **TINES_CREDENTIALS,
    **TWITTER_CREDENTIALS,
    **TWILIO_CREDENTIALS,
    **ZENDESK_CREDENTIALS,
    **ZOHO_CRM_CREDENTIALS,
    **TERRAFORM_CREDENTIALS,
    **LUSHA_CREDENTIALS,
    **POWERBI_CREDENTIALS,
    **SNOWFLAKE_CREDENTIALS,
    **AZURE_SQL_CREDENTIALS,
    **KAFKA_CREDENTIALS,
    **REDSHIFT_CREDENTIALS,
    **SAP_CREDENTIALS,
    **SALESFORCE_CREDENTIALS,
    **SHOPIFY_CREDENTIALS,
    **ZOOM_CREDENTIALS,
    **N8N_CREDENTIALS,
    **LANGFUSE_CREDENTIALS,
    **OBSIDIAN_CREDENTIALS,
}

__all__ = [
@@ -228,13 +227,9 @@ __all__ = [
    "CredentialSpec",
    "CredentialStoreAdapter",
    "CredentialError",
    # Credential store adapter (replaces deprecated CredentialManager)
    "CredentialStoreAdapter",
    # Health check utilities
    "BaseHttpHealthChecker",
    "HealthCheckResult",
    "check_credential_health",
    "validate_integration_wiring",
    # Browser utilities for OAuth2 flows
    "open_browser",
    "get_aden_auth_url",
@@ -246,79 +241,76 @@ __all__ = [
    "add_env_var_to_shell_config",
    # Merged registry
    "CREDENTIAL_SPECS",
    # Category registries (for direct access if needed)
    # Category registries
    "AIRTABLE_CREDENTIALS",
    "LLM_CREDENTIALS",
    "NEWS_CREDENTIALS",
    "SEARCH_CREDENTIALS",
    "APIFY_CREDENTIALS",
    "APOLLO_CREDENTIALS",
    "ASANA_CREDENTIALS",
    "ATTIO_CREDENTIALS",
    "AWS_S3_CREDENTIALS",
    "AZURE_SQL_CREDENTIALS",
    "BIGQUERY_CREDENTIALS",
    "BREVO_CREDENTIALS",
    "CALCOM_CREDENTIALS",
    "CALENDLY_CREDENTIALS",
    "CLOUDINARY_CREDENTIALS",
    "CONFLUENCE_CREDENTIALS",
    "DATABRICKS_CREDENTIALS",
    "DISCORD_CREDENTIALS",
    "DOCKER_HUB_CREDENTIALS",
    "EMAIL_CREDENTIALS",
    "GCP_VISION_CREDENTIALS",
    "GITHUB_CREDENTIALS",
    "GREENHOUSE_CREDENTIALS",
    "GITLAB_CREDENTIALS",
    "GOOGLE_ANALYTICS_CREDENTIALS",
    "GOOGLE_DOCS_CREDENTIALS",
    "GOOGLE_MAPS_CREDENTIALS",
    "GOOGLE_SEARCH_CONSOLE_CREDENTIALS",
    "HUGGINGFACE_CREDENTIALS",
    "HUBSPOT_CREDENTIALS",
    "HUGGINGFACE_CREDENTIALS",
    "INTERCOM_CREDENTIALS",
    "JIRA_CREDENTIALS",
    "KAFKA_CREDENTIALS",
    "LANGFUSE_CREDENTIALS",
    "LINEAR_CREDENTIALS",
    "MONGODB_CREDENTIALS",
    "PAGERDUTY_CREDENTIALS",
    "GOOGLE_CALENDAR_CREDENTIALS",
    "SLACK_CREDENTIALS",
    "APIFY_CREDENTIALS",
    "AWS_S3_CREDENTIALS",
    "ASANA_CREDENTIALS",
    "APOLLO_CREDENTIALS",
    "ATTIO_CREDENTIALS",
    "SERPAPI_CREDENTIALS",
    "RAZORPAY_CREDENTIALS",
    "TELEGRAM_CREDENTIALS",
    "BIGQUERY_CREDENTIALS",
    "CALCOM_CREDENTIALS",
    "CALENDLY_CREDENTIALS",
    "DATABRICKS_CREDENTIALS",
    "DISCORD_CREDENTIALS",
    "DOCKER_HUB_CREDENTIALS",
    "PIPEDRIVE_CREDENTIALS",
    "STRIPE_CREDENTIALS",
    "BREVO_CREDENTIALS",
    "POSTGRES_CREDENTIALS",
    "QUICKBOOKS_CREDENTIALS",
    "LUSHA_CREDENTIALS",
    "MICROSOFT_GRAPH_CREDENTIALS",
    "MONGODB_CREDENTIALS",
    "N8N_CREDENTIALS",
    "NEWS_CREDENTIALS",
    "NOTION_CREDENTIALS",
    "OBSIDIAN_CREDENTIALS",
    "PAGERDUTY_CREDENTIALS",
    "PINECONE_CREDENTIALS",
    "PIPEDRIVE_CREDENTIALS",
    "PLAID_CREDENTIALS",
    "POSTGRES_CREDENTIALS",
    "POWERBI_CREDENTIALS",
    "PUSHOVER_CREDENTIALS",
    "QUICKBOOKS_CREDENTIALS",
    "RAZORPAY_CREDENTIALS",
    "REDDIT_CREDENTIALS",
    "REDIS_CREDENTIALS",
    "REDSHIFT_CREDENTIALS",
    "SALESFORCE_CREDENTIALS",
    "SAP_CREDENTIALS",
    "SEARCH_CREDENTIALS",
    "SERPAPI_CREDENTIALS",
    "SHOPIFY_CREDENTIALS",
    "SLACK_CREDENTIALS",
    "SNOWFLAKE_CREDENTIALS",
    "STRIPE_CREDENTIALS",
    "SUPABASE_CREDENTIALS",
    "TELEGRAM_CREDENTIALS",
    "TERRAFORM_CREDENTIALS",
    "TINES_CREDENTIALS",
    "TRELLO_CREDENTIALS",
    "TWILIO_CREDENTIALS",
    "TWITTER_CREDENTIALS",
    "VERCEL_CREDENTIALS",
    "YOUTUBE_CREDENTIALS",
    "PINECONE_CREDENTIALS",
    "PLAID_CREDENTIALS",
    "TRELLO_CREDENTIALS",
    "CONFLUENCE_CREDENTIALS",
    "CLOUDINARY_CREDENTIALS",
    "GITLAB_CREDENTIALS",
    "GOOGLE_SHEETS_CREDENTIALS",
    "GREENHOUSE_CREDENTIALS",
    "JIRA_CREDENTIALS",
    "NOTION_CREDENTIALS",
    "REDDIT_CREDENTIALS",
    "TINES_CREDENTIALS",
    "TWITTER_CREDENTIALS",
    "TWILIO_CREDENTIALS",
    "ZENDESK_CREDENTIALS",
    "ZOHO_CRM_CREDENTIALS",
    "TERRAFORM_CREDENTIALS",
    "LUSHA_CREDENTIALS",
    "POWERBI_CREDENTIALS",
    "SNOWFLAKE_CREDENTIALS",
    "AZURE_SQL_CREDENTIALS",
    "KAFKA_CREDENTIALS",
    "REDSHIFT_CREDENTIALS",
    "SAP_CREDENTIALS",
    "SALESFORCE_CREDENTIALS",
    "SHOPIFY_CREDENTIALS",
    "ZOOM_CREDENTIALS",
    "N8N_CREDENTIALS",
    "LANGFUSE_CREDENTIALS",
    "OBSIDIAN_CREDENTIALS",
]
@@ -11,7 +11,7 @@ EMAIL_CREDENTIALS = {
        env_var="RESEND_API_KEY",
        tools=["send_email"],
        node_types=[],
        required=True,
        required=False,
        startup_required=False,
        help_url="https://resend.com/api-keys",
        description="API key for Resend email service",
@@ -36,7 +36,9 @@ EMAIL_CREDENTIALS = {
    "google": CredentialSpec(
        env_var="GOOGLE_ACCESS_TOKEN",
        tools=[
            "send_email",
            # send_email is excluded: it's a multi-provider tool that checks
            # credentials at runtime based on the provider parameter.
            # Gmail tools
            "gmail_reply_email",
            "gmail_list_messages",
            "gmail_get_message",
@@ -47,12 +49,32 @@ EMAIL_CREDENTIALS = {
            "gmail_create_draft",
            "gmail_list_labels",
            "gmail_create_label",
            # Google Calendar tools
            "calendar_list_events",
            "calendar_get_event",
            "calendar_create_event",
            "calendar_update_event",
            "calendar_delete_event",
            "calendar_list_calendars",
            "calendar_get_calendar",
            "calendar_check_availability",
            # Google Sheets tools
            "google_sheets_get_spreadsheet",
            "google_sheets_create_spreadsheet",
            "google_sheets_get_values",
            "google_sheets_update_values",
            "google_sheets_append_values",
            "google_sheets_clear_values",
            "google_sheets_batch_update_values",
            "google_sheets_batch_clear_values",
            "google_sheets_add_sheet",
            "google_sheets_delete_sheet",
        ],
        node_types=[],
        required=True,
        startup_required=False,
        help_url="https://hive.adenhq.com",
        description="Google OAuth2 access token (via Aden) - used for Gmail",
        description="Google OAuth2 access token (via Aden) - used for Gmail, Calendar, and Sheets",
        aden_supported=True,
        aden_provider_name="google",
        direct_api_key_supported=False,
@@ -1,39 +0,0 @@
"""
Google Calendar tool credentials.

Contains credentials for Google Calendar integration.
"""

from .base import CredentialSpec

GOOGLE_CALENDAR_CREDENTIALS = {
    "google_calendar_oauth": CredentialSpec(
        env_var="GOOGLE_CALENDAR_ACCESS_TOKEN",
        tools=[
            "calendar_list_events",
            "calendar_get_event",
            "calendar_create_event",
            "calendar_update_event",
            "calendar_delete_event",
            "calendar_list_calendars",
            "calendar_get_calendar",
            "calendar_check_availability",
        ],
        node_types=[],
        required=False,
        startup_required=False,
        help_url="https://hive.adenhq.com",
        description="Google Calendar OAuth2 access token (via Aden) - used for Google Calendar",
        # Auth method support
        aden_supported=True,
        aden_provider_name="google-calendar",
        direct_api_key_supported=False,
        api_key_instructions="Google Calendar OAuth requires OAuth2. Connect via hive.adenhq.com",
        # Health check configuration
        health_check_endpoint="https://www.googleapis.com/calendar/v3/users/me/calendarList",
        health_check_method="GET",
        # Credential store mapping
        credential_id="google_calendar_oauth",
        credential_key="access_token",
    ),
}
@@ -233,7 +233,7 @@ class OAuthBearerHealthChecker:
    """Generic health checker for OAuth2 Bearer token credentials.

    Validates by making a GET request with ``Authorization: Bearer <token>``
    to the given endpoint. Reused for Google Gmail, Google Calendar, and as
    to the given endpoint. Reused for Google Docs, Intercom, and as
    the automatic fallback for any credential spec that defines a
    ``health_check_endpoint`` but has no dedicated checker.
    """
@@ -478,23 +478,83 @@ class BaseHttpHealthChecker:
        )


class GoogleCalendarHealthChecker(OAuthBearerHealthChecker):
    """Health checker for Google Calendar OAuth tokens."""
class GoogleHealthChecker:
    """Health checker for Google OAuth tokens (Gmail, Calendar, Sheets)."""

    def __init__(self):
        super().__init__(
            endpoint="https://www.googleapis.com/calendar/v3/users/me/calendarList?maxResults=1",
            service_name="Google Calendar",
        )
    ENDPOINTS: dict[str, str] = {
        "gmail": "https://gmail.googleapis.com/gmail/v1/users/me/profile",
        "calendar": "https://www.googleapis.com/calendar/v3/users/me/calendarList",
        "sheets": "https://sheets.googleapis.com/v4/spreadsheets/healthcheck_nonexistent",
    }
    TIMEOUT = 10.0

    def _extract_identity(self, data: dict) -> dict[str, str]:
        # Primary calendar ID is the user's email
        for item in data.get("items", []):
            if item.get("primary"):
                cal_id = item.get("id", "")
                if "@" in cal_id:
                    return {"email": cal_id}
        return {}
    def check(self, access_token: str) -> HealthCheckResult:
        """
        Validate Google OAuth token against Gmail, Calendar, and Sheets APIs.

        Hits a lightweight endpoint for each service. A 401 on any endpoint
        means the token is invalid (fail fast). A 403 means the token lacks
        that service's scope. For Sheets, a 404 counts as success (scope is
        valid, the spreadsheet just doesn't exist).
        """
        headers = {
            "Authorization": f"Bearer {access_token}",
            "Accept": "application/json",
        }
        missing_scopes: list[str] = []

        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                for scope, url in self.ENDPOINTS.items():
                    params = {"maxResults": "1"} if scope == "calendar" else {}
                    response = client.get(url, headers=headers, params=params)

                    if response.status_code == 401:
                        return HealthCheckResult(
                            valid=False,
                            message="Google token is invalid or expired",
                            details={"status_code": 401},
                        )
                    if response.status_code == 403:
                        missing_scopes.append(scope)
                        continue
                    # Sheets returns 404 for a non-existent spreadsheet — that's fine,
                    # it means the token + scope are valid.
                    if response.status_code in (200, 404):
                        continue
                    # Unexpected status — not a scope issue, but not healthy either
                    return HealthCheckResult(
                        valid=False,
                        message=f"Google {scope} API returned status {response.status_code}",
                        details={"status_code": response.status_code, "scope": scope},
                    )

            if missing_scopes:
                return HealthCheckResult(
                    valid=False,
                    message=f"Google token lacks scopes for: {', '.join(missing_scopes)}",
                    details={"status_code": 403, "missing_scopes": missing_scopes},
                )

            return HealthCheckResult(
                valid=True,
                message="Google credentials valid (Gmail, Calendar, Sheets)",
            )
        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message="Google API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            error_msg = str(e)
            if "Bearer" in error_msg or "Authorization" in error_msg:
                error_msg = "Request failed (details redacted for security)"
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to Google: {error_msg}",
                details={"error": error_msg},
            )


class GoogleSearchHealthChecker:
@@ -567,54 +627,50 @@ class GoogleSearchHealthChecker:


class SlackHealthChecker:
    """Health checker for Slack bot tokens."""
    """Health checker for Slack Bot tokens."""

    ENDPOINT = "https://slack.com/api/auth.test"
    TIMEOUT = 10.0

    def check(self, bot_token: str) -> HealthCheckResult:
        """
        Validate Slack bot token by calling auth.test.
        Validate Slack Bot token via auth.test API.

        Makes a POST request to auth.test to verify the token works.
        This is Slack's recommended way to verify a token.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.post(
                    self.ENDPOINT,
                    headers={"Authorization": f"Bearer {bot_token}"},
                    headers={
                        "Authorization": f"Bearer {bot_token}",
                        "Content-Type": "application/json",
                    },
                )

                if response.status_code != 200:
                    return HealthCheckResult(
                        valid=False,
                        message=f"Slack API returned HTTP {response.status_code}",
                        details={"status_code": response.status_code},
                    )

                data = response.json()
                if data.get("ok"):
                    identity: dict[str, str] = {}
                    if data.get("team"):
                        identity["workspace"] = data["team"]
                    if data.get("user"):
                        identity["username"] = data["user"]
                    return HealthCheckResult(
                        valid=True,
                        message="Slack bot token valid",
                        details={
                            "team": data.get("team"),
                            "user": data.get("user"),
                            "bot_id": data.get("bot_id"),
                            "identity": identity,
                        },
                    )
                if response.status_code == 200:
                    data = response.json()
                    if data.get("ok"):
                        return HealthCheckResult(
                            valid=True,
                            message=f"Slack token valid (team: {data.get('team', 'unknown')})",
                            details={
                                "team": data.get("team"),
                                "user": data.get("user"),
                                "team_id": data.get("team_id"),
                            },
                        )
                    else:
                        return HealthCheckResult(
                            valid=False,
                            message=f"Slack token invalid: {data.get('error', 'unknown error')}",
                            details={"slack_error": data.get("error")},
                        )
                else:
                    error = data.get("error", "unknown_error")
                    return HealthCheckResult(
                        valid=False,
                        message=f"Slack token invalid: {error}",
                        details={"error": error},
                        message=f"Slack API returned status {response.status_code}",
                        details={"status_code": response.status_code},
                    )
        except httpx.TimeoutException:
            return HealthCheckResult(
@@ -625,49 +681,50 @@ class SlackHealthChecker:
        except httpx.RequestError as e:
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to Slack: {e}",
                message=f"Failed to connect to Slack API: {e}",
                details={"error": str(e)},
            )


class CalendlyHealthChecker:
    """Health checker for Calendly API tokens."""
    """Health checker for Calendly Personal Access Tokens."""

    ENDPOINT = "https://api.calendly.com/users/me"
    TIMEOUT = 10.0

    def check(self, api_token: str) -> HealthCheckResult:
    def check(self, pat: str) -> HealthCheckResult:
        """
        Validate Calendly token by calling /users/me.

        Makes a GET request to verify the token works.
        Validate Calendly PAT by fetching the authenticated user.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.get(
                    self.ENDPOINT,
                    headers={
                        "Authorization": f"Bearer {api_token}",
                        "Content-Type": "application/json",
                        "Authorization": f"Bearer {pat}",
                        "Accept": "application/json",
                    },
                )

                if response.status_code == 200:
                    data = response.json()
                    user = data.get("resource", {})
                    name = user.get("name", "unknown")
                    return HealthCheckResult(
                        valid=True,
                        message="Calendly token valid",
                        details={},
                        message=f"Calendly PAT valid (user: {name})",
                        details={"name": name, "email": user.get("email")},
                    )
                elif response.status_code == 401:
                    return HealthCheckResult(
                        valid=False,
                        message="Calendly token is invalid or expired",
                        message="Calendly PAT is invalid or expired",
                        details={"status_code": 401},
                    )
                elif response.status_code == 403:
                    return HealthCheckResult(
                        valid=False,
                        message="Calendly token access forbidden",
                        message="Calendly PAT lacks required scopes",
                        details={"status_code": 403},
                    )
                else:
@@ -690,101 +747,22 @@ class CalendlyHealthChecker:
            )


class AnthropicHealthChecker:
    """Health checker for Anthropic API credentials."""

    ENDPOINT = "https://api.anthropic.com/v1/messages"
    TIMEOUT = 10.0

    def check(self, api_key: str) -> HealthCheckResult:
        """
        Validate Anthropic API key without consuming tokens.

        Sends a deliberately invalid request (empty messages) to the messages endpoint.
        A 401 means invalid key; 400 (bad request) means the key authenticated
        but the payload was rejected — confirming the key is valid without
        generating any tokens. 429 (rate limited) also indicates a valid key.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.post(
                    self.ENDPOINT,
                    headers={
                        "x-api-key": api_key,
                        "anthropic-version": "2023-06-01",
                        "Content-Type": "application/json",
                    },
                    # Empty messages triggers 400 (not 200), so no tokens are consumed.
                    json={
                        "model": "claude-sonnet-4-20250514",
                        "max_tokens": 1,
                        "messages": [],
                    },
                )

                if response.status_code == 200:
                    return HealthCheckResult(
                        valid=True,
                        message="Anthropic API key valid",
                    )
                elif response.status_code == 401:
                    return HealthCheckResult(
                        valid=False,
                        message="Anthropic API key is invalid",
                        details={"status_code": 401},
                    )
                elif response.status_code == 429:
                    # Rate limited but key is valid
                    return HealthCheckResult(
                        valid=True,
                        message="Anthropic API key valid (rate limited)",
                        details={"status_code": 429, "rate_limited": True},
                    )
                elif response.status_code == 400:
                    # Bad request but key authenticated - key is valid
                    return HealthCheckResult(
                        valid=True,
                        message="Anthropic API key valid",
                        details={"status_code": 400},
                    )
                else:
                    return HealthCheckResult(
                        valid=False,
                        message=f"Anthropic API returned status {response.status_code}",
                        details={"status_code": response.status_code},
                    )
        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message="Anthropic API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to Anthropic API: {e}",
                details={"error": str(e)},
            )


class GitHubHealthChecker:
    """Health checker for GitHub Personal Access Token."""
    """Health checker for GitHub Personal Access Tokens."""

    ENDPOINT = "https://api.github.com/user"
    TIMEOUT = 10.0

    def check(self, access_token: str) -> HealthCheckResult:
    def check(self, token: str) -> HealthCheckResult:
        """
        Validate GitHub token by fetching the authenticated user.

        Returns the authenticated username on success.
        Validate GitHub PAT by fetching the authenticated user.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.get(
                    self.ENDPOINT,
                    headers={
                        "Authorization": f"Bearer {access_token}",
                        "Authorization": f"Bearer {token}",
                        "Accept": "application/vnd.github+json",
                        "X-GitHub-Api-Version": "2022-11-28",
                    },
@@ -793,13 +771,10 @@ class GitHubHealthChecker:
                if response.status_code == 200:
                    data = response.json()
                    username = data.get("login", "unknown")
                    identity: dict[str, str] = {}
                    if username and username != "unknown":
                        identity["username"] = username
                    return HealthCheckResult(
                        valid=True,
                        message=f"GitHub token valid (authenticated as {username})",
                        details={"username": username, "identity": identity},
                        message=f"GitHub token valid (user: {username})",
                        details={"username": username},
                    )
                elif response.status_code == 401:
                    return HealthCheckResult(
@@ -810,7 +785,7 @@ class GitHubHealthChecker:
                elif response.status_code == 403:
                    return HealthCheckResult(
                        valid=False,
                        message="GitHub token lacks required permissions",
                        message="GitHub token lacks required scopes",
                        details={"status_code": 403},
                    )
                else:
@@ -834,34 +809,32 @@ class GitHubHealthChecker:


class DiscordHealthChecker:
    """Health checker for Discord bot tokens."""
    """Health checker for Discord Bot tokens."""

    ENDPOINT = "https://discord.com/api/v10/users/@me"
    TIMEOUT = 10.0

    def check(self, bot_token: str) -> HealthCheckResult:
        """
        Validate Discord bot token by fetching the bot's user info.
        Validate Discord Bot token by fetching bot user info.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.get(
                    self.ENDPOINT,
                    headers={"Authorization": f"Bot {bot_token}"},
                    headers={
                        "Authorization": f"Bot {bot_token}",
                        "Accept": "application/json",
                    },
                )

                if response.status_code == 200:
                    data = response.json()
                    username = data.get("username", "unknown")
                    identity: dict[str, str] = {}
                    if username and username != "unknown":
                        identity["username"] = username
                    if data.get("id"):
                        identity["account_id"] = data["id"]
                    return HealthCheckResult(
                        valid=True,
                        message=f"Discord bot token valid (bot: {username})",
                        details={"username": username, "id": data.get("id"), "identity": identity},
                        details={"username": username, "bot": data.get("bot", True)},
                    )
                elif response.status_code == 401:
                    return HealthCheckResult(
@@ -872,7 +845,7 @@ class DiscordHealthChecker:
                elif response.status_code == 403:
                    return HealthCheckResult(
                        valid=False,
                        message="Discord bot token lacks required permissions",
                        message="Discord bot token lacks required intents/permissions",
                        details={"status_code": 403},
                    )
                else:
@@ -896,7 +869,7 @@ class DiscordHealthChecker:


class ResendHealthChecker:
    """Health checker for Resend API credentials."""
    """Health checker for Resend API keys."""

    ENDPOINT = "https://api.resend.com/domains"
    TIMEOUT = 10.0
@@ -904,8 +877,6 @@ class ResendHealthChecker:
    def check(self, api_key: str) -> HealthCheckResult:
        """
        Validate Resend API key by listing domains.

        A successful response confirms the key is valid.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
@@ -955,62 +926,58 @@ class ResendHealthChecker:


class GoogleMapsHealthChecker:
    """Health checker for Google Maps Platform API key."""
    """Health checker for Google Maps API keys."""

    ENDPOINT = "https://maps.googleapis.com/maps/api/geocode/json"
    TIMEOUT = 10.0

    def check(self, api_key: str) -> HealthCheckResult:
        """
        Validate Google Maps API key with a lightweight geocode request.

        Makes a minimal geocode request for a well-known address to verify
        the key is valid and the Geocoding API is enabled.
        Validate Google Maps API key with a minimal geocoding request.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.get(
                    self.ENDPOINT,
                    params={
                        "address": "1600 Amphitheatre Parkway",
                        "key": api_key,
                        "address": "1600 Amphitheatre Parkway, Mountain View, CA",
                    },
                )

                if response.status_code != 200:
                if response.status_code == 200:
                    data = response.json()
                    status = data.get("status", "")

                    if status == "OK":
                        return HealthCheckResult(
                            valid=True,
                            message="Google Maps API key valid",
                        )
                    elif status == "REQUEST_DENIED":
                        return HealthCheckResult(
                            valid=False,
                            message="Google Maps API key is invalid or restricted",
                            details={"status": status},
                        )
                    elif status == "OVER_QUERY_LIMIT":
                        return HealthCheckResult(
                            valid=True,
                            message="Google Maps API key valid (quota exceeded)",
                            details={"rate_limited": True},
                        )
                    else:
                        return HealthCheckResult(
                            valid=False,
                            message=f"Google Maps API returned status: {status}",
                            details={"status": status},
                        )
                else:
                    return HealthCheckResult(
                        valid=False,
                        message=f"Google Maps API returned HTTP {response.status_code}",
                        details={"status_code": response.status_code},
                    )

                data = response.json()
                status = data.get("status", "UNKNOWN_ERROR")

                if status == "OK":
                    return HealthCheckResult(
                        valid=True,
                        message="Google Maps API key valid",
                    )
                elif status == "REQUEST_DENIED":
                    return HealthCheckResult(
                        valid=False,
                        message="Google Maps API key is invalid or Geocoding API not enabled",
                        details={"status": status},
                    )
                elif status in ("OVER_DAILY_LIMIT", "OVER_QUERY_LIMIT"):
                    # Quota exceeded but key itself is valid
                    return HealthCheckResult(
                        valid=True,
                        message="Google Maps API key valid (quota exceeded)",
                        details={"status": status, "rate_limited": True},
                    )
                else:
                    return HealthCheckResult(
                        valid=False,
                        message=f"Google Maps API returned status: {status}",
                        details={"status": status},
                    )
        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
@@ -1026,25 +993,21 @@ class GoogleMapsHealthChecker:


class LushaHealthChecker:
    """Health checker for Lusha API credentials."""
    """Health checker for Lusha API keys."""

    ENDPOINT = "https://api.lusha.com/account/usage"
    ENDPOINT = "https://api.lusha.com/person"
    TIMEOUT = 10.0

    def check(self, api_key: str) -> HealthCheckResult:
        """
        Validate Lusha API key by checking account usage endpoint.

        This is a lightweight authenticated request that confirms API access.
        Validate Lusha API key with a minimal person lookup.
        """
        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.get(
                    self.ENDPOINT,
                    headers={
                        "api_key": api_key,
                        "Accept": "application/json",
                    },
                    headers={"api_key": api_key, "Accept": "application/json"},
                    params={"firstName": "test", "lastName": "test", "company": "test"},
                )

                if response.status_code == 200:
@@ -1058,17 +1021,11 @@ class LushaHealthChecker:
                    message="Lusha API key is invalid",
                    details={"status_code": 401},
                )
                elif response.status_code == 403:
                    return HealthCheckResult(
                        valid=False,
                        message="Lusha API key lacks required permissions",
                        details={"status_code": 403},
                    )
                elif response.status_code == 429:
                    return HealthCheckResult(
                        valid=True,
                        message="Lusha API key valid (rate/credit limited)",
                        details={"status_code": 429, "rate_limited": True},
                        message="Lusha API key valid (rate limited)",
                        details={"rate_limited": True},
                    )
                else:
                    return HealthCheckResult(
@@ -1090,20 +1047,6 @@ class LushaHealthChecker:
            )


class GoogleGmailHealthChecker(OAuthBearerHealthChecker):
    """Health checker for Google Gmail OAuth tokens."""

    def __init__(self):
        super().__init__(
            endpoint="https://gmail.googleapis.com/gmail/v1/users/me/profile",
            service_name="Gmail",
        )

    def _extract_identity(self, data: dict) -> dict[str, str]:
        email = data.get("emailAddress")
        return {"email": email} if email else {}


# --- New checkers using BaseHttpHealthChecker ---


@@ -1376,7 +1319,6 @@ class YouTubeHealthChecker(BaseHttpHealthChecker):

# Registry of health checkers
HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
    "anthropic": AnthropicHealthChecker(),
    "apify": ApifyHealthChecker(),
    "apollo": ApolloHealthChecker(),
    "asana": AsanaHealthChecker(),
@@ -1391,8 +1333,7 @@ HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
    "finlight": FinlightHealthChecker(),
    "github": GitHubHealthChecker(),
    "gitlab_token": GitLabHealthChecker(),
    "google": GoogleGmailHealthChecker(),
    "google_calendar_oauth": GoogleCalendarHealthChecker(),
    "google": GoogleHealthChecker(),
    "google_docs": GoogleDocsHealthChecker(),
    "google_maps": GoogleMapsHealthChecker(),
    "google_search": GoogleSearchHealthChecker(),
@@ -1,44 +0,0 @@
"""
LLM provider credentials.

Contains credentials for language model providers like Anthropic, OpenAI, etc.
"""

from .base import CredentialSpec

LLM_CREDENTIALS = {
    "anthropic": CredentialSpec(
        env_var="ANTHROPIC_API_KEY",
        tools=[],
        node_types=["event_loop"],
        required=False,  # Not required - agents can use other providers via LiteLLM
        startup_required=False,  # MCP server doesn't need LLM credentials
        help_url="https://console.anthropic.com/settings/keys",
        description="API key for Anthropic Claude models",
        # Auth method support
        direct_api_key_supported=True,
        api_key_instructions="""To get an Anthropic API key:
1. Go to https://console.anthropic.com/settings/keys
2. Sign in or create an Anthropic account
3. Click "Create Key"
4. Give your key a descriptive name (e.g., "Hive Agent")
5. Copy the API key (starts with sk-ant-)
6. Store it securely - you won't be able to see the full key again!""",
        # Health check configuration
        health_check_endpoint="https://api.anthropic.com/v1/messages",
        health_check_method="POST",
        # Credential store mapping
        credential_id="anthropic",
        credential_key="api_key",
    ),
    # Future LLM providers:
    # "openai": CredentialSpec(
    #     env_var="OPENAI_API_KEY",
    #     tools=[],
    #     node_types=["openai_generate"],
    #     required=False,
    #     startup_required=False,
    #     help_url="https://platform.openai.com/api-keys",
    #     description="API key for OpenAI models",
    # ),
}
@@ -12,6 +12,14 @@ TELEGRAM_CREDENTIALS = {
        tools=[
            "telegram_send_message",
            "telegram_send_document",
            "telegram_edit_message",
            "telegram_delete_message",
            "telegram_forward_message",
            "telegram_send_photo",
            "telegram_send_chat_action",
            "telegram_get_chat",
            "telegram_pin_message",
            "telegram_unpin_message",
        ],
        required=True,
        startup_required=False,

@@ -1,9 +1,10 @@
"""
Shared file operation tools for MCP servers.

Provides 6 tools (read_file, write_file, edit_file, list_directory, search_files,
run_command) plus supporting helpers. Used by both files_server.py (unsandboxed)
and coder_tools_server.py (project-root sandboxed with git snapshots).
Provides 7 tools (read_file, write_file, edit_file, hashline_edit,
list_directory, search_files, run_command) plus supporting helpers.
Used by both files_server.py (unsandboxed) and coder_tools_server.py
(project-root sandboxed with git snapshots).

Usage:
    from aden_tools.file_ops import register_file_tools
@@ -15,16 +16,31 @@ Usage:

from __future__ import annotations

import contextlib
import difflib
import fnmatch
import json
import os
import re
import subprocess
import tempfile
from collections.abc import Callable
from pathlib import Path

from fastmcp import FastMCP

from aden_tools.hashline import (
    HASHLINE_MAX_FILE_BYTES,
    compute_line_hash,
    format_hashlines,
    maybe_strip,
    parse_anchor,
    strip_boundary_echo,
    strip_content_prefixes,
    strip_insert_echo,
    validate_anchor,
)

# ── Constants ─────────────────────────────────────────────────────────────

MAX_READ_LINES = 2000
@@ -245,16 +261,21 @@ def register_file_tools(
    _resolve = resolve_path or _default_resolve_path

    @mcp.tool()
    def read_file(path: str, offset: int = 1, limit: int = 0) -> str:
    def read_file(path: str, offset: int = 1, limit: int = 0, hashline: bool = False) -> str:
        """Read file contents with line numbers and byte-budget truncation.

        Binary files are detected and rejected. Large files are automatically
        truncated at 2000 lines or 50KB. Use offset and limit to paginate.

        Set hashline=True to get N:hhhh|content format with content-hash
        anchors for use with hashline_edit. Line truncation is disabled in
        hashline mode to preserve hash integrity.

        Args:
            path: Absolute file path to read.
            offset: Starting line number, 1-indexed (default: 1).
            limit: Max lines to return, 0 = up to 2000 (default: 0).
            hashline: If True, return N:hhhh|content anchors (default: False).
        """
        resolved = _resolve(path)

@@ -276,8 +297,10 @@ def register_file_tools(

        try:
            with open(resolved, encoding="utf-8", errors="replace") as f:
                all_lines = f.readlines()
                content = f.read()

            # Use splitlines() for consistent line splitting with hashline module
            all_lines = content.splitlines()
            total_lines = len(all_lines)
            start_idx = max(0, offset - 1)
            effective_limit = limit if limit > 0 else MAX_READ_LINES
@@ -287,10 +310,15 @@ def register_file_tools(
            byte_count = 0
            truncated_by_bytes = False
            for i in range(start_idx, end_idx):
                line = all_lines[i].rstrip("\n\r")
                if len(line) > MAX_LINE_LENGTH:
                    line = line[:MAX_LINE_LENGTH] + "..."
                formatted = f"{i + 1:>6}\t{line}"
                line = all_lines[i]
                if hashline:
                    # No line truncation in hashline mode (would corrupt hashes)
                    h = compute_line_hash(line)
                    formatted = f"{i + 1}:{h}|{line}"
                else:
                    if len(line) > MAX_LINE_LENGTH:
                        line = line[:MAX_LINE_LENGTH] + "..."
                    formatted = f"{i + 1:>6}\t{line}"
                line_bytes = len(formatted.encode("utf-8")) + 1
                if byte_count + line_bytes > MAX_OUTPUT_BYTES:
                    truncated_by_bytes = True
@@ -323,19 +351,31 @@ def register_file_tools(
            content: Complete file content to write.
        """
        resolved = _resolve(path)
        resolved_path = Path(resolved)

        try:
            # Create parent dirs first (before git snapshot) so structure exists
            resolved_path.parent.mkdir(parents=True, exist_ok=True)
            if before_write:
                before_write()
                try:
                    before_write()
                except Exception:
                    # Don't block the write if git snapshot fails. Do NOT log here —
                    # logging writes to stderr and can deadlock the MCP stdio pipe.
                    pass

            existed = os.path.isfile(resolved)
            os.makedirs(os.path.dirname(resolved), exist_ok=True)
            with open(resolved, "w", encoding="utf-8") as f:
                f.write(content)
            existed = resolved_path.is_file()
            content_str = content if content is not None else ""
            with open(resolved_path, "w", encoding="utf-8") as f:
                f.write(content_str)
                f.flush()
                os.fsync(f.fileno())

            line_count = content.count("\n") + (1 if content and not content.endswith("\n") else 0)
            line_count = content_str.count("\n") + (
                1 if content_str and not content_str.endswith("\n") else 0
            )
            action = "Updated" if existed else "Created"
            return f"{action} {path} ({len(content):,} bytes, {line_count} lines)"
            return f"{action} {path} ({len(content_str):,} bytes, {line_count} lines)"
        except Exception as e:
            return f"Error writing file: {e}"

@@ -472,15 +512,19 @@ def register_file_tools(
            return f"Error listing directory: {e}"

    @mcp.tool()
    def search_files(pattern: str, path: str = ".", include: str = "") -> str:
    def search_files(
        pattern: str, path: str = ".", include: str = "", hashline: bool = False
    ) -> str:
        """Search file contents using regex. Uses ripgrep if available.

        Results sorted by file with line numbers.
        Results sorted by file with line numbers. Set hashline=True to include
        content-hash anchors (N:hhhh) for use with hashline_edit.

        Args:
            pattern: Regex pattern to search for.
            path: Absolute directory path to search (default: current directory).
            include: File glob filter (e.g. '*.py').
            hashline: If True, include hash anchors in results (default: False).
        """
        resolved = _resolve(path)
        if not os.path.isdir(resolved):
@@ -507,6 +551,7 @@ def register_file_tools(
                    text=True,
                    timeout=30,
                    encoding="utf-8",
                    stdin=subprocess.DEVNULL,
                )
                if rg_result.returncode <= 1:
                    output = rg_result.stdout.strip()
@@ -517,6 +562,30 @@ def register_file_tools(
                    for line in output.split("\n")[:SEARCH_RESULT_LIMIT]:
                        if project_root:
                            line = line.replace(project_root + "/", "")
                        if hashline:
                            # Parse file:linenum:content and insert hash anchor
                            parts = line.split(":", 2)
                            if len(parts) >= 3:
                                content = parts[2]
                                h = compute_line_hash(content)
                                line = f"{parts[0]}:{parts[1]}:{h}|{content}"
                        else:
                            # Platform-agnostic relativization: ripgrep may output
                            # forward or backslash paths; normalize before relpath (Windows).
                            match = re.match(r"^(.+):(\d+):", line)
                            if match:
                                path_part, line_num, rest = (
                                    match.group(1),
                                    match.group(2),
                                    line[match.end() :],
                                )
                                path_part = os.path.normpath(path_part.replace("/", os.sep))
                                proj_norm = os.path.normpath(project_root.replace("/", os.sep))
                                try:
                                    rel = os.path.relpath(path_part, proj_norm)
                                    line = f"{rel}:{line_num}:{rest}"
                                except ValueError:
                                    pass
                        if len(line) > MAX_LINE_LENGTH:
                            line = line[:MAX_LINE_LENGTH] + "..."
                        lines.append(line)
@@ -544,14 +613,26 @@ def register_file_tools(
                    if include and not fnmatch.fnmatch(fname, include):
                        continue
                    fpath = os.path.join(root, fname)
                    display_path = os.path.relpath(fpath, project_root) if project_root else fpath
                    if project_root:
                        proj_norm = os.path.normpath(project_root.replace("/", os.sep))
                        try:
                            display_path = os.path.relpath(fpath, proj_norm)
                        except ValueError:
                            display_path = fpath
                    else:
                        display_path = fpath
                    try:
                        with open(fpath, encoding="utf-8", errors="ignore") as f:
                            for i, line in enumerate(f, 1):
                                if compiled.search(line):
                                    matches.append(
                                        f"{display_path}:{i}:{line.rstrip()[:MAX_LINE_LENGTH]}"
                                    )
                                stripped = line.rstrip()
                                if compiled.search(stripped):
                                    if hashline:
                                        h = compute_line_hash(stripped)
                                        matches.append(f"{display_path}:{i}:{h}|{stripped}")
                                    else:
                                        matches.append(
                                            f"{display_path}:{i}:{stripped[:MAX_LINE_LENGTH]}"
                                        )
                                if len(matches) >= SEARCH_RESULT_LIMIT:
                                    return "\n".join(matches) + "\n... (truncated)"
                    except (OSError, UnicodeDecodeError):
@@ -560,3 +641,370 @@ def register_file_tools(
            return "\n".join(matches) if matches else "No matches found."
        except re.error as e:
            return f"Error: Invalid regex: {e}"

    @mcp.tool()
    def hashline_edit(
        path: str,
        edits: str,
        auto_cleanup: bool = True,
        encoding: str = "utf-8",
    ) -> str:
        """Edit a file using anchor-based line references (N:hash) for precise edits.

        After reading a file with read_file(hashline=True), use the anchors to make
        targeted edits without reproducing exact file content.

        Anchors must match current file content (hash validation). All edits in a
        batch are validated before any are applied (atomic). Overlapping line ranges
        within a single call are rejected.

        Args:
            path: Absolute file path to edit.
            edits: JSON string containing a list of edit operations. Each op is a
                dict with "op" key and operation-specific fields:
                - set_line: anchor, content (single line replacement)
                - replace_lines: start_anchor, end_anchor, content (multi-line)
                - insert_after: anchor, content
                - insert_before: anchor, content
                - replace: old_content, new_content, allow_multiple
                - append: content
            auto_cleanup: Strip hashline prefixes and echoed context from edit
                content (default: True).
            encoding: File encoding (default: "utf-8").
        """
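        # Illustrative `edits` payload (anchors here are invented; take real
        # ones from read_file(hashline=True) output):
        #   [{"op": "set_line", "anchor": "12:a3b1", "content": "    retries = 3"},
        #    {"op": "insert_after", "anchor": "20:9c2e", "content": "    timeout = 10"}]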
        # 1. Parse JSON
        try:
            edit_ops = json.loads(edits)
        except (json.JSONDecodeError, TypeError) as e:
            return f"Error: Invalid JSON in edits: {e}"

        if not isinstance(edit_ops, list):
            return "Error: edits must be a JSON array of operations"
        if not edit_ops:
            return "Error: edits array is empty"
        if len(edit_ops) > 100:
            return "Error: Too many edits in one call (max 100). Split into multiple calls."

        # 2. Read file
        resolved = _resolve(path)
        if not os.path.isfile(resolved):
            return f"Error: File not found: {path}"

        try:
            with open(resolved, "rb") as f:
                raw_head = f.read(8192)
            eol = "\r\n" if b"\r\n" in raw_head else "\n"

            with open(resolved, encoding=encoding) as f:
                content = f.read()
        except Exception as e:
            return f"Error: Failed to read file: {e}"

        content_bytes = len(content.encode(encoding))
        if content_bytes > HASHLINE_MAX_FILE_BYTES:
            return f"Error: File too large for hashline_edit ({content_bytes} bytes, max 10MB)"

        trailing_newline = content.endswith("\n")
        lines = content.splitlines()

        # 3. Categorize and validate ops
        splices = []  # (start_0idx, end_0idx, new_lines, op_index)
        replaces = []  # (old_content, new_content, op_index, allow_multiple)
        cleanup_actions: list[str] = []

        for i, op in enumerate(edit_ops):
            if not isinstance(op, dict):
                return f"Error: Edit #{i + 1}: operation must be a dict"

            match op.get("op"):
                case "set_line":
                    anchor = op.get("anchor", "")
                    err = validate_anchor(anchor, lines)
                    if err:
                        return f"Error: Edit #{i + 1} (set_line): {err}"
                    if "content" not in op:
                        return f"Error: Edit #{i + 1} (set_line): missing required field 'content'"
                    if not isinstance(op["content"], str):
                        return f"Error: Edit #{i + 1} (set_line): content must be a string"
                    if "\n" in op["content"] or "\r" in op["content"]:
                        return (
                            f"Error: Edit #{i + 1} (set_line): content must be a single line. "
                            f"Use replace_lines for multi-line replacement."
                        )
                    line_num, _ = parse_anchor(anchor)
                    idx = line_num - 1
                    new_content = op["content"]
                    new_lines = [new_content] if new_content else []
                    new_lines = maybe_strip(
                        new_lines,
                        strip_content_prefixes,
                        "prefix_strip",
                        auto_cleanup,
                        cleanup_actions,
                    )
                    splices.append((idx, idx, new_lines, i))

                case "replace_lines":
                    start_anchor = op.get("start_anchor", "")
                    end_anchor = op.get("end_anchor", "")
                    err = validate_anchor(start_anchor, lines)
                    if err:
                        return f"Error: Edit #{i + 1} (replace_lines start): {err}"
                    err = validate_anchor(end_anchor, lines)
                    if err:
                        return f"Error: Edit #{i + 1} (replace_lines end): {err}"
                    start_num, _ = parse_anchor(start_anchor)
                    end_num, _ = parse_anchor(end_anchor)
                    if start_num > end_num:
                        return (
                            f"Error: Edit #{i + 1} (replace_lines): "
                            f"start line {start_num} > end line {end_num}"
                        )
                    if "content" not in op:
                        return (
                            f"Error: Edit #{i + 1} (replace_lines): "
                            f"missing required field 'content'"
                        )
                    if not isinstance(op["content"], str):
                        return f"Error: Edit #{i + 1} (replace_lines): content must be a string"
                    new_content = op["content"]
                    new_lines = new_content.splitlines() if new_content else []
                    new_lines = maybe_strip(
                        new_lines,
                        strip_content_prefixes,
                        "prefix_strip",
                        auto_cleanup,
                        cleanup_actions,
                    )
                    new_lines = maybe_strip(
                        new_lines,
                        lambda nl, s=start_num, e=end_num: strip_boundary_echo(lines, s, e, nl),
                        "boundary_echo_strip",
                        auto_cleanup,
                        cleanup_actions,
                    )
                    splices.append((start_num - 1, end_num - 1, new_lines, i))

                case "insert_after":
                    anchor = op.get("anchor", "")
                    err = validate_anchor(anchor, lines)
                    if err:
                        return f"Error: Edit #{i + 1} (insert_after): {err}"
                    line_num, _ = parse_anchor(anchor)
                    idx = line_num - 1
                    new_content = op.get("content", "")
                    if not isinstance(new_content, str):
                        return f"Error: Edit #{i + 1} (insert_after): content must be a string"
                    if not new_content:
                        return f"Error: Edit #{i + 1} (insert_after): content is empty"
                    new_lines = new_content.splitlines()
                    new_lines = maybe_strip(
                        new_lines,
                        strip_content_prefixes,
                        "prefix_strip",
                        auto_cleanup,
                        cleanup_actions,
                    )
                    new_lines = maybe_strip(
                        new_lines,
                        lambda nl, _idx=idx: strip_insert_echo(lines[_idx], nl),
                        "insert_echo_strip",
                        auto_cleanup,
                        cleanup_actions,
                    )
                    splices.append((idx + 1, idx, new_lines, i))

                case "insert_before":
                    anchor = op.get("anchor", "")
                    err = validate_anchor(anchor, lines)
                    if err:
                        return f"Error: Edit #{i + 1} (insert_before): {err}"
                    line_num, _ = parse_anchor(anchor)
                    idx = line_num - 1
                    new_content = op.get("content", "")
                    if not isinstance(new_content, str):
                        return f"Error: Edit #{i + 1} (insert_before): content must be a string"
                    if not new_content:
                        return f"Error: Edit #{i + 1} (insert_before): content is empty"
                    new_lines = new_content.splitlines()
                    new_lines = maybe_strip(
                        new_lines,
                        strip_content_prefixes,
                        "prefix_strip",
                        auto_cleanup,
                        cleanup_actions,
                    )
                    new_lines = maybe_strip(
                        new_lines,
                        lambda nl, _idx=idx: strip_insert_echo(lines[_idx], nl, position="last"),
                        "insert_echo_strip",
                        auto_cleanup,
                        cleanup_actions,
                    )
                    splices.append((idx, idx - 1, new_lines, i))

                case "replace":
                    old_content = op.get("old_content")
                    new_content = op.get("new_content")
                    if old_content is None:
                        return f"Error: Edit #{i + 1} (replace): missing old_content"
                    if not isinstance(old_content, str):
                        return f"Error: Edit #{i + 1} (replace): old_content must be a string"
                    if not old_content:
                        return f"Error: Edit #{i + 1} (replace): old_content must not be empty"
                    if new_content is None:
                        return f"Error: Edit #{i + 1} (replace): missing new_content"
                    if not isinstance(new_content, str):
                        return f"Error: Edit #{i + 1} (replace): new_content must be a string"
                    allow_multiple = op.get("allow_multiple", False)
                    if not isinstance(allow_multiple, bool):
                        return f"Error: Edit #{i + 1} (replace): allow_multiple must be a boolean"
                    replaces.append((old_content, new_content, i, allow_multiple))

                case "append":
                    new_content = op.get("content")
                    if new_content is None:
                        return f"Error: Edit #{i + 1} (append): missing content"
                    if not isinstance(new_content, str):
                        return f"Error: Edit #{i + 1} (append): content must be a string"
                    if not new_content:
                        return f"Error: Edit #{i + 1} (append): content must not be empty"
                    new_lines = new_content.splitlines()
                    new_lines = maybe_strip(
                        new_lines,
                        strip_content_prefixes,
                        "prefix_strip",
                        auto_cleanup,
                        cleanup_actions,
                    )
                    insert_point = len(lines)
                    splices.append((insert_point, insert_point - 1, new_lines, i))

                case unknown:
                    return f"Error: Edit #{i + 1}: unknown op '{unknown}'"

        # 4. Check for overlapping splice ranges
        for j in range(len(splices)):
            for k in range(j + 1, len(splices)):
                s_a, e_a, _, idx_a = splices[j]
                s_b, e_b, _, idx_b = splices[k]
                is_insert_a = s_a > e_a
                is_insert_b = s_b > e_b

                if is_insert_a and is_insert_b:
                    continue
                if is_insert_a and not is_insert_b:
                    if s_b <= s_a <= e_b + 1:
                        return (
                            f"Error: Overlapping edits: edit #{idx_a + 1} "
                            f"and edit #{idx_b + 1} affect overlapping line ranges"
                        )
                    continue
                if is_insert_b and not is_insert_a:
                    if s_a <= s_b <= e_a + 1:
                        return (
                            f"Error: Overlapping edits: edit #{idx_a + 1} "
                            f"and edit #{idx_b + 1} affect overlapping line ranges"
                        )
                    continue
                if not (e_a < s_b or e_b < s_a):
                    return (
                        f"Error: Overlapping edits: edit #{idx_a + 1} "
                        f"and edit #{idx_b + 1} affect overlapping line ranges"
                    )
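        # Illustrative: a replace_lines over lines 5-8 plus a set_line at line 6
        # is rejected as overlapping, while two pure inserts at the same anchor
        # are allowed to coexist.
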
        # 5. Apply splices bottom-up
        changes_made = 0
        working = list(lines)
        for start, end, new_lines, _ in sorted(splices, key=lambda s: (s[0], s[3]), reverse=True):
            if start > end:
                changes_made += 1
                for k, nl in enumerate(new_lines):
                    working.insert(start + k, nl)
            else:
                old_slice = working[start : end + 1]
                if old_slice != new_lines:
                    changes_made += 1
                    working[start : end + 1] = new_lines

        # 6. Apply str_replace ops
        joined = "\n".join(working)
        replace_counts = []
        for old_content, new_content, op_idx, allow_multiple in replaces:
            count = joined.count(old_content)
            if count == 0:
                return (
                    f"Error: Edit #{op_idx + 1} (replace): "
                    f"old_content not found "
                    f"(note: anchor-based edits in this batch are applied first)"
                )
            if count > 1 and not allow_multiple:
                return (
                    f"Error: Edit #{op_idx + 1} (replace): "
                    f"old_content found {count} times (must be unique). "
                    f"Include more surrounding context to make it unique, "
                    f"or use anchor-based ops instead."
                )
            if allow_multiple:
                joined = joined.replace(old_content, new_content)
                replace_counts.append((op_idx, count))
            else:
                joined = joined.replace(old_content, new_content, 1)
            if count > 0 and old_content != new_content:
                changes_made += 1

        # 7. Restore trailing newline
        if trailing_newline and joined and not joined.endswith("\n"):
            joined += "\n"

        # 8. Restore original EOL style (only convert bare \n, not existing \r\n)
        if eol == "\r\n":
            joined = re.sub(r"(?<!\r)\n", "\r\n", joined)

        # 9. Snapshot + atomic write
        try:
            if before_write:
                before_write()
            original_mode = os.stat(resolved).st_mode
            fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(resolved))
            fd_open = True
            try:
                if hasattr(os, "fchmod"):
                    os.fchmod(fd, original_mode)
                with os.fdopen(fd, "w", encoding=encoding, newline="") as f:
                    fd_open = False
                    f.write(joined)
                os.replace(tmp_path, resolved)
            except BaseException:
                if fd_open:
                    os.close(fd)
                with contextlib.suppress(OSError):
                    os.unlink(tmp_path)
                raise
        except Exception as e:
            return f"Error: Failed to write file: {e}"

        # 10. Build response
        updated_lines = joined.splitlines()
        total_lines = len(updated_lines)

        # Limit returned content to first 200 lines
        preview_limit = 200
        hashline_content = format_hashlines(updated_lines, limit=preview_limit)

        parts = [f"Applied {changes_made} edit(s) to {path}"]
        if changes_made == 0:
            parts.append("(content unchanged after applying edits)")
        if cleanup_actions:
            parts.append(f"Auto-cleanup: {', '.join(cleanup_actions)}")
        if replace_counts:
            for op_idx, count in replace_counts:
                parts.append(f"Edit #{op_idx + 1} replaced {count} occurrence(s)")
        parts.append("")
        parts.append(hashline_content)
        if total_lines > preview_limit:
            parts.append(
                f"\n(Showing first {preview_limit} of {total_lines} lines. "
                f"Use read_file with offset to see more.)"
            )
        return "\n".join(parts)

@@ -0,0 +1,230 @@
"""Hashline utilities for anchor-based file editing.

Each line gets a short content hash anchor (line_number:hash). Models reference
lines by anchor instead of reproducing text. If the file changed since the model
read it, the hash won't match and the edit is cleanly rejected.
"""

import re
import zlib

# ── Constants ─────────────────────────────────────────────────────────────

# Files beyond this size are skipped/rejected in hashline mode because
# hashline anchors are not practical on files this large (minified
# bundles, logs, data dumps). Shared by view_file, grep_search, and
# hashline_edit.
HASHLINE_MAX_FILE_BYTES = 10 * 1024 * 1024  # 10 MB

# ── Hash computation ──────────────────────────────────────────────────────


def compute_line_hash(line: str) -> str:
    """Compute a 4-char hex hash for a line of text.

    Uses CRC32 mod 65536, formatted as lowercase hex. Only trailing spaces
    and tabs are stripped before hashing. Leading whitespace (indentation)
    is included in the hash so indentation changes invalidate anchors.
    This keeps stale-anchor detection safe for indentation-sensitive files
    while still ignoring common trailing-whitespace noise.

    Collision probability is ~0.0015% per changed line (4-char hex,
    migrated from 2-char hex which had ~0.39% collision rate).
    """
    stripped = line.rstrip(" \t")
    crc = zlib.crc32(stripped.encode("utf-8")) & 0xFFFFFFFF
    return f"{crc % 65536:04x}"

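# Illustrative behaviour: "x = 1" and "x = 1  \t" hash identically (trailing
# whitespace is stripped before hashing), while "    x = 1" hashes differently
# (indentation is part of the anchor).
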
def format_hashlines(lines: list[str], offset: int = 1, limit: int = 0) -> str:
    """Format lines with N:hhhh|content prefixes.

    Args:
        lines: The file content split into lines.
        offset: 1-indexed start line (default 1).
        limit: Maximum lines to return, 0 means all.

    Returns:
        Formatted string with hashline prefixes.
    """
    start = offset - 1  # convert to 0-indexed
    if limit > 0:
        selected = lines[start : start + limit]
    else:
        selected = lines[start:]

    result_parts = []
    for i, line in enumerate(selected):
        line_num = offset + i
        h = compute_line_hash(line)
        result_parts.append(f"{line_num}:{h}|{line}")

    return "\n".join(result_parts)


# ── Anchor parsing & validation ───────────────────────────────────────────

def parse_anchor(anchor: str) -> tuple[int, str]:
    """Parse an anchor string like '2:a3b1' into (line_number, hash).

    Raises:
        ValueError: If the anchor format is invalid.
    """
    if ":" not in anchor:
        raise ValueError(f"Invalid anchor format (no colon): '{anchor}'")

    parts = anchor.split(":", 1)
    try:
        line_num = int(parts[0])
    except ValueError as exc:
        raise ValueError(f"Invalid anchor format (line number not an integer): '{anchor}'") from exc

    hash_str = parts[1]
    if len(hash_str) != 4:
        raise ValueError(f"Invalid anchor format (hash must be 4 chars): '{anchor}'")
    if not all(c in "0123456789abcdef" for c in hash_str):
        raise ValueError(f"Invalid anchor format (hash must be lowercase hex): '{anchor}'")

    return line_num, hash_str

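# Illustrative: parse_anchor("12:a3b1") -> (12, "a3b1"), while
# parse_anchor("12:xyz") raises ValueError because the hash part must be
# exactly 4 lowercase hex characters.
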
def validate_anchor(anchor: str, lines: list[str]) -> str | None:
    """Validate an anchor against file lines.

    Returns:
        None if valid, error message string if invalid.
    """
    try:
        line_num, expected_hash = parse_anchor(anchor)
    except ValueError as e:
        return str(e)

    if line_num < 1 or line_num > len(lines):
        return f"Line {line_num} out of range (file has {len(lines)} lines)"

    actual_line = lines[line_num - 1]
    actual_hash = compute_line_hash(actual_line)
    if actual_hash != expected_hash:
        preview = actual_line.strip()
        if len(preview) > 80:
            preview = preview[:77] + "..."
        return (
            f"Hash mismatch at line {line_num}: expected '{expected_hash}', "
            f"got '{actual_hash}'. Current content: {preview!r}. "
            f"Re-read the file to get current anchors."
        )

    return None

# ── Auto-cleanup helpers ──────────────────────────────────────────────────
# Shared by both file_ops.hashline_edit and file_system_toolkits.hashline_edit.

HASHLINE_PREFIX_RE = re.compile(r"^\d+:[0-9a-f]{4}\|")


def strip_content_prefixes(lines: list[str]) -> list[str]:
    """Strip hashline prefixes from content lines when all have them.

    LLMs frequently copy hashline-formatted text (e.g. '5:a3b1|content') into
    their content fields. Only strips when 2+ non-empty lines all match the
    exact hashline prefix pattern (N:hhhh|). Single-line content is left alone
    to avoid false positives on literal text that happens to match the pattern.
    """
    if not lines:
        return lines
    non_empty = [ln for ln in lines if ln]
    if len(non_empty) < 2:
        return lines
    prefix_count = sum(1 for ln in non_empty if HASHLINE_PREFIX_RE.match(ln))
    if prefix_count < len(non_empty):
        return lines
    return [HASHLINE_PREFIX_RE.sub("", ln) for ln in lines]


def whitespace_equal(a: str, b: str) -> bool:
    """Compare strings ignoring spaces and tabs."""
    return a.replace(" ", "").replace("\t", "") == b.replace(" ", "").replace("\t", "")

def strip_insert_echo(
    anchor_line: str, new_lines: list[str], *, position: str = "first"
) -> list[str]:
    """Strip echoed anchor line from insert content.

    If the model echoes the anchor line in inserted content, remove it to
    avoid duplication. Only applies when content has 2+ lines and both the
    anchor and checked content line are non-blank.

    position="first" (insert_after): check first line, strip from front.
    position="last" (insert_before): check last line, strip from end.
    """
    if len(new_lines) <= 1:
        return new_lines
    if position == "last":
        if not anchor_line.strip() or not new_lines[-1].strip():
            return new_lines
        if whitespace_equal(new_lines[-1], anchor_line):
            return new_lines[:-1]
    else:
        if not anchor_line.strip() or not new_lines[0].strip():
            return new_lines
        if whitespace_equal(new_lines[0], anchor_line):
            return new_lines[1:]
    return new_lines

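# Illustrative: inserting ["total = 0", "count += 1"] after the anchor line
# "total = 0" keeps only ["count += 1"]; single-line content is never stripped.
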
def strip_boundary_echo(
    file_lines: list[str], start_1idx: int, end_1idx: int, new_lines: list[str]
) -> list[str]:
    """Strip echoed boundary context from replace_lines content.

    If the model includes the line before AND after the replaced range as part
    of the replacement content, strip those echoed boundary lines. Both
    boundaries must echo simultaneously before either is stripped (a single
    boundary match is too likely to be a coincidence with real content).
    Only applies when the replacement has more lines than the range being
    replaced, and both the boundary line and content line are non-blank.
    """
    range_count = end_1idx - start_1idx + 1
    if len(new_lines) <= 1 or len(new_lines) <= range_count:
        return new_lines

    # Check if leading boundary echoes
    before_idx = start_1idx - 2  # 0-indexed line before range
    leading_echoes = (
        before_idx >= 0
        and new_lines[0].strip()
        and file_lines[before_idx].strip()
        and whitespace_equal(new_lines[0], file_lines[before_idx])
    )

    # Check if trailing boundary echoes
    after_idx = end_1idx  # 0-indexed line after range
    trailing_echoes = (
        after_idx < len(file_lines)
        and new_lines[-1].strip()
        and file_lines[after_idx].strip()
        and whitespace_equal(new_lines[-1], file_lines[after_idx])
    )

    # Only strip if BOTH boundaries echo and there is content between them.
    # len < 3 means no real content between the two boundary lines, so
    # stripping would produce an empty list (accidental deletion).
    if not (leading_echoes and trailing_echoes) or len(new_lines) < 3:
        return new_lines

    return new_lines[1:-1]

def maybe_strip(new_lines, strip_fn, action_name, auto_cleanup, cleanup_actions):
    """Apply a strip function if auto_cleanup is enabled, tracking actions."""
    if not auto_cleanup:
        return new_lines
    cleaned = strip_fn(new_lines)
    if cleaned != new_lines:
        if action_name not in cleanup_actions:
            cleanup_actions.append(action_name)
        return cleaned
    return new_lines
@@ -56,6 +56,8 @@ from .email_tool import register_tools as register_email
from .exa_search_tool import register_tools as register_exa_search
from .example_tool import register_tools as register_example
from .excel_tool import register_tools as register_excel

# File system toolkits
from .file_system_toolkits.apply_diff import register_tools as register_apply_diff
from .file_system_toolkits.apply_patch import register_tools as register_apply_patch
from .file_system_toolkits.data_tools import register_tools as register_data_tools
@@ -63,6 +65,7 @@ from .file_system_toolkits.execute_command_tool import (
    register_tools as register_execute_command,
)
from .file_system_toolkits.grep_search import register_tools as register_grep_search
from .file_system_toolkits.hashline_edit import register_tools as register_hashline_edit
from .file_system_toolkits.list_dir import register_tools as register_list_dir
from .file_system_toolkits.replace_file_content import (
    register_tools as register_replace_file_content,
)
@@ -154,6 +157,34 @@ def _register_verified(
    register_wikipedia(mcp)
    register_arxiv(mcp)

    # Tools that need credentials (pass credentials if provided)
    # web_search supports multiple providers (Google, Brave) with auto-detection
    register_web_search(mcp, credentials=credentials)
    register_github(mcp, credentials=credentials)
    # email supports multiple providers (Gmail, Resend)
    register_email(mcp, credentials=credentials)
    # Gmail inbox management (read, trash, modify labels)
    register_gmail(mcp, credentials=credentials)
    register_hubspot(mcp, credentials=credentials)
    register_intercom(mcp, credentials=credentials)
    register_apollo(mcp, credentials=credentials)
    register_bigquery(mcp, credentials=credentials)
    register_calcom(mcp, credentials=credentials)
    register_calendar(mcp, credentials=credentials)
    register_discord(mcp, credentials=credentials)
    register_exa_search(mcp, credentials=credentials)
    register_news(mcp, credentials=credentials)
    register_razorpay(mcp, credentials=credentials)
    register_serpapi(mcp, credentials=credentials)
    register_slack(mcp, credentials=credentials)
    register_telegram(mcp, credentials=credentials)
    register_vision(mcp, credentials=credentials)
    register_google_analytics(mcp, credentials=credentials)
    register_google_docs(mcp, credentials=credentials)
    register_google_maps(mcp, credentials=credentials)
    register_google_sheets(mcp, credentials=credentials)
    register_account_info(mcp, credentials=credentials)

    # --- File system toolkits ---
    register_view_file(mcp)
    register_write_to_file(mcp)
@@ -162,6 +193,8 @@ def _register_verified(
    register_apply_diff(mcp)
    register_apply_patch(mcp)
    register_grep_search(mcp)
    # hashline_edit: anchor-based editing, pairs with view_file/grep_search hashline mode
    register_hashline_edit(mcp)
    register_execute_command(mcp)
    register_data_tools(mcp)
    register_csv(mcp)

@@ -0,0 +1,104 @@
# Account Info Tool

Query connected accounts and their identities at runtime.

## Features

- **get_account_info** - List connected accounts with provider and identity details

## Overview

This tool allows agents to discover which external accounts are connected and available for use. It queries the credential store to retrieve account metadata without exposing secrets.

## Setup

No additional configuration required. The tool reads from the configured credential store.

## Usage Examples

### List All Connected Accounts
```python
get_account_info()
```

Returns:
```python
{
    "accounts": [
        {
            "account_id": "google_main",
            "provider": "google",
            "identity": "user@gmail.com"
        },
        {
            "account_id": "slack_workspace",
            "provider": "slack",
            "identity": "My Workspace"
        }
    ],
    "count": 2
}
```

### Filter by Provider
```python
get_account_info(provider="google")
```

Returns only Google-connected accounts:
```python
{
    "accounts": [
        {
            "account_id": "google_main",
            "provider": "google",
            "identity": "user@gmail.com"
        }
    ],
    "count": 1
}
```

## API Reference

### get_account_info

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| provider | str | No | Filter by provider type (e.g., "google", "slack") |

### Response Fields

| Field | Type | Description |
|-------|------|-------------|
| accounts | list | List of connected account objects |
| count | int | Number of accounts returned |

### Account Object

| Field | Type | Description |
|-------|------|-------------|
| account_id | str | Unique identifier for the account |
| provider | str | Provider type (google, slack, github, etc.) |
| identity | str | Human-readable identity (email, username, workspace) |

## Supported Providers

Common providers that may appear:
- `google` - Google accounts (Gmail, Drive, Calendar)
- `slack` - Slack workspaces
- `github` - GitHub accounts
- `hubspot` - HubSpot CRM accounts
- `brevo` - Brevo email/SMS accounts
- And any other configured OAuth or API integrations

## Error Handling
```python
{"accounts": [], "message": "No credential store configured"}
```
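
A minimal defensive pattern, assuming the response shapes shown above (the `message` key may be absent on success):
```python
info = get_account_info()
if not info["accounts"]:
    # Nothing connected, or no credential store configured; degrade gracefully.
    print(info.get("message", "No accounts connected"))
else:
    for acct in info["accounts"]:
        print(f'{acct["provider"]}: {acct["identity"]}')
```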

## Use Cases

- **Multi-account workflows**: Determine which accounts are available before making API calls
- **User context**: Show users which accounts are connected in chat interfaces
- **Conditional logic**: Route tasks to different accounts based on availability
@@ -28,7 +28,7 @@ For quick testing, get a token from the [Google OAuth Playground](https://develo
4. Set the environment variable:

```bash
export GOOGLE_CALENDAR_ACCESS_TOKEN="your-access-token"
export GOOGLE_ACCESS_TOKEN="your-access-token"
```

**Note:** Access tokens from OAuth Playground expire after ~1 hour. For production, use Aden OAuth.
@@ -224,7 +224,7 @@ All tools return a dict with either success data or an error:
```json
{
  "error": "Calendar credentials not configured",
  "help": "Set GOOGLE_CALENDAR_ACCESS_TOKEN environment variable"
  "help": "Set GOOGLE_ACCESS_TOKEN environment variable"
}
```


@@ -8,7 +8,7 @@ Supports:

Requires OAuth 2.0 credentials:
- Aden: Use aden_provider_name="google-calendar" for managed OAuth (recommended)
- Direct: Set GOOGLE_CALENDAR_ACCESS_TOKEN with token from OAuth Playground
- Direct: Set GOOGLE_ACCESS_TOKEN with token from OAuth Playground
"""

from __future__ import annotations
@@ -65,7 +65,7 @@ def register_tools(
    if lifecycle_manager:
        logger.info("Google Calendar OAuth auto-refresh enabled")

    def _get_token(account: str = "") -> str | None:
    def _get_token() -> str | None:
        """
        Get OAuth token, refreshing if needed.

@@ -82,22 +82,17 @@ def register_tools(

        # Fall back to credential store adapter
        if credentials is not None:
            if account:
                return credentials.get_by_alias(
                    "google_calendar_oauth",
                    account,
                )
            return credentials.get("google_calendar_oauth")
            return credentials.get("google")

        # Fall back to environment variable
        return os.getenv("GOOGLE_CALENDAR_ACCESS_TOKEN")
        return os.getenv("GOOGLE_ACCESS_TOKEN")

    def _get_headers(account: str = "") -> dict[str, str]:
    def _get_headers() -> dict[str, str]:
        """Get authorization headers for API requests.

        Note: Callers must use _check_credentials() first to ensure token exists.
        """
        token = _get_token(account)
        token = _get_token()
        if token is None:
            token = ""  # Will fail auth but prevents "Bearer None" in logs
        return {
@@ -105,13 +100,13 @@ def register_tools(
            "Content-Type": "application/json",
        }

    def _check_credentials(account: str = "") -> dict | None:
    def _check_credentials() -> dict | None:
        """Check if credentials are configured. Returns error dict if not."""
        token = _get_token(account)
        token = _get_token()
        if not token:
            return {
                "error": "Calendar credentials not configured",
                "help": "Set GOOGLE_CALENDAR_ACCESS_TOKEN environment variable",
                "help": "Set GOOGLE_ACCESS_TOKEN environment variable",
            }
        return None

@@ -182,7 +177,6 @@ def register_tools(
        time_max: str | None = None,
        max_results: int = 10,
        query: str | None = None,
        account: str = "",
        # Tracking parameters (injected by framework, ignored by tool)
        workspace_id: str | None = None,
        agent_id: str | None = None,
@@ -204,7 +198,7 @@ def register_tools(
        Returns:
            Dict with list of events or error message
        """
        cred_error = _check_credentials(account)
        cred_error = _check_credentials()
        if cred_error:
            return cred_error

@@ -230,7 +224,7 @@ def register_tools(
        try:
            response = httpx.get(
                f"{CALENDAR_API_BASE}/calendars/{_encode_id(calendar_id)}/events",
                headers=_get_headers(account),
                headers=_get_headers(),
                params=params,
                timeout=30.0,
            )
@@ -274,7 +268,6 @@ def register_tools(
    def calendar_get_event(
        event_id: str,
        calendar_id: str = "primary",
        account: str = "",
        # Tracking parameters (injected by framework, ignored by tool)
        workspace_id: str | None = None,
        agent_id: str | None = None,
@@ -293,7 +286,7 @@ def register_tools(
        Returns:
            Dict with event details or error message
        """
        cred_error = _check_credentials(account)
        cred_error = _check_credentials()
        if cred_error:
            return cred_error

@@ -303,7 +296,7 @@ def register_tools(
        try:
            response = httpx.get(
                f"{CALENDAR_API_BASE}/calendars/{_encode_id(calendar_id)}/events/{_encode_id(event_id)}",
                headers=_get_headers(account),
                headers=_get_headers(),
                timeout=30.0,
            )
            return _handle_response(response)
@@ -325,7 +318,6 @@ def register_tools(
        send_notifications: bool = True,
        timezone: str | None = None,
        all_day: bool = False,
        account: str = "",
        # Tracking parameters (injected by framework, ignored by tool)
        workspace_id: str | None = None,
        agent_id: str | None = None,
@@ -355,7 +347,7 @@ def register_tools(
        Returns:
            Dict with created event details or error message
        """
        cred_error = _check_credentials(account)
        cred_error = _check_credentials()
        if cred_error:
            return cred_error

@@ -420,7 +412,7 @@ def register_tools(
        try:
            response = httpx.post(
                f"{CALENDAR_API_BASE}/calendars/{_encode_id(calendar_id)}/events",
                headers=_get_headers(account),
                headers=_get_headers(),
                json=event_body,
                params=params,
                timeout=30.0,
@@ -447,7 +439,6 @@ def register_tools(
        timezone: str | None = None,
        all_day: bool = False,
        add_meet_link: bool = False,
        account: str = "",
        # Tracking parameters (injected by framework, ignored by tool)
        workspace_id: str | None = None,
        agent_id: str | None = None,
@@ -479,7 +470,7 @@ def register_tools(
        Returns:
            Dict with updated event details or error message
        """
        cred_error = _check_credentials(account)
        cred_error = _check_credentials()
        if cred_error:
            return cred_error

@@ -507,7 +498,7 @@ def register_tools(
        try:
            get_response = httpx.get(
                f"{CALENDAR_API_BASE}/calendars/{_encode_id(calendar_id)}/events/{_encode_id(event_id)}",
                headers=_get_headers(account),
                headers=_get_headers(),
                timeout=30.0,
            )
            event_data = _handle_response(get_response)
@@ -574,7 +565,7 @@ def register_tools(
        try:
            response = httpx.patch(
                f"{CALENDAR_API_BASE}/calendars/{_encode_id(calendar_id)}/events/{_encode_id(event_id)}",
                headers=_get_headers(account),
                headers=_get_headers(),
                json=patch_body,
                params=params,
                timeout=30.0,
@@ -591,7 +582,6 @@ def register_tools(
        event_id: str,
        calendar_id: str = "primary",
        send_notifications: bool = True,
        account: str = "",
        # Tracking parameters (injected by framework, ignored by tool)
        workspace_id: str | None = None,
        agent_id: str | None = None,
@@ -611,7 +601,7 @@ def register_tools(
        Returns:
            Dict with success status or error message
        """
        cred_error = _check_credentials(account)
        cred_error = _check_credentials()
        if cred_error:
            return cred_error

@@ -623,7 +613,7 @@ def register_tools(
        try:
            response = httpx.delete(
                f"{CALENDAR_API_BASE}/calendars/{_encode_id(calendar_id)}/events/{_encode_id(event_id)}",
                headers=_get_headers(account),
                headers=_get_headers(),
                params=params,
                timeout=30.0,
            )
@@ -641,7 +631,6 @@ def register_tools(
    @mcp.tool()
    def calendar_list_calendars(
        max_results: int = 100,
        account: str = "",
        # Tracking parameters (injected by framework, ignored by tool)
        workspace_id: str | None = None,
        agent_id: str | None = None,
@@ -659,7 +648,7 @@ def register_tools(
        Returns:
            Dict with list of calendars or error message
        """
        cred_error = _check_credentials(account)
        cred_error = _check_credentials()
        if cred_error:
            return cred_error

@@ -669,7 +658,7 @@ def register_tools(
        try:
            response = httpx.get(
                f"{CALENDAR_API_BASE}/users/me/calendarList",
                headers=_get_headers(account),
                headers=_get_headers(),
                params={"maxResults": max_results},
                timeout=30.0,
            )
@@ -704,7 +693,6 @@ def register_tools(
    @mcp.tool()
    def calendar_get_calendar(
        calendar_id: str,
        account: str = "",
        # Tracking parameters (injected by framework, ignored by tool)
        workspace_id: str | None = None,
        agent_id: str | None = None,
@@ -722,7 +710,7 @@ def register_tools(
        Returns:
            Dict with calendar details or error message
        """
        cred_error = _check_credentials(account)
        cred_error = _check_credentials()
        if cred_error:
            return cred_error

@@ -732,7 +720,7 @@ def register_tools(
        try:
            response = httpx.get(
                f"{CALENDAR_API_BASE}/calendars/{_encode_id(calendar_id)}",
                headers=_get_headers(account),
                headers=_get_headers(),
                timeout=30.0,
            )
            return _handle_response(response)
@@ -742,20 +730,110 @@ def register_tools(
        except httpx.RequestError as e:
            return {"error": f"Network error: {_sanitize_error(e)}"}

    def _parse_event_dt(dt_str: str) -> datetime:
        """Parse an ISO 8601 datetime string into a timezone-aware datetime."""
        dt = datetime.fromisoformat(dt_str)
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=UTC)
        return dt

    def _compute_busy_free_conflicts(
        events: list[dict], window_start: datetime, window_end: datetime
    ) -> tuple[list[dict], list[dict], list[dict]]:
        """Compute merged busy blocks, free slots, and conflicts from events.

        Returns (busy, free_slots, conflicts).
        """
        # Build intervals from events, skipping transparent/cancelled
        intervals: list[tuple[datetime, datetime, str]] = []
        for ev in events:
            if ev.get("transparency") == "transparent" or ev.get("status") == "cancelled":
                continue
            start_str = ev.get("start")
            end_str = ev.get("end")
            if not start_str or not end_str:
                continue
            # Skip all-day events (date-only strings) for time-based availability
            if _DATE_ONLY_RE.match(start_str) or _DATE_ONLY_RE.match(end_str):
                continue
            intervals.append(
                (
                    _parse_event_dt(start_str),
                    _parse_event_dt(end_str),
                    ev.get("summary", "(No title)"),
                )
            )

        intervals.sort(key=lambda x: x[0])

        # Merge overlapping intervals into busy blocks and detect conflicts
        busy: list[dict] = []
        conflicts: list[dict] = []
        if intervals:
            cur_start, cur_end, cur_name = intervals[0]
            cur_names = [cur_name]
            for iv_start, iv_end, iv_name in intervals[1:]:
                if iv_start < cur_end:
                    # Overlap detected
                    cur_names.append(iv_name)
                    if iv_end > cur_end:
                        cur_end = iv_end
                else:
                    # No overlap — flush current block
                    if len(cur_names) > 1:
                        conflicts.append(
                            {
                                "events": cur_names,
                                "overlap_start": cur_start.isoformat(),
                                "overlap_end": cur_end.isoformat(),
                            }
                        )
                    busy.append({"start": cur_start.isoformat(), "end": cur_end.isoformat()})
                    cur_start, cur_end = iv_start, iv_end
                    cur_names = [iv_name]
            # Flush last block
            if len(cur_names) > 1:
                conflicts.append(
                    {
                        "events": cur_names,
                        "overlap_start": cur_start.isoformat(),
                        "overlap_end": cur_end.isoformat(),
                    }
                )
            busy.append({"start": cur_start.isoformat(), "end": cur_end.isoformat()})

        # Compute free slots as gaps between busy blocks within the window
        free_slots: list[dict] = []
        cursor = window_start
        for block in busy:
            block_start = _parse_event_dt(block["start"])
            if block_start > cursor:
                free_slots.append({"start": cursor.isoformat(), "end": block_start.isoformat()})
            block_end = _parse_event_dt(block["end"])
            if block_end > cursor:
                cursor = block_end
        if cursor < window_end:
            free_slots.append({"start": cursor.isoformat(), "end": window_end.isoformat()})

        return busy, free_slots, conflicts
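
    # Illustrative behaviour: a 09:00-10:00 "Standup" and a 09:30-11:00 "Review"
    # merge into one busy block 09:00-11:00 and are reported together as a
    # conflict; whatever remains of the window becomes free slots.
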
    @mcp.tool()
    def calendar_check_availability(
        time_min: str,
        time_max: str,
        calendars: list[str] | None = None,
        timezone: str = "UTC",
        account: str = "",
        # Tracking parameters (injected by framework, ignored by tool)
        workspace_id: str | None = None,
        agent_id: str | None = None,
        session_id: str | None = None,
    ) -> dict:
        """
        Check free/busy availability for scheduling.
        Check availability by listing actual events in the time range.

        Returns individual events, merged busy blocks, free slots, and any
        scheduling conflicts (overlapping events). Uses the Events API instead
        of FreeBusy for accurate per-event visibility.

        Args:
            time_min: Start of time range (ISO 8601 format)
@@ -767,9 +845,9 @@ def register_tools(
            session_id: Tracking parameter (injected by framework)

        Returns:
            Dict with busy periods for each calendar or error message
            Dict with events, busy periods, free slots, and conflicts
        """
        cred_error = _check_credentials(account)
        cred_error = _check_credentials()
        if cred_error:
            return cred_error

@@ -781,43 +859,67 @@ def register_tools(
|
||||
if calendars is None:
|
||||
calendars = ["primary"]
|
||||
|
||||
request_body = {
|
||||
"timeMin": time_min,
|
||||
"timeMax": time_max,
|
||||
"timeZone": timezone,
|
||||
"items": [{"id": cal_id} for cal_id in calendars],
|
||||
}
|
||||
formatted_calendars = {}
|
||||
|
||||
try:
|
||||
response = httpx.post(
|
||||
f"{CALENDAR_API_BASE}/freeBusy",
|
||||
headers=_get_headers(account),
|
||||
json=request_body,
|
||||
timeout=30.0,
|
||||
)
|
||||
result = _handle_response(response)
|
||||
|
||||
if "error" in result:
|
||||
return result
|
||||
|
||||
# Format the response for easier consumption
|
||||
formatted_calendars = {}
|
||||
for cal_id, cal_data in result.get("calendars", {}).items():
|
||||
if "errors" in cal_data:
|
||||
formatted_calendars[cal_id] = {
|
||||
"error": cal_data["errors"][0].get("reason", "Unknown error")
|
||||
}
|
||||
else:
|
||||
formatted_calendars[cal_id] = {"busy": cal_data.get("busy", [])}
|
||||
|
||||
return {
|
||||
"time_min": time_min,
|
||||
"time_max": time_max,
|
||||
"timezone": timezone,
|
||||
"calendars": formatted_calendars,
|
||||
for cal_id in calendars:
|
||||
params: dict = {
|
||||
"timeMin": time_min,
|
||||
"timeMax": time_max,
|
||||
"singleEvents": "true",
|
||||
"orderBy": "startTime",
|
||||
"maxResults": 250,
|
||||
}
|
||||
|
||||
except httpx.TimeoutException:
|
||||
return {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
return {"error": f"Network error: {_sanitize_error(e)}"}
|
||||
try:
|
||||
response = httpx.get(
|
||||
f"{CALENDAR_API_BASE}/calendars/{_encode_id(cal_id)}/events",
|
||||
headers=_get_headers(),
|
||||
params=params,
|
||||
timeout=30.0,
|
||||
)
|
||||
result = _handle_response(response)
|
||||
|
||||
if "error" in result:
|
||||
formatted_calendars[cal_id] = {"error": result["error"]}
|
||||
continue
|
||||
|
||||
# Format events
|
||||
events = []
|
||||
for item in result.get("items", []):
|
||||
start = item.get("start", {})
|
||||
end = item.get("end", {})
|
||||
events.append(
|
||||
{
|
||||
"summary": item.get("summary", "(No title)"),
|
||||
"start": start.get("dateTime") or start.get("date"),
|
||||
"end": end.get("dateTime") or end.get("date"),
|
||||
"status": item.get("status", "confirmed"),
|
||||
"transparency": item.get("transparency", "opaque"),
|
||||
}
|
||||
)
|
||||
|
||||
# Compute busy/free/conflicts
|
||||
window_start = _parse_event_dt(time_min)
|
||||
window_end = _parse_event_dt(time_max)
|
||||
busy, free_slots, conflicts = _compute_busy_free_conflicts(
|
||||
events, window_start, window_end
|
||||
)
|
||||
|
||||
formatted_calendars[cal_id] = {
|
||||
"events": events,
|
||||
"busy": busy,
|
||||
"free_slots": free_slots,
|
||||
"conflicts": conflicts,
|
||||
}
|
||||
|
||||
except httpx.TimeoutException:
|
||||
formatted_calendars[cal_id] = {"error": "Request timed out"}
|
||||
except httpx.RequestError as e:
|
||||
formatted_calendars[cal_id] = {"error": f"Network error: {_sanitize_error(e)}"}
|
||||
|
||||
return {
|
||||
"time_min": time_min,
|
||||
"time_max": time_max,
|
||||
"timezone": timezone,
|
||||
"calendars": formatted_calendars,
|
||||
}
|
||||
|
||||
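For orientation, here is what a call to the rewritten tool might look like and the shape of the per-calendar payload it assembles; the argument values below are illustrative only:

```python
# Illustrative call; times and calendar IDs are made up.
result = calendar_check_availability(
    time_min="2024-01-15T09:00:00Z",
    time_max="2024-01-15T17:00:00Z",
    calendars=["primary"],
    timezone="UTC",
)

# Per the return shape above, each calendar entry carries four lists:
per_cal = result["calendars"]["primary"]
per_cal["events"]      # individual events with summary/start/end/status/transparency
per_cal["busy"]        # merged busy blocks
per_cal["free_slots"]  # gaps between busy blocks inside the window
per_cal["conflicts"]   # groups of overlapping events
```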
@@ -0,0 +1,168 @@
# CSV Tool

Read, write, and query CSV files with SQL support via DuckDB.

## Features

- **csv_read** - Read CSV file contents with pagination
- **csv_write** - Create new CSV files
- **csv_append** - Append rows to existing CSV files
- **csv_info** - Get CSV metadata without loading all data
- **csv_sql** - Query CSV files using SQL (powered by DuckDB)

## Setup

No API keys required. Files are accessed within the session sandbox.

For SQL queries, DuckDB must be installed:

```bash
pip install duckdb
# or
uv pip install tools[sql]
```

## Usage Examples

### Read a CSV File

```python
csv_read(
    path="data/sales.csv",
    workspace_id="ws_123",
    agent_id="agent_1",
    session_id="session_1",
    limit=100,
    offset=0
)
```

### Write a New CSV

```python
csv_write(
    path="output/report.csv",
    workspace_id="ws_123",
    agent_id="agent_1",
    session_id="session_1",
    columns=["name", "email", "score"],
    rows=[
        {"name": "Alice", "email": "alice@example.com", "score": 95},
        {"name": "Bob", "email": "bob@example.com", "score": 87}
    ]
)
```

### Append Rows

```python
csv_append(
    path="data/log.csv",
    workspace_id="ws_123",
    agent_id="agent_1",
    session_id="session_1",
    rows=[
        {"timestamp": "2024-01-15", "event": "login", "user": "alice"}
    ]
)
```

### Get File Info

```python
csv_info(
    path="data/large_file.csv",
    workspace_id="ws_123",
    agent_id="agent_1",
    session_id="session_1"
)
# Returns: columns, row count, file size (without loading all data)
```

### Query with SQL

```python
csv_sql(
    path="data/sales.csv",
    workspace_id="ws_123",
    agent_id="agent_1",
    session_id="session_1",
    query="SELECT category, SUM(amount) as total FROM data GROUP BY category ORDER BY total DESC"
)
```
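The fixed `data` table name suggests the tool loads the CSV into DuckDB under that alias before running the query. A minimal sketch of that pattern (the function name and details are assumptions, not the tool's actual internals):

```python
import duckdb

def run_csv_select(path: str, query: str) -> list[dict]:
    # Sketch only: expose the CSV as a view named `data`, then run the
    # caller's SELECT against it. Real code should validate `path` and
    # the query rather than interpolating them directly.
    con = duckdb.connect()
    con.execute(f"CREATE VIEW data AS SELECT * FROM read_csv_auto('{path}')")
    cur = con.execute(query)
    cols = [d[0] for d in cur.description]
    return [dict(zip(cols, row)) for row in cur.fetchall()]
```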

## API Reference

### csv_read

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| path | str | Yes | Path to CSV file (relative to sandbox) |
| workspace_id | str | Yes | Workspace identifier |
| agent_id | str | Yes | Agent identifier |
| session_id | str | Yes | Session identifier |
| limit | int | No | Max rows to return (None = all) |
| offset | int | No | Rows to skip (default: 0) |

### csv_write

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| path | str | Yes | Path for new CSV file |
| workspace_id | str | Yes | Workspace identifier |
| agent_id | str | Yes | Agent identifier |
| session_id | str | Yes | Session identifier |
| columns | list[str] | Yes | Column names for header |
| rows | list[dict] | Yes | Row data as dictionaries |

### csv_append

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| path | str | Yes | Path to existing CSV file |
| workspace_id | str | Yes | Workspace identifier |
| agent_id | str | Yes | Agent identifier |
| session_id | str | Yes | Session identifier |
| rows | list[dict] | Yes | Rows to append |

### csv_info

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| path | str | Yes | Path to CSV file |
| workspace_id | str | Yes | Workspace identifier |
| agent_id | str | Yes | Agent identifier |
| session_id | str | Yes | Session identifier |

### csv_sql

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| path | str | Yes | Path to CSV file |
| workspace_id | str | Yes | Workspace identifier |
| agent_id | str | Yes | Agent identifier |
| session_id | str | Yes | Session identifier |
| query | str | Yes | SQL query (table name is `data`) |

## SQL Query Examples

```sql
-- Filter rows
SELECT * FROM data WHERE status = 'pending'

-- Aggregate data
SELECT category, COUNT(*) as count, AVG(price) as avg_price
FROM data GROUP BY category

-- Sort and limit
SELECT name, price FROM data ORDER BY price DESC LIMIT 5

-- Case-insensitive search
SELECT * FROM data WHERE LOWER(name) LIKE '%phone%'
```

**Note:** Only SELECT queries are allowed for security.
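One plausible way to enforce the SELECT-only rule (a sketch under assumptions; the tool's actual check is not shown in this diff):

```python
import re

def ensure_select_only(query: str) -> None:
    # Allow a single statement that starts with SELECT (or a WITH ... SELECT CTE).
    stripped = query.strip().rstrip(";").strip()
    if ";" in stripped:
        raise ValueError("Only a single statement is allowed")
    if not re.match(r"(?is)^(select|with)\b", stripped):
        raise ValueError("Only SELECT queries are allowed for security reasons")
```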
## Error Handling

```python
{"error": "File not found: path/to/file.csv"}
{"error": "File must have .csv extension"}
{"error": "CSV file is empty or has no headers"}
{"error": "CSV parsing error: ..."}
{"error": "File encoding error: unable to decode as UTF-8"}
{"error": "DuckDB not installed. Install with: uv pip install duckdb"}
{"error": "Only SELECT queries are allowed for security reasons"}
```
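Since every failure surfaces as an `error` key rather than an exception, callers can branch on the dict; for example (the handler names below are hypothetical):

```python
result = csv_read(
    path="data/sales.csv",
    workspace_id="ws_123",
    agent_id="agent_1",
    session_id="session_1",
)
if "error" in result:
    # e.g. {"error": "File not found: data/sales.csv"}
    handle_failure(result["error"])   # hypothetical handler
else:
    process(result)                   # hypothetical consumer
```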
@@ -36,12 +36,13 @@ grep_search(
 | `agent_id` | str | Yes | - | The ID of the agent |
 | `session_id` | str | Yes | - | The ID of the current session |
 | `recursive` | bool | No | False | Whether to search recursively in subdirectories |
+| `hashline` | bool | No | False | If True, include an `anchor` field (`N:hhhh`) in each match for use with `hashline_edit` |

 ## Returns

 Returns a dictionary with the following structure:

-**Success:**
+**Success (default mode):**
 ```python
 {
     "success": True,
@@ -64,6 +65,25 @@ Returns a dictionary with the following structure:
 }
 ```

+**Success (hashline mode):**
+```python
+{
+    "success": True,
+    "pattern": "def \\w+\\(",
+    "path": "src",
+    "recursive": True,
+    "matches": [
+        {
+            "file": "src/main.py",
+            "line_number": 10,
+            "line_content": "def process_data(args):",
+            "anchor": "10:a3f2"
+        }
+    ],
+    "total_matches": 1
+}
+```
+
 **No matches:**
 ```python
 {
@@ -3,6 +3,8 @@ import re

 from mcp.server.fastmcp import FastMCP

+from aden_tools.hashline import HASHLINE_MAX_FILE_BYTES, compute_line_hash
+
 from ..security import WORKSPACES_DIR, get_secure_path

@@ -17,12 +19,14 @@ def register_tools(mcp: FastMCP) -> None:
         agent_id: str,
         session_id: str,
         recursive: bool = False,
+        hashline: bool = False,
     ) -> dict:
         """
         Search for a pattern in a file or directory within the session sandbox.

         Use this when you need to find specific content or patterns in files using regex.
         Set recursive=True to search through all subdirectories.
+        Set hashline=True to include anchor hashes in results for use with hashline_edit.

         Args:
             path: The path to search in (file or directory, relative to session root)
@@ -31,6 +35,7 @@ def register_tools(mcp: FastMCP) -> None:
             agent_id: The ID of the agent
             session_id: The ID of the current session
             recursive: Whether to search recursively in directories (default: False)
+            hashline: If True, include anchor field (N:hhhh) in each match (default: False)

         Returns:
             Dict with search results and match details, or error dict
@@ -48,6 +53,7 @@ def register_tools(mcp: FastMCP) -> None:
         session_root = os.path.join(WORKSPACES_DIR, workspace_id, agent_id, session_id)

         matches = []
+        skipped_large_files = []

         if os.path.isfile(secure_path):
             files = [secure_path]
@@ -67,21 +73,46 @@ def register_tools(mcp: FastMCP) -> None:
             # Calculate relative path for display
             display_path = os.path.relpath(file_path, session_root)
             try:
-                with open(file_path, encoding="utf-8") as f:
-                    for i, line in enumerate(f, 1):
-                        if regex.search(line):
-                            matches.append(
-                                {
-                                    "file": display_path,
-                                    "line_number": i,
-                                    "line_content": line.strip(),
-                                }
-                            )
+                if hashline:
+                    # Use splitlines() for anchor consistency with
+                    # view_file/hashline_edit (handles Unicode line
+                    # separators like \u2028, \x85).
+                    # Skip files > 10MB to avoid excessive memory use.
+                    file_size = os.path.getsize(file_path)
+                    if file_size > HASHLINE_MAX_FILE_BYTES:
+                        skipped_large_files.append(display_path)
+                        continue
+                    with open(file_path, encoding="utf-8") as f:
+                        content = f.read()
+                    for i, line in enumerate(content.splitlines(), 1):
+                        if not regex.search(line):
+                            continue
+                        matches.append(
+                            {
+                                "file": display_path,
+                                "line_number": i,
+                                "line_content": line,
+                                "anchor": f"{i}:{compute_line_hash(line)}",
+                            }
+                        )
+                else:
+                    with open(file_path, encoding="utf-8") as f:
+                        for i, line in enumerate(f, 1):
+                            bare = line.rstrip("\n\r")
+                            if not regex.search(bare):
+                                continue
+                            matches.append(
+                                {
+                                    "file": display_path,
+                                    "line_number": i,
+                                    "line_content": bare.strip(),
+                                }
+                            )
             except (UnicodeDecodeError, PermissionError):
                 # Skips files that cannot be decoded or lack permissions
                 continue

-        return {
+        result = {
             "success": True,
             "pattern": pattern,
             "path": path,
@@ -89,6 +120,9 @@ def register_tools(mcp: FastMCP) -> None:
             "matches": matches,
             "total_matches": len(matches),
         }
+        if skipped_large_files:
+            result["skipped_large_files"] = skipped_large_files
+        return result

     # 2. Specific Exception Handling (Issue #55 Requirements)
     except FileNotFoundError:
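For intuition, an anchor `N:hhhh` pairs a line number with a short hash of the line's content, so `hashline_edit` can verify the line is unchanged before editing it. A sketch of what such a hash could look like (the real `compute_line_hash` lives in `aden_tools.hashline` and may differ):

```python
import hashlib

def compute_line_hash_sketch(line: str) -> str:
    # Hypothetical scheme: 4 hex digits of a BLAKE2 digest of the line's text.
    return hashlib.blake2b(line.encode("utf-8"), digest_size=2).hexdigest()

line = "def process_data(args):"
anchor = f"10:{compute_line_hash_sketch(line)}"  # e.g. "10:af3c"
```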
@@ -0,0 +1,19 @@
"""Backward-compatible re-exports from aden_tools.hashline.

This module has been moved to aden_tools.hashline for shared use across
both file_system_toolkits and file_ops (coder tools). All imports continue
to work via this shim.
"""

from aden_tools.hashline import (  # noqa: F401
    HASHLINE_PREFIX_RE,
    compute_line_hash,
    format_hashlines,
    maybe_strip,
    parse_anchor,
    strip_boundary_echo,
    strip_content_prefixes,
    strip_insert_echo,
    validate_anchor,
    whitespace_equal,
)
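The shim keeps existing import sites working while new code targets the canonical package; both paths resolve to the same objects. The shim module's own location is not shown in this diff, so the old path below is hypothetical:

```python
# Canonical path:
from aden_tools.hashline import compute_line_hash

# Old path, kept working by the re-export shim above
# (module location is hypothetical here):
# from file_system_toolkits.hashline import compute_line_hash
```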
Some files were not shown because too many files have changed in this diff.