# Compare commits

391 commits
@@ -0,0 +1,78 @@

```yaml
name: Standard Bounty
description: A bounty task for general framework contributions (not integration-specific)
title: "[Bounty]: "
labels: []
body:
  - type: markdown
    attributes:
      value: |
        ## Standard Bounty

        This issue is part of the [Bounty Program](../../docs/bounty-program/README.md).
        **Claim this bounty** by commenting below — a maintainer will assign you within 24 hours.

  - type: dropdown
    id: bounty-size
    attributes:
      label: Bounty Size
      options:
        - "Small (10 pts)"
        - "Medium (30 pts)"
        - "Large (75 pts)"
        - "Extreme (150 pts)"
    validations:
      required: true

  - type: dropdown
    id: difficulty
    attributes:
      label: Difficulty
      options:
        - Easy
        - Medium
        - Hard
    validations:
      required: true

  - type: textarea
    id: description
    attributes:
      label: Description
      description: What needs to be done to complete this bounty.
      placeholder: |
        Describe the specific task, including:
        - What the contributor needs to do
        - Links to relevant files in the repo
        - Any context or motivation for the change
    validations:
      required: true

  - type: textarea
    id: acceptance-criteria
    attributes:
      label: Acceptance Criteria
      description: What "done" looks like. The PR must meet all criteria.
      placeholder: |
        - [ ] Criterion 1
        - [ ] Criterion 2
        - [ ] CI passes
    validations:
      required: true

  - type: textarea
    id: relevant-files
    attributes:
      label: Relevant Files
      description: Links to files or directories related to this bounty.
      placeholder: |
        - `path/to/file.py`
        - `path/to/directory/`

  - type: textarea
    id: resources
    attributes:
      label: Resources
      description: Links to docs, issues, or external references that will help.
      placeholder: |
        - Related issue: #XXXX
        - Docs: https://...
```
```diff
@@ -2,14 +2,22 @@ name: Bounty completed
 description: Awards points and notifies Discord when a bounty PR is merged

 on:
-  pull_request:
+  pull_request_target:
     types: [closed]

+  workflow_dispatch:
+    inputs:
+      pr_number:
+        description: "PR number to process (for missed bounties)"
+        required: true
+        type: number
+
 jobs:
   bounty-notify:
     if: >
-      github.event.pull_request.merged == true &&
-      contains(join(github.event.pull_request.labels.*.name, ','), 'bounty:')
+      github.event_name == 'workflow_dispatch' ||
+      (github.event.pull_request.merged == true &&
+      contains(join(github.event.pull_request.labels.*.name, ','), 'bounty:'))
     runs-on: ubuntu-latest
     timeout-minutes: 5
     permissions:
@@ -32,6 +40,8 @@ jobs:
       GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
       GITHUB_REPOSITORY_NAME: ${{ github.event.repository.name }}
       DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_BOUNTY_WEBHOOK_URL }}
       BOT_API_URL: ${{ secrets.BOT_API_URL }}
       BOT_API_KEY: ${{ secrets.BOT_API_KEY }}
+      LURKR_API_KEY: ${{ secrets.LURKR_API_KEY }}
+      LURKR_GUILD_ID: ${{ secrets.LURKR_GUILD_ID }}
-      PR_NUMBER: ${{ github.event.pull_request.number }}
+      PR_NUMBER: ${{ inputs.pr_number || github.event.pull_request.number }}
```
@@ -1,126 +0,0 @@

```yaml
name: Link Discord account
description: Auto-creates a PR to add contributor to contributors.yml when a link-discord issue is opened

on:
  issues:
    types: [opened]

jobs:
  link-discord:
    if: contains(github.event.issue.labels.*.name, 'link-discord')
    runs-on: ubuntu-latest
    timeout-minutes: 2
    permissions:
      contents: write
      issues: write
      pull-requests: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Parse issue and update contributors.yml
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');

            const issue = context.payload.issue;
            const githubUsername = issue.user.login;

            // Parse the issue body for form fields
            const body = issue.body || '';

            // Extract Discord ID — look for the numeric value after the "Discord User ID" heading
            const discordMatch = body.match(/### Discord User ID\s*\n\s*(\d{17,20})/);
            if (!discordMatch) {
              await github.rest.issues.createComment({
                ...context.repo,
                issue_number: issue.number,
                body: `Could not find a valid Discord ID in the issue body. Please make sure you entered a numeric ID (17-20 digits), not a username.\n\nExample: \`123456789012345678\``
              });
              await github.rest.issues.update({
                ...context.repo,
                issue_number: issue.number,
                state: 'closed',
                state_reason: 'not_planned'
              });
              return;
            }
            const discordId = discordMatch[1];

            // Extract display name (optional)
            const nameMatch = body.match(/### Display Name \(optional\)\s*\n\s*(.+)/);
            const displayName = nameMatch ? nameMatch[1].trim() : '';

            // Check if user already exists
            const yml = fs.readFileSync('contributors.yml', 'utf-8');
            if (yml.includes(`github: ${githubUsername}`)) {
              await github.rest.issues.createComment({
                ...context.repo,
                issue_number: issue.number,
                body: `@${githubUsername} is already in \`contributors.yml\`. If you need to update your Discord ID, please edit the file directly via PR.`
              });
              await github.rest.issues.update({
                ...context.repo,
                issue_number: issue.number,
                state: 'closed',
                state_reason: 'completed'
              });
              return;
            }

            // Append entry to contributors.yml
            let entry = `  - github: ${githubUsername}\n    discord: "${discordId}"`;
            if (displayName && displayName !== '_No response_') {
              entry += `\n    name: ${displayName}`;
            }
            entry += '\n';

            const updated = yml.trimEnd() + '\n' + entry;
            fs.writeFileSync('contributors.yml', updated);

            // Set outputs for commit step
            core.exportVariable('GITHUB_USERNAME', githubUsername);
            core.exportVariable('DISCORD_ID', discordId);
            core.exportVariable('ISSUE_NUMBER', issue.number.toString());

      - name: Create PR
        run: |
          # Check if there are changes
          if git diff --quiet contributors.yml; then
            echo "No changes to contributors.yml"
            exit 0
          fi

          BRANCH="docs/link-discord-${GITHUB_USERNAME}"
          git config user.name "github-actions[bot]"
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git checkout -b "$BRANCH"
          git add contributors.yml
          git commit -m "docs: link @${GITHUB_USERNAME} to Discord"
          git push origin "$BRANCH"

          gh pr create \
            --title "docs: link @${GITHUB_USERNAME} to Discord" \
            --body "Adds @${GITHUB_USERNAME} (Discord \`${DISCORD_ID}\`) to \`contributors.yml\` for bounty XP tracking.

          Closes #${ISSUE_NUMBER}" \
            --base main \
            --head "$BRANCH" \
            --label "link-discord"
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Notify on issue
        uses: actions/github-script@v7
        with:
          script: |
            const username = process.env.GITHUB_USERNAME;
            const issueNumber = parseInt(process.env.ISSUE_NUMBER);

            await github.rest.issues.createComment({
              ...context.repo,
              issue_number: issueNumber,
              body: `A PR has been created to link your account. A maintainer will merge it shortly — once merged, you'll receive XP and Discord pings when your bounty PRs are merged.`
            });
```
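As a quick sanity check of the Discord-ID extraction above, the same pattern can be exercised from Python. The sample body below is illustrative only, shaped like what GitHub's issue-form renderer produces:

```python
import re

# Illustrative issue body in the shape GitHub renders for the form fields.
body = (
    "### Discord User ID\n\n"
    "123456789012345678\n\n"
    "### Display Name (optional)\n\n"
    "Example User"
)

# Same pattern as the workflow's JS regex, translated to Python.
match = re.search(r"### Discord User ID\s*\n\s*(\d{17,20})", body)
assert match is not None and match.group(1) == "123456789012345678"
```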
```diff
@@ -35,6 +35,8 @@ jobs:
       GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
       GITHUB_REPOSITORY_NAME: ${{ github.event.repository.name }}
       DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_BOUNTY_WEBHOOK_URL }}
       BOT_API_URL: ${{ secrets.BOT_API_URL }}
       BOT_API_KEY: ${{ secrets.BOT_API_KEY }}
+      LURKR_API_KEY: ${{ secrets.LURKR_API_KEY }}
+      LURKR_GUILD_ID: ${{ secrets.LURKR_GUILD_ID }}
       SINCE_DATE: ${{ github.event.inputs.since_date || '' }}
```
```
@@ -68,7 +68,6 @@ temp/
exports/*

.claude/settings.local.json
.claude/skills/ship-it/

.venv
```
+150 -27

@@ -1,17 +1,149 @@
# Release Notes

## v0.7.1

**Release Date:** March 13, 2026
**Tag:** v0.7.1

### Chrome-Native Browser Control

v0.7.1 replaces Playwright with direct Chrome DevTools Protocol (CDP) integration. The GCU now launches the user's system Chrome via `open -n` on macOS, connects over CDP, and manages the browser lifecycle end-to-end -- no extra browser binary required.

---

### Highlights

#### System Chrome via CDP

The entire GCU browser stack has been rewritten:

- **Chrome finder & launcher** -- New `chrome_finder.py` discovers installed Chrome, and `chrome_launcher.py` manages the process lifecycle with `--remote-debugging-port`
- **Coexists with the user's browser** -- `open -n` on macOS launches a separate Chrome instance so the user's tabs stay untouched
- **Dynamic viewport sizing** -- The viewport auto-sizes to the available display area, suppressing Chrome warning bars
- **Orphan cleanup** -- Chrome processes are killed on GCU server shutdown to prevent leaks
- **`--no-startup-window`** -- Chrome launches with no initial window until a page is needed
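As a rough illustration of what this stack does under the hood, the sketch below launches a throwaway Chrome with CDP enabled and reads back the DevTools WebSocket URL. The Chrome flags and the `/json/version` endpoint are real; the binary path, port, and overall structure are placeholders, not the actual `chrome_finder`/`chrome_launcher` code (which, per the notes, also uses `open -n` on macOS):

```python
import json
import subprocess
import tempfile
import time
import urllib.request

# Placeholder path; chrome_finder.py is responsible for real discovery.
CHROME = "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
PORT = 9222

proc = subprocess.Popen([
    CHROME,
    f"--remote-debugging-port={PORT}",        # enable CDP
    "--no-startup-window",                    # no empty window on launch
    f"--user-data-dir={tempfile.mkdtemp()}",  # separate profile; user's tabs untouched
])

time.sleep(1.0)  # crude; real code would poll until the endpoint answers

# Chrome serves CDP metadata over HTTP; a CDP client connects to this URL.
with urllib.request.urlopen(f"http://127.0.0.1:{PORT}/json/version") as resp:
    meta = json.load(resp)
print(meta["webSocketDebuggerUrl"])

proc.terminate()  # mirrors the orphan-cleanup behavior described above
```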
#### Per-Subagent Browser Isolation

Each GCU subagent gets its own Chrome user-data directory, preventing cookie/session cross-contamination (see the sketch after this list):

- Unique browser profiles injected per subagent
- Profiles cleaned up after top-level GCU node execution
- Tab origin and age metadata tracked per subagent
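A minimal sketch of that isolation scheme, assuming nothing beyond the bullets above; the helper names and directory layout are invented for illustration:

```python
import shutil
import tempfile
from pathlib import Path

# Hypothetical helper: one throwaway Chrome profile per subagent ID.
_profiles: dict[str, Path] = {}

def profile_for(subagent_id: str) -> Path:
    """Return (creating if needed) a dedicated user-data dir for a subagent."""
    if subagent_id not in _profiles:
        _profiles[subagent_id] = Path(tempfile.mkdtemp(prefix=f"gcu-{subagent_id}-"))
    return _profiles[subagent_id]

def cleanup_profiles() -> None:
    """Called after the top-level GCU node finishes: drop every profile."""
    for path in _profiles.values():
        shutil.rmtree(path, ignore_errors=True)
    _profiles.clear()
```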
#### Dummy Agent Testing Framework

A comprehensive test suite for validating agent graph patterns without LLM calls:

- 8 test modules covering echo, pipeline, branch, parallel merge, retry, feedback loop, worker, and GCU subagent patterns
- Shared fixtures and a `run_all.py` runner for CI integration
- Subagent lifecycle tests

---

### What's New

#### GCU Browser

- **Switch from Playwright to system Chrome via CDP** -- Direct CDP connection replaces the Playwright dependency. (@bryanadenhq)
- **Chrome finder and launcher modules** -- `chrome_finder.py` and `chrome_launcher.py` for cross-platform Chrome discovery and process management. (@bryanadenhq)
- **Dynamic viewport sizing** -- Auto-size the viewport and suppress the Chrome warning bar. (@bryanadenhq)
- **Per-subagent browser profile isolation** -- Unique user-data directories per subagent, with cleanup. (@bryanadenhq)
- **Tab origin/age metadata** -- Track which subagent opened each tab and when. (@bryanadenhq)
- **`browser_close_all` tool** -- Bulk tab cleanup for agents managing many pages. (@bryanadenhq)
- **Auto-track popup pages** -- Popups are automatically captured and tracked. (@bryanadenhq)
- **Auto-snapshot from browser interactions** -- Browser interaction tools return screenshots automatically. (@bryanadenhq)
- **Kill orphaned Chrome processes** -- GCU server shutdown cleans up lingering Chrome instances. (@bryanadenhq)
- **`--no-startup-window` Chrome flag** -- Prevent an empty window on launch. (@bryanadenhq)
- **Launch Chrome via `open -n` on macOS** -- Coexist with the user's running browser. (@bryanadenhq)

#### Framework & Runtime

- **Session resume fix for new agents** -- Correctly resume sessions when a new agent is loaded. (@bryanadenhq)
- **Queen upsert fix** -- Prevent duplicate queen entries on session restore. (@bryanadenhq)
- **Anchor worker monitoring to the queen's session ID on cold-restore** -- Worker monitors reconnect to the correct queen after restart. (@bryanadenhq)
- **Update meta.json when loading workers** -- Worker metadata stays in sync with runtime state. (@RichardTang-Aden)
- **Generate the worker MCP file correctly** -- Fix MCP config generation for spawned workers. (@RichardTang-Aden)
- **Share the event bus so tool events are visible to the parent** -- Tool execution events propagate up to parent graphs. (@bryanadenhq)
- **Subagent activity tracking in queen status** -- Queen instructions include live subagent status. (@bryanadenhq)
- **GCU system prompt updates** -- Auto-snapshots, batching, popup tracking, and `close_all` guidance. (@bryanadenhq)

#### Frontend

- **Loading spinner in draft panel** -- Shows a spinner during the planning phase instead of a blank panel. (@bryanadenhq)
- **Fix credential modal errors** -- The modal no longer swallows errors; the banner stays visible. (@bryanadenhq)
- **Fix credentials_required loop** -- Stop clearing the flag on modal close to prevent infinite re-prompting. (@bryanadenhq)
- **Fix "Add tab" dropdown overflow** -- The dropdown is no longer hidden when many agents are open. (@prasoonmhwr)

#### Testing

- **Dummy agent test framework** -- 8 test modules (echo, pipeline, branch, parallel merge, retry, feedback loop, worker, GCU subagent) with shared fixtures and a CI runner. (@bryanadenhq)
- **Subagent lifecycle tests** -- Validate subagent spawn and completion flows. (@bryanadenhq)

#### Documentation & Infrastructure

- **MCP integration PRD** -- Product requirements for the MCP server registry. (@TimothyZhang7)
- **Skills registry PRD** -- Product requirements for the skill registry system. (@bryanadenhq)
- **Bounty program updates** -- Standard bounty issue template and updated contributor guide. (@bryanadenhq)
- **Windows quickstart** -- Add a default context limit for PowerShell setup. (@bryanadenhq)
- **Remove deprecated files** -- Clean up `setup_mcp.py`, `verify_mcp.py`, `antigravity-setup.md`, and `setup-antigravity-mcp.sh`. (@bryanadenhq)

---

### Bug Fixes

- Fix credential modal swallowing errors and the banner staying open
- Stop clearing `credentials_required` on modal close to prevent an infinite loop
- Share the event bus so tool events are visible to the parent graph
- Use lazy %-formatting in the subagent completion log to avoid an f-string in the logger
- Anchor worker monitoring to the queen's session ID on cold-restore
- Update meta.json when loading workers
- Generate the worker MCP file correctly
- Fix "Add tab" dropdown partially hidden when creating multiple agents

---

### Community Contributors

- **Prasoon Mahawar** (@prasoonmhwr) -- Fix UI overflow on the agent tab dropdown
- **Richard Tang** (@RichardTang-Aden) -- Worker MCP generation and meta.json fixes

---

### Upgrading

```bash
git pull origin main
uv sync
```

The Playwright dependency is no longer required for GCU browser operations. Chrome must be installed on the host system.

---

## v0.7.0

**Release Date:** March 5, 2026
**Tag:** v0.7.0

Session management refactor release.

---

## v0.5.1

**Release Date:** February 18, 2026
**Tag:** v0.5.1
### The Hive Gets a Brain

v0.5.1 is our most ambitious release yet. Hive agents can now **build other agents** -- the new Hive Coder meta-agent writes, tests, and fixes agent packages from natural language. The runtime gains multi-graph support, so one session can orchestrate multiple agents simultaneously. The TUI gets a complete overhaul with an in-app agent picker, live streaming, and seamless escalation to the Coder. And we're now provider-agnostic: Claude Code subscriptions, OpenAI-compatible endpoints, and any LiteLLM-supported model work out of the box.

---

### Highlights

#### Hive Coder -- The Agent That Builds Agents

A native meta-agent that lives inside the framework at `core/framework/agents/hive_coder/`. Give it a natural-language specification and it produces a complete agent package -- goal definition, node prompts, edge routing, MCP tool wiring, tests, and all boilerplate files.

@@ -30,7 +162,7 @@ The Coder ships with:
- **Coder Tools MCP server** -- file I/O, fuzzy-match editing, git snapshots, and sandboxed shell execution (`tools/coder_tools_server.py`)
- **Test generation** -- structural tests for forever-alive agents that don't hang on `runner.run()`

#### Multi-Graph Agent Runtime

`AgentRuntime` now supports loading, managing, and switching between multiple agent graphs within a single session. Six new lifecycle tools give agents (and the TUI) full control:

@@ -44,7 +176,7 @@ await runtime.add_graph("exports/deep_research_agent")

The Hive Coder uses multi-graph internally -- when you escalate from a worker agent, the Coder loads as a separate graph while the worker stays alive in the background. A sketch of the pattern follows.
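Only `add_graph`/`remove_graph` appear in these notes (see the Architecture & Runtime list below); the function shape, arguments, and cleanup order here are illustrative assumptions, not the real API:

```python
# Illustrative only: how a session might juggle two graphs at once.
# The runtime object and graph paths/IDs are placeholders.

async def escalate_to_coder(runtime, worker_path: str, coder_path: str) -> None:
    await runtime.add_graph(worker_path)    # worker agent stays alive...
    await runtime.add_graph(coder_path)     # ...while the Coder loads alongside it
    # ...the Coder edits and fixes the worker package here...
    await runtime.remove_graph(coder_path)  # drop the Coder when escalation ends
```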
#### TUI Revamp

The Terminal UI gets a ground-up rebuild with five major additions:

@@ -54,7 +186,7 @@ The Terminal UI gets a ground-up rebuild with five major additions:
- **PDF attachments** -- `/attach` and `/detach` commands with a native OS file dialog (macOS, Linux, Windows)
- **Multi-graph commands** -- `/graphs`, `/graph <id>`, `/load <path>`, `/unload <id>` for managing agent graphs in-session

#### Provider-Agnostic LLM Support

Hive is no longer Anthropic-only. v0.5.1 adds first-class support for:

@@ -66,9 +198,9 @@ The quickstart script auto-detects Claude Code subscriptions and ZAI Code instal

---

### What's New

#### Architecture & Runtime

- **Hive Coder meta-agent** -- Natural-language agent builder with reference docs, a guardian watchdog, and a `hive code` CLI command. (@TimothyZhang7)
- **Multi-graph agent sessions** -- `add_graph`/`remove_graph` on AgentRuntime, with 6 lifecycle tools (`load_agent`, `unload_agent`, `start_agent`, `restart_agent`, `list_agents`, `get_user_presence`). (@TimothyZhang7)

@@ -79,7 +211,7 @@ The quickstart script auto-detects Claude Code subscriptions and ZAI Code instal
- **Pre-start confirmation prompt** -- Interactive prompt before agent execution, allowing credential updates or abort. (@RichardTang-Aden)
- **Event bus multi-graph support** -- `graph_id` on events, `filter_graph` on subscriptions, an `ESCALATION_REQUESTED` event type, and an `exclude_own_graph` filter. (@TimothyZhang7)

#### TUI Improvements

- **In-app agent picker** (Ctrl+A) -- Tabbed modal for browsing agents with metadata badges (nodes, tools, sessions, tags). (@TimothyZhang7)
- **Runtime-optional TUI startup** -- Launches without a pre-loaded agent and shows the agent picker on startup. (@TimothyZhang7)

@@ -89,7 +221,7 @@ The quickstart script auto-detects Claude Code subscriptions and ZAI Code instal
- **Multi-graph TUI commands** -- `/graphs`, `/graph <id>`, `/load <path>`, `/unload <id>`. (@TimothyZhang7)
- **Agent Guardian watchdog** -- Event-driven monitor that catches secondary agent failures and triggers automatic remediation, with a `--no-guardian` CLI flag. (@TimothyZhang7)

#### New Tool Integrations

| Tool | Description | Contributor |
| --- | --- | --- |

@@ -99,7 +231,7 @@ The quickstart script auto-detects Claude Code subscriptions and ZAI Code instal
| **Google Docs** | Document creation, reading, and editing with OAuth credential support | @haliaeetusvocifer |
| **Gmail enhancements** | Expanded mail operations for inbox management | @bryanadenhq |

#### Infrastructure

- **Default node type → `event_loop`** -- `NodeSpec.node_type` defaults to `"event_loop"` instead of `"llm_tool_use"`. (@TimothyZhang7)
- **Default `max_node_visits` → 0 (unlimited)** -- Nodes default to unlimited visits, reducing friction for feedback loops and forever-alive agents. (@TimothyZhang7)

@@ -112,7 +244,7 @@ The quickstart script auto-detects Claude Code subscriptions and ZAI Code instal

---

### Bug Fixes

- Flush WIP accumulator outputs on cancel/failure so edge conditions see correct values on resume
- Stall detection state preserved across resume (no more resets on checkpoint restore)

@@ -125,13 +257,13 @@ The quickstart script auto-detects Claude Code subscriptions and ZAI Code instal
- Fix email agent version conflicts (@RichardTang-Aden)
- Fix coder tool timeouts (120s for tests, 300s cap for commands)

### Documentation

- Clarify installation and prevent root pip install misuse (@paarths-collab)

---

### Agent Updates

- **Email Inbox Management** -- Consolidate `gmail_inbox_guardian` and `inbox_management` into a single unified agent with updated prompts and config. (@RichardTang-Aden, @bryanadenhq)
- **Job Hunter** -- Updated node prompts, config, and agent metadata; added PDF resume selection. (@bryanadenhq)

@@ -141,7 +273,7 @@ The quickstart script auto-detects Claude Code subscriptions and ZAI Code instal

---

### Breaking Changes

- **Deprecated node types raise `RuntimeError`** -- `llm_tool_use`, `llm_generate`, `function`, `router`, `human_input` now fail instead of warning. Migrate to `event_loop`.
- **`NodeSpec.node_type` defaults to `"event_loop"`** (was `"llm_tool_use"`)

@@ -150,7 +282,7 @@ The quickstart script auto-detects Claude Code subscriptions and ZAI Code instal

---

### Community Contributors

A huge thank you to everyone who contributed to this release:

@@ -165,14 +297,14 @@ A huge thank you to everyone who contributed to this release:

---

### Upgrading

```bash
git pull origin main
uv sync
```

#### Migration Guide

If your agents use deprecated node types, update them:
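The worked example that followed here is not in this capture. Purely as a hedged stand-in, the change amounts to swapping the deprecated `node_type` for `event_loop`; the import path and the `name` keyword below are assumptions:

```python
# Hypothetical sketch -- only the node_type values are confirmed by these notes.
from framework.graph import NodeSpec  # import path assumed

# Before: deprecated node types now raise RuntimeError at load time.
old_spec = NodeSpec(name="summarize", node_type="llm_tool_use")

# After: event_loop is also the new default, so node_type may be omitted.
new_spec = NodeSpec(name="summarize", node_type="event_loop")
```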
@@ -196,12 +328,3 @@ hive code

```bash
# Or from TUI -- press Ctrl+E to escalate
hive tui
```

---

## What's Next

- **Agent-to-agent communication** -- one agent's output triggers another agent's entry point
- **Cost visibility** -- detailed runtime log of LLM costs per node and per session
- **Persistent webhook subscriptions** -- survive agent restarts without re-registering
- **Remote agent deployment** -- run agents as long-lived services with HTTP APIs
+16 -5

@@ -4,7 +4,7 @@

Welcome to Aden Hive, an open-source AI agent framework built for developers who demand production-grade reliability, cross-platform support, and real-world performance. This guide will help you contribute effectively, whether you're fixing bugs, adding features, improving documentation, or building new tools.

Thank you for your interest in contributing! We're especially looking for help building tools, integrations ([check #2805](https://github.com/aden-hive/hive/issues/2805)), and example agents for the framework.

---

@@ -121,9 +121,15 @@ uv sync
6. Make your changes
7. Run checks and tests:
   ```bash
   make check   # Lint and format checks
   make test    # Core tests
   ```
   On Windows (no make), run directly:
   ```powershell
   uv run ruff check core/ tools/
   uv run ruff format --check core/ tools/
   uv run pytest core/tests/
   ```
8. Commit your changes following our commit conventions
9. Push to your fork and submit a Pull Request

@@ -222,8 +228,7 @@ else: # linux
- **Node.js 18+** (optional, for frontend development)

> **Windows Users:** Native Windows is supported. Use `.\quickstart.ps1` for setup and `.\hive.ps1` to run (PowerShell 5.1+). Disable "App Execution Aliases" in Windows settings to avoid Python path conflicts. WSL is also an option but not required.

> **Tip:** Installing Claude Code skills is optional for running existing agents, but required if you plan to **build new agents**.

@@ -385,6 +390,8 @@ Aden Hive supports **100+ LLM providers** via LiteLLM, giving users maximum flex

| Provider | Models | Notes |
|----------|--------|-------|
| **Anthropic** | Claude 3.5 Sonnet, Haiku, Opus | Default provider, best for reasoning |
| **OpenAI** | GPT-4, GPT-4 Turbo, GPT-4o | Function calling, vision |
| **OpenRouter** | Any OpenRouter catalog model | Uses `OPENROUTER_API_KEY` and `https://openrouter.ai/api/v1` |
| **Hive LLM** | `queen`, `kimi-2.5`, `GLM-5` | Uses `HIVE_API_KEY` and the Hive-managed endpoint |
| **Google** | Gemini 1.5 Pro, Flash | Long context windows |
| **DeepSeek** | DeepSeek V3 | Cost-effective, strong reasoning |
| **Mistral** | Mistral Large, Medium, Small | Open weights, EU hosting |

@@ -410,6 +417,10 @@ DEFAULT_MODEL = "claude-haiku-4-5-20251001"
- **Cost**: DeepSeek or Gemini Flash (budget-conscious)
- **Privacy**: Ollama with local models (no data leaves the server)

**Provider-Specific Notes**
- **OpenRouter**: store `provider` as `openrouter`, use the raw OpenRouter model ID in `model` (for example `x-ai/grok-4.20-beta`), and use `OPENROUTER_API_KEY`
- **Hive LLM**: store `provider` as `hive`, use Hive model names such as `queen`, `kimi-2.5`, or `GLM-5`, and use `HIVE_API_KEY`
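To make those two notes concrete, here is a hedged sketch of resolving a stored entry into a LiteLLM-style model ID plus API key. The entry shape and helper are invented; only the provider names, model IDs, and env vars come from the notes above:

```python
import os

# Invented entry shape -- the field values are the ones the notes prescribe.
entry = {"provider": "openrouter", "model": "x-ai/grok-4.20-beta"}

ENV_KEYS = {"openrouter": "OPENROUTER_API_KEY", "hive": "HIVE_API_KEY"}

def resolve(entry: dict[str, str]) -> tuple[str, str]:
    """Return (litellm_model_id, api_key) for a credential-store entry."""
    api_key = os.environ[ENV_KEYS[entry["provider"]]]
    if entry["provider"] == "openrouter":
        # LiteLLM routes OpenRouter models via an "openrouter/" prefix.
        return f"openrouter/{entry['model']}", api_key
    return entry["model"], api_key
```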
**For Development**
- Use cheaper/faster models (Haiku, GPT-4o-mini)
- Test with multiple providers to catch provider-specific issues

@@ -421,7 +432,7 @@ DEFAULT_MODEL = "claude-haiku-4-5-20251001"
2. **Add credential handling** in `core/framework/credentials/`
3. **Add provider-specific configuration** in `core/framework/llm/`
4. **Write tests** in `core/tests/test_llm_provider.py`
5. **Update documentation** in `README.md`, `docs/configuration.md`, and any setup guides that mention provider configuration

**Example: Testing LLM Integration**
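The example itself is elided from this capture. Purely as a stand-in, a live-gated test in `core/tests/test_llm_provider.py` might look like the following; the model ID reuses the OpenRouter note above, and everything else is an assumption:

```python
# Hypothetical sketch -- the repository's real example is not shown here.
import os

import pytest

litellm = pytest.importorskip("litellm")


@pytest.mark.skipif("OPENROUTER_API_KEY" not in os.environ, reason="needs a live key")
def test_openrouter_completion_roundtrip() -> None:
    resp = litellm.completion(
        model="openrouter/x-ai/grok-4.20-beta",  # model ID from the provider notes
        messages=[{"role": "user", "content": "ping"}],
        max_tokens=8,
    )
    assert resp.choices[0].message.content
```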
```diff
@@ -1,24 +1,31 @@
-.PHONY: lint format check test install-hooks help frontend-install frontend-dev frontend-build
+.PHONY: lint format check test test-tools test-live test-all install-hooks help frontend-install frontend-dev frontend-build

+# ── Ensure uv is findable in Git Bash on Windows ──────────────────────────────
+# uv installs to ~/.local/bin on Windows/Linux/macOS. Git Bash may not include
+# this in PATH by default, so we prepend it here.
+export PATH := $(HOME)/.local/bin:$(PATH)
+
+# ── Targets ───────────────────────────────────────────────────────────────────
+
 help: ## Show this help
 	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
 		awk 'BEGIN {FS = ":.*?## "}; {printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2}'

 lint: ## Run ruff linter and formatter (with auto-fix)
-	cd core && ruff check --fix .
-	cd tools && ruff check --fix .
-	cd core && ruff format .
-	cd tools && ruff format .
+	cd core && uv run ruff check --fix .
+	cd tools && uv run ruff check --fix .
+	cd core && uv run ruff format .
+	cd tools && uv run ruff format .

 format: ## Run ruff formatter
-	cd core && ruff format .
-	cd tools && ruff format .
+	cd core && uv run ruff format .
+	cd tools && uv run ruff format .

 check: ## Run all checks without modifying files (CI-safe)
-	cd core && ruff check .
-	cd tools && ruff check .
-	cd core && ruff format --check .
-	cd tools && ruff format --check .
+	cd core && uv run ruff check .
+	cd tools && uv run ruff check .
+	cd core && uv run ruff format --check .
+	cd tools && uv run ruff format --check .

 test: ## Run all tests (core + tools, excludes live)
 	cd core && uv run python -m pytest tests/ -v
@@ -46,4 +53,4 @@ frontend-dev: ## Start frontend dev server
 	cd core/frontend && npm run dev

 frontend-build: ## Build frontend for production
-	cd core/frontend && npm run build
+	cd core/frontend && npm run build
```
@@ -27,7 +27,7 @@
  <img src="https://img.shields.io/badge/Multi--Agent-Systems-blue?style=flat-square" alt="Multi-Agent" />
  <img src="https://img.shields.io/badge/Headless-Development-purple?style=flat-square" alt="Headless" />
  <img src="https://img.shields.io/badge/Human--in--the--Loop-orange?style=flat-square" alt="HITL" />
  <img src="https://img.shields.io/badge/Browser-Use-red?style=flat-square" alt="Browser Use" />
</p>
<p align="center">
  <img src="https://img.shields.io/badge/OpenAI-supported-412991?style=flat-square&logo=openai" alt="OpenAI" />

@@ -37,15 +37,17 @@

## Overview

Generate a swarm of worker agents controlled by a coding agent (the queen). Define your goal through conversation with the Hive queen, and the framework generates a node graph with dynamically created connection code. When things break, the framework captures failure data, evolves the agent through the coding agent, and redeploys. Built-in human-in-the-loop nodes, browser use, credential management, and real-time monitoring give you control without sacrificing adaptability.

Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and guides.

[Watch the demo video](https://www.youtube.com/watch?v=XDOG9fOaLjU)

https://github.com/user-attachments/assets/bf10edc3-06ba-48b6-98ba-d069b15fb69d

## Who Is Hive For?

Hive is designed for developers and teams who want to build many **autonomous AI agents** quickly, without manually wiring complex workflows.

Hive is a good fit if you:

@@ -73,7 +75,7 @@ Use Hive when you need:
- **[Self-Hosting Guide](https://docs.adenhq.com/getting-started/quickstart)** - Deploy Hive on your infrastructure
- **[Changelog](https://github.com/aden-hive/hive/releases)** - Latest updates and releases
- **[Roadmap](docs/roadmap.md)** - Upcoming features and plans
- **[Report Issues](https://github.com/aden-hive/hive/issues)** - Bug reports and feature requests
- **[Contributing](CONTRIBUTING.md)** - How to contribute and submit PRs

## Quick Start

@@ -84,7 +86,7 @@
- An LLM provider that powers the agents
- **ripgrep (optional, recommended on Windows):** The `search_files` tool uses ripgrep for faster file search. If it is not installed, a Python fallback is used. On Windows: `winget install BurntSushi.ripgrep` or `scoop install ripgrep`

> **Windows Users:** Native Windows is supported via `quickstart.ps1` and `hive.ps1`. Run these in PowerShell 5.1+. WSL is also an option but not required.

### Installation

@@ -108,18 +110,16 @@ This sets up:
- **framework** - Core agent runtime and graph executor (in `core/.venv`)
- **aden_tools** - MCP tools for agent capabilities (in `tools/.venv`)
- **credential store** - Encrypted API key storage (`~/.hive/credentials`)
- **LLM provider** - Interactive default model configuration, including Hive LLM and OpenRouter
- All required Python dependencies, installed with `uv`

Finally, it will open the Hive interface in your browser.

> **Tip:** To reopen the dashboard later, run `hive open` from the project directory.

<img width="2500" height="1214" alt="home-screen" src="https://github.com/user-attachments/assets/134d897f-5e75-4874-b00b-e0505f6b45c4" />

### Build Your First Agent

Type the agent you want to build in the home input box. The queen will ask you questions and work out a solution with you.

<img width="2500" height="1214" alt="Image" src="https://github.com/user-attachments/assets/1ce19141-a78b-46f5-8d64-dbf987e048f4" />

@@ -131,7 +131,7 @@ Click "Try a sample agent" and check the templates. You can run a template direc

Now you can run an agent by selecting it (either an existing agent or an example agent). Click the Run button on the top left, or ask the queen agent to run it for you.

<img width="2549" height="1174" alt="Screenshot 2026-03-12 at 9 27 36 PM" src="https://github.com/user-attachments/assets/7c7d30fa-9ceb-4c23-95af-b1caa405547d" />

## Features

@@ -143,14 +143,13 @@
- **SDK-Wrapped Nodes** - Every node gets shared memory, local RLM memory, monitoring, tools, and LLM access out of the box
- **[Human-in-the-Loop](docs/key_concepts/graph.md#human-in-the-loop)** - Intervention nodes that pause execution for human input, with configurable timeouts and escalation
- **Real-time Observability** - WebSocket streaming for live monitoring of agent execution, decisions, and node-to-node communication
- **Production-Ready** - Self-hostable, built for scale and reliability

## Integration

<a href="https://github.com/aden-hive/hive/tree/main/tools/src/aden_tools/tools"><img width="100%" alt="Integration" src="https://github.com/user-attachments/assets/a1573f93-cf02-4bb8-b3d5-b305b05b1e51" /></a>

Hive is built to be model-agnostic and system-agnostic.

- **LLM flexibility** - Hive Framework supports Anthropic, OpenAI, OpenRouter, Hive LLM, and other hosted or local models through LiteLLM-compatible providers.
- **Business system connectivity** - Hive Framework connects to all kinds of business systems as tools -- CRM, support, messaging, data, file, and internal APIs -- via MCP.

## Why Aden

@@ -378,7 +377,7 @@ This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENS

**Q: What LLM providers does Hive support?**

Hive supports 100+ LLM providers through LiteLLM integration, including OpenAI (GPT-4, GPT-4o), Anthropic (Claude models), Google Gemini, DeepSeek, Mistral, Groq, OpenRouter, and Hive LLM. Simply set the appropriate API key environment variable and specify the model name. See [docs/configuration.md](docs/configuration.md) for provider-specific configuration examples.

**Q: Can I use Hive with local AI models like Ollama?**

@@ -392,10 +391,6 @@ Hive generates your entire agent system from natural language goals using a codi

Yes, Hive is fully open-source under the Apache License 2.0. We actively encourage community contributions and collaboration.

**Q: Can Hive handle complex, production-scale use cases?**

Yes. Hive is explicitly designed for production environments with features like automatic failure recovery, real-time observability, cost controls, and horizontal scaling support. The framework handles both simple automations and complex multi-agent workflows.

**Q: Does Hive support human-in-the-loop workflows?**

Yes, Hive fully supports [human-in-the-loop](docs/key_concepts/graph.md#human-in-the-loop) workflows through intervention nodes that pause execution for human input. These include configurable timeouts and escalation policies, allowing seamless collaboration between human experts and AI agents.

@@ -420,6 +415,16 @@ Visit [docs.adenhq.com](https://docs.adenhq.com/) for complete guides, API refer

Contributions are welcome! Fork the repository, create your feature branch, implement your changes, and submit a pull request. See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed guidelines.

## Star History

<a href="https://star-history.com/#aden-hive/hive&Date">
  <picture>
    <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=aden-hive/hive&type=Date&theme=dark" />
    <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=aden-hive/hive&type=Date" />
    <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=aden-hive/hive&type=Date" />
  </picture>
</a>

---

<p align="center">
@@ -1,31 +0,0 @@
perf: reduce subprocess spawning in quickstart scripts (#4427)

## Problem

Windows process creation (CreateProcess) is 10-100x slower than Linux fork/exec. The quickstart scripts were spawning 4+ separate `uv run python -c "import X"` processes to verify imports, adding ~600ms of overhead on Windows.

## Solution

Consolidated all import checks into a single batch script that checks multiple modules in one subprocess call, reducing spawn overhead by ~75%.
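The script itself is not included in this capture; the core idea is simply to do every import inside one interpreter rather than one `uv run python -c` per module. A minimal sketch of that shape, with all details assumed:

```python
#!/usr/bin/env python3
"""Batched import check: one subprocess verifies many modules (sketch only)."""
import importlib
import sys

def _importable(name: str) -> bool:
    try:
        importlib.import_module(name)
        return True
    except ImportError:
        return False

def main(modules: list[str]) -> int:
    # Single interpreter, many imports: one CreateProcess instead of N.
    failed = [m for m in modules if not _importable(m)]
    for m in failed:
        print(f"MISSING: {m}", file=sys.stderr)
    return 1 if failed else 0

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))  # e.g. python check_requirements.py yaml httpx
```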
## Changes

- **New**: `scripts/check_requirements.py` - Batched import checker
- **New**: `scripts/test_check_requirements.py` - Test suite
- **New**: `scripts/benchmark_quickstart.ps1` - Performance benchmark tool
- **Modified**: `quickstart.ps1` - Updated import verification (2 sections)
- **Modified**: `quickstart.sh` - Updated import verification

## Performance Impact

**Benchmark results on Windows:**

- Before: ~19.8 seconds for import checks
- After: ~4.9 seconds for import checks
- **Improvement: 14.9 seconds saved (75.2% faster)**

## Testing

- ✅ All functional tests pass (`scripts/test_check_requirements.py`)
- ✅ Quickstart scripts work correctly on Windows
- ✅ Error handling verified (invalid imports reported correctly)
- ✅ Performance benchmark confirms a 75%+ improvement

Fixes #4427
@@ -1,27 +0,0 @@

```yaml
# Identity mapping: GitHub username -> Discord ID
#
# This file links GitHub accounts to Discord accounts for the
# Integration Bounty Program. When a bounty PR is merged, the
# GitHub Action uses this file to ping the contributor on Discord.
#
# HOW TO ADD YOURSELF:
#   Open a "Link Discord Account" issue:
#   https://github.com/aden-hive/hive/issues/new?template=link-discord.yml
#   A GitHub Action will automatically add your entry here.
#
# To find your Discord ID:
#   1. Open Discord Settings > Advanced > Enable Developer Mode
#   2. Right-click your name > Copy User ID
#
# Format:
#   - github: your-github-username
#     discord: "your-discord-id"   # quotes required (it's a number)
#     name: Your Display Name      # optional

contributors:
  # - github: example-user
  #   discord: "123456789012345678"
  #   name: Example User
  - github: TimothyZhang7
    discord: "408460790061072384"
    name: Timothy@Aden
```
@@ -0,0 +1,583 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Antigravity authentication CLI.
|
||||
|
||||
Implements OAuth2 flow for Google's Antigravity Code Assist gateway.
|
||||
Credentials are stored in ~/.hive/antigravity-accounts.json.
|
||||
|
||||
Usage:
|
||||
python -m antigravity_auth auth account add
|
||||
python -m antigravity_auth auth account list
|
||||
python -m antigravity_auth auth account remove <email>
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import secrets
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
import webbrowser
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format="%(message)s")
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# OAuth endpoints
|
||||
_OAUTH_AUTH_URL = "https://accounts.google.com/o/oauth2/v2/auth"
|
||||
_OAUTH_TOKEN_URL = "https://oauth2.googleapis.com/token"
|
||||
|
||||
# Scopes for Antigravity/Cloud Code Assist
|
||||
_OAUTH_SCOPES = [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/userinfo.email",
|
||||
"https://www.googleapis.com/auth/userinfo.profile",
|
||||
]
|
||||
|
||||
# Credentials file path in ~/.hive/
|
||||
_ACCOUNTS_FILE = Path.home() / ".hive" / "antigravity-accounts.json"
|
||||
|
||||
# Default project ID
|
||||
_DEFAULT_PROJECT_ID = "rising-fact-p41fc"
|
||||
_DEFAULT_REDIRECT_PORT = 51121
|
||||
|
||||
# OAuth credentials fetched from the opencode-antigravity-auth project.
|
||||
# This project reverse-engineered and published the public OAuth credentials
|
||||
# for Google's Antigravity/Cloud Code Assist API.
|
||||
# Source: https://github.com/NoeFabris/opencode-antigravity-auth
|
||||
_CREDENTIALS_URL = (
|
||||
"https://raw.githubusercontent.com/NoeFabris/opencode-antigravity-auth/dev/src/constants.ts"
|
||||
)
|
||||
|
||||
# Cached credentials fetched from public source
|
||||
_cached_client_id: str | None = None
|
||||
_cached_client_secret: str | None = None
|
||||
|
||||
|
||||
def _fetch_credentials_from_public_source() -> tuple[str | None, str | None]:
|
||||
"""Fetch OAuth client ID and secret from the public npm package source on GitHub."""
|
||||
global _cached_client_id, _cached_client_secret
|
||||
if _cached_client_id and _cached_client_secret:
|
||||
return _cached_client_id, _cached_client_secret
|
||||
|
||||
try:
|
||||
req = urllib.request.Request(
|
||||
_CREDENTIALS_URL, headers={"User-Agent": "Hive-Antigravity-Auth/1.0"}
|
||||
)
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
content = resp.read().decode("utf-8")
|
||||
import re
|
||||
|
||||
id_match = re.search(r'ANTIGRAVITY_CLIENT_ID\s*=\s*"([^"]+)"', content)
|
||||
secret_match = re.search(r'ANTIGRAVITY_CLIENT_SECRET\s*=\s*"([^"]+)"', content)
|
||||
if id_match:
|
||||
_cached_client_id = id_match.group(1)
|
||||
if secret_match:
|
||||
_cached_client_secret = secret_match.group(1)
|
||||
return _cached_client_id, _cached_client_secret
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to fetch credentials from public source: {e}")
|
||||
return None, None
|
||||
|
||||
|
||||
def get_client_id() -> str:
|
||||
"""Get OAuth client ID from env, config, or public source."""
|
||||
env_id = os.environ.get("ANTIGRAVITY_CLIENT_ID")
|
||||
if env_id:
|
||||
return env_id
|
||||
|
||||
# Try hive config
|
||||
hive_cfg = Path.home() / ".hive" / "configuration.json"
|
||||
if hive_cfg.exists():
|
||||
try:
|
||||
with open(hive_cfg) as f:
|
||||
cfg = json.load(f)
|
||||
cfg_id = cfg.get("llm", {}).get("antigravity_client_id")
|
||||
if cfg_id:
|
||||
return cfg_id
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Fetch from public source
|
||||
client_id, _ = _fetch_credentials_from_public_source()
|
||||
if client_id:
|
||||
return client_id
|
||||
|
||||
raise RuntimeError("Could not obtain Antigravity OAuth client ID")
|
||||
|
||||
|
||||
def get_client_secret() -> str | None:
|
||||
"""Get OAuth client secret from env, config, or public source."""
|
||||
secret = os.environ.get("ANTIGRAVITY_CLIENT_SECRET")
|
||||
if secret:
|
||||
return secret
|
||||
|
||||
# Try to read from hive config
|
||||
hive_cfg = Path.home() / ".hive" / "configuration.json"
|
||||
if hive_cfg.exists():
|
||||
try:
|
||||
with open(hive_cfg) as f:
|
||||
cfg = json.load(f)
|
||||
secret = cfg.get("llm", {}).get("antigravity_client_secret")
|
||||
if secret:
|
||||
return secret
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Fetch from public source (npm package on GitHub)
|
||||
_, secret = _fetch_credentials_from_public_source()
|
||||
return secret
|
||||
|
||||
|
||||
def find_free_port() -> int:
|
||||
"""Find an available local port."""
|
||||
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||
s.bind(("", 0))
|
||||
s.listen(1)
|
||||
return s.getsockname()[1]
|
||||
|
||||
|
||||
class OAuthCallbackHandler(BaseHTTPRequestHandler):
|
||||
"""Handle OAuth callback from browser."""
|
||||
|
||||
auth_code: str | None = None
|
||||
state: str | None = None
|
||||
error: str | None = None
|
||||
|
||||
def log_message(self, format: str, *args: Any) -> None:
|
||||
pass # Suppress default logging
|
||||
|
||||
def do_GET(self) -> None:
|
||||
parsed = urllib.parse.urlparse(self.path)
|
||||
|
||||
if parsed.path == "/oauth-callback":
|
||||
query = urllib.parse.parse_qs(parsed.query)
|
||||
|
||||
if "error" in query:
|
||||
self.error = query["error"][0]
|
||||
self._send_response("Authentication failed. You can close this window.")
|
||||
return
|
||||
|
||||
if "code" in query and "state" in query:
|
||||
OAuthCallbackHandler.auth_code = query["code"][0]
|
||||
OAuthCallbackHandler.state = query["state"][0]
|
||||
self._send_response(
|
||||
"Authentication successful! You can close this window "
|
||||
"and return to the terminal."
|
||||
)
|
||||
return
|
||||
|
||||
self._send_response("Waiting for authentication...")
|
||||
|
||||
def _send_response(self, message: str) -> None:
|
||||
self.send_response(200)
|
||||
self.send_header("Content-Type", "text/html")
|
||||
self.end_headers()
|
||||
html = f"""<!DOCTYPE html>
|
||||
<html>
|
||||
<head><title>Antigravity Auth</title></head>
|
||||
<body style="font-family: system-ui; display: flex; align-items: center;
|
||||
justify-content: center; height: 100vh; margin: 0; background: #1a1a2e;
|
||||
color: #eee;">
|
||||
<div style="text-align: center;">
|
||||
<h2>{message}</h2>
|
||||
</div>
|
||||
</body>
|
||||
</html>"""
|
||||
self.wfile.write(html.encode())
|
||||
|
||||
|
||||
def wait_for_callback(port: int, timeout: int = 300) -> tuple[str | None, str | None, str | None]:
|
||||
"""Start local server and wait for OAuth callback."""
|
||||
server = HTTPServer(("localhost", port), OAuthCallbackHandler)
|
||||
server.timeout = 1
|
||||
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
        # Return early on success or on an explicit OAuth error, rather than
        # spinning until the timeout when the provider reports an error.
        if OAuthCallbackHandler.auth_code or OAuthCallbackHandler.error:
            return (
                OAuthCallbackHandler.auth_code,
                OAuthCallbackHandler.state,
                OAuthCallbackHandler.error,
            )
        server.handle_request()

    return None, None, "timeout"


def exchange_code_for_tokens(
    code: str, redirect_uri: str, client_id: str, client_secret: str | None
) -> dict[str, Any] | None:
    """Exchange authorization code for tokens."""
    data = {
        "code": code,
        "client_id": client_id,
        "redirect_uri": redirect_uri,
        "grant_type": "authorization_code",
    }
    if client_secret:
        data["client_secret"] = client_secret

    body = urllib.parse.urlencode(data).encode()

    req = urllib.request.Request(
        _OAUTH_TOKEN_URL,
        data=body,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        method="POST",
    )

    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            return json.loads(resp.read())
    except Exception as e:
        logger.error(f"Token exchange failed: {e}")
        return None


def get_user_email(access_token: str) -> str | None:
    """Get user email from Google API."""
    req = urllib.request.Request(
        "https://www.googleapis.com/oauth2/v2/userinfo",
        headers={"Authorization": f"Bearer {access_token}"},
    )
    try:
        with urllib.request.urlopen(req, timeout=10) as resp:
            data = json.loads(resp.read())
            return data.get("email")
    except Exception:
        return None


def load_accounts() -> dict[str, Any]:
    """Load existing accounts from file."""
    if not _ACCOUNTS_FILE.exists():
        return {"schemaVersion": 4, "accounts": []}
    try:
        with open(_ACCOUNTS_FILE) as f:
            return json.load(f)
    except Exception:
        return {"schemaVersion": 4, "accounts": []}


def save_accounts(data: dict[str, Any]) -> None:
    """Save accounts to file."""
    _ACCOUNTS_FILE.parent.mkdir(parents=True, exist_ok=True)
    with open(_ACCOUNTS_FILE, "w") as f:
        json.dump(data, f, indent=2)
    logger.info(f"Saved credentials to {_ACCOUNTS_FILE}")


def validate_credentials(access_token: str, project_id: str = _DEFAULT_PROJECT_ID) -> bool:
    """Test if credentials work by making a simple API call to Antigravity.

    Returns True if credentials are valid, False otherwise.
    """
    endpoint = "https://daily-cloudcode-pa.sandbox.googleapis.com"
    body = {
        "project": project_id,
        "model": "gemini-3-flash",
        "request": {
            "contents": [{"role": "user", "parts": [{"text": "hi"}]}],
            "generationConfig": {"maxOutputTokens": 10},
        },
        "requestType": "agent",
        "userAgent": "antigravity",
        "requestId": "validation-test",
    }
    headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json",
        "User-Agent": (
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
            "AppleWebKit/537.36 (KHTML, like Gecko) Antigravity/1.18.3"
        ),
        "X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
    }

    try:
        req = urllib.request.Request(
            f"{endpoint}/v1internal:generateContent",
            data=json.dumps(body).encode("utf-8"),
            headers=headers,
            method="POST",
        )
        with urllib.request.urlopen(req, timeout=30) as resp:
            json.loads(resp.read())
        return True
    except Exception:
        return False


def refresh_access_token(
    refresh_token: str, client_id: str, client_secret: str | None
) -> dict | None:
    """Refresh the access token using the refresh token."""
    data = {
        "grant_type": "refresh_token",
        "refresh_token": refresh_token,
        "client_id": client_id,
    }
    if client_secret:
        data["client_secret"] = client_secret

    body = urllib.parse.urlencode(data).encode()
    req = urllib.request.Request(
        _OAUTH_TOKEN_URL,
        data=body,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            return json.loads(resp.read())
    except Exception as e:
        logger.debug(f"Token refresh failed: {e}")
        return None


def cmd_account_add(args: argparse.Namespace) -> int:
    """Add a new Antigravity account via OAuth2.

    First checks if valid credentials already exist. If so, validates them
    and skips OAuth if they work. Otherwise, proceeds with OAuth flow.
    """
    client_id = get_client_id()
    client_secret = get_client_secret()

    # Check if credentials already exist
    accounts_data = load_accounts()
    accounts = accounts_data.get("accounts", [])

    if accounts:
        account = next((a for a in accounts if a.get("enabled", True) is not False), accounts[0])
        access_token = account.get("access")
        refresh_token_str = account.get("refresh", "")
        refresh_token = refresh_token_str.split("|")[0] if refresh_token_str else None
        project_id = (
            refresh_token_str.split("|")[1] if "|" in refresh_token_str else _DEFAULT_PROJECT_ID
        )
        email = account.get("email", "unknown")
        expires_ms = account.get("expires", 0)
        expires_at = expires_ms / 1000.0 if expires_ms else 0.0

        # Check if token is expired or near expiry
        if access_token and expires_at and time.time() < expires_at - 60:
            # Token still valid, test it
            logger.info(f"Found existing credentials for: {email}")
            logger.info("Validating existing credentials...")
            if validate_credentials(access_token, project_id):
                logger.info("✓ Credentials valid! Skipping OAuth.")
                return 0
            else:
                logger.info("Credentials failed validation, refreshing...")
        elif refresh_token:
            logger.info(f"Found expired credentials for: {email}")
            logger.info("Attempting token refresh...")

        tokens = refresh_access_token(refresh_token, client_id, client_secret)
        if tokens:
            new_access = tokens.get("access_token")
            expires_in = tokens.get("expires_in", 3600)
            if new_access:
                # Update the account
                account["access"] = new_access
                account["expires"] = int((time.time() + expires_in) * 1000)
                accounts_data["last_refresh"] = time.strftime(
                    "%Y-%m-%dT%H:%M:%SZ", time.gmtime()
                )
                save_accounts(accounts_data)

                # Validate the refreshed token
                logger.info("Validating refreshed credentials...")
                if validate_credentials(new_access, project_id):
                    logger.info("✓ Credentials refreshed and validated!")
                    return 0
                else:
                    logger.info("Refreshed token failed validation, proceeding with OAuth...")
        else:
            logger.info("Token refresh failed, proceeding with OAuth...")

    # No valid credentials, proceed with OAuth
    if not client_secret:
        logger.warning(
            "No client secret configured. Token refresh may fail.\n"
            "Set ANTIGRAVITY_CLIENT_SECRET env var or add "
            "'antigravity_client_secret' to ~/.hive/configuration.json"
        )

    # Use fixed port and path matching Google's expected OAuth redirect URI
    port = _DEFAULT_REDIRECT_PORT
    redirect_uri = f"http://localhost:{port}/oauth-callback"

    # Generate state for CSRF protection
    state = secrets.token_urlsafe(16)

    # Build authorization URL
    params = {
        "client_id": client_id,
        "redirect_uri": redirect_uri,
        "response_type": "code",
        "scope": " ".join(_OAUTH_SCOPES),
        "state": state,
        "access_type": "offline",
        "prompt": "consent",
    }
    auth_url = f"{_OAUTH_AUTH_URL}?{urllib.parse.urlencode(params)}"

    logger.info("Opening browser for authentication...")
    logger.info(f"If the browser doesn't open, visit: {auth_url}\n")

    # Open browser
    webbrowser.open(auth_url)

    # Wait for callback
    logger.info(f"Listening for callback on port {port}...")
    code, received_state, error = wait_for_callback(port)

    if error:
        logger.error(f"Authentication failed: {error}")
        return 1

    if not code:
        logger.error("No authorization code received")
        return 1

    if received_state != state:
        logger.error("State mismatch - possible CSRF attack")
        return 1

    # Exchange code for tokens
    logger.info("Exchanging authorization code for tokens...")
    tokens = exchange_code_for_tokens(code, redirect_uri, client_id, client_secret)

    if not tokens:
        return 1

    access_token = tokens.get("access_token")
    refresh_token = tokens.get("refresh_token")
    expires_in = tokens.get("expires_in", 3600)

    if not access_token:
        logger.error("No access token in response")
        return 1

    # Get user email
    email = get_user_email(access_token)
    if email:
        logger.info(f"Authenticated as: {email}")

    # Load existing accounts and add/update
    accounts_data = load_accounts()
    accounts = accounts_data.get("accounts", [])

    # Build new account entry (V4 schema)
    expires_ms = int((time.time() + expires_in) * 1000)
    refresh_entry = f"{refresh_token}|{_DEFAULT_PROJECT_ID}"

    new_account = {
        "access": access_token,
        "refresh": refresh_entry,
        "expires": expires_ms,
        "email": email,
        "enabled": True,
    }

    # Update existing account or add new one
    existing_idx = next((i for i, a in enumerate(accounts) if a.get("email") == email), None)
    if existing_idx is not None:
        accounts[existing_idx] = new_account
        logger.info(f"Updated existing account: {email}")
    else:
        accounts.append(new_account)
        logger.info(f"Added new account: {email}")

    accounts_data["accounts"] = accounts
    accounts_data["schemaVersion"] = 4
    accounts_data["last_refresh"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

    save_accounts(accounts_data)
    logger.info("\n✓ Authentication complete!")
    return 0


def cmd_account_list(args: argparse.Namespace) -> int:
    """List all stored accounts."""
    data = load_accounts()
    accounts = data.get("accounts", [])

    if not accounts:
        logger.info("No accounts configured.")
        logger.info("Run 'antigravity auth account add' to add one.")
        return 0

    logger.info("Configured accounts:\n")
    for i, account in enumerate(accounts, 1):
        email = account.get("email", "unknown")
        enabled = "enabled" if account.get("enabled", True) else "disabled"
        logger.info(f"  {i}. {email} ({enabled})")

    return 0


def cmd_account_remove(args: argparse.Namespace) -> int:
    """Remove an account by email."""
    email = args.email
    data = load_accounts()
    accounts = data.get("accounts", [])

    original_len = len(accounts)
    accounts = [a for a in accounts if a.get("email") != email]

    if len(accounts) == original_len:
        logger.error(f"No account found with email: {email}")
        return 1

    data["accounts"] = accounts
    save_accounts(data)
    logger.info(f"Removed account: {email}")
    return 0


def main() -> int:
    parser = argparse.ArgumentParser(
        description="Antigravity authentication CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    subparsers = parser.add_subparsers(dest="command", help="Commands")

    # auth account add
    auth_parser = subparsers.add_parser("auth", help="Authentication commands")
    auth_subparsers = auth_parser.add_subparsers(dest="auth_command")

    account_parser = auth_subparsers.add_parser("account", help="Account management")
    account_subparsers = account_parser.add_subparsers(dest="account_command")

    add_parser = account_subparsers.add_parser("add", help="Add a new account via OAuth2")
    add_parser.set_defaults(func=cmd_account_add)

    list_parser = account_subparsers.add_parser("list", help="List configured accounts")
    list_parser.set_defaults(func=cmd_account_list)

    remove_parser = account_subparsers.add_parser("remove", help="Remove an account")
    remove_parser.add_argument("email", help="Email of account to remove")
    remove_parser.set_defaults(func=cmd_account_remove)

    args = parser.parse_args()

    if hasattr(args, "func"):
        return args.func(args)

    parser.print_help()
    return 0


if __name__ == "__main__":
    sys.exit(main())
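Taken together, the helpers above also support a headless refresh pass over the stored accounts file. A minimal sketch, assuming the file is importable as a module (`antigravity_auth` below is a placeholder; the real file path is not shown in this view):

```python
# Hypothetical import name; the actual module path is not shown in this diff view.
import time

from antigravity_auth import (
    get_client_id,
    get_client_secret,
    load_accounts,
    refresh_access_token,
    save_accounts,
)

accounts_data = load_accounts()
for account in accounts_data.get("accounts", []):
    # The "refresh" field packs "refresh_token|project_id" (V4 schema above).
    refresh_token = account.get("refresh", "").split("|")[0]
    if not refresh_token:
        continue
    tokens = refresh_access_token(refresh_token, get_client_id(), get_client_secret())
    if tokens and tokens.get("access_token"):
        account["access"] = tokens["access_token"]
        account["expires"] = int((time.time() + tokens.get("expires_in", 3600)) * 1000)

save_accounts(accounts_data)
```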
+81 -27
@@ -17,6 +17,7 @@ import http.server
 import json
 import os
 import platform
+import queue
 import secrets
 import subprocess
 import sys
@@ -27,6 +28,7 @@ import urllib.parse
 import urllib.request
 from datetime import UTC, datetime
 from pathlib import Path
+from typing import TextIO
 
 # OAuth constants (from the Codex CLI binary)
 CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann"
@@ -165,11 +167,11 @@ def open_browser(url: str) -> bool:
         if system == "Darwin":
             subprocess.Popen(["open", url], stdout=devnull, stderr=devnull)
         elif system == "Windows":
-            subprocess.Popen(["cmd", "/c", "start", url], stdout=devnull, stderr=devnull)
+            os.startfile(url)  # type: ignore[attr-defined]
         else:
             subprocess.Popen(["xdg-open", url], stdout=devnull, stderr=devnull)
         return True
-    except OSError:
+    except (AttributeError, OSError):
         return False
@@ -266,6 +268,71 @@ def parse_manual_input(value: str, expected_state: str) -> str | None:
     return None
 
 
+def _read_manual_input_lines(
+    manual_inputs: queue.Queue[str],
+    stop_event: threading.Event,
+    stdin: TextIO | None = None,
+) -> None:
+    stream = sys.stdin if stdin is None else stdin
+
+    while not stop_event.is_set():
+        try:
+            manual = stream.readline()
+        except (EOFError, OSError):
+            return
+
+        if not manual:
+            return
+
+        if manual.strip():
+            manual_inputs.put(manual)
+
+
+def wait_for_code_from_callback_or_stdin(
+    expected_state: str,
+    callback_result: list[str | None],
+    callback_done: threading.Event,
+    timeout_secs: float = 120,
+    poll_interval: float = 0.1,
+    stdin: TextIO | None = None,
+) -> str | None:
+    manual_inputs: queue.Queue[str] = queue.Queue()
+    stop_event = threading.Event()
+
+    # Read stdin on a daemon thread so manual paste works on platforms where
+    # select() cannot poll console handles, including Windows terminals.
+    threading.Thread(
+        target=_read_manual_input_lines,
+        args=(manual_inputs, stop_event, stdin),
+        daemon=True,
+    ).start()
+
+    deadline = time.time() + timeout_secs
+    try:
+        while time.time() < deadline:
+            if callback_result[0]:
+                return callback_result[0]
+
+            while True:
+                try:
+                    manual = manual_inputs.get_nowait()
+                except queue.Empty:
+                    break
+
+                code = parse_manual_input(manual, expected_state)
+                if code:
+                    return code
+
+            if callback_done.is_set():
+                return callback_result[0]
+
+            time.sleep(poll_interval)
+
+        return callback_result[0]
+    finally:
+        stop_event.set()
+
+
 def main() -> int:
     # Generate PKCE and state
     verifier, challenge = generate_pkce()
@@ -315,41 +382,28 @@ def main() -> int:
 
         # Start callback server in background
         callback_result: list[str | None] = [None]
+        callback_done = threading.Event()
 
         def run_server() -> None:
-            callback_result[0] = wait_for_callback(state, timeout_secs=120)
+            try:
+                callback_result[0] = wait_for_callback(state, timeout_secs=120)
+            finally:
+                callback_done.set()
 
         server_thread = threading.Thread(target=run_server)
         server_thread.daemon = True
         server_thread.start()
 
-        # Also accept manual input in parallel
-        # We poll for both the server result and stdin
         try:
-            import select
-
-            while server_thread.is_alive():
-                # Check if stdin has data (non-blocking on unix)
-                if hasattr(select, "select"):
-                    ready, _, _ = select.select([sys.stdin], [], [], 0.5)
-                    if ready:
-                        manual = sys.stdin.readline()
-                        if manual.strip():
-                            code = parse_manual_input(manual, state)
-                            if code:
-                                break
-                else:
-                    time.sleep(0.5)
-
-                if callback_result[0]:
-                    code = callback_result[0]
-                    break
-        except (KeyboardInterrupt, EOFError):
+            code = wait_for_code_from_callback_or_stdin(
+                state,
+                callback_result,
+                callback_done,
+                timeout_secs=120,
+            )
+        except KeyboardInterrupt:
            print("\n\033[0;31mCancelled.\033[0m")
            return 1
 
        if not code:
            code = callback_result[0]
    else:
        # Manual paste mode
        try:
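Because the new helper accepts an injectable `stdin`, it can be exercised without a real terminal. A minimal sketch, assuming the module imports as `codex_auth` (a placeholder) and that `parse_manual_input` accepts a pasted redirect URL carrying `code` and `state` (its exact accepted formats are not shown in this diff):

```python
# Hypothetical module name and input format; see the hedges in the lead-in.
import io
import threading

from codex_auth import wait_for_code_from_callback_or_stdin

callback_result: list[str | None] = [None]  # filled by the callback server thread
callback_done = threading.Event()

# Simulate the user pasting the redirect URL instead of the browser callback firing.
fake_stdin = io.StringIO("http://localhost:1455/callback?code=abc123&state=xyz\n")

code = wait_for_code_from_callback_or_stdin(
    "xyz",            # expected_state, must match the pasted state
    callback_result,
    callback_done,
    timeout_secs=5,
    stdin=fake_stdin,  # injected for testing; defaults to sys.stdin
)
print(code)  # "abc123" if parse_manual_input recognizes the pasted URL
```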
@@ -1,740 +0,0 @@
#!/usr/bin/env python3
"""
EventLoopNode WebSocket Demo

Real LLM, real FileConversationStore, real EventBus.
Streams EventLoopNode execution to a browser via WebSocket.

Usage:
    cd /home/timothy/oss/hive/core
    python demos/event_loop_wss_demo.py

Then open http://localhost:8765 in your browser.
"""

import asyncio
import json
import logging
import sys
import tempfile
from http import HTTPStatus
from pathlib import Path

import httpx
import websockets
from bs4 import BeautifulSoup
from websockets.http11 import Request, Response

# Add core, tools, and hive root to path
_CORE_DIR = Path(__file__).resolve().parent.parent
_HIVE_DIR = _CORE_DIR.parent
sys.path.insert(0, str(_CORE_DIR))  # framework.*
sys.path.insert(0, str(_HIVE_DIR / "tools" / "src"))  # aden_tools.*
sys.path.insert(0, str(_HIVE_DIR))  # core.framework.* (for aden_tools imports)

import os  # noqa: E402

from aden_tools.credentials import CREDENTIAL_SPECS, CredentialStoreAdapter  # noqa: E402
from core.framework.credentials import CredentialStore  # noqa: E402
from framework.credentials.storage import (  # noqa: E402
    CompositeStorage,
    EncryptedFileStorage,
    EnvVarStorage,
)
from framework.graph.event_loop_node import EventLoopNode, LoopConfig  # noqa: E402
from framework.graph.node import NodeContext, NodeSpec, SharedMemory  # noqa: E402
from framework.llm.litellm import LiteLLMProvider  # noqa: E402
from framework.llm.provider import Tool  # noqa: E402
from framework.runner.tool_registry import ToolRegistry  # noqa: E402
from framework.runtime.core import Runtime  # noqa: E402
from framework.runtime.event_bus import EventBus, EventType  # noqa: E402
from framework.storage.conversation_store import FileConversationStore  # noqa: E402

logging.basicConfig(level=logging.INFO, format="%(asctime)s %(name)s %(message)s")
logger = logging.getLogger("demo")

# -------------------------------------------------------------------------
# Persistent state (shared across WebSocket connections)
# -------------------------------------------------------------------------

STORE_DIR = Path(tempfile.mkdtemp(prefix="hive_demo_"))
STORE = FileConversationStore(STORE_DIR / "conversation")
RUNTIME = Runtime(STORE_DIR / "runtime")
LLM = LiteLLMProvider(model="claude-sonnet-4-5-20250929")

# -------------------------------------------------------------------------
# Tool Registry — real tools via ToolRegistry (same pattern as GraphExecutor)
# -------------------------------------------------------------------------

TOOL_REGISTRY = ToolRegistry()

# Credential store: Aden sync (OAuth2 tokens) + encrypted files + env var fallback
_env_mapping = {name: spec.env_var for name, spec in CREDENTIAL_SPECS.items()}
_local_storage = CompositeStorage(
    primary=EncryptedFileStorage(),
    fallbacks=[EnvVarStorage(env_mapping=_env_mapping)],
)

if os.environ.get("ADEN_API_KEY"):
    try:
        from framework.credentials.aden import (  # noqa: E402
            AdenCachedStorage,
            AdenClientConfig,
            AdenCredentialClient,
            AdenSyncProvider,
        )

        _client = AdenCredentialClient(AdenClientConfig(base_url="https://api.adenhq.com"))
        _provider = AdenSyncProvider(client=_client)
        _storage = AdenCachedStorage(
            local_storage=_local_storage,
            aden_provider=_provider,
        )
        _cred_store = CredentialStore(storage=_storage, providers=[_provider], auto_refresh=True)
        _synced = _provider.sync_all(_cred_store)
        logger.info("Synced %d credentials from Aden", _synced)
    except Exception as e:
        logger.warning("Aden sync unavailable: %s", e)
        _cred_store = CredentialStore(storage=_local_storage)
else:
    logger.info("ADEN_API_KEY not set, using local credential storage")
    _cred_store = CredentialStore(storage=_local_storage)

CREDENTIALS = CredentialStoreAdapter(_cred_store)

# Debug: log which credentials resolved
for _name in ["brave_search", "hubspot", "anthropic"]:
    _val = CREDENTIALS.get(_name)
    if _val:
        logger.debug("credential %s: OK (len=%d)", _name, len(_val))
    else:
        logger.debug("credential %s: not found", _name)

# --- web_search (Brave Search API) ---

TOOL_REGISTRY.register(
    name="web_search",
    tool=Tool(
        name="web_search",
        description=(
            "Search the web for current information. "
            "Returns titles, URLs, and snippets from search results."
        ),
        parameters={
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query (1-500 characters)",
                },
                "num_results": {
                    "type": "integer",
                    "description": "Number of results to return (1-20, default 10)",
                },
            },
            "required": ["query"],
        },
    ),
    executor=lambda inputs: _exec_web_search(inputs),
)


def _exec_web_search(inputs: dict) -> dict:
    api_key = CREDENTIALS.get("brave_search")
    if not api_key:
        return {"error": "brave_search credential not configured"}
    query = inputs.get("query", "")
    num_results = min(inputs.get("num_results", 10), 20)
    resp = httpx.get(
        "https://api.search.brave.com/res/v1/web/search",
        params={"q": query, "count": num_results},
        headers={"X-Subscription-Token": api_key, "Accept": "application/json"},
        timeout=30.0,
    )
    if resp.status_code != 200:
        return {"error": f"Brave API HTTP {resp.status_code}"}
    data = resp.json()
    results = [
        {
            "title": item.get("title", ""),
            "url": item.get("url", ""),
            "snippet": item.get("description", ""),
        }
        for item in data.get("web", {}).get("results", [])[:num_results]
    ]
    return {"query": query, "results": results, "total": len(results)}


# --- web_scrape (httpx + BeautifulSoup, no playwright for sync compat) ---

TOOL_REGISTRY.register(
    name="web_scrape",
    tool=Tool(
        name="web_scrape",
        description=(
            "Scrape and extract text content from a webpage URL. "
            "Returns the page title and main text content."
        ),
        parameters={
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "URL of the webpage to scrape",
                },
                "max_length": {
                    "type": "integer",
                    "description": "Maximum text length (default 50000)",
                },
            },
            "required": ["url"],
        },
    ),
    executor=lambda inputs: _exec_web_scrape(inputs),
)

_SCRAPE_HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/131.0.0.0 Safari/537.36"
    ),
    "Accept": "text/html,application/xhtml+xml",
}


def _exec_web_scrape(inputs: dict) -> dict:
    url = inputs.get("url", "")
    max_length = max(1000, min(inputs.get("max_length", 50000), 500000))
    if not url.startswith(("http://", "https://")):
        url = "https://" + url
    try:
        resp = httpx.get(url, timeout=30.0, follow_redirects=True, headers=_SCRAPE_HEADERS)
        if resp.status_code != 200:
            return {"error": f"HTTP {resp.status_code}"}
        soup = BeautifulSoup(resp.text, "html.parser")
        for tag in soup(["script", "style", "nav", "footer", "header", "aside", "noscript"]):
            tag.decompose()
        title = soup.title.get_text(strip=True) if soup.title else ""
        main = (
            soup.find("article")
            or soup.find("main")
            or soup.find(attrs={"role": "main"})
            or soup.find("body")
        )
        text = main.get_text(separator=" ", strip=True) if main else ""
        text = " ".join(text.split())
        if len(text) > max_length:
            text = text[:max_length] + "..."
        return {"url": url, "title": title, "content": text, "length": len(text)}
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except Exception as e:
        return {"error": f"Scrape failed: {e}"}


# --- HubSpot CRM tools (optional, requires HUBSPOT_ACCESS_TOKEN) ---

_HUBSPOT_API = "https://api.hubapi.com"


def _hubspot_headers() -> dict | None:
    token = CREDENTIALS.get("hubspot")
    if token:
        logger.debug("HubSpot token: %s...%s (len=%d)", token[:8], token[-4:], len(token))
    else:
        logger.debug("HubSpot token: not found")
    if not token:
        return None
    return {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }


def _exec_hubspot_search(inputs: dict) -> dict:
    headers = _hubspot_headers()
    if not headers:
        return {"error": "HUBSPOT_ACCESS_TOKEN not set"}
    object_type = inputs.get("object_type", "contacts")
    query = inputs.get("query", "")
    limit = min(inputs.get("limit", 10), 100)
    body: dict = {"limit": limit}
    if query:
        body["query"] = query
    try:
        resp = httpx.post(
            f"{_HUBSPOT_API}/crm/v3/objects/{object_type}/search",
            headers=headers,
            json=body,
            timeout=30.0,
        )
        if resp.status_code != 200:
            return {"error": f"HubSpot API HTTP {resp.status_code}: {resp.text[:200]}"}
        return resp.json()
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except Exception as e:
        return {"error": f"HubSpot error: {e}"}


TOOL_REGISTRY.register(
    name="hubspot_search",
    tool=Tool(
        name="hubspot_search",
        description=(
            "Search HubSpot CRM objects (contacts, companies, or deals). "
            "Returns matching records with their properties."
        ),
        parameters={
            "type": "object",
            "properties": {
                "object_type": {
                    "type": "string",
                    "description": "CRM object type: 'contacts', 'companies', or 'deals'",
                },
                "query": {
                    "type": "string",
                    "description": "Search query (name, email, domain, etc.)",
                },
                "limit": {
                    "type": "integer",
                    "description": "Max results (1-100, default 10)",
                },
            },
            "required": ["object_type"],
        },
    ),
    executor=lambda inputs: _exec_hubspot_search(inputs),
)

logger.info(
    "ToolRegistry loaded: %s",
    ", ".join(TOOL_REGISTRY.get_registered_names()),
)


# -------------------------------------------------------------------------
# HTML page (embedded)
# -------------------------------------------------------------------------

HTML_PAGE = (  # noqa: E501
    """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>EventLoopNode Live Demo</title>
<style>
  * { box-sizing: border-box; margin: 0; padding: 0; }
  body {
    font-family: 'SF Mono', 'Fira Code', monospace;
    background: #0d1117; color: #c9d1d9;
    height: 100vh; display: flex; flex-direction: column;
  }
  header {
    background: #161b22; padding: 12px 20px;
    border-bottom: 1px solid #30363d;
    display: flex; align-items: center; gap: 16px;
  }
  header h1 { font-size: 16px; color: #58a6ff; font-weight: 600; }
  .status {
    font-size: 12px; padding: 3px 10px; border-radius: 12px;
    background: #21262d; color: #8b949e;
  }
  .status.running { background: #1a4b2e; color: #3fb950; }
  .status.done { background: #1a3a5c; color: #58a6ff; }
  .status.error { background: #4b1a1a; color: #f85149; }
  .chat { flex: 1; overflow-y: auto; padding: 16px; }
  .msg {
    margin: 8px 0; padding: 10px 14px; border-radius: 8px;
    line-height: 1.6; white-space: pre-wrap; word-wrap: break-word;
  }
  .msg.user { background: #1a3a5c; color: #58a6ff; }
  .msg.assistant { background: #161b22; color: #c9d1d9; }
  .msg.event {
    background: transparent; color: #8b949e; font-size: 11px;
    padding: 4px 14px; border-left: 3px solid #30363d;
  }
  .msg.event.loop { border-left-color: #58a6ff; }
  .msg.event.tool { border-left-color: #d29922; }
  .msg.event.stall { border-left-color: #f85149; }
  .input-bar {
    padding: 12px 16px; background: #161b22;
    border-top: 1px solid #30363d; display: flex; gap: 8px;
  }
  .input-bar input {
    flex: 1; background: #0d1117; border: 1px solid #30363d;
    color: #c9d1d9; padding: 8px 12px; border-radius: 6px;
    font-family: inherit; font-size: 14px; outline: none;
  }
  .input-bar input:focus { border-color: #58a6ff; }
  .input-bar button {
    background: #238636; color: #fff; border: none;
    padding: 8px 20px; border-radius: 6px; cursor: pointer;
    font-family: inherit; font-weight: 600;
  }
  .input-bar button:hover { background: #2ea043; }
  .input-bar button:disabled {
    background: #21262d; color: #484f58; cursor: not-allowed;
  }
  .input-bar button.clear { background: #da3633; }
  .input-bar button.clear:hover { background: #f85149; }
</style>
</head>
<body>
<header>
  <h1>EventLoopNode Live</h1>
  <span id="status" class="status">Idle</span>
  <span id="iter" class="status" style="display:none">Step 0</span>
</header>
<div id="chat" class="chat"></div>
<div class="input-bar">
  <input id="input" type="text"
         placeholder="Ask anything..." autofocus />
  <button id="go" onclick="run()">Send</button>
  <button class="clear"
          onclick="clearConversation()">Clear</button>
</div>

<script>
let ws = null;
let currentAssistantEl = null;
let iterCount = 0;
const chat = document.getElementById('chat');
const status = document.getElementById('status');
const iterEl = document.getElementById('iter');
const goBtn = document.getElementById('go');
const inputEl = document.getElementById('input');

inputEl.addEventListener('keydown', e => {
  if (e.key === 'Enter') run();
});

function setStatus(text, cls) {
  status.textContent = text;
  status.className = 'status ' + cls;
}

function addMsg(text, cls) {
  const el = document.createElement('div');
  el.className = 'msg ' + cls;
  el.textContent = text;
  chat.appendChild(el);
  chat.scrollTop = chat.scrollHeight;
  return el;
}

function connect() {
  ws = new WebSocket('ws://' + location.host + '/ws');
  ws.onopen = () => {
    setStatus('Ready', 'done');
    goBtn.disabled = false;
  };
  ws.onmessage = handleEvent;
  ws.onerror = () => { setStatus('Error', 'error'); };
  ws.onclose = () => {
    setStatus('Reconnecting...', '');
    goBtn.disabled = true;
    setTimeout(connect, 2000);
  };
}

function handleEvent(msg) {
  const evt = JSON.parse(msg.data);

  if (evt.type === 'llm_text_delta') {
    if (currentAssistantEl) {
      currentAssistantEl.textContent += evt.content;
      chat.scrollTop = chat.scrollHeight;
    }
  }
  else if (evt.type === 'ready') {
    setStatus('Ready', 'done');
    if (currentAssistantEl && !currentAssistantEl.textContent)
      currentAssistantEl.remove();
    goBtn.disabled = false;
  }
  else if (evt.type === 'node_loop_iteration') {
    iterCount = evt.iteration || (iterCount + 1);
    iterEl.textContent = 'Step ' + iterCount;
    iterEl.style.display = '';
  }
  else if (evt.type === 'tool_call_started') {
    var info = evt.tool_name + '('
        + JSON.stringify(evt.tool_input).slice(0, 120) + ')';
    addMsg('TOOL ' + info, 'event tool');
  }
  else if (evt.type === 'tool_call_completed') {
    var preview = (evt.result || '').slice(0, 200);
    var cls = evt.is_error ? 'stall' : 'tool';
    addMsg('RESULT ' + evt.tool_name + ': ' + preview,
           'event ' + cls);
    currentAssistantEl = addMsg('', 'assistant');
  }
  else if (evt.type === 'result') {
    setStatus('Session ended', evt.success ? 'done' : 'error');
    if (evt.error) addMsg('ERROR ' + evt.error, 'event stall');
    if (currentAssistantEl && !currentAssistantEl.textContent)
      currentAssistantEl.remove();
    goBtn.disabled = false;
  }
  else if (evt.type === 'node_stalled') {
    addMsg('STALLED ' + evt.reason, 'event stall');
  }
  else if (evt.type === 'cleared') {
    chat.innerHTML = '';
    iterCount = 0;
    iterEl.textContent = 'Step 0';
    iterEl.style.display = 'none';
    setStatus('Ready', 'done');
    goBtn.disabled = false;
  }
}

function run() {
  const text = inputEl.value.trim();
  if (!text || !ws || ws.readyState !== 1) return;
  addMsg(text, 'user');
  currentAssistantEl = addMsg('', 'assistant');
  inputEl.value = '';
  setStatus('Running', 'running');
  goBtn.disabled = true;
  ws.send(JSON.stringify({ topic: text }));
}

function clearConversation() {
  if (ws && ws.readyState === 1) {
    ws.send(JSON.stringify({ command: 'clear' }));
  }
}

connect();
</script>
</body>
</html>"""
)


# -------------------------------------------------------------------------
# WebSocket handler
# -------------------------------------------------------------------------


async def handle_ws(websocket):
    """Persistent WebSocket: long-lived EventLoopNode with client_facing blocking."""
    global STORE

    # -- Event forwarding (WebSocket ← EventBus) ----------------------------
    bus = EventBus()

    async def forward_event(event):
        try:
            payload = {"type": event.type.value, **event.data}
            if event.node_id:
                payload["node_id"] = event.node_id
            await websocket.send(json.dumps(payload))
        except Exception:
            pass

    bus.subscribe(
        event_types=[
            EventType.NODE_LOOP_STARTED,
            EventType.NODE_LOOP_ITERATION,
            EventType.NODE_LOOP_COMPLETED,
            EventType.LLM_TEXT_DELTA,
            EventType.TOOL_CALL_STARTED,
            EventType.TOOL_CALL_COMPLETED,
            EventType.NODE_STALLED,
        ],
        handler=forward_event,
    )

    # -- Per-connection state -----------------------------------------------
    node = None
    loop_task = None

    tools = list(TOOL_REGISTRY.get_tools().values())
    tool_executor = TOOL_REGISTRY.get_executor()

    node_spec = NodeSpec(
        id="assistant",
        name="Chat Assistant",
        description="A conversational assistant that remembers context across messages",
        node_type="event_loop",
        client_facing=True,
        system_prompt=(
            "You are a helpful assistant with access to tools. "
            "You can search the web, scrape webpages, and query HubSpot CRM. "
            "Use tools when the user asks for current information or external data. "
            "You have full conversation history, so you can reference previous messages."
        ),
    )

    # -- Ready callback: subscribe to CLIENT_INPUT_REQUESTED on the bus ---
    async def on_input_requested(event):
        try:
            await websocket.send(json.dumps({"type": "ready"}))
        except Exception:
            pass

    bus.subscribe(
        event_types=[EventType.CLIENT_INPUT_REQUESTED],
        handler=on_input_requested,
    )

    async def start_loop(first_message: str):
        """Create an EventLoopNode and run it as a background task."""
        nonlocal node, loop_task

        memory = SharedMemory()
        ctx = NodeContext(
            runtime=RUNTIME,
            node_id="assistant",
            node_spec=node_spec,
            memory=memory,
            input_data={},
            llm=LLM,
            available_tools=tools,
        )
        node = EventLoopNode(
            event_bus=bus,
            config=LoopConfig(max_iterations=10_000, max_context_tokens=32_000),
            conversation_store=STORE,
            tool_executor=tool_executor,
        )
        await node.inject_event(first_message)

        async def _run():
            try:
                result = await node.execute(ctx)
                try:
                    await websocket.send(
                        json.dumps(
                            {
                                "type": "result",
                                "success": result.success,
                                "output": result.output,
                                "error": result.error,
                                "tokens": result.tokens_used,
                            }
                        )
                    )
                except Exception:
                    pass
                logger.info(f"Loop ended: success={result.success}, tokens={result.tokens_used}")
            except websockets.exceptions.ConnectionClosed:
                logger.info("Loop stopped: WebSocket closed")
            except Exception as e:
                logger.exception("Loop error")
                try:
                    await websocket.send(
                        json.dumps(
                            {
                                "type": "result",
                                "success": False,
                                "error": str(e),
                                "output": {},
                            }
                        )
                    )
                except Exception:
                    pass

        loop_task = asyncio.create_task(_run())

    async def stop_loop():
        """Signal the node and wait for the loop task to finish."""
        nonlocal node, loop_task
        if loop_task and not loop_task.done():
            if node:
                node.signal_shutdown()
            try:
                await asyncio.wait_for(loop_task, timeout=5.0)
            except (TimeoutError, asyncio.CancelledError):
                loop_task.cancel()
        node = None
        loop_task = None

    # -- Message loop (runs for the lifetime of this WebSocket) -------------
    try:
        async for raw in websocket:
            try:
                msg = json.loads(raw)
            except Exception:
                continue

            # Clear command
            if msg.get("command") == "clear":
                import shutil

                await stop_loop()
                await STORE.close()
                conv_dir = STORE_DIR / "conversation"
                if conv_dir.exists():
                    shutil.rmtree(conv_dir)
                STORE = FileConversationStore(conv_dir)
                await websocket.send(json.dumps({"type": "cleared"}))
                logger.info("Conversation cleared")
                continue

            topic = msg.get("topic", "")
            if not topic:
                continue

            if node is None:
                # First message — spin up the loop
                logger.info(f"Starting persistent loop: {topic}")
                await start_loop(topic)
            else:
                # Subsequent message — inject into the running loop
                logger.info(f"Injecting message: {topic}")
                await node.inject_event(topic)

    except websockets.exceptions.ConnectionClosed:
        pass
    finally:
        await stop_loop()
        logger.info("WebSocket closed, loop stopped")


# -------------------------------------------------------------------------
# HTTP handler for serving the HTML page
# -------------------------------------------------------------------------


async def process_request(connection, request: Request):
    """Serve HTML on GET /, upgrade to WebSocket on /ws."""
    if request.path == "/ws":
        return None  # let websockets handle the upgrade
    # Serve the HTML page for any other path
    return Response(
        HTTPStatus.OK,
        "OK",
        websockets.Headers({"Content-Type": "text/html; charset=utf-8"}),
        HTML_PAGE.encode(),
    )


# -------------------------------------------------------------------------
# Main
# -------------------------------------------------------------------------


async def main():
    port = 8765
    async with websockets.serve(
        handle_ws,
        "0.0.0.0",
        port,
        process_request=process_request,
    ):
        logger.info(f"Demo running at http://localhost:{port}")
        logger.info("Open in your browser and enter a topic to research.")
        await asyncio.Future()  # run forever


if __name__ == "__main__":
    asyncio.run(main())
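The browser page above is just one client of this protocol. A minimal headless sketch, assuming the server is running locally on port 8765; the message shapes (`{"topic": ...}`, `llm_text_delta`, `ready`, `result`) come straight from the handler code, while the client itself is illustrative:

```python
# Minimal headless client for the demo server above (illustrative).
import asyncio
import json

import websockets


async def ask(topic: str) -> None:
    async with websockets.connect("ws://localhost:8765/ws") as ws:
        await ws.send(json.dumps({"topic": topic}))
        async for raw in ws:
            evt = json.loads(raw)
            if evt.get("type") == "llm_text_delta":
                print(evt.get("content", ""), end="", flush=True)
            elif evt.get("type") in ("ready", "result"):
                break  # node is blocked on client input, or the loop finished


asyncio.run(ask("What is an EventLoopNode?"))
```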
File diff suppressed because it is too large
@@ -1,930 +0,0 @@
#!/usr/bin/env python3
"""
Two-Node ContextHandoff Demo

Demonstrates ContextHandoff between two EventLoopNode instances:
    Node A (Researcher) → ContextHandoff → Node B (Analyst)

Real LLM, real FileConversationStore, real EventBus.
Streams both nodes to a browser via WebSocket.

Usage:
    cd /home/timothy/oss/hive/core
    python demos/handoff_demo.py

Then open http://localhost:8766 in your browser.
"""

import asyncio
import json
import logging
import sys
import tempfile
from http import HTTPStatus
from pathlib import Path

import httpx
import websockets
from bs4 import BeautifulSoup
from websockets.http11 import Request, Response

# Add core, tools, and hive root to path
_CORE_DIR = Path(__file__).resolve().parent.parent
_HIVE_DIR = _CORE_DIR.parent
sys.path.insert(0, str(_CORE_DIR))  # framework.*
sys.path.insert(0, str(_HIVE_DIR / "tools" / "src"))  # aden_tools.*
sys.path.insert(0, str(_HIVE_DIR))  # core.framework.* (for aden_tools imports)

from aden_tools.credentials import CREDENTIAL_SPECS, CredentialStoreAdapter  # noqa: E402
from core.framework.credentials import CredentialStore  # noqa: E402
from framework.credentials.storage import (  # noqa: E402
    CompositeStorage,
    EncryptedFileStorage,
    EnvVarStorage,
)
from framework.graph.context_handoff import ContextHandoff  # noqa: E402
from framework.graph.conversation import NodeConversation  # noqa: E402
from framework.graph.event_loop_node import EventLoopNode, LoopConfig  # noqa: E402
from framework.graph.node import NodeContext, NodeSpec, SharedMemory  # noqa: E402
from framework.llm.litellm import LiteLLMProvider  # noqa: E402
from framework.llm.provider import Tool  # noqa: E402
from framework.runner.tool_registry import ToolRegistry  # noqa: E402
from framework.runtime.core import Runtime  # noqa: E402
from framework.runtime.event_bus import EventBus, EventType  # noqa: E402
from framework.storage.conversation_store import FileConversationStore  # noqa: E402

logging.basicConfig(level=logging.INFO, format="%(asctime)s %(name)s %(message)s")
logger = logging.getLogger("handoff_demo")

# -------------------------------------------------------------------------
# Persistent state
# -------------------------------------------------------------------------

STORE_DIR = Path(tempfile.mkdtemp(prefix="hive_handoff_"))
RUNTIME = Runtime(STORE_DIR / "runtime")
LLM = LiteLLMProvider(model="claude-sonnet-4-5-20250929")

# -------------------------------------------------------------------------
# Credentials
# -------------------------------------------------------------------------

# Composite credential store: encrypted files (primary) + env vars (fallback)
_env_mapping = {name: spec.env_var for name, spec in CREDENTIAL_SPECS.items()}
_composite = CompositeStorage(
    primary=EncryptedFileStorage(),
    fallbacks=[EnvVarStorage(env_mapping=_env_mapping)],
)
CREDENTIALS = CredentialStoreAdapter(CredentialStore(storage=_composite))

for _name in ["brave_search", "hubspot"]:
    _val = CREDENTIALS.get(_name)
    if _val:
        logger.debug("credential %s: OK (len=%d)", _name, len(_val))
    else:
        logger.debug("credential %s: not found", _name)

# -------------------------------------------------------------------------
# Tool Registry — web_search + web_scrape for Node A (Researcher)
# -------------------------------------------------------------------------

TOOL_REGISTRY = ToolRegistry()


def _exec_web_search(inputs: dict) -> dict:
    api_key = CREDENTIALS.get("brave_search")
    if not api_key:
        return {"error": "brave_search credential not configured"}
    query = inputs.get("query", "")
    num_results = min(inputs.get("num_results", 10), 20)
    resp = httpx.get(
        "https://api.search.brave.com/res/v1/web/search",
        params={"q": query, "count": num_results},
        headers={
            "X-Subscription-Token": api_key,
            "Accept": "application/json",
        },
        timeout=30.0,
    )
    if resp.status_code != 200:
        return {"error": f"Brave API HTTP {resp.status_code}"}
    data = resp.json()
    results = [
        {
            "title": item.get("title", ""),
            "url": item.get("url", ""),
            "snippet": item.get("description", ""),
        }
        for item in data.get("web", {}).get("results", [])[:num_results]
    ]
    return {"query": query, "results": results, "total": len(results)}


TOOL_REGISTRY.register(
    name="web_search",
    tool=Tool(
        name="web_search",
        description=(
            "Search the web for current information. "
            "Returns titles, URLs, and snippets from search results."
        ),
        parameters={
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query (1-500 characters)",
                },
                "num_results": {
                    "type": "integer",
                    "description": "Number of results (1-20, default 10)",
                },
            },
            "required": ["query"],
        },
    ),
    executor=lambda inputs: _exec_web_search(inputs),
)

_SCRAPE_HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/131.0.0.0 Safari/537.36"
    ),
    "Accept": "text/html,application/xhtml+xml",
}


def _exec_web_scrape(inputs: dict) -> dict:
    url = inputs.get("url", "")
    max_length = max(1000, min(inputs.get("max_length", 50000), 500000))
    if not url.startswith(("http://", "https://")):
        url = "https://" + url
    try:
        resp = httpx.get(
            url,
            timeout=30.0,
            follow_redirects=True,
            headers=_SCRAPE_HEADERS,
        )
        if resp.status_code != 200:
            return {"error": f"HTTP {resp.status_code}"}
        soup = BeautifulSoup(resp.text, "html.parser")
        for tag in soup(["script", "style", "nav", "footer", "header", "aside", "noscript"]):
            tag.decompose()
        title = soup.title.get_text(strip=True) if soup.title else ""
        main = (
            soup.find("article")
            or soup.find("main")
            or soup.find(attrs={"role": "main"})
            or soup.find("body")
        )
        text = main.get_text(separator=" ", strip=True) if main else ""
        text = " ".join(text.split())
        if len(text) > max_length:
            text = text[:max_length] + "..."
        return {
            "url": url,
            "title": title,
            "content": text,
            "length": len(text),
        }
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except Exception as e:
        return {"error": f"Scrape failed: {e}"}


TOOL_REGISTRY.register(
    name="web_scrape",
    tool=Tool(
        name="web_scrape",
        description=(
            "Scrape and extract text content from a webpage URL. "
            "Returns the page title and main text content."
        ),
        parameters={
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "URL of the webpage to scrape",
                },
                "max_length": {
                    "type": "integer",
                    "description": "Maximum text length (default 50000)",
                },
            },
            "required": ["url"],
        },
    ),
    executor=lambda inputs: _exec_web_scrape(inputs),
)

logger.info(
    "ToolRegistry loaded: %s",
    ", ".join(TOOL_REGISTRY.get_registered_names()),
)

# -------------------------------------------------------------------------
# Node Specs
# -------------------------------------------------------------------------

RESEARCHER_SPEC = NodeSpec(
    id="researcher",
    name="Researcher",
    description="Researches a topic using web search and scraping tools",
    node_type="event_loop",
    input_keys=["topic"],
    output_keys=["research_summary"],
    system_prompt=(
        "You are a thorough research assistant. Your job is to research "
        "the given topic using the web_search and web_scrape tools.\n\n"
        "1. Search for relevant information on the topic\n"
        "2. Scrape 1-2 of the most promising URLs for details\n"
        "3. Synthesize your findings into a comprehensive summary\n"
        "4. Use set_output with key='research_summary' to save your "
        "findings\n\n"
        "Be thorough but efficient. Aim for 2-4 search/scrape calls, "
        "then summarize and set_output."
    ),
)

ANALYST_SPEC = NodeSpec(
    id="analyst",
    name="Analyst",
    description="Analyzes research findings and provides insights",
    node_type="event_loop",
    input_keys=["context"],
    output_keys=["analysis"],
    system_prompt=(
        "You are a strategic analyst. You receive research findings from "
        "a previous researcher and must:\n\n"
        "1. Identify key themes and patterns\n"
        "2. Assess the reliability and significance of the findings\n"
        "3. Provide actionable insights and recommendations\n"
        "4. Use set_output with key='analysis' to save your analysis\n\n"
        "Be concise but insightful. Focus on what matters most."
    ),
)


# -------------------------------------------------------------------------
# HTML page
# -------------------------------------------------------------------------

HTML_PAGE = (  # noqa: E501
    """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>ContextHandoff Demo</title>
<style>
  * {
    box-sizing: border-box;
    margin: 0;
    padding: 0;
  }
  body {
    font-family: 'SF Mono', 'Fira Code', monospace;
    background: #0d1117;
    color: #c9d1d9;
    height: 100vh;
    display: flex;
    flex-direction: column;
  }
  header {
    background: #161b22;
    padding: 12px 20px;
    border-bottom: 1px solid #30363d;
    display: flex;
    align-items: center;
    gap: 16px;
  }
  header h1 {
    font-size: 16px;
    color: #58a6ff;
    font-weight: 600;
  }
  .badge {
    font-size: 12px;
    padding: 3px 10px;
    border-radius: 12px;
    background: #21262d;
    color: #8b949e;
  }
  .badge.researcher {
    background: #1a3a5c;
    color: #58a6ff;
  }
  .badge.analyst {
    background: #1a4b2e;
    color: #3fb950;
  }
  .badge.handoff {
    background: #3d1f00;
    color: #d29922;
  }
  .badge.done {
    background: #21262d;
    color: #8b949e;
  }
  .badge.error {
    background: #4b1a1a;
    color: #f85149;
  }
  .chat {
    flex: 1;
    overflow-y: auto;
    padding: 16px;
  }
  .msg {
    margin: 8px 0;
    padding: 10px 14px;
    border-radius: 8px;
    line-height: 1.6;
    white-space: pre-wrap;
    word-wrap: break-word;
  }
  .msg.user {
    background: #1a3a5c;
    color: #58a6ff;
  }
  .msg.assistant {
    background: #161b22;
    color: #c9d1d9;
  }
  .msg.assistant.analyst-msg {
    border-left: 3px solid #3fb950;
  }
  .msg.event {
    background: transparent;
    color: #8b949e;
    font-size: 11px;
    padding: 4px 14px;
    border-left: 3px solid #30363d;
  }
  .msg.event.loop {
    border-left-color: #58a6ff;
  }
  .msg.event.tool {
    border-left-color: #d29922;
  }
  .msg.event.stall {
    border-left-color: #f85149;
  }
  .handoff-banner {
    margin: 16px 0;
    padding: 16px;
    background: #1c1200;
    border: 1px solid #d29922;
    border-radius: 8px;
    text-align: center;
  }
  .handoff-banner h3 {
    color: #d29922;
    font-size: 14px;
    margin-bottom: 8px;
  }
  .handoff-banner p, .result-banner p {
    color: #8b949e;
    font-size: 12px;
    line-height: 1.5;
    max-height: 200px;
    overflow-y: auto;
    white-space: pre-wrap;
    text-align: left;
  }
  .result-banner {
    margin: 16px 0;
    padding: 16px;
    background: #0a2614;
    border: 1px solid #3fb950;
    border-radius: 8px;
  }
  .result-banner h3 {
    color: #3fb950;
    font-size: 14px;
    margin-bottom: 8px;
    text-align: center;
  }
  .result-banner .label {
    color: #58a6ff;
    font-size: 11px;
    font-weight: 600;
    margin-top: 10px;
    margin-bottom: 2px;
  }
  .result-banner .tokens {
    color: #484f58;
    font-size: 11px;
    text-align: center;
    margin-top: 10px;
  }
  .input-bar {
    padding: 12px 16px;
    background: #161b22;
    border-top: 1px solid #30363d;
    display: flex;
    gap: 8px;
  }
  .input-bar input {
    flex: 1;
    background: #0d1117;
    border: 1px solid #30363d;
    color: #c9d1d9;
    padding: 8px 12px;
    border-radius: 6px;
    font-family: inherit;
    font-size: 14px;
    outline: none;
  }
  .input-bar input:focus {
    border-color: #58a6ff;
  }
  .input-bar button {
    background: #238636;
    color: #fff;
    border: none;
    padding: 8px 20px;
    border-radius: 6px;
    cursor: pointer;
    font-family: inherit;
    font-weight: 600;
  }
  .input-bar button:hover {
    background: #2ea043;
  }
  .input-bar button:disabled {
    background: #21262d;
    color: #484f58;
    cursor: not-allowed;
  }
</style>
</head>
<body>
<header>
  <h1>ContextHandoff Demo</h1>
  <span id="phase" class="badge">Idle</span>
  <span id="iter" class="badge" style="display:none">Step 0</span>
</header>
<div id="chat" class="chat"></div>
<div class="input-bar">
  <input id="input" type="text"
         placeholder="Enter a research topic..." autofocus />
  <button id="go" onclick="run()">Research</button>
</div>

<script>
let ws = null;
let currentAssistantEl = null;
let iterCount = 0;
let currentPhase = 'idle';
const chat = document.getElementById('chat');
const phase = document.getElementById('phase');
const iterEl = document.getElementById('iter');
const goBtn = document.getElementById('go');
const inputEl = document.getElementById('input');

inputEl.addEventListener('keydown', e => {
  if (e.key === 'Enter') run();
});

function setPhase(text, cls) {
  phase.textContent = text;
  phase.className = 'badge ' + cls;
  currentPhase = cls;
}

function addMsg(text, cls) {
  const el = document.createElement('div');
  el.className = 'msg ' + cls;
  el.textContent = text;
  chat.appendChild(el);
  chat.scrollTop = chat.scrollHeight;
  return el;
}

function addHandoffBanner(summary) {
  const banner = document.createElement('div');
  banner.className = 'handoff-banner';
|
||||
const h3 = document.createElement('h3');
|
||||
h3.textContent = 'Context Handoff: Researcher -> Analyst';
|
||||
const p = document.createElement('p');
|
||||
p.textContent = summary || 'Passing research context...';
|
||||
banner.appendChild(h3);
|
||||
banner.appendChild(p);
|
||||
chat.appendChild(banner);
|
||||
chat.scrollTop = chat.scrollHeight;
|
||||
}
|
||||
|
||||
function addResultBanner(researcher, analyst, tokens) {
|
||||
const banner = document.createElement('div');
|
||||
banner.className = 'result-banner';
|
||||
const h3 = document.createElement('h3');
|
||||
h3.textContent = 'Pipeline Complete';
|
||||
banner.appendChild(h3);
|
||||
|
||||
if (researcher && researcher.research_summary) {
|
||||
const lbl = document.createElement('div');
|
||||
lbl.className = 'label';
|
||||
lbl.textContent = 'RESEARCH SUMMARY';
|
||||
banner.appendChild(lbl);
|
||||
const p = document.createElement('p');
|
||||
p.textContent = researcher.research_summary;
|
||||
banner.appendChild(p);
|
||||
}
|
||||
|
||||
if (analyst && analyst.analysis) {
|
||||
const lbl = document.createElement('div');
|
||||
lbl.className = 'label';
|
||||
lbl.textContent = 'ANALYSIS';
|
||||
lbl.style.color = '#3fb950';
|
||||
banner.appendChild(lbl);
|
||||
const p = document.createElement('p');
|
||||
p.textContent = analyst.analysis;
|
||||
banner.appendChild(p);
|
||||
}
|
||||
|
||||
if (tokens) {
|
||||
const t = document.createElement('div');
|
||||
t.className = 'tokens';
|
||||
t.textContent = 'Total tokens: ' + tokens.toLocaleString();
|
||||
banner.appendChild(t);
|
||||
}
|
||||
|
||||
chat.appendChild(banner);
|
||||
chat.scrollTop = chat.scrollHeight;
|
||||
}
|
||||
|
||||
function connect() {
|
||||
ws = new WebSocket('ws://' + location.host + '/ws');
|
||||
ws.onopen = () => {
|
||||
setPhase('Ready', 'done');
|
||||
goBtn.disabled = false;
|
||||
};
|
||||
ws.onmessage = handleEvent;
|
||||
ws.onerror = () => { setPhase('Error', 'error'); };
|
||||
ws.onclose = () => {
|
||||
setPhase('Reconnecting...', '');
|
||||
goBtn.disabled = true;
|
||||
setTimeout(connect, 2000);
|
||||
};
|
||||
}
|
||||
|
||||
function handleEvent(msg) {
|
||||
const evt = JSON.parse(msg.data);
|
||||
|
||||
if (evt.type === 'phase') {
|
||||
if (evt.phase === 'researcher') {
|
||||
setPhase('Researcher', 'researcher');
|
||||
} else if (evt.phase === 'handoff') {
|
||||
setPhase('Handoff', 'handoff');
|
||||
} else if (evt.phase === 'analyst') {
|
||||
setPhase('Analyst', 'analyst');
|
||||
}
|
||||
iterCount = 0;
|
||||
iterEl.style.display = 'none';
|
||||
}
|
||||
else if (evt.type === 'llm_text_delta') {
|
||||
if (currentAssistantEl) {
|
||||
currentAssistantEl.textContent += evt.content;
|
||||
chat.scrollTop = chat.scrollHeight;
|
||||
}
|
||||
}
|
||||
else if (evt.type === 'node_loop_iteration') {
|
||||
iterCount = evt.iteration || (iterCount + 1);
|
||||
iterEl.textContent = 'Step ' + iterCount;
|
||||
iterEl.style.display = '';
|
||||
}
|
||||
else if (evt.type === 'tool_call_started') {
|
||||
var info = evt.tool_name + '('
|
||||
+ JSON.stringify(evt.tool_input).slice(0, 120) + ')';
|
||||
addMsg('TOOL ' + info, 'event tool');
|
||||
}
|
||||
else if (evt.type === 'tool_call_completed') {
|
||||
var preview = (evt.result || '').slice(0, 200);
|
||||
var cls = evt.is_error ? 'stall' : 'tool';
|
||||
addMsg(
|
||||
'RESULT ' + evt.tool_name + ': ' + preview,
|
||||
'event ' + cls
|
||||
);
|
||||
var assistCls = currentPhase === 'analyst'
|
||||
? 'assistant analyst-msg' : 'assistant';
|
||||
currentAssistantEl = addMsg('', assistCls);
|
||||
}
|
||||
else if (evt.type === 'handoff_context') {
|
||||
addHandoffBanner(evt.summary);
|
||||
var assistCls = 'assistant analyst-msg';
|
||||
currentAssistantEl = addMsg('', assistCls);
|
||||
}
|
||||
else if (evt.type === 'node_result') {
|
||||
if (evt.node_id === 'researcher') {
|
||||
if (currentAssistantEl
|
||||
&& !currentAssistantEl.textContent) {
|
||||
currentAssistantEl.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (evt.type === 'done') {
|
||||
setPhase('Done', 'done');
|
||||
iterEl.style.display = 'none';
|
||||
if (currentAssistantEl
|
||||
&& !currentAssistantEl.textContent) {
|
||||
currentAssistantEl.remove();
|
||||
}
|
||||
currentAssistantEl = null;
|
||||
addResultBanner(
|
||||
evt.researcher, evt.analyst, evt.total_tokens
|
||||
);
|
||||
goBtn.disabled = false;
|
||||
inputEl.placeholder = 'Enter another topic...';
|
||||
}
|
||||
else if (evt.type === 'error') {
|
||||
setPhase('Error', 'error');
|
||||
addMsg('ERROR ' + evt.message, 'event stall');
|
||||
goBtn.disabled = false;
|
||||
}
|
||||
else if (evt.type === 'node_stalled') {
|
||||
addMsg('STALLED ' + evt.reason, 'event stall');
|
||||
}
|
||||
}
|
||||
|
||||
function run() {
|
||||
const text = inputEl.value.trim();
|
||||
if (!text || !ws || ws.readyState !== 1) return;
|
||||
chat.innerHTML = '';
|
||||
addMsg(text, 'user');
|
||||
currentAssistantEl = addMsg('', 'assistant');
|
||||
inputEl.value = '';
|
||||
goBtn.disabled = true;
|
||||
ws.send(JSON.stringify({ topic: text }));
|
||||
}
|
||||
|
||||
connect();
|
||||
</script>
|
||||
</body>
|
||||
</html>"""
|
||||
)
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# WebSocket handler — sequential Node A → Handoff → Node B
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def handle_ws(websocket):
|
||||
"""Run the two-node handoff pipeline per user message."""
|
||||
try:
|
||||
async for raw in websocket:
|
||||
try:
|
||||
msg = json.loads(raw)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
topic = msg.get("topic", "")
|
||||
if not topic:
|
||||
continue
|
||||
|
||||
logger.info(f"Starting handoff pipeline for: {topic}")
|
||||
|
||||
try:
|
||||
await _run_pipeline(websocket, topic)
|
||||
except websockets.exceptions.ConnectionClosed:
|
||||
logger.info("WebSocket closed during pipeline")
|
||||
return
|
||||
except Exception as e:
|
||||
logger.exception("Pipeline error")
|
||||
try:
|
||||
await websocket.send(json.dumps({"type": "error", "message": str(e)}))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
except websockets.exceptions.ConnectionClosed:
|
||||
pass
|
||||
|
||||
|
||||
async def _run_pipeline(websocket, topic: str):
|
||||
"""Execute: Node A (research) → ContextHandoff → Node B (analysis)."""
|
||||
import shutil
|
||||
|
||||
# Fresh stores for each run
|
||||
run_dir = Path(tempfile.mkdtemp(prefix="hive_run_", dir=STORE_DIR))
|
||||
store_a = FileConversationStore(run_dir / "node_a")
|
||||
store_b = FileConversationStore(run_dir / "node_b")
|
||||
|
||||
# Shared event bus
|
||||
bus = EventBus()
|
||||
|
||||
async def forward_event(event):
|
||||
try:
|
||||
payload = {"type": event.type.value, **event.data}
|
||||
if event.node_id:
|
||||
payload["node_id"] = event.node_id
|
||||
await websocket.send(json.dumps(payload))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
bus.subscribe(
|
||||
event_types=[
|
||||
EventType.NODE_LOOP_STARTED,
|
||||
EventType.NODE_LOOP_ITERATION,
|
||||
EventType.NODE_LOOP_COMPLETED,
|
||||
EventType.LLM_TEXT_DELTA,
|
||||
EventType.TOOL_CALL_STARTED,
|
||||
EventType.TOOL_CALL_COMPLETED,
|
||||
EventType.NODE_STALLED,
|
||||
],
|
||||
handler=forward_event,
|
||||
)
|
||||
|
||||
tools = list(TOOL_REGISTRY.get_tools().values())
|
||||
tool_executor = TOOL_REGISTRY.get_executor()
|
||||
|
||||
# ---- Phase 1: Researcher ------------------------------------------------
|
||||
await websocket.send(json.dumps({"type": "phase", "phase": "researcher"}))
|
||||
|
||||
node_a = EventLoopNode(
|
||||
event_bus=bus,
|
||||
judge=None, # implicit judge: accept when output_keys filled
|
||||
config=LoopConfig(
|
||||
max_iterations=20,
|
||||
max_tool_calls_per_turn=30,
|
||||
max_context_tokens=32_000,
|
||||
),
|
||||
conversation_store=store_a,
|
||||
tool_executor=tool_executor,
|
||||
)
|
||||
|
||||
ctx_a = NodeContext(
|
||||
runtime=RUNTIME,
|
||||
node_id="researcher",
|
||||
node_spec=RESEARCHER_SPEC,
|
||||
memory=SharedMemory(),
|
||||
input_data={"topic": topic},
|
||||
llm=LLM,
|
||||
available_tools=tools,
|
||||
)
|
||||
|
||||
result_a = await node_a.execute(ctx_a)
|
||||
logger.info(
|
||||
"Researcher done: success=%s, tokens=%s",
|
||||
result_a.success,
|
||||
result_a.tokens_used,
|
||||
)
|
||||
|
||||
await websocket.send(
|
||||
json.dumps(
|
||||
{
|
||||
"type": "node_result",
|
||||
"node_id": "researcher",
|
||||
"success": result_a.success,
|
||||
"output": result_a.output,
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
if not result_a.success:
|
||||
await websocket.send(
|
||||
json.dumps(
|
||||
{
|
||||
"type": "error",
|
||||
"message": f"Researcher failed: {result_a.error}",
|
||||
}
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
# ---- Phase 2: Context Handoff -------------------------------------------
|
||||
await websocket.send(json.dumps({"type": "phase", "phase": "handoff"}))
|
||||
|
||||
# Restore the researcher's conversation from store
|
||||
conversation_a = await NodeConversation.restore(store_a)
|
||||
if conversation_a is None:
|
||||
await websocket.send(
|
||||
json.dumps(
|
||||
{
|
||||
"type": "error",
|
||||
"message": "Failed to restore researcher conversation",
|
||||
}
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
handoff_engine = ContextHandoff(llm=LLM)
|
||||
handoff_context = handoff_engine.summarize_conversation(
|
||||
conversation=conversation_a,
|
||||
node_id="researcher",
|
||||
output_keys=["research_summary"],
|
||||
)
|
||||
|
||||
formatted_handoff = ContextHandoff.format_as_input(handoff_context)
|
||||
logger.info(
|
||||
"Handoff: %d turns, ~%d tokens, keys=%s",
|
||||
handoff_context.turn_count,
|
||||
handoff_context.total_tokens_used,
|
||||
list(handoff_context.key_outputs.keys()),
|
||||
)
|
||||
|
||||
# Send handoff context to browser
|
||||
await websocket.send(
|
||||
json.dumps(
|
||||
{
|
||||
"type": "handoff_context",
|
||||
"summary": handoff_context.summary[:500],
|
||||
"turn_count": handoff_context.turn_count,
|
||||
"tokens": handoff_context.total_tokens_used,
|
||||
"key_outputs": handoff_context.key_outputs,
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
# ---- Phase 3: Analyst ---------------------------------------------------
|
||||
await websocket.send(json.dumps({"type": "phase", "phase": "analyst"}))
|
||||
|
||||
node_b = EventLoopNode(
|
||||
event_bus=bus,
|
||||
judge=None, # implicit judge
|
||||
config=LoopConfig(
|
||||
max_iterations=10,
|
||||
max_tool_calls_per_turn=30,
|
||||
max_context_tokens=32_000,
|
||||
),
|
||||
conversation_store=store_b,
|
||||
)
|
||||
|
||||
ctx_b = NodeContext(
|
||||
runtime=RUNTIME,
|
||||
node_id="analyst",
|
||||
node_spec=ANALYST_SPEC,
|
||||
memory=SharedMemory(),
|
||||
input_data={"context": formatted_handoff},
|
||||
llm=LLM,
|
||||
available_tools=[],
|
||||
)
|
||||
|
||||
result_b = await node_b.execute(ctx_b)
|
||||
logger.info(
|
||||
"Analyst done: success=%s, tokens=%s",
|
||||
result_b.success,
|
||||
result_b.tokens_used,
|
||||
)
|
||||
|
||||
# ---- Done ---------------------------------------------------------------
|
||||
await websocket.send(
|
||||
json.dumps(
|
||||
{
|
||||
"type": "done",
|
||||
"researcher": result_a.output,
|
||||
"analyst": result_b.output,
|
||||
"total_tokens": ((result_a.tokens_used or 0) + (result_b.tokens_used or 0)),
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
# Clean up temp stores
|
||||
try:
|
||||
shutil.rmtree(run_dir)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# HTTP handler
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def process_request(connection, request: Request):
|
||||
"""Serve HTML on GET /, upgrade to WebSocket on /ws."""
|
||||
if request.path == "/ws":
|
||||
return None
|
||||
return Response(
|
||||
HTTPStatus.OK,
|
||||
"OK",
|
||||
websockets.Headers({"Content-Type": "text/html; charset=utf-8"}),
|
||||
HTML_PAGE.encode(),
|
||||
)
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Main
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def main():
|
||||
port = 8766
|
||||
async with websockets.serve(
|
||||
handle_ws,
|
||||
"0.0.0.0",
|
||||
port,
|
||||
process_request=process_request,
|
||||
):
|
||||
logger.info(f"Handoff demo at http://localhost:{port}")
|
||||
logger.info("Enter a research topic to start the pipeline.")
|
||||
await asyncio.Future()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
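For reference, a minimal client for the demo's WebSocket protocol might look like the sketch below. This is illustrative only, not part of the demo: it assumes the `websockets` client package and relies on the event names and port the server above actually uses.

```python
# Minimal sketch of a client for the demo protocol (illustrative only).
import asyncio
import json

import websockets


async def run_topic(topic: str) -> None:
    async with websockets.connect("ws://localhost:8766/ws") as ws:
        await ws.send(json.dumps({"topic": topic}))
        async for raw in ws:
            evt = json.loads(raw)
            if evt.get("type") == "llm_text_delta":
                # Stream assistant text as it arrives.
                print(evt.get("content", ""), end="", flush=True)
            elif evt.get("type") == "done":
                print("\nTotal tokens:", evt.get("total_tokens"))
                break
            elif evt.get("type") == "error":
                print("\nError:", evt.get("message"))
                break


if __name__ == "__main__":
    asyncio.run(run_topic("history of error-correcting codes"))
```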
@@ -79,7 +79,7 @@ async def example_3_config_file():
    # Copy example config (in practice, you'd place this in your agent folder)
    import shutil

    shutil.copy("examples/mcp_servers.json", test_agent_path / "mcp_servers.json")
    shutil.copy(Path(__file__).parent / "mcp_servers.json", test_agent_path / "mcp_servers.json")

    # Load agent - MCP servers will be auto-discovered
    runner = AgentRunner.load(test_agent_path)
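The hunk above swaps a working-directory-relative path for one anchored to the example's own file. A small illustration of the difference (illustrative, not from the source):

```python
# Why the replacement line is more robust (illustration only).
from pathlib import Path

# Resolves against os.getcwd(), so it breaks when the script is launched
# from anywhere other than the repo root:
fragile = Path("examples/mcp_servers.json")

# Resolves against the location of this source file, independent of the
# current working directory:
robust = Path(__file__).parent / "mcp_servers.json"
```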
@@ -1,8 +1,6 @@
"""CLI entry point for Credential Tester agent."""

import asyncio
import logging
import sys

import click

@@ -16,6 +16,7 @@ after the user picks an account programmatically.

from __future__ import annotations

import logging
from pathlib import Path
from typing import TYPE_CHECKING

@@ -25,6 +26,7 @@ from framework.graph.checkpoint_config import CheckpointConfig
from framework.graph.edge import GraphSpec
from framework.graph.executor import ExecutionResult
from framework.llm import LiteLLMProvider
from framework.runner.mcp_registry import MCPRegistry
from framework.runner.tool_registry import ToolRegistry
from framework.runtime.agent_runtime import AgentRuntime, create_agent_runtime
from framework.runtime.execution_stream import EntryPointSpec
@@ -32,9 +34,13 @@ from framework.runtime.execution_stream import EntryPointSpec
from .config import default_config
from .nodes import build_tester_node

logger = logging.getLogger(__name__)

if TYPE_CHECKING:
    from framework.runner import AgentRunner

logger = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Goal
# ---------------------------------------------------------------------------
@@ -107,7 +113,11 @@ def _list_aden_accounts() -> list[dict]:
            for c in integrations
            if c.status == "active"
        ]
    except (ImportError, OSError) as exc:
        logger.debug("Could not list Aden accounts: %s", exc)
        return []
    except Exception:
        logger.warning("Unexpected error listing Aden accounts", exc_info=True)
        return []


@@ -119,7 +129,11 @@ def _list_local_accounts() -> list[dict]:
        return [
            info.to_account_dict() for info in LocalCredentialRegistry.default().list_accounts()
        ]
    except ImportError as exc:
        logger.debug("Local credential registry unavailable: %s", exc)
        return []
    except Exception:
        logger.warning("Unexpected error listing local accounts", exc_info=True)
        return []


@@ -140,7 +154,11 @@ def _list_env_fallback_accounts() -> list[dict]:
        from framework.credentials.storage import EncryptedFileStorage

        encrypted_ids: set[str] = set(EncryptedFileStorage().list_all())
    except (ImportError, OSError) as exc:
        logger.debug("Could not read encrypted store: %s", exc)
        encrypted_ids = set()
    except Exception:
        logger.warning("Unexpected error reading encrypted store", exc_info=True)
        encrypted_ids = set()

    def _is_configured(cred_name: str, spec) -> bool:
@@ -300,8 +318,10 @@ def _activate_local_account(credential_id: str, alias: str) -> None:

            if key:
                os.environ[spec.env_var] = key
    except (ImportError, KeyError, OSError) as exc:
        logger.debug("Could not inject credentials: %s", exc)
    except Exception:
        pass
        logger.warning("Unexpected error injecting credentials", exc_info=True)


def _configure_aden_node(
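The hunks above all apply the same refactor: a bare `except Exception` becomes one branch for expected failures (logged quietly at debug, graceful fallback) plus a last-resort branch that keeps the traceback at warning level. A generic sketch of the shape, with hypothetical names:

```python
# Sketch of the narrowed-exception pattern; names are hypothetical.
import logging

logger = logging.getLogger(__name__)


def _fetch_accounts() -> list[dict]:
    """Stand-in for the real lookup; here it just fails."""
    raise OSError("store unavailable")


def load_accounts() -> list[dict]:
    try:
        return _fetch_accounts()
    except (ImportError, OSError) as exc:
        # Expected, recoverable failures: log quietly and degrade.
        logger.debug("Could not load accounts: %s", exc)
        return []
    except Exception:
        # Anything else is a genuine surprise: keep the traceback.
        logger.warning("Unexpected error loading accounts", exc_info=True)
        return []
```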
@@ -563,6 +583,15 @@ class CredentialTesterAgent:
        if mcp_config_path.exists():
            self._tool_registry.load_mcp_config(mcp_config_path)

        try:
            registry = MCPRegistry()
            registry.initialize()
            registry_configs = registry.load_agent_selection(Path(__file__).parent)
            if registry_configs:
                self._tool_registry.load_registry_servers(registry_configs)
        except Exception:
            logger.warning("MCP registry config failed to load", exc_info=True)

        extra_kwargs = getattr(self.config, "extra_kwargs", {}) or {}
        llm = LiteLLMProvider(
            model=self.config.model,

@@ -16,31 +16,63 @@ class AgentEntry:
    description: str
    category: str
    session_count: int = 0
    run_count: int = 0
    node_count: int = 0
    tool_count: int = 0
    tags: list[str] = field(default_factory=list)
    last_active: str | None = None


def _get_last_active(agent_name: str) -> str | None:
    """Return the most recent updated_at timestamp across all sessions."""
    sessions_dir = Path.home() / ".hive" / "agents" / agent_name / "sessions"
    if not sessions_dir.exists():
        return None
def _get_last_active(agent_path: Path) -> str | None:
    """Return the most recent updated_at timestamp across all sessions.

    Checks both worker sessions (``~/.hive/agents/{name}/sessions/``) and
    queen sessions (``~/.hive/queen/session/``) whose ``meta.json`` references
    the same *agent_path*.
    """
    from datetime import datetime

    agent_name = agent_path.name
    latest: str | None = None
    for session_dir in sessions_dir.iterdir():
        if not session_dir.is_dir() or not session_dir.name.startswith("session_"):
            continue
        state_file = session_dir / "state.json"
        if not state_file.exists():
            continue
        try:
            data = json.loads(state_file.read_text(encoding="utf-8"))
            ts = data.get("timestamps", {}).get("updated_at")
            if ts and (latest is None or ts > latest):
                latest = ts
        except Exception:
            continue

    # 1. Worker sessions
    sessions_dir = Path.home() / ".hive" / "agents" / agent_name / "sessions"
    if sessions_dir.exists():
        for session_dir in sessions_dir.iterdir():
            if not session_dir.is_dir() or not session_dir.name.startswith("session_"):
                continue
            state_file = session_dir / "state.json"
            if not state_file.exists():
                continue
            try:
                data = json.loads(state_file.read_text(encoding="utf-8"))
                ts = data.get("timestamps", {}).get("updated_at")
                if ts and (latest is None or ts > latest):
                    latest = ts
            except Exception:
                continue

    # 2. Queen sessions
    queen_sessions_dir = Path.home() / ".hive" / "queen" / "session"
    if queen_sessions_dir.exists():
        resolved = agent_path.resolve()
        for d in queen_sessions_dir.iterdir():
            if not d.is_dir():
                continue
            meta_file = d / "meta.json"
            if not meta_file.exists():
                continue
            try:
                meta = json.loads(meta_file.read_text(encoding="utf-8"))
                stored = meta.get("agent_path")
                if not stored or Path(stored).resolve() != resolved:
                    continue
                ts = datetime.fromtimestamp(d.stat().st_mtime).isoformat()
                if latest is None or ts > latest:
                    latest = ts
            except Exception:
                continue

    return latest
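Note that `_get_last_active` orders timestamps by plain string comparison. That is sound only because ISO-8601 strings of a consistent format sort lexicographically in chronological order, for example:

```python
from datetime import datetime

a = "2024-04-30T23:59:59"
b = "2024-05-01T10:00:00"

# String order and chronological order agree for same-format ISO-8601 values.
assert (a < b) == (datetime.fromisoformat(a) < datetime.fromisoformat(b))
```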
@@ -52,6 +84,31 @@ def _count_sessions(agent_name: str) -> int:
    return sum(1 for d in sessions_dir.iterdir() if d.is_dir() and d.name.startswith("session_"))


def _count_runs(agent_name: str) -> int:
    """Count unique run_ids across all sessions for an agent."""
    sessions_dir = Path.home() / ".hive" / "agents" / agent_name / "sessions"
    if not sessions_dir.exists():
        return 0
    run_ids: set[str] = set()
    for session_dir in sessions_dir.iterdir():
        if not session_dir.is_dir() or not session_dir.name.startswith("session_"):
            continue
        # runs.jsonl lives inside workspace subdirectories
        for runs_file in session_dir.rglob("runs.jsonl"):
            try:
                for line in runs_file.read_text(encoding="utf-8").splitlines():
                    line = line.strip()
                    if not line:
                        continue
                    record = json.loads(line)
                    rid = record.get("run_id")
                    if rid:
                        run_ids.add(rid)
            except Exception:
                continue
    return len(run_ids)


def _extract_agent_stats(agent_path: Path) -> tuple[int, int, list[str]]:
    """Extract node count, tool count, and tags from an agent directory.
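`_count_runs` above assumes `runs.jsonl` holds one JSON object per line, each carrying a `run_id`; the set collapses duplicates across files and sessions. A hypothetical file (any field beyond `run_id` is invented here) and the dedup logic in miniature:

```python
import json

# Hypothetical runs.jsonl contents; only run_id matters to _count_runs.
lines = [
    '{"run_id": "run_001", "status": "completed"}',
    '{"run_id": "run_001", "status": "completed"}',  # duplicate, counted once
    '{"run_id": "run_002", "status": "failed"}',
]

unique = {json.loads(line)["run_id"] for line in lines}
assert len(unique) == 2
```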
@@ -139,10 +196,11 @@ def discover_agents() -> dict[str, list[AgentEntry]]:
                    description=desc,
                    category=category,
                    session_count=_count_sessions(path.name),
                    run_count=_count_runs(path.name),
                    node_count=node_count,
                    tool_count=tool_count,
                    tags=tags,
                    last_active=_get_last_active(path.name),
                    last_active=_get_last_active(path),
                )
            )
        if entries:

@@ -14,8 +14,7 @@ queen_goal = Goal(
    id="queen-manager",
    name="Queen Manager",
    description=(
        "Manage the worker agent lifecycle and serve as the user's primary "
        "interactive interface. Triage health escalations from the judge."
        "Manage the worker agent lifecycle and serve as the user's primary interactive interface."
    ),
    success_criteria=[],
    constraints=[],

@@ -62,6 +62,12 @@ _SHARED_TOOLS = [
    "get_agent_checkpoint",
]

# Episodic memory tools — available in every queen phase.
_QUEEN_MEMORY_TOOLS = [
    "write_to_diary",
    "recall_diary",
]

# Queen phase-specific tool sets.

# Planning phase: read-only exploration + design, no write tools.
@@ -84,16 +90,19 @@ _QUEEN_PLANNING_TOOLS = [
    "initialize_and_build_agent",
    # Load existing agent (after user confirms)
    "load_built_agent",
]
] + _QUEEN_MEMORY_TOOLS

# Building phase: full coding + agent construction tools.
_QUEEN_BUILDING_TOOLS = _SHARED_TOOLS + [
    "load_built_agent",
    "list_credentials",
    "replan_agent",
    "save_agent_draft",  # Re-draft during building → auto-dissolves + updates flowchart
    "write_to_diary",  # Episodic memory — available in all phases
]
_QUEEN_BUILDING_TOOLS = (
    _SHARED_TOOLS
    + [
        "load_built_agent",
        "list_credentials",
        "replan_agent",
        "save_agent_draft",  # Re-draft during building → auto-dissolves + updates flowchart
    ]
    + _QUEEN_MEMORY_TOOLS
)

# Staging phase: agent loaded but not yet running — inspect, configure, launch.
_QUEEN_STAGING_TOOLS = [
@@ -110,7 +119,11 @@ _QUEEN_STAGING_TOOLS = [
    "stop_worker_and_edit",
    "stop_worker_and_plan",
    "write_to_diary",  # Episodic memory — available in all phases
]
    # Trigger management
    "set_trigger",
    "remove_trigger",
    "list_triggers",
] + _QUEEN_MEMORY_TOOLS

# Running phase: worker is executing — monitor and control.
_QUEEN_RUNNING_TOOLS = [
@@ -126,12 +139,16 @@ _QUEEN_RUNNING_TOOLS = [
    "stop_worker_and_edit",
    "stop_worker_and_plan",
    "get_worker_status",
    "run_agent_with_input",
    "inject_worker_message",
    # Monitoring
    "get_worker_health_summary",
    "notify_operator",
    "set_trigger",
    "remove_trigger",
    "list_triggers",
    "write_to_diary",  # Episodic memory — available in all phases
]
] + _QUEEN_MEMORY_TOOLS


# ---------------------------------------------------------------------------
@@ -173,12 +190,8 @@ search_files, or list_directory — those are YOUR tools, not theirs.
)

_planning_knowledge = """\
**A responsible engineer doesn't jump into building. First, \
understand the problem and be transparent about what the framework can and cannot do.**

Use the user's selection (or their custom description if they chose "Other") \
as context when shaping the goal below. If the user already described \
what they want before this step, skip the question and proceed directly.
**Be responsible, understand the problem by asking practical qualifying questions \
and be transparent about what the framework can and cannot do.**

# Core Mandates (Planning)
- **DO NOT propose a complete goal on your own.** Instead, \
@@ -194,10 +207,12 @@ Before designing any agent, discover tools progressively — start compact, dril
what you need. ONLY use tools from this list in your node definitions. \
NEVER guess or fabricate tool names from memory.

list_agent_tools()  # Step 1: provider summary (counts + credential status)
list_agent_tools(group="google", output_schema="summary")  # Step 2: service breakdown within a provider
list_agent_tools(group="google", service="gmail")  # Step 3: tool names for one service
list_agent_tools(group="google", service="gmail", output_schema="full")  # Step 4: full detail for specific tools
list_agent_tools()  # Step 1: provider summary
list_agent_tools(group="google", output_schema="summary")  # Step 2: service breakdown
list_agent_tools(group="google", service="gmail")  # Step 3: tool names
list_agent_tools(  # Step 4: full detail
    group="google", service="gmail", output_schema="full"
)

Step 1 is MANDATORY. Returns provider names, tool counts, credential availability — very compact. \
Step 2 breaks a provider into services (e.g. google → gmail/calendar/sheets/drive). Only do this \
@@ -208,30 +223,13 @@ Use credentials="available" at any step to filter to tools whose credentials are

# Discovery & Design Workflow

## 1: Fast Discovery (3-6 Turns)
## 1: Discovery (3-6 Turns)

**The core principle**: Discovery should feel like progress, not paperwork. \
The stakeholder should walk away feeling like you understood them faster \
than anyone else would have.

**Communication style**: Be concise. Say less. Mean more. Impatient stakeholders \
don't want a wall of text — they want to know you get it. Every sentence you say \
should either move the conversation forward or prove you understood something. \
If it does neither, cut it.

**Ask Question Rules: Respect Their Time.** Every question must earn its place by:
1. **Preventing a costly wrong turn** — you're about to build the wrong thing
2. **Unlocking a shortcut** — their answer lets you simplify the design
3. **Surfacing a dealbreaker** — there's a constraint that changes everything
4. **Provide Options** - provide options with your questions if possible, \
but always allow the user to type something beyond the options.

If a question doesn't do one of these, don't ask it. Make an assumption, state it, and move on.

---

### 1.1: Let Them Talk, But Listen Like a Solution Architect

Ask questions that help the user bridge the goal and the solution. \
When the stakeholder describes what they want, mentally construct:

- **The pain**: What about today's situation is broken, slow, or missing?
@@ -242,57 +240,6 @@ When the stakeholder describes what they want, mentally construct:

---

### 1.2: Use Domain Knowledge to Fill In the Blanks

You have broad knowledge of how systems work. Use it aggressively.

If they say "I need a research agent," you already know it probably involves: \
search, summarization, source tracking, and iteration. Don't ask about each — \
use them as your starting mental model and let their specifics override your defaults.

If they say "I need to monitor files and alert me," you know this probably involves: \
watch patterns, triggers, notifications, and state tracking.

---

### 1.3: Play Back a Proposed Model (Not a List of Questions)

After listening, present a **concrete picture** of what you think they need. \
Make it specific enough that they can spot what's wrong. \
Use ASCII art to show the user if it helps.

**Pattern: "Here's what I heard — tell me where I'm off"**

> "OK here's how I'm picturing this: [User type] needs to [core action]. \
Right now they're [current painful workflow]. \
What you want is [proposed solution that replaces the pain].
> The way I'd structure this: [key entities] connected by [key relationships], \
with the main flow being [trigger → steps → outcome].
> For the MVP, I'd focus on [the one thing that delivers the most value] \
and hold off on [things that can wait].
> Before I start — [1-2 specific questions you genuinely can't infer]."

---

### 1.4: Ask Only What You Cannot Infer

Your questions should be **narrow, specific, and consequential**. \
Never ask what you could answer yourself.

**Good questions** (high-stakes, can't infer):
- "Who's the primary user — you or your end customers?"
- "Is this replacing a spreadsheet, or is there literally nothing today?"
- "Does this need to integrate with anything, or standalone?"
- "Is there existing data to migrate, or starting fresh?"

**Bad questions** (low-stakes, inferable):
- "What should happen if there's an error?" *(handle gracefully, obviously)*
- "Should it have search?" *(if there's a list, yes)*
- "How should we handle permissions?" *(follow standard patterns)*
- "What tools should I use?" *(your call, not theirs)*

---

## 2: Capability Assessment & Gap Analysis

**After the user responds, assess fit and gaps together.** Be honest and specific. \
@@ -329,52 +276,10 @@ Example:
    configured yet. Do you have a Google service account or OAuth credentials \
    you can set up? If not, I can use CSV file output instead."

## 3: Design Graph and Create Draft
## 3: Design flowchart

Act like an experienced AI solution architect. Design the agent architecture:
- Goal: id, name, description, 3-5 success criteria, 2-4 constraints
- Nodes: **3-6 nodes** (HARD RULE: never fewer than 3, never more than 6). \
2 nodes is ALWAYS wrong — it means you under-decomposed the task. \
Use as many nodes as the use case requires, but don't create nodes without \
tools — merge them into nodes that do real work.
- Edges: on_success for linear, conditional for routing
- Lifecycle: ALWAYS have terminal_nodes

**MERGE nodes when:**
- Node has NO tools (pure LLM reasoning) → merge into predecessor/successor
- Node sets only 1 trivial output → collapse into predecessor

**SEPARATE nodes when:**
- Fundamentally different tool sets (e.g., search vs. write vs. validate)
- Fan-out parallelism (parallel branches MUST be separate)
- Different failure/retry semantics (e.g., gather can retry, transform cannot)
- Distinct phases of work (e.g., research, transform, validate, deliver)
- A node would need more than ~5 tools — split by responsibility

**Typical patterns (queen manages all user interaction):**
- 3 nodes: `gather → work → review`
- 4 nodes: `gather → analyze → transform → review`
- 5 nodes: `gather → research → transform → validate → deliver`
- WRONG: 2 nodes where everything is crammed into one giant node
- WRONG: 7 nodes where half have no tools and just do LLM reasoning

Read reference agents before designing:
list_agents()
read_file("exports/deep_research_agent/agent.py")
read_file("exports/deep_research_agent/nodes/__init__.py")

**IMPORTANT: Call save_agent_draft() early and often.** \
The flowchart is a live collaboration artifact, not a final deliverable. \
Call save_agent_draft() as soon as you have a rough shape — even before \
all details are finalized. Then **update it interactively** as the \
conversation progresses:

- After the user gives feedback ("add a validation step", "split that node") \
→ immediately call save_agent_draft() with the updated graph so they see \
the change reflected in the visualizer.
- After you refine your understanding of requirements → update the draft.
- When the user asks "what about X?" and it changes the design → update.
- Don't wait until everything is perfect — iterate visually with the user.
Act like an experienced AI solution architect. Design the agent architecture \
in the flowchart.

The flowchart is the shared canvas. Every structural change should be \
visible to the user immediately. The draft captures business logic \
@@ -382,45 +287,28 @@ visible to the user immediately. The draft captures business logic \
Include in each node: id, name, description, planned tools, \
input/output keys, and success criteria as high-level hints.

Each node is auto-classified into an ISO 5807 flowchart symbol type \
with a unique color. You can override auto-detection by setting \
`flowchart_type` explicitly on a node. Common types:
Each node is auto-classified into a flowchart symbol type with a unique \
color. You can override auto-detection by setting `flowchart_type` \
explicitly on a node. Available types:

**Core symbols:**
- **start** (green, stadium): Entry point / trigger
- **terminal** (red, stadium): End of flow
- **process** (blue, rectangle): Standard processing step
- **decision** (amber, diamond): Conditional branching
- **io** (purple, parallelogram): External data input/output
- **document** (blue-grey, wavy rect): Report or document generation
- **subprocess** (teal, subroutine): Delegated sub-agent / predefined process
- **preparation** (brown, hexagon): Setup / initialization step
- **manual_operation** (pink, trapezoid): Human-in-the-loop / manual review
- **delay** (orange, D-shape): Wait / throttle / cooldown
- **display** (cyan): Present results to user

**Data storage:**
- **database** (light green, cylinder): Database or data store
- **stored_data** (lime): Generic persistent data
- **internal_storage** (amber): In-memory / cache

**Flow operations:**
- **merge** (indigo, inv. triangle): Combine multiple inputs
- **extract** (indigo, triangle): Split or filter data
- **connector** (grey, circle): On-page link
- **offpage_connector** (dark grey, pentagon): Cross-page link

**Domain-specific:**
- **browser** (dark indigo, hexagon): GCU browser automation
- **subagent** (dark teal, subroutine): Planning-only sub-agent delegation \
(dissolved into parent's sub_agents at build time)
- **start** (sage green, stadium): Entry point / trigger
- **terminal** (dusty red, stadium): End of flow
- **process** (blue-gray, rectangle): Standard processing step
- **decision** (warm amber, diamond): Conditional branching
- **io** (dusty purple, parallelogram): External data input/output
- **document** (steel blue, wavy rect): Report or document generation
- **database** (muted teal, cylinder): Database or data store
- **subprocess** (dark cyan, subroutine): Delegated sub-agent / predefined process
- **browser** (deep blue, hexagon): GCU browser automation / sub-agent \
delegation. At build time, browser nodes are dissolved into the parent \
node's sub_agents list. Use for any GCU or sub-agent leaf node.

Auto-detection works well for most cases: first node → start, nodes with \
no outgoing edges → terminal, nodes with multiple conditional outgoing \
edges → decision, GCU nodes → browser, nodes mentioning "database" → \
database, nodes mentioning "report/document" → document, etc. Set \
flowchart_type explicitly only when auto-detection would be wrong. \
Note: `subagent` is never auto-detected — you must set it explicitly.
database, nodes mentioning "report/document" → document, I/O tools like \
send_email → io. Everything else defaults to process. Set flowchart_type \
explicitly only when auto-detection would be wrong.

## Decision Nodes — Planning-Only Conditional Branching

@@ -469,11 +357,11 @@ sub-agent nodes are **dissolved** into their parent node:
- At runtime, the parent node can invoke the sub-agent via `delegate_to_sub_agent`

**Rules for sub-agent nodes (INCLUDING GCU nodes):**
- Set `flowchart_type: "subagent"` explicitly (never auto-detected)
- GCU nodes are auto-detected as `flowchart_type: "browser"` (hexagon)
- Connect from the managing parent node to the sub-agent node
- Sub-agent nodes must be **leaf nodes** — NO outgoing edges to other nodes
- The sub-agent node's ID must match a real node ID in the runtime graph \
(the node it represents will be invokable as a sub-agent)
- At build time, browser/GCU nodes are dissolved into the parent's \
`sub_agents` list, just like decision nodes are dissolved into criteria

**CRITICAL: GCU nodes (`node_type: "gcu"`) are ALWAYS sub-agents.** \
They MUST NOT appear in the linear flow. NEVER chain GCU nodes \
@@ -481,50 +369,23 @@ sequentially (A → gcu1 → gcu2 → B is WRONG). Instead, attach them \
as leaves to the parent that orchestrates them:
```
WRONG: intake → gcu_find_prospect → gcu_scan_mutuals → check_results
WRONG: decision_node → gcu_node (as a yes/no branch)
RIGHT: intake (sub_agents: [gcu_find, gcu_scan]) → check_results
```
The parent node delegates to its GCU sub-agents and collects results. \
The main flow continues from the parent, not from the GCU node.
The main flow continues from the parent, not from the GCU node. \
GCU nodes MUST NOT be children of decision nodes — decision nodes \
dissolve at build time, which would leave the GCU as a dangling \
workflow step.

**How to show delegation in the flowchart:**
```
research → (deep_searcher)     ← subagent node, leaf
research → (deep_searcher)     ← browser/GCU node, leaf
research → [Enough results?]   ← decision node
```
After dissolution: `research` node gets `sub_agents: ["deep_searcher"]` \
and `success_criteria: "Enough results?"`.
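For illustration, a hypothetical draft fragment before and after dissolution; the exact draft schema is assumed here, not taken from the framework:

```python
# Hypothetical shapes, for illustration only; field names are assumed.
draft_research = {
    "id": "research",
    # Planning-only leaves: a browser/GCU sub-agent and a decision node.
    "edges": ["deep_searcher", "enough_results"],
}

# After confirm_and_build() dissolves the planning-only leaves:
built_research = {
    "id": "research",
    "sub_agents": ["deep_searcher"],
    "success_criteria": "Enough results?",
}
```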
After calling save_agent_draft(), also present an ASCII graph in your message \
alongside a brief summary of each node's purpose. The user sees both the \
interactive visualizer AND your textual explanation.

```
┌──────────────────────────┐
│ gather                   │
│ subagent: gcu_search     │
│ input: user_request      │
│ tools: load_data,        │
│        save_data         │
└────────────┬─────────────┘
             │ on_success
             ▼
┌──────────────────────────┐
│ work                     │
│ subagent: gcu_interact   │
│ tools: load_data,        │
│        save_data         │
└────────────┬─────────────┘
             │ on_success
             ▼
┌──────────────────────────┐
│ review                   │
│ tools: save_data         │
│        serve_file_to_user│
└────────────┬─────────────┘
             │ on_failure
             └──────► back to gather
```

If the worker agent starts from some initial input, that is okay. \
The queen (you) owns intake: you gather user requirements, then call \
`run_agent_with_input(task)` with a structured task description. \
@@ -636,8 +497,8 @@ nodes/__init__.py
- Goal description, success criteria values, constraint values, edge \
definitions, identity_prompt in agent.py
- CLI options in __main__.py
- For async entry points (timers/webhooks), add AsyncEntryPointSpec \
and AgentRuntimeConfig to agent.py
- For triggers (timers/webhooks), add entries to triggers.json in the \
agent's export directory

Do NOT modify or rewrite:
- Import statements at top of agent.py (they are correct)
@@ -672,12 +533,15 @@ _package_builder_knowledge = _shared_building_knowledge + _planning_knowledge +
_queen_identity_planning = """\
You are an experienced, responsible and curious Solution Architect. \
"Queen" is the internal alias. \
You ask smart questions to guide the user to the solution. \
You are in PLANNING phase — your job is to either: \
(a) understand what the user wants and design a new agent, or \
(b) diagnose issues with an existing agent, discuss a fix plan with the user, \
then transition to building to implement. \
You have read-only tools for exploration but no write/edit tools. \
Focus on conversation, research, and design.\
Focus on conversation, research, and design. \
You MUST use ask_user / ask_user_multiple tools for ALL questions — \
never ask questions in plain text without calling the tool.\
"""

_queen_identity_building = """\
@@ -735,11 +599,12 @@ document, database, subprocess, etc.) with unique shapes and colors. Set \
|
||||
flowchart_type on a node to override. Nodes need only an id. \
|
||||
Use decision nodes (flowchart_type: "decision", with decision_clause and \
|
||||
labeled yes/no edges) to make conditional branching explicit. \
|
||||
Use subagent nodes (flowchart_type: "subagent") as leaf nodes connected \
|
||||
to a parent to show sub-agent delegation visually.
|
||||
GCU/sub-agent nodes (node_type: "gcu") are auto-detected as browser \
|
||||
hexagons — connect them as leaf nodes to their parent.
|
||||
- confirm_and_build() — Record user confirmation of the draft. Dissolves \
|
||||
planning-only nodes (decision → predecessor criteria; subagent → predecessor \
|
||||
sub_agents list). Call this ONLY after the user explicitly approves via ask_user.
|
||||
planning-only nodes (decision → predecessor criteria; browser/GCU → \
|
||||
predecessor sub_agents list). Call this ONLY after the user explicitly \
|
||||
approves via ask_user.
|
||||
- initialize_and_build_agent(agent_name?, nodes?) — Scaffold the agent package \
|
||||
and transition to BUILDING phase. For new agents, this REQUIRES \
|
||||
save_agent_draft() + confirm_and_build() first. The draft metadata is used to \
|
||||
@@ -773,13 +638,14 @@ list_agent_checkpoints, get_agent_checkpoint
|
||||
- load_built_agent(agent_path) — Load the agent and switch to STAGING phase
|
||||
- list_credentials(credential_id?) — List authorized credentials
|
||||
- save_agent_draft(...) — **Re-draft the flowchart during building.** When \
|
||||
called during building, planning-only nodes (decision, subagent) are \
|
||||
called during building, planning-only nodes (decision, browser/GCU) are \
|
||||
dissolved automatically — no re-confirmation needed. The user sees the \
|
||||
updated flowchart immediately. Use this when you make structural changes \
|
||||
(add/remove nodes, change edges) so the flowchart stays in sync.
|
||||
- replan_agent() — Switch back to PLANNING phase. The previous draft is \
|
||||
restored (with decision/subagent nodes intact) so you can edit it. Use \
|
||||
when the user requests a major redesign that needs their approval.
|
||||
restored (with decision/browser nodes intact) so you can edit it. Use \
|
||||
when the user wants to change integrations, swap tools, rethink the \
|
||||
flow, or discuss any design changes before you build them.
|
||||
|
||||
When you finish building an agent, call load_built_agent(path) to stage it.
|
||||
"""
|
||||
@@ -795,6 +661,9 @@ The agent is loaded and ready to run. You can inspect it and launch it:
|
||||
- stop_worker_and_plan() — Go to PLANNING phase to discuss changes with the user \
|
||||
first (DEFAULT for most modification requests)
|
||||
- stop_worker_and_edit() — Go to BUILDING phase for immediate, specific fixes
|
||||
- set_trigger(trigger_id, trigger_type?, trigger_config?) — Activate a trigger (timer)
|
||||
- remove_trigger(trigger_id) — Deactivate a trigger
|
||||
- list_triggers() — List all triggers and their active/inactive status
|
||||
|
||||
You do NOT have write tools. To modify the agent, prefer \
|
||||
stop_worker_and_plan() unless the user gave a specific instruction.
|
||||
@@ -817,6 +686,15 @@ with the user first (DEFAULT for most modification requests)
|
||||
You do NOT have write tools. To modify the agent, prefer \
|
||||
stop_worker_and_plan() unless the user gave a specific instruction. \
|
||||
To just stop without modifying, call stop_worker().
|
||||
- stop_worker_and_edit() — Stop the worker and switch back to BUILDING phase
|
||||
- set_trigger(trigger_id, trigger_type?, trigger_config?) — Activate a trigger (timer)
|
||||
- remove_trigger(trigger_id) — Deactivate a trigger
|
||||
- list_triggers() — List all triggers and their active/inactive status
|
||||
|
||||
You do NOT have write tools or agent construction tools. \
|
||||
If you need to modify the agent, call stop_worker_and_edit() to switch back \
|
||||
to BUILDING phase. To stop the worker and ask the user what to do next, call \
|
||||
stop_worker() to return to STAGING phase.
|
||||
"""
|
||||
|
||||
# -- Behavior shared across all phases --
|
||||
@@ -824,6 +702,15 @@ To just stop without modifying, call stop_worker().
|
||||
_queen_behavior_always = """
|
||||
# Behavior
|
||||
|
||||
## Images attached by the user
|
||||
|
||||
Users can attach images directly to their chat messages. When you see an \
|
||||
image in the conversation, analyze it using your native vision capability — \
|
||||
do NOT say you cannot see images or that you lack access to files. The image \
|
||||
is embedded in the message; no tool call is needed to view it. Describe what \
|
||||
you see, answer questions about it, and use the visual content to inform your \
|
||||
response just as you would text.
|
||||
|
||||
## CRITICAL RULE — ask_user / ask_user_multiple
|
||||
|
||||
Every response that ends with a question, a prompt, or expects user \
|
||||
@@ -833,7 +720,8 @@ input unless you call one of these tools. You MUST call it as the LAST \
|
||||
action in your response.
|
||||
|
||||
NEVER end a response with a question in text without calling ask_user. \
|
||||
NEVER rely on the user seeing your text and replying — call ask_user.
|
||||
NEVER rely on the user seeing your text and replying — call ask_user. \
|
||||
NEVER list options as text bullets — the tool renders interactive buttons.
|
||||
|
||||
**When you have 2+ questions**, use ask_user_multiple instead of ask_user. \
|
||||
This renders all questions at once so the user answers in one interaction \
|
||||
@@ -847,21 +735,36 @@ appearing. Keep your text to a brief context/intro sentence only.
|
||||
Always provide 2-4 short options that cover the most likely answers. \
|
||||
The user can always type a custom response.
|
||||
|
||||
### WRONG — never do this:
|
||||
```
|
||||
I need a few details:
|
||||
- Documentation Source: Where should the agent look?
|
||||
- Trigger: Should the agent poll or get a URL?
|
||||
- Review Channel: Slack, Email, or Sheets?
|
||||
|
||||
Which of these would you like to define first?
|
||||
1. Documentation source
|
||||
2. Trigger
|
||||
3. Review channel
|
||||
```
|
||||
This lists questions as plain text with NO tool call — the user has no \
|
||||
interactive widget and the system doesn't know you're waiting for input.
|
||||
|
||||
### RIGHT — always do this:
|
||||
Write a brief intro (1-2 sentences), then call the tool:
|
||||
- ask_user_multiple(questions=[
|
||||
{"id": "docs", "prompt": "Where should the agent find answers?",
|
||||
"options": ["GitHub repo", "Documentation website", "Internal wiki"]},
|
||||
{"id": "trigger", "prompt": "How should questions be discovered?",
|
||||
"options": ["Poll search automatically", "I provide a URL"]},
|
||||
{"id": "review", "prompt": "Where to send drafted responses?",
|
||||
"options": ["Slack", "Email", "Google Sheets"]}
|
||||
])
|
||||
|
||||
Examples (single question):
|
||||
- ask_user("What do you need?",
|
||||
["Build a new agent", "Run the loaded worker", "Help with code"])
|
||||
- ask_user("Ready to proceed?",
|
||||
["Yes, go ahead", "Let me change something"])
|
||||
|
||||
Example (multiple questions — ALWAYS use ask_user_multiple):
|
||||
- ask_user_multiple(questions=[
|
||||
{"id": "goal", "prompt": "What should this agent do?"},
|
||||
{"id": "tools", "prompt": "Which integrations?",
|
||||
"options": ["Slack", "Gmail", "Google Sheets"]},
|
||||
{"id": "schedule", "prompt": "How often should it run?",
|
||||
"options": ["On demand", "Every hour", "Daily"]}
|
||||
])
|
||||
|
||||
## Greeting
|
||||
|
||||
When the user greets you, respond concisely (under 10 lines) with worker \
|
||||
@@ -956,6 +859,11 @@ You keep a diary. Use write_to_diary() when something worth remembering \
|
||||
happens: a pipeline went live, the user shared something important, a goal \
|
||||
was reached or abandoned. Write in first person, as you actually experienced \
|
||||
it. One or two paragraphs is enough.
|
||||
|
||||
Use recall_diary() to look up past diary entries when the user asks about \
|
||||
previous sessions ("what happened yesterday?", "what did we work on last \
|
||||
week?") or when you need past context to make a decision. You can filter by \
|
||||
keyword and control how far back to search.
|
||||
"""
|
||||
|
||||
_queen_behavior_always = _queen_behavior_always + _queen_memory_instructions
|
||||
@@ -986,10 +894,30 @@ flowchart immediately.
|
||||
|
||||
- **Minor changes** (add a node, rename, adjust edges): call \
|
||||
save_agent_draft() with the updated graph and keep building.
|
||||
- **Major redesign** (user requests fundamental restructuring): call \
|
||||
replan_agent() to go back to planning. The previous draft is restored \
|
||||
so you can edit it with the user rather than starting from scratch. \
|
||||
After they approve, confirm_and_build() → continue building.
|
||||
- **User wants to discuss, redesign, or change integrations/tools**: call \
|
||||
replan_agent(). The previous draft is restored so you can edit it with \
|
||||
the user. After they approve, confirm_and_build() → continue building.
|
||||
|
||||
**When to call replan_agent():** Changing which tools or integrations a \
|
||||
node uses, swapping data sources, rethinking the flow, or any time the \
|
||||
user says "replan", "go back", "let's redesign", "change the approach", \
|
||||
"use a different tool/API", etc. Do NOT stay in building to handle these \
|
||||
— switch to planning so the user can review and approve the new design.
|
||||
|
||||
## CRITICAL — Graph topology errors require replanning, not code edits
|
||||
|
||||
If you discover that the agent graph has structural problems — GCU nodes \
|
||||
in the linear flow, missing edges, wrong node connections, incorrect \
|
||||
sub-agent assignments — you MUST call replan_agent() and fix the draft. \
|
||||
Do NOT attempt to fix topology by editing agent.py directly. The graph \
|
||||
structure is defined by the draft → dissolution → code-gen pipeline. \
|
||||
Editing code to rewire nodes bypasses the flowchart and creates drift \
|
||||
between what the user sees and what the code does.
|
||||
|
||||
**WRONG:** "Let me fix agent.py to remove GCU nodes from edges..."
|
||||
**RIGHT:** Call replan_agent(), fix the draft with save_agent_draft(), \
|
||||
get user approval, then confirm_and_build() → the corrected code is \
|
||||
generated automatically.
|
||||
"""
|
||||
|
||||
# -- STAGING phase behavior --
|
||||
@@ -1067,6 +995,33 @@ Use stop_worker_and_edit() only when:
- The user gave a specific, concrete instruction ("add save_data to the gather node")
- You already discussed the fix in a previous planning session
- The change is trivial and unambiguous (rename, toggle a flag)

## Trigger Management

Use list_triggers() to see available triggers from the loaded worker.
Use set_trigger(trigger_id) to activate a timer. Once active, triggers \
fire periodically and inject [TRIGGER: ...] messages so you can decide \
whether to call run_agent_with_input(task).

### When the user says "Enable trigger <id>" (or clicks Enable in the UI):

1. Call get_worker_status(focus="memory") to check if the worker has \
saved configuration (rules, preferences, settings from a prior run).
2. If memory contains saved config: compose a task string from it \
(e.g. "Process inbox emails using saved rules") and call \
set_trigger(trigger_id, task="...") immediately. Tell the user the \
trigger is now active and what schedule it uses. Do NOT ask them to \
provide the task — you derive it from memory.
3. If memory is empty (no prior run): tell the user the agent needs to \
run once first so its configuration can be saved. Offer to run it now. \
Once the worker finishes, enable the trigger.
4. If the user just provided config this session (rules/task context \
already in conversation): use that directly, no memory lookup needed. \
Enable the trigger immediately.

Never ask "what should the task be?" when enabling a trigger for an \
agent with a clear purpose. The task string is a brief description of \
what the worker does, derived from its saved state or your current context.
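Illustrative example (trigger id and task are placeholders): \
get_worker_status(focus="memory") shows saved inbox rules → \
set_trigger("inbox-check", task="Process inbox emails using saved rules").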
"""
|
||||
|
||||
# -- RUNNING phase behavior --
|
||||
@@ -1081,12 +1036,24 @@ NOT ask the user directly.
|
||||
You wake up when:
|
||||
- The user explicitly addresses you
|
||||
- A worker escalation arrives (`[WORKER_ESCALATION_REQUEST]`)
|
||||
- An escalation ticket arrives from the judge
|
||||
- The worker finishes (`[WORKER_TERMINAL]`)
|
||||
|
||||
If the user asks for progress, call get_worker_status() ONCE and report. \
|
||||
If the summary mentions issues, follow up with get_worker_status(focus="issues").
|
||||
|
||||
## Subagent delegations (browser automation, GCU)
|
||||
|
||||
When the worker delegates to a subagent (e.g., GCU browser automation), expect it \
|
||||
to take 2-5 minutes. During this time:
|
||||
- Progress will show 0% — this is NORMAL. The subagent only calls set_output at the end.
|
||||
- Check get_worker_status(focus="full") for "subagent_activity" — this shows the \
|
||||
subagent's latest reasoning text and confirms it is making real progress.
|
||||
- Do NOT conclude the subagent is stuck just because progress is 0% or because \
|
||||
you see repeated browser_click/browser_snapshot calls — that is the expected \
|
||||
pattern for web scraping.
|
||||
- Only intervene if: the subagent has been running for 5+ minutes with no new \
|
||||
subagent_activity updates, OR the judge escalates.
|
||||
|
||||
## Handling worker termination ([WORKER_TERMINAL])
|
||||
|
||||
When you receive a `[WORKER_TERMINAL]` event, the worker has finished:
|
||||
@@ -1115,19 +1082,30 @@ IMPORTANT: Only auto-handle if the user has NOT explicitly told you how to handl
escalations. If the user gave you instructions (e.g., "just retry on errors", \
"skip any auth issues"), follow those instructions instead.

CRITICAL — escalation relay protocol:
When an escalation requires user input (auth blocks, human review), the worker \
or its subagent is BLOCKED and waiting for your response. You MUST follow this \
exact two-step sequence:
Step 1: call ask_user() to get the user's answer.
Step 2: call inject_worker_message() with the user's answer IMMEDIATELY after.
If you skip Step 2, the worker/subagent stays blocked FOREVER and the task hangs. \
NEVER respond to the user without also calling inject_worker_message() to unblock \
the worker. Even if the user says "skip" or "cancel", you must still relay that \
decision via inject_worker_message() so the worker can clean up.
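Example relay (wording illustrative): ask_user("The worker is blocked on Gmail \
login. How should I proceed?", ["Provide credentials", "Skip this task"]) → the \
user picks "Skip this task" → inject_worker_message("User chose to skip this \
task; clean up and continue with the remaining work.").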
**Auth blocks / credential issues:**
|
||||
- ALWAYS ask the user (unless user explicitly told you how to handle this).
|
||||
- The worker cannot proceed without valid credentials.
|
||||
- Explain which credential is missing or invalid.
|
||||
- Use ask_user to get guidance: "Provide credentials", "Skip this task", "Stop and edit agent"
|
||||
- Use inject_worker_message() to relay user decisions back to the worker.
|
||||
- Step 1: ask_user for guidance — "Provide credentials", "Skip this task", "Stop and edit agent"
|
||||
- Step 2: inject_worker_message() with the user's response to unblock the worker.
|
||||
|
||||
**Need human review / approval:**
|
||||
- ALWAYS ask the user (unless user explicitly told you how to handle this).
|
||||
- The worker is explicitly requesting human judgment.
|
||||
- Present the context clearly (what decision is needed, what are the options).
|
||||
- Use ask_user with the actual decision options.
|
||||
- Use inject_worker_message() to relay user decisions back to the worker.
|
||||
- Step 1: ask_user with the actual decision options.
|
||||
- Step 2: inject_worker_message() with the user's decision to unblock the worker.
|
||||
|
||||
**Errors / unexpected failures:**
|
||||
- Explain what went wrong in plain terms.
|
||||
@@ -1135,6 +1113,7 @@ escalations. If the user gave you instructions (e.g., "just retry on errors", \
|
||||
- Or offer: "Diagnose the issue" → use stop_worker_and_plan() to investigate first.
|
||||
- Or offer: "Retry as-is", "Skip this task", "Abort run"
|
||||
- (Skip asking if user explicitly told you to auto-retry or auto-skip errors.)
|
||||
- If the escalation had wait_for_response: inject_worker_message() with the decision.
|
||||
|
||||
**Informational / progress updates:**
|
||||
- Acknowledge briefly and let the worker continue.
|
||||
@@ -1159,6 +1138,23 @@ When the user asks to fix, change, modify, or update the loaded worker \
|
||||
**Default: use stop_worker_and_plan().** Most modification requests need \
|
||||
discussion first. Only use stop_worker_and_edit() when the user gave a \
|
||||
specific, unambiguous instruction or you already agreed on the fix.
|
||||
|
||||
## Trigger Handling

You will receive [TRIGGER: ...] messages when a scheduled timer fires. \
These are framework-level signals, not user messages.

Rules:
- Check get_worker_status() before calling run_agent_with_input(task). If the worker \
is already RUNNING, decide: skip this trigger, or note it for after completion.
- When multiple [TRIGGER] messages arrive at once, read them all before acting. \
Batch your response — do not call run_agent_with_input() once per trigger.
- If a trigger fires but the task no longer makes sense (e.g., user changed \
config since last run), skip it and inform the user.
- Never disable a trigger without telling the user. Use remove_trigger() only \
when explicitly asked or when the trigger is clearly obsolete.
- When the user asks to remove or disable a trigger, you MUST call remove_trigger(trigger_id). \
Never just say "it's removed" without actually calling the tool.
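Example (trigger id and task are illustrative): two [TRIGGER: inbox-check] \
messages arrive while the worker is idle → call get_worker_status() once, then \
call run_agent_with_input("Process inbox emails using saved rules") once; a \
single run covers both firings.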
"""
|
||||
|
||||
# -- Backward-compatible composed versions (used by queen_node.system_prompt default) --
|
||||
@@ -1222,8 +1218,8 @@ ticket_triage_node = NodeSpec(
|
||||
id="ticket_triage",
|
||||
name="Ticket Triage",
|
||||
description=(
|
||||
"Queen's triage node. Receives an EscalationTicket from the Health Judge "
|
||||
"via event-driven entry point and decides: dismiss or notify the operator."
|
||||
"Queen's triage node. Receives an EscalationTicket via event-driven "
|
||||
"entry point and decides: dismiss or notify the operator."
|
||||
),
|
||||
node_type="event_loop",
|
||||
client_facing=True, # Operator can chat with queen once connected (Ctrl+Q)
|
||||
@@ -1237,8 +1233,8 @@ ticket_triage_node = NodeSpec(
|
||||
),
|
||||
tools=["notify_operator"],
|
||||
system_prompt="""\
|
||||
You are the Queen. The Worker Health Judge has escalated a worker \
|
||||
issue to you. The ticket is in your memory under key "ticket". Read it carefully.
|
||||
You are the Queen. A worker health issue has been escalated to you. \
|
||||
The ticket is in your memory under key "ticket". Read it carefully.
|
||||
|
||||
## Dismiss criteria — do NOT call notify_operator:
|
||||
- severity is "low" AND steps_since_last_accept < 8
|
||||
@@ -1277,7 +1273,7 @@ queen_node = NodeSpec(
|
||||
description=(
|
||||
"User's primary interactive interface with full coding capability. "
|
||||
"Can build agents directly or delegate to the worker. Manages the "
|
||||
"worker agent lifecycle and triages health escalations from the judge."
|
||||
"worker agent lifecycle."
|
||||
),
|
||||
node_type="event_loop",
|
||||
client_facing=True,
|
||||
|
||||
@@ -50,6 +50,23 @@ def read_episodic_memory(d: date | None = None) -> str:
|
||||
return path.read_text(encoding="utf-8").strip() if path.exists() else ""
|
||||
|
||||
|
||||
def _find_recent_episodic(lookback: int = 7) -> tuple[date, str] | None:
|
||||
"""Find the most recent non-empty episodic memory within *lookback* days."""
|
||||
from datetime import timedelta
|
||||
|
||||
today = date.today()
|
||||
for offset in range(lookback):
|
||||
d = today - timedelta(days=offset)
|
||||
content = read_episodic_memory(d)
|
||||
if content:
|
||||
return d, content
|
||||
return None
|
||||
|
||||
|
||||
# Budget (in characters) for episodic memory in the system prompt.
|
||||
_EPISODIC_CHAR_BUDGET = 6_000
|
||||
|
||||
|
||||
def format_for_injection() -> str:
|
||||
"""Format cross-session memory for system prompt injection.
|
||||
|
||||
@@ -57,7 +74,7 @@ def format_for_injection() -> str:
|
||||
session with only the seed template).
|
||||
"""
|
||||
semantic = read_semantic_memory()
|
||||
episodic = read_episodic_memory()
|
||||
recent = _find_recent_episodic()
|
||||
|
||||
# Suppress injection if semantic is still just the seed template
|
||||
if semantic and semantic.startswith("# My Understanding of the User\n\n*No sessions"):
|
||||
@@ -66,9 +83,18 @@ def format_for_injection() -> str:
|
||||
parts: list[str] = []
|
||||
if semantic:
|
||||
parts.append(semantic)
|
||||
if episodic:
|
||||
today_str = date.today().strftime("%B %-d, %Y")
|
||||
parts.append(f"## Today — {today_str}\n\n{episodic}")
|
||||
|
||||
if recent:
|
||||
d, content = recent
|
||||
# Trim oversized episodic entries to keep the prompt manageable
|
||||
if len(content) > _EPISODIC_CHAR_BUDGET:
|
||||
content = content[:_EPISODIC_CHAR_BUDGET] + "\n\n…(truncated)"
|
||||
today = date.today()
|
||||
if d == today:
|
||||
label = f"## Today — {d.strftime('%B %-d, %Y')}"
|
||||
else:
|
||||
label = f"## {d.strftime('%B %-d, %Y')}"
|
||||
parts.append(f"{label}\n\n{content}")
|
||||
|
||||
if not parts:
|
||||
return ""
|
||||
@@ -100,7 +126,8 @@ def append_episodic_entry(content: str) -> None:
|
||||
"""
|
||||
ep_path = episodic_memory_path()
|
||||
ep_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
today_str = date.today().strftime("%B %-d, %Y")
today = date.today()
today_str = f"{today.strftime('%B')} {today.day}, {today.year}"
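# f-string day formatting avoids strftime("%-d"), which is platform-specific
# (a glibc extension, unsupported on Windows).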
timestamp = datetime.now().strftime("%H:%M")
|
||||
if not ep_path.exists():
|
||||
header = f"# {today_str}\n\n"
|
||||
@@ -110,6 +137,32 @@ def append_episodic_entry(content: str) -> None:
|
||||
with ep_path.open("a", encoding="utf-8") as f:
|
||||
f.write(block)
|
||||
|
||||
# Immediately create a bare index entry (no enrichment — that happens at
|
||||
# consolidation time). Wrapped so any indexing failure never interrupts
|
||||
# the diary write.
|
||||
try:
|
||||
_post_append_index_hook(today.strftime("%Y-%m-%d"), timestamp, content.strip())
|
||||
except Exception:
|
||||
logger.warning("queen_memory: index hook failed on diary append", exc_info=True)
|
||||
|
||||
|
||||
def _post_append_index_hook(date_str: str, timestamp: str, prose: str) -> None:
|
||||
"""Create a bare MemoryEntry in the index for a freshly-appended diary section."""
|
||||
from framework.agents.queen.queen_memory_index import (
|
||||
get_entry,
|
||||
index_entry_from_diary_section,
|
||||
load_index,
|
||||
put_entry,
|
||||
save_index,
|
||||
)
|
||||
|
||||
index = load_index()
|
||||
entry_id = f"{date_str}:{timestamp}"
|
||||
if get_entry(index, entry_id) is None:
|
||||
entry = index_entry_from_diary_section(date_str, timestamp, prose)
|
||||
put_entry(index, entry)
|
||||
save_index(index)
|
||||
|
||||
|
||||
def seed_if_missing() -> None:
|
||||
"""Create MEMORY.md with a blank template if it doesn't exist yet."""
|
||||
@@ -199,7 +252,11 @@ def read_session_context(session_dir: Path, max_messages: int = 80) -> str:
|
||||
elif content:
|
||||
label = "user" if role == "user" else "queen"
|
||||
lines.append(f"[{label}]: {content[:600]}")
|
||||
except (KeyError, TypeError) as exc:
|
||||
logger.debug("Skipping malformed conversation message: %s", exc)
|
||||
continue
|
||||
except Exception:
|
||||
logger.warning("Unexpected error parsing conversation message", exc_info=True)
|
||||
continue
|
||||
if lines:
|
||||
parts.append("## Conversation\n\n" + "\n".join(lines))
|
||||
@@ -280,9 +337,10 @@ async def consolidate_queen_memory(
|
||||
llm: LLMProvider instance (must support acomplete()).
|
||||
"""
|
||||
try:
|
||||
logger.info("queen_memory: consolidation triggered for session %s", session_id)
|
||||
session_context = read_session_context(session_dir)
|
||||
if not session_context:
|
||||
logger.debug("queen_memory: no session context, skipping consolidation")
|
||||
logger.info("queen_memory: no session context found, skipping")
|
||||
return
|
||||
|
||||
logger.info("queen_memory: consolidating memory for session %s ...", session_id)
|
||||
@@ -299,7 +357,8 @@ async def consolidate_queen_memory(
|
||||
|
||||
existing_semantic = read_semantic_memory()
|
||||
today_journal = read_episodic_memory()
|
||||
today_str = date.today().strftime("%B %-d, %Y")
|
||||
today = date.today()
|
||||
today_str = f"{today.strftime('%B')} {today.day}, {today.year}"
|
||||
adapt_path = session_dir / "data" / "adapt.md"
|
||||
|
||||
user_msg = (
|
||||
@@ -356,6 +415,14 @@ async def consolidate_queen_memory(
|
||||
len(diary_entry),
|
||||
)
|
||||
|
||||
# Update the memory index for today's entries: enrich, embed, link,
|
||||
# and optionally evolve neighbour metadata. Wrapped so failures never
|
||||
# block or disrupt the main consolidation path.
|
||||
try:
|
||||
await _update_index_after_consolidation(today.strftime("%Y-%m-%d"), llm)
|
||||
except Exception:
|
||||
logger.warning("queen_memory: index update failed after consolidation", exc_info=True)
|
||||
|
||||
except Exception:
|
||||
tb = traceback.format_exc()
|
||||
logger.exception("queen_memory: consolidation failed")
|
||||
@@ -367,5 +434,70 @@ async def consolidate_queen_memory(
|
||||
f"session: {session_id}\ntime: {datetime.now().isoformat()}\n\n{tb}",
|
||||
encoding="utf-8",
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
except OSError:
|
||||
pass # Cannot write error file; original exception already logged
|
||||
|
||||
|
||||
async def _update_index_after_consolidation(date_str: str, llm: object) -> None:
|
||||
"""Enrich, embed, link, and evolve today's memory index entries.
|
||||
|
||||
Called after the main semantic/diary LLM writes complete. All failures
|
||||
are silently logged — this function must never propagate exceptions.
|
||||
"""
|
||||
from framework.agents.queen.queen_memory_index import (
|
||||
embed_text,
|
||||
embeddings_enabled,
|
||||
get_embed_model,
|
||||
link_entry,
|
||||
load_index,
|
||||
maybe_evolve_neighbors,
|
||||
put_entry,
|
||||
rebuild_index_for_date,
|
||||
save_index,
|
||||
)
|
||||
|
||||
# Phase 1 — ensure all diary sections are in the index and enriched
|
||||
await rebuild_index_for_date(date_str, llm=llm)
|
||||
|
||||
if not embeddings_enabled():
|
||||
logger.debug("queen_memory: embeddings not configured, skipping embed/link/evolve")
|
||||
return # Phases 2-5 require embeddings
|
||||
|
||||
logger.info("queen_memory: running embed/link/evolve for %s", date_str)
|
||||
# Phases 2-5 — embed, link, evolve any entries still missing vectors
|
||||
index = load_index()
|
||||
entries = index.get("entries", {})
|
||||
newly_embedded: list[str] = []
|
||||
|
||||
for entry_id, raw in entries.items():
|
||||
if not entry_id.startswith(date_str):
|
||||
continue
|
||||
if raw.get("embedding") is not None:
|
||||
continue
|
||||
prose = raw.get("summary", "")
|
||||
if not prose:
|
||||
continue
|
||||
vec = await embed_text(prose)
|
||||
if vec is not None:
|
||||
raw["embedding"] = vec
|
||||
index["embed_model"] = get_embed_model()
|
||||
index["embed_dim"] = len(vec)
|
||||
newly_embedded.append(entry_id)
|
||||
|
||||
if newly_embedded:
|
||||
save_index(index)
|
||||
|
||||
# Phase 3 — cross-reference linking for newly embedded entries
|
||||
for entry_id in newly_embedded:
|
||||
linked = link_entry(index, entry_id)
|
||||
# Phase 5 — memory evolution for top neighbours
|
||||
if linked:
|
||||
await maybe_evolve_neighbors(entry_id, linked, index, llm)
|
||||
|
||||
if newly_embedded:
|
||||
save_index(index)
|
||||
logger.debug(
|
||||
"queen_memory: indexed %d new embedding(s) for %s",
|
||||
len(newly_embedded),
|
||||
date_str,
|
||||
)
|
||||
|
||||
@@ -0,0 +1,788 @@
|
||||
"""Structured index for queen episodic memory entries.
|
||||
|
||||
Attaches rich metadata, embedding vectors, cross-reference links, and
|
||||
retrieval counts to every diary entry. The index lives at:
|
||||
|
||||
~/.hive/queen/memories/index.json
|
||||
|
||||
It is a *sidecar* to the existing markdown diary files — those files are
|
||||
never modified by this module.
|
||||
|
||||
Configuration
|
||||
-------------
|
||||
Set ``HIVE_EMBED_MODEL`` to an embedding model name supported by litellm
|
||||
(e.g. ``text-embedding-3-small``) to enable semantic search. When unset
|
||||
the system degrades gracefully: enrichment (keywords/tags/category) still
|
||||
works via the consolidation LLM, and recall_diary falls back to substring
|
||||
matching.
|
||||
|
||||
Phases implemented
|
||||
------------------
|
||||
Phase 1 - Index I/O + semantic enrichment (keywords, category, tags)
|
||||
Phase 2 - Embedding storage + semantic search via cosine similarity
|
||||
Phase 3 - Cross-reference linking (bidirectional related[] links)
|
||||
Phase 4 - Importance tracking (retrieval counts + recency decay)
|
||||
Phase 5 - Memory evolution (LLM-driven neighbour metadata refinement)
|
||||
"""
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import re
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from datetime import date, datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Category vocabulary — fixed to prevent unbounded drift
|
||||
# ---------------------------------------------------------------------------
|
||||
_CATEGORIES = [
|
||||
"agent_build",
|
||||
"infrastructure",
|
||||
"user_preference",
|
||||
"communication_style",
|
||||
"diagnostic_learning",
|
||||
"milestone",
|
||||
"pipeline",
|
||||
"data_processing",
|
||||
"other",
|
||||
]
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# MemoryEntry dataclass
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
|
||||
class MemoryEntry:
|
||||
"""Rich metadata record for a single diary section (one ### HH:MM block)."""
|
||||
|
||||
# Identity — "YYYY-MM-DD:HH:MM" matches the diary ### timestamp
|
||||
id: str
|
||||
date: str # "YYYY-MM-DD"
|
||||
timestamp: str # "HH:MM"
|
||||
|
||||
# Content preview (not full prose — just enough for search result context)
|
||||
summary: str # first 300 chars of the section's prose
|
||||
|
||||
# Phase 1 — semantic enrichment
|
||||
keywords: list[str] = field(default_factory=list)
|
||||
category: str = "other"
|
||||
tags: list[str] = field(default_factory=list)
|
||||
|
||||
# Phase 3 — cross-reference links
|
||||
related: list[str] = field(default_factory=list)
|
||||
|
||||
# Phase 4 — importance tracking
|
||||
retrieval_count: int = 0
|
||||
last_retrieved: str | None = None # ISO-format datetime string
|
||||
|
||||
# Phase 2 — embedding vector (None when HIVE_EMBED_MODEL is unset)
|
||||
embedding: list[float] | None = None
|
||||
|
||||
# Whether enrichment has been applied (used to skip re-enrichment)
|
||||
enriched: bool = False
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Index I/O
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_EMPTY_INDEX: dict[str, Any] = {
|
||||
"version": 1,
|
||||
"embed_model": None,
|
||||
"embed_dim": None,
|
||||
"entries": {},
|
||||
}
|
||||
|
||||
|
||||
def _queen_memories_dir() -> Path:
|
||||
return Path.home() / ".hive" / "queen" / "memories"
|
||||
|
||||
|
||||
def index_path() -> Path:
|
||||
return _queen_memories_dir() / "index.json"
|
||||
|
||||
|
||||
def load_index() -> dict[str, Any]:
|
||||
"""Load the index from disk. Returns a fresh empty index on any error."""
|
||||
p = index_path()
|
||||
if not p.exists():
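# Return a fresh copy so callers never mutate the shared _EMPTY_INDEX template.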
return {**_EMPTY_INDEX, "entries": {}}
try:
|
||||
data = json.loads(p.read_text(encoding="utf-8"))
|
||||
if not isinstance(data, dict) or "entries" not in data:
|
||||
raise ValueError("Malformed index")
|
||||
return data
|
||||
except Exception as exc:
|
||||
logger.warning("queen_memory_index: index.json unreadable (%s), starting fresh", exc)
|
||||
return {**_EMPTY_INDEX, "entries": {}}
|
||||
|
||||
|
||||
def save_index(index: dict[str, Any]) -> None:
|
||||
"""Atomically write the index to disk (tmp file → rename)."""
|
||||
p = index_path()
|
||||
p.parent.mkdir(parents=True, exist_ok=True)
|
||||
tmp = p.with_suffix(".json.tmp")
|
||||
tmp.write_text(json.dumps(index, ensure_ascii=False), encoding="utf-8")
|
||||
tmp.replace(p)
|
||||
|
||||
|
||||
def get_entry(index: dict[str, Any], entry_id: str) -> MemoryEntry | None:
|
||||
"""Deserialise one entry from the index dict, or None if missing."""
|
||||
raw = index.get("entries", {}).get(entry_id)
|
||||
if raw is None:
|
||||
return None
|
||||
try:
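# Keep only known dataclass fields so entries written by other index versions still load.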
return MemoryEntry(**{k: raw[k] for k in MemoryEntry.__dataclass_fields__ if k in raw})
|
||||
except Exception as exc:
|
||||
logger.warning("queen_memory_index: failed to deserialise entry %s: %s", entry_id, exc)
|
||||
return None
|
||||
|
||||
|
||||
def put_entry(index: dict[str, Any], entry: MemoryEntry) -> None:
|
||||
"""Serialise and insert/overwrite one entry in the index dict (mutates in place)."""
|
||||
index.setdefault("entries", {})[entry.id] = asdict(entry)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Configuration helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def get_embed_model() -> str | None:
|
||||
"""Return the configured embedding model (e.g. 'openai/text-embedding-3-small').
|
||||
|
||||
Reads from the ``embedding`` section of ~/.hive/configuration.json.
|
||||
Falls back to the ``HIVE_EMBED_MODEL`` env var for backward compatibility.
|
||||
"""
|
||||
from framework.config import get_embed_model as _cfg_get_embed_model
|
||||
|
||||
return _cfg_get_embed_model()
|
||||
|
||||
|
||||
def embeddings_enabled() -> bool:
|
||||
return bool(get_embed_model())
|
||||
|
||||
|
||||
def _detect_model_change(index: dict[str, Any]) -> bool:
|
||||
"""Return True if the stored embed model differs from the current env var."""
|
||||
current = get_embed_model()
|
||||
stored = index.get("embed_model")
|
||||
return current != stored
|
||||
|
||||
|
||||
def _clear_embeddings(index: dict[str, Any]) -> None:
|
||||
"""Clear all stored vectors when the embedding model has changed."""
|
||||
for raw in index.get("entries", {}).values():
|
||||
raw["embedding"] = None
|
||||
index["embed_model"] = get_embed_model()
|
||||
index["embed_dim"] = None
|
||||
logger.info("queen_memory_index: embedding model changed — cleared cached vectors")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Embedding calls (Phase 2)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _embed_kwargs() -> dict[str, Any]:
|
||||
"""Build the kwargs dict for litellm.aembedding() from configuration."""
|
||||
from framework.config import get_embed_api_base, get_embed_api_key
|
||||
|
||||
kwargs: dict[str, Any] = {}
|
||||
api_key = get_embed_api_key()
|
||||
if api_key:
|
||||
kwargs["api_key"] = api_key
|
||||
api_base = get_embed_api_base()
|
||||
if api_base:
|
||||
kwargs["api_base"] = api_base
|
||||
return kwargs
|
||||
|
||||
|
||||
async def embed_text(text: str) -> list[float] | None:
|
||||
"""Embed *text* via litellm.aembedding().
|
||||
|
||||
Returns None (with a WARNING log) on any failure or when no embedding
|
||||
model is configured.
|
||||
"""
|
||||
model = get_embed_model()
|
||||
if not model:
|
||||
return None
|
||||
try:
|
||||
import litellm # already a project dependency
|
||||
|
||||
logger.info("queen_memory_index: embedding text (%d chars) via %s", len(text), model)
|
||||
resp = await litellm.aembedding(model=model, input=[text], **_embed_kwargs())
|
||||
vec: list[float] = resp.data[0]["embedding"]
|
||||
logger.info("queen_memory_index: embedding complete (dim=%d)", len(vec))
|
||||
return vec
|
||||
except Exception as exc:
|
||||
logger.warning("queen_memory_index: embed_text failed (%s)", exc)
|
||||
return None
|
||||
|
||||
|
||||
async def embed_batch(texts: list[str]) -> list[list[float] | None]:
|
||||
"""Embed a list of texts, returning a parallel list of vectors (or None)."""
|
||||
model = get_embed_model()
|
||||
if not model:
|
||||
return [None] * len(texts)
|
||||
try:
|
||||
import litellm
|
||||
|
||||
logger.info(
|
||||
"queen_memory_index: batch embedding %d text(s) via %s", len(texts), model
|
||||
)
|
||||
resp = await litellm.aembedding(model=model, input=texts, **_embed_kwargs())
|
||||
vecs = [item["embedding"] for item in resp.data]
|
||||
logger.info(
|
||||
"queen_memory_index: batch embedding complete (dim=%d)", len(vecs[0]) if vecs else 0
|
||||
)
|
||||
return vecs
|
||||
except Exception as exc:
|
||||
logger.warning("queen_memory_index: embed_batch failed (%s), retrying individually", exc)
|
||||
# Fall back to individual calls
|
||||
results: list[list[float] | None] = []
|
||||
for t in texts:
|
||||
results.append(await embed_text(t))
|
||||
return results
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Vector math (Phase 2)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def cosine_similarity(a: list[float] | None, b: list[float] | None) -> float:
|
||||
"""Return cosine similarity in [0, 1]. Returns 0.0 on null or zero-norm inputs."""
|
||||
if not a or not b:
|
||||
return 0.0
|
||||
try:
|
||||
import numpy as np # already a project dependency
|
||||
|
||||
va = np.array(a, dtype=np.float32)
|
||||
vb = np.array(b, dtype=np.float32)
|
||||
norm_a = float(np.linalg.norm(va))
|
||||
norm_b = float(np.linalg.norm(vb))
|
||||
if norm_a == 0.0 or norm_b == 0.0:
|
||||
return 0.0
|
||||
return float(np.dot(va, vb) / (norm_a * norm_b))
|
||||
except Exception:
|
||||
return 0.0
|
||||
|
||||
|
||||
def find_knn(
|
||||
query_vec: list[float],
|
||||
index: dict[str, Any],
|
||||
k: int = 5,
|
||||
exclude_id: str | None = None,
|
||||
) -> list[tuple[str, float]]:
|
||||
"""Return up to *k* nearest neighbours as (entry_id, similarity) pairs, descending."""
|
||||
scores: list[tuple[str, float]] = []
|
||||
for entry_id, raw in index.get("entries", {}).items():
|
||||
if entry_id == exclude_id:
|
||||
continue
|
||||
vec = raw.get("embedding")
|
||||
if not vec:
|
||||
continue
|
||||
sim = cosine_similarity(query_vec, vec)
|
||||
scores.append((entry_id, sim))
|
||||
scores.sort(key=lambda x: x[1], reverse=True)
|
||||
return scores[:k]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Semantic search (Phase 2)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def semantic_search(
|
||||
query: str,
|
||||
index: dict[str, Any],
|
||||
*,
|
||||
k: int = 20,
|
||||
date_range: tuple[str, str] | None = None,
|
||||
) -> list[tuple[str, float]]:
|
||||
"""Embed *query* and return top-k (entry_id, score) pairs.
|
||||
|
||||
Returns [] if embeddings are disabled or the embed call fails.
|
||||
date_range is an inclusive (YYYY-MM-DD, YYYY-MM-DD) filter applied
|
||||
before ranking.
|
||||
"""
|
||||
if not embeddings_enabled():
|
||||
return []
|
||||
|
||||
query_vec = await embed_text(query)
|
||||
if query_vec is None:
|
||||
return []
|
||||
|
||||
candidates: list[tuple[str, float]] = []
|
||||
for entry_id, raw in index.get("entries", {}).items():
|
||||
if date_range:
|
||||
d = raw.get("date", "")
|
||||
if d < date_range[0] or d > date_range[1]:
|
||||
continue
|
||||
vec = raw.get("embedding")
|
||||
if not vec:
|
||||
continue
|
||||
sim = cosine_similarity(query_vec, vec)
|
||||
candidates.append((entry_id, sim))
|
||||
|
||||
candidates.sort(key=lambda x: x[1], reverse=True)
|
||||
return candidates[:k]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Importance tracking (Phase 4)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def importance_score(entry: MemoryEntry, now: datetime | None = None) -> float:
|
||||
"""Composite importance: log1p(count) * recency decay (half-life 30 days).
|
||||
|
||||
Returns 0.0 for entries that have never been retrieved.
|
||||
"""
if entry.retrieval_count == 0:
|
||||
return 0.0
|
||||
count_score = math.log1p(entry.retrieval_count)
|
||||
if entry.last_retrieved:
|
||||
try:
|
||||
last = datetime.fromisoformat(entry.last_retrieved)
|
||||
days_since = ((now or datetime.now()) - last).total_seconds() / 86400
|
||||
decay = math.exp(-days_since / 30)
|
||||
except ValueError:
|
||||
decay = 0.0
|
||||
else:
|
||||
decay = 0.0
|
||||
return count_score * decay
|
||||
|
||||
|
||||
def record_retrieval(
|
||||
index: dict[str, Any],
|
||||
entry_ids: list[str],
|
||||
*,
|
||||
auto_save: bool = True,
|
||||
) -> None:
|
||||
"""Increment retrieval_count and update last_retrieved for each entry_id."""
|
||||
now_str = datetime.now().isoformat()
|
||||
entries = index.get("entries", {})
|
||||
for eid in entry_ids:
|
||||
if eid in entries:
|
||||
entries[eid]["retrieval_count"] = entries[eid].get("retrieval_count", 0) + 1
|
||||
entries[eid]["last_retrieved"] = now_str
|
||||
if auto_save:
|
||||
try:
|
||||
save_index(index)
|
||||
except Exception as exc:
|
||||
logger.warning("queen_memory_index: failed to save index after retrieval: %s", exc)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Hybrid re-ranking (Phase 4)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def hybrid_search(
|
||||
query: str,
|
||||
index: dict[str, Any],
|
||||
candidate_ids: list[str],
|
||||
semantic_scores: dict[str, float],
|
||||
*,
|
||||
keyword_weight: float = 0.3,
|
||||
semantic_weight: float = 0.7,
|
||||
) -> list[tuple[str, float]]:
|
||||
"""Re-rank candidates combining semantic cosine, keyword overlap, and importance.
|
||||
|
||||
Combined score = semantic_weight * cosine
|
||||
+ keyword_weight * keyword_overlap
|
||||
+ 0.1 * normalised_importance
|
||||
|
||||
keyword_overlap = |query_terms ∩ entry.keywords| / max(1, |entry.keywords|)
|
||||
normalised_importance is scaled to [0, 1] relative to the highest importance
|
||||
in the candidate set.
|
||||
"""
query_terms = set(re.findall(r"\w+", query.lower()))
|
||||
now = datetime.now()
|
||||
|
||||
raw_scores: list[tuple[str, float]] = []
|
||||
imp_values: list[float] = []
|
||||
for eid in candidate_ids:
|
||||
entry = get_entry(index, eid)
|
||||
if entry is None:
|
||||
continue
|
||||
sem = semantic_scores.get(eid, 0.0)
|
||||
kw_list = [k.lower() for k in entry.keywords]
|
||||
overlap = len(query_terms & set(kw_list)) / max(1, len(kw_list))
|
||||
imp = importance_score(entry, now)
|
||||
imp_values.append(imp)
|
||||
raw_scores.append((eid, sem, overlap, imp))
|
||||
|
||||
# Normalise importance to [0, 1]
|
||||
max_imp = max(imp_values) if imp_values else 1.0
|
||||
if max_imp == 0.0:
|
||||
max_imp = 1.0
|
||||
|
||||
ranked: list[tuple[str, float]] = []
|
||||
for eid, sem, overlap, imp in raw_scores:
|
||||
score = (
|
||||
semantic_weight * sem
|
||||
+ keyword_weight * overlap
|
||||
+ 0.1 * (imp / max_imp)
|
||||
)
|
||||
ranked.append((eid, score))
|
||||
|
||||
ranked.sort(key=lambda x: x[1], reverse=True)
|
||||
return ranked
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Cross-reference linking (Phase 3)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def link_entry(
|
||||
index: dict[str, Any],
|
||||
entry_id: str,
|
||||
similarity_threshold: float = 0.85,
|
||||
) -> list[str]:
|
||||
"""Discover k-NN above threshold and add bidirectional related[] links.
|
||||
|
||||
Mutates the index dict in place. Returns the list of newly linked
|
||||
neighbour ids (may be empty).
|
||||
"""
|
||||
entries = index.get("entries", {})
|
||||
raw = entries.get(entry_id)
|
||||
if not raw or not raw.get("embedding"):
|
||||
return []
|
||||
|
||||
neighbours = find_knn(raw["embedding"], index, k=10, exclude_id=entry_id)
|
||||
linked: list[str] = []
|
||||
for nid, sim in neighbours:
|
||||
if sim < similarity_threshold:
|
||||
break # sorted descending, so we can stop early
|
||||
linked.append(nid)
|
||||
# Update entry
|
||||
if nid not in raw.setdefault("related", []):
|
||||
raw["related"].append(nid)
|
||||
# Update neighbour
|
||||
neighbour = entries.get(nid)
|
||||
if neighbour is not None and entry_id not in neighbour.setdefault("related", []):
|
||||
neighbour["related"].append(entry_id)
|
||||
|
||||
return linked
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Prompt constants for LLM calls
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_ENRICHMENT_SYSTEM = """\
|
||||
Analyse the following diary entry from an AI assistant's episodic memory.
|
||||
Extract structured metadata and return it as a JSON object with exactly these keys:
|
||||
"keywords": list of 5-8 important terms (nouns, verbs, proper names)
|
||||
"category": exactly one string from this list: agent_build, infrastructure,
|
||||
user_preference, communication_style, diagnostic_learning, milestone,
|
||||
pipeline, data_processing, other
|
||||
"tags": list of 3-5 freeform topic labels (short phrases)
|
||||
|
||||
Return ONLY the JSON object. No explanation, no code fences.
|
||||
"""
|
||||
|
||||
_EVOLUTION_SYSTEM = """\
|
||||
You are refining the metadata of an older memory entry based on a newly discovered
|
||||
related memory entry.
|
||||
|
||||
Given the TWO entries below, decide if the OLDER entry's tags or category should be
|
||||
updated to better reflect the thematic connection.
|
||||
|
||||
Rules:
|
||||
- Only suggest changes if the connection reveals a clearly missing tag or a category
|
||||
correction. When in doubt, return {}.
|
||||
- You may only modify "tags" and "category" — never the prose, never keywords.
|
||||
- Return a JSON object with only the keys you are changing: {"tags": [...], "category": "..."}
|
||||
or {} if no change is warranted.
|
||||
|
||||
Return ONLY the JSON object. No explanation, no code fences.
|
||||
"""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Phase 1 — enrichment helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _parse_diary_sections(content: str) -> list[tuple[str, str]]:
|
||||
"""Return (timestamp, prose) pairs from a diary file's ### HH:MM blocks.
|
||||
|
||||
The date heading (# ...) is stripped. Non-timestamped content before the
|
||||
first ### block is ignored.
|
||||
"""
sections: list[tuple[str, str]] = []
|
||||
# Split on ### HH:MM markers
|
||||
parts = re.split(r"###\s*(\d{2}:\d{2})\b", content)
|
||||
# parts = [pre_text, ts1, prose1, ts2, prose2, ...]
|
||||
i = 1
|
||||
while i + 1 < len(parts):
|
||||
ts = parts[i].strip()
|
||||
prose = parts[i + 1].strip()
|
||||
if prose:
|
||||
sections.append((ts, prose))
|
||||
i += 2
|
||||
return sections
|
||||
|
||||
|
||||
def index_entry_from_diary_section(
|
||||
date_str: str,
|
||||
timestamp: str,
|
||||
prose: str,
|
||||
) -> MemoryEntry:
|
||||
"""Construct a bare MemoryEntry (no enrichment, no embedding) from a diary section."""
|
||||
entry_id = f"{date_str}:{timestamp}"
|
||||
summary = prose[:300].replace("\n", " ")
|
||||
return MemoryEntry(
|
||||
id=entry_id,
|
||||
date=date_str,
|
||||
timestamp=timestamp,
|
||||
summary=summary,
|
||||
)
|
||||
|
||||
|
||||
async def enrich_entry(
|
||||
entry_text: str,
|
||||
llm: object,
|
||||
) -> tuple[list[str], str, list[str]]:
|
||||
"""Call the consolidation LLM to extract keywords, category, and tags.
|
||||
|
||||
Returns ([], "other", []) on any failure so the caller can continue.
|
||||
"""
|
||||
try:
|
||||
resp = await llm.acomplete(
|
||||
messages=[{"role": "user", "content": entry_text}],
|
||||
system=_ENRICHMENT_SYSTEM,
|
||||
max_tokens=256,
|
||||
json_mode=True,
|
||||
)
|
||||
data = json.loads(resp.content)
|
||||
keywords = [str(k) for k in data.get("keywords", [])][:8]
|
||||
raw_cat = str(data.get("category", "other"))
|
||||
category = raw_cat if raw_cat in _CATEGORIES else "other"
|
||||
tags = [str(t) for t in data.get("tags", [])][:5]
|
||||
return keywords, category, tags
|
||||
except Exception as exc:
|
||||
logger.warning("queen_memory_index: enrich_entry failed (%s)", exc)
|
||||
return [], "other", []
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Phase 5 — memory evolution
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def maybe_evolve_neighbors(
|
||||
new_entry_id: str,
|
||||
neighbor_ids: list[str],
|
||||
index: dict[str, Any],
|
||||
llm: object,
|
||||
*,
|
||||
max_neighbors_to_evolve: int = 2,
|
||||
) -> None:
|
||||
"""Potentially refine the tags/category of neighbour entries.
|
||||
|
||||
Only mutates metadata (tags, category) — never prose, never embeddings.
|
||||
Failures are logged and silently skipped.
|
||||
"""
|
||||
if not neighbor_ids:
|
||||
return
|
||||
|
||||
new_raw = index.get("entries", {}).get(new_entry_id)
|
||||
if not new_raw:
|
||||
return
|
||||
|
||||
for nid in neighbor_ids[:max_neighbors_to_evolve]:
|
||||
neighbor_raw = index.get("entries", {}).get(nid)
|
||||
if not neighbor_raw:
|
||||
continue
|
||||
try:
|
||||
prompt = (
|
||||
f"NEWER entry ({new_entry_id}):\n"
|
||||
f"Summary: {new_raw.get('summary', '')}\n"
|
||||
f"Keywords: {', '.join(new_raw.get('keywords', []))}\n"
|
||||
f"Tags: {', '.join(new_raw.get('tags', []))}\n\n"
|
||||
f"OLDER entry ({nid}):\n"
|
||||
f"Summary: {neighbor_raw.get('summary', '')}\n"
|
||||
f"Keywords: {', '.join(neighbor_raw.get('keywords', []))}\n"
|
||||
f"Tags: {', '.join(neighbor_raw.get('tags', []))}\n"
|
||||
f"Category: {neighbor_raw.get('category', 'other')}"
|
||||
)
|
||||
resp = await llm.acomplete(
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
system=_EVOLUTION_SYSTEM,
|
||||
max_tokens=128,
|
||||
json_mode=True,
|
||||
)
|
||||
updates = json.loads(resp.content)
|
||||
if not updates:
|
||||
continue
|
||||
if "tags" in updates and isinstance(updates["tags"], list):
|
||||
neighbor_raw["tags"] = [str(t) for t in updates["tags"]][:5]
|
||||
if "category" in updates:
|
||||
raw_cat = str(updates["category"])
|
||||
neighbor_raw["category"] = raw_cat if raw_cat in _CATEGORIES else "other"
|
||||
logger.debug("queen_memory_index: evolved metadata for entry %s", nid)
|
||||
except Exception as exc:
|
||||
logger.warning("queen_memory_index: evolution failed for %s: %s", nid, exc)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Index rebuild / backfill
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def rebuild_index_for_date(
|
||||
date_str: str,
|
||||
llm: object | None = None,
|
||||
) -> int:
|
||||
"""Parse today's diary file and index any sections not yet in the index.
|
||||
|
||||
Optionally enriches new entries via LLM if *llm* is provided.
|
||||
Returns the count of new entries added.
|
||||
"""
|
||||
from framework.agents.queen.queen_memory import episodic_memory_path
|
||||
from datetime import date as _date
|
||||
|
||||
try:
|
||||
year, month, day = map(int, date_str.split("-"))
|
||||
d = _date(year, month, day)
|
||||
except ValueError:
|
||||
logger.warning("queen_memory_index: invalid date_str %r", date_str)
|
||||
return 0
|
||||
|
||||
ep_path = episodic_memory_path(d)
|
||||
if not ep_path.exists():
|
||||
return 0
|
||||
|
||||
content = ep_path.read_text(encoding="utf-8")
|
||||
sections = _parse_diary_sections(content)
|
||||
if not sections:
|
||||
return 0
|
||||
|
||||
index = load_index()
|
||||
|
||||
# Detect embedding model change and clear stale vectors
|
||||
if embeddings_enabled() and _detect_model_change(index):
|
||||
_clear_embeddings(index)
|
||||
|
||||
added = 0
|
||||
for ts, prose in sections:
|
||||
entry_id = f"{date_str}:{ts}"
|
||||
existing = get_entry(index, entry_id)
|
||||
|
||||
if existing is None:
|
||||
entry = index_entry_from_diary_section(date_str, ts, prose)
|
||||
elif existing.enriched:
|
||||
# Already fully processed; update embedding only if missing
|
||||
entry = existing
|
||||
else:
|
||||
entry = existing
|
||||
|
||||
# Enrich if LLM provided and not yet enriched
|
||||
if llm is not None and not entry.enriched:
|
||||
keywords, category, tags = await enrich_entry(prose, llm)
|
||||
entry.keywords = keywords
|
||||
entry.category = category
|
||||
entry.tags = tags
|
||||
entry.enriched = True
|
||||
|
||||
# Embed if model is configured and vector is missing
|
||||
if embeddings_enabled() and entry.embedding is None:
|
||||
vec = await embed_text(prose[:1500]) # cap input length
|
||||
if vec is not None:
|
||||
entry.embedding = vec
|
||||
index["embed_model"] = get_embed_model()
|
||||
index["embed_dim"] = len(vec)
|
||||
|
||||
put_entry(index, entry)
|
||||
if existing is None:
|
||||
added += 1
|
||||
|
||||
save_index(index)
|
||||
logger.debug(
|
||||
"queen_memory_index: indexed %d section(s) for %s, %d new", len(sections), date_str, added
|
||||
)
|
||||
return added
|
||||
|
||||
|
||||
async def backfill_index(
|
||||
llm: object | None = None,
|
||||
embed: bool = True,
|
||||
) -> dict[str, int]:
|
||||
"""Walk all MEMORY-YYYY-MM-DD.md files and index unindexed entries.
|
||||
|
||||
This is a one-shot utility — call it once after initial deployment to
|
||||
catch up historical diary files. Not called automatically.
|
||||
|
||||
Usage:
|
||||
uv run python -c "
|
||||
import asyncio
|
||||
from framework.agents.queen.queen_memory_index import backfill_index
|
||||
print(asyncio.run(backfill_index()))
|
||||
"
|
||||
"""
|
||||
memories_dir = _queen_memories_dir()
|
||||
if not memories_dir.exists():
|
||||
return {"dates_processed": 0, "entries_added": 0}
|
||||
|
||||
total_added = 0
|
||||
dates_processed = 0
|
||||
for md_file in sorted(memories_dir.glob("MEMORY-????-??-??.md")):
|
||||
date_str = md_file.stem.removeprefix("MEMORY-")
|
||||
if not re.fullmatch(r"\d{4}-\d{2}-\d{2}", date_str):
|
||||
continue
|
||||
added = await rebuild_index_for_date(date_str, llm=llm)
|
||||
total_added += added
|
||||
dates_processed += 1
|
||||
|
||||
logger.info(
|
||||
"queen_memory_index: backfill complete — %d dates, %d entries added",
|
||||
dates_processed,
|
||||
total_added,
|
||||
)
|
||||
return {"dates_processed": dates_processed, "entries_added": total_added}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Resolve full prose from diary file by entry_id
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def resolve_prose(entry_id: str) -> str:
|
||||
"""Read the source diary file and return the full prose for *entry_id*.
|
||||
|
||||
Returns the summary from the index as a fallback if the file section
|
||||
cannot be found.
|
||||
"""
|
||||
from framework.agents.queen.queen_memory import episodic_memory_path
|
||||
from datetime import date as _date
|
||||
|
||||
try:
|
||||
date_str, ts = entry_id.split(":", 1)
|
||||
year, month, day = map(int, date_str.split("-"))
|
||||
d = _date(year, month, day)
|
||||
except ValueError:
|
||||
return ""
|
||||
|
||||
ep_path = episodic_memory_path(d)
|
||||
if not ep_path.exists():
|
||||
return ""
|
||||
|
||||
content = ep_path.read_text(encoding="utf-8")
|
||||
sections = _parse_diary_sections(content)
|
||||
for section_ts, prose in sections:
|
||||
if section_ts == ts:
|
||||
return prose
|
||||
return ""
|
||||
@@ -27,7 +27,9 @@
|
||||
## GCU Errors
|
||||
15. **Manually wiring browser tools on event_loop nodes** — Use `node_type="gcu"` which auto-includes browser tools. Do NOT manually list browser tool names.
|
||||
16. **Using GCU nodes as regular graph nodes** — GCU nodes are subagents only. They must ONLY appear in `sub_agents=["gcu-node-id"]` and be invoked via `delegate_to_sub_agent()`. Never connect via edges or use as entry/terminal nodes.
|
||||
17. **Reusing the same GCU node ID for parallel tasks** — Each concurrent browser task needs a distinct GCU node ID (e.g. `gcu-site-a`, `gcu-site-b`). Two `delegate_to_sub_agent` calls with the same `agent_id` share a browser profile and will interfere with each other's pages.
|
||||
18. **Passing `profile=` in GCU tool calls** — Profile isolation for parallel subagents is automatic. The framework injects a unique profile per subagent via an asyncio `ContextVar`. Hardcoding `profile="default"` in a GCU system prompt breaks this isolation.
|
||||
|
||||
## Worker Agent Errors
|
||||
17. **Adding client-facing intake node to workers** — The queen owns intake. Workers should start with an autonomous processing node. Client-facing nodes in workers are for mid-execution review/approval only.
|
||||
18. **Putting `escalate` or `set_output` in NodeSpec `tools=[]`** — These are synthetic framework tools, auto-injected at runtime. Only list MCP tools from `list_agent_tools()`.
|
||||
19. **Adding client-facing intake node to workers** — The queen owns intake. Workers should start with an autonomous processing node. Client-facing nodes in workers are for mid-execution review/approval only.
|
||||
20. **Putting `escalate` or `set_output` in NodeSpec `tools=[]`** — These are synthetic framework tools, auto-injected at runtime. Only list MCP tools from `list_agent_tools()`.
|
||||
|
||||
@@ -332,81 +332,46 @@ class MyAgent:
|
||||
default_agent = MyAgent()
|
||||
```
|
||||
|
||||
## agent.py — Async Entry Points Variant
|
||||
## triggers.json — Timer and Webhook Triggers
|
||||
|
||||
When an agent needs timers, webhooks, or event-driven triggers, add
|
||||
`async_entry_points` and optionally `runtime_config` as module-level variables.
|
||||
These are IN ADDITION to the standard variables above.
|
||||
When an agent needs timers, webhooks, or event-driven triggers, create a
|
||||
`triggers.json` file in the agent's directory (alongside `agent.py`).
|
||||
The queen loads these at session start and the user can manage them via
|
||||
the `set_trigger` / `remove_trigger` tools at runtime.
|
||||
|
||||
```python
|
||||
# Additional imports for async entry points
|
||||
from framework.graph.edge import GraphSpec, AsyncEntryPointSpec
|
||||
from framework.runtime.agent_runtime import (
|
||||
AgentRuntime, AgentRuntimeConfig, create_agent_runtime,
|
||||
)
|
||||
|
||||
# ... (goal, nodes, edges, entry_node, entry_points, etc. as above) ...
|
||||
|
||||
# Async entry points — event-driven triggers
|
||||
async_entry_points = [
|
||||
# Timer with cron: daily at 9am
|
||||
AsyncEntryPointSpec(
|
||||
id="daily-check",
|
||||
name="Daily Check",
|
||||
entry_node="process-node",
|
||||
trigger_type="timer",
|
||||
trigger_config={"cron": "0 9 * * *"},
|
||||
isolation_level="shared",
|
||||
max_concurrent=1,
|
||||
),
|
||||
# Timer with fixed interval: every 20 minutes
|
||||
AsyncEntryPointSpec(
|
||||
id="scheduled-check",
|
||||
name="Scheduled Check",
|
||||
entry_node="process-node",
|
||||
trigger_type="timer",
|
||||
trigger_config={"interval_minutes": 20, "run_immediately": False},
|
||||
isolation_level="shared",
|
||||
max_concurrent=1,
|
||||
),
|
||||
# Event: reacts to webhook events
|
||||
AsyncEntryPointSpec(
|
||||
id="webhook-event",
|
||||
name="Webhook Event Handler",
|
||||
entry_node="process-node",
|
||||
trigger_type="event",
|
||||
trigger_config={"event_types": ["webhook_received"]},
|
||||
isolation_level="shared",
|
||||
max_concurrent=10,
|
||||
),
|
||||
```json
|
||||
[
|
||||
{
|
||||
"id": "daily-check",
|
||||
"name": "Daily Check",
|
||||
"trigger_type": "timer",
|
||||
"trigger_config": {"cron": "0 9 * * *"},
|
||||
"task": "Run the daily check process"
|
||||
},
|
||||
{
|
||||
"id": "scheduled-check",
|
||||
"name": "Scheduled Check",
|
||||
"trigger_type": "timer",
|
||||
"trigger_config": {"interval_minutes": 20},
|
||||
"task": "Run the scheduled check"
|
||||
},
|
||||
{
|
||||
"id": "webhook-event",
|
||||
"name": "Webhook Event Handler",
|
||||
"trigger_type": "webhook",
|
||||
"trigger_config": {"event_types": ["webhook_received"]},
|
||||
"task": "Process incoming webhook event"
|
||||
}
|
||||
]
|
||||
|
||||
# Webhook server config (only needed if using webhooks)
|
||||
runtime_config = AgentRuntimeConfig(
|
||||
webhook_host="127.0.0.1",
|
||||
webhook_port=8080,
|
||||
webhook_routes=[
|
||||
{
|
||||
"source_id": "my-source",
|
||||
"path": "/webhooks/my-source",
|
||||
"methods": ["POST"],
|
||||
},
|
||||
],
|
||||
)
|
||||
```
|
||||
|
||||
**Key rules for async entry points:**
|
||||
- `async_entry_points` is a list of `AsyncEntryPointSpec` (NOT `EntryPointSpec`)
|
||||
- `runtime_config` is `AgentRuntimeConfig` (NOT `RuntimeConfig` from config.py)
|
||||
- Valid trigger_types: `timer`, `event`, `webhook`, `manual`, `api`
|
||||
- Valid isolation_levels: `isolated`, `shared`, `synchronized`
|
||||
**Key rules for triggers.json:**
|
||||
- Valid trigger_types: `timer`, `webhook`
|
||||
- Timer trigger_config (cron): `{"cron": "0 9 * * *"}` — standard 5-field cron expression
|
||||
- Timer trigger_config (interval): `{"interval_minutes": float, "run_immediately": bool}`
|
||||
- Event trigger_config: `{"event_types": ["webhook_received"], "filter_stream": "...", "filter_node": "..."}`
|
||||
- Use `isolation_level="shared"` for async entry points that need to read
|
||||
the primary session's memory (e.g., user-configured rules)
|
||||
- The `_build_graph()` method passes `async_entry_points` to GraphSpec
|
||||
- Reference: `exports/gmail_inbox_guardian/agent.py`
|
||||
- Timer trigger_config (interval): `{"interval_minutes": float}`
|
||||
- Each trigger must have a unique `id`
|
||||
- The `task` field describes what the worker should do when the trigger fires
|
||||
- Triggers are persisted back to `triggers.json` when modified via queen tools
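
As a quick illustration, loading this file is trivial. The helper below is a
hypothetical sketch (not a framework API); it assumes only the schema described
above:

```python
import json
from pathlib import Path

def load_triggers(agent_dir: Path) -> list[dict]:
    """Read triggers.json from an agent's export directory (hypothetical helper)."""
    path = agent_dir / "triggers.json"
    if not path.exists():
        return []  # no triggers defined for this agent
    triggers = json.loads(path.read_text(encoding="utf-8"))
    # Every trigger must carry a unique id (see the rules above).
    ids = [t["id"] for t in triggers]
    if len(set(ids)) != len(ids):
        raise ValueError(f"duplicate trigger ids in {path}")
    return triggers
```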
## __init__.py
|
||||
|
||||
@@ -453,21 +418,6 @@ __all__ = [
|
||||
]
|
||||
```
|
||||
|
||||
**If the agent uses async entry points**, also import and export:
|
||||
```python
|
||||
from .agent import (
|
||||
...,
|
||||
async_entry_points,
|
||||
runtime_config, # Only if using webhooks
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
...,
|
||||
"async_entry_points",
|
||||
"runtime_config",
|
||||
]
|
||||
```
|
||||
|
||||
## __main__.py
|
||||
|
||||
```python
|
||||
|
||||
@@ -31,8 +31,7 @@ module-level variables via `getattr()`:
|
||||
| `conversation_mode` | no | not passed | Isolated mode (no context carryover) |
|
||||
| `identity_prompt` | no | not passed | No agent-level identity |
|
||||
| `loop_config` | no | `{}` | No iteration limits |
|
||||
| `async_entry_points` | no | `[]` | No async triggers (timers, webhooks, events) |
|
||||
| `runtime_config` | no | `None` | No webhook server |
|
||||
| `triggers.json` (file) | no | not present | No triggers (timers, webhooks) |
|
||||
|
||||
**CRITICAL:** `__init__.py` MUST import and re-export ALL of these from
|
||||
`agent.py`. Missing exports silently fall back to defaults, causing
|
||||
@@ -257,44 +256,28 @@ Multiple ON_SUCCESS edges from same source → parallel execution via asyncio.ga
|
||||
|
||||
Judge is the SOLE acceptance mechanism — no ad-hoc framework gating.
|
||||
|
||||
## Async Entry Points (Webhooks, Timers, Events)
|
||||
## Triggers (Timers, Webhooks)
|
||||
|
||||
For agents that react to external events, use `AsyncEntryPointSpec`:
|
||||
For agents that react to external events, create a `triggers.json` file
|
||||
in the agent's export directory:
|
||||
|
||||
```python
|
||||
from framework.graph.edge import AsyncEntryPointSpec
|
||||
from framework.runtime.agent_runtime import AgentRuntimeConfig
|
||||
|
||||
# Timer trigger (cron or interval)
|
||||
async_entry_points = [
|
||||
AsyncEntryPointSpec(
|
||||
id="daily-check",
|
||||
name="Daily Check",
|
||||
entry_node="process",
|
||||
trigger_type="timer",
|
||||
trigger_config={"cron": "0 9 * * *"}, # daily at 9am
|
||||
isolation_level="shared",
|
||||
)
|
||||
```json
|
||||
[
|
||||
{
|
||||
"id": "daily-check",
|
||||
"name": "Daily Check",
|
||||
"trigger_type": "timer",
|
||||
"trigger_config": {"cron": "0 9 * * *"},
|
||||
"task": "Run the daily check process"
|
||||
}
|
||||
]
|
||||
|
||||
# Webhook server (optional)
|
||||
runtime_config = AgentRuntimeConfig(
|
||||
webhook_host="127.0.0.1",
|
||||
webhook_port=8080,
|
||||
webhook_routes=[{"source_id": "gmail", "path": "/webhooks/gmail", "methods": ["POST"]}],
|
||||
)
|
||||
```
|
||||
|
||||
### Key Fields
|
||||
- `trigger_type`: `"timer"`, `"event"`, `"webhook"`, `"manual"`
|
||||
- `trigger_type`: `"timer"` or `"webhook"`
|
||||
- `trigger_config`: `{"cron": "0 9 * * *"}` or `{"interval_minutes": 20}`
|
||||
- `isolation_level`: `"shared"` (recommended), `"isolated"`, `"synchronized"`
|
||||
- `event_types`: For event triggers, e.g., `["webhook_received"]`
|
||||
|
||||
### Exports Required
|
||||
Both `async_entry_points` and `runtime_config` must be exported from `__init__.py`.
|
||||
|
||||
See `exports/gmail_inbox_guardian/agent.py` for complete example.
|
||||
- `task`: describes what the worker should do when the trigger fires
|
||||
- Triggers can also be created/removed at runtime via `set_trigger` / `remove_trigger` queen tools
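
For example (using the id from the JSON above; the task string is illustrative),
the queen can activate the timer at runtime with `set_trigger("daily-check",
task="Run the daily check process")` and later remove it with
`remove_trigger("daily-check")`.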
## Tool Discovery
|
||||
|
||||
|
||||
@@ -109,9 +109,48 @@ Key rules to bake into GCU node prompts:
|
||||
- Keep tool calls per turn ≤10
|
||||
- Tab isolation: when browser is already running, use `browser_open(background=true)` and pass `target_id` to every call
|
||||
|
||||
## Multiple Concurrent GCU Subagents
|
||||
|
||||
When a task can be parallelized across multiple sites or profiles, declare a distinct GCU
|
||||
node for each and invoke them all in the same LLM turn. The framework batches all
|
||||
`delegate_to_sub_agent` calls made in one turn and runs them with `asyncio.gather`, so
|
||||
they execute concurrently — not sequentially.
|
||||
|
||||
**Each GCU subagent automatically gets its own isolated browser context** — no `profile=`
|
||||
argument is needed in tool calls. The framework derives a unique profile from the subagent's
|
||||
node ID and instance counter and injects it via an asyncio `ContextVar` before the subagent
|
||||
runs.
|
||||
|
||||
### Example: three sites in parallel
|
||||
|
||||
```python
|
||||
# Three distinct GCU nodes
|
||||
gcu_site_a = NodeSpec(id="gcu-site-a", node_type="gcu", ...)
|
||||
gcu_site_b = NodeSpec(id="gcu-site-b", node_type="gcu", ...)
|
||||
gcu_site_c = NodeSpec(id="gcu-site-c", node_type="gcu", ...)
|
||||
|
||||
orchestrator = NodeSpec(
|
||||
id="orchestrator",
|
||||
node_type="event_loop",
|
||||
sub_agents=["gcu-site-a", "gcu-site-b", "gcu-site-c"],
|
||||
system_prompt="""\
|
||||
Call all three subagents in a single response to run them in parallel:
|
||||
delegate_to_sub_agent(agent_id="gcu-site-a", task="Scrape prices from site A")
|
||||
delegate_to_sub_agent(agent_id="gcu-site-b", task="Scrape prices from site B")
|
||||
delegate_to_sub_agent(agent_id="gcu-site-c", task="Scrape prices from site C")
|
||||
""",
|
||||
)
|
||||
```
|
||||
|
||||
**Rules:**
|
||||
- Use distinct node IDs for each concurrent task — sharing an ID shares the browser context.
|
||||
- The GCU node prompts do not need to mention `profile=`; isolation is automatic.
|
||||
- Cleanup is automatic at session end, but GCU nodes can call `browser_stop()` explicitly
|
||||
if they want to release resources mid-run.
|
||||
|
||||
## GCU Anti-Patterns
|
||||
|
||||
- Using `browser_screenshot` to read text (use `browser_snapshot`)
|
||||
- Using `browser_screenshot` to read text (use `browser_snapshot` instead; screenshots are for visual context only)
|
||||
- Re-navigating after scrolling (resets scroll position)
|
||||
- Attempting login on auth walls
|
||||
- Forgetting `target_id` in multi-tab scenarios
|
||||
|
||||

@@ -0,0 +1,656 @@
"""Unit tests for queen_memory_index.py.

All tests run without HIVE_EMBED_MODEL set. Embedding behaviour is tested
via a lightweight mock that injects deterministic fixed vectors.
"""

from __future__ import annotations

import json
import math
from dataclasses import asdict
from datetime import datetime
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch

import pytest
import pytest_asyncio

from framework.agents.queen.queen_memory_index import (
    MemoryEntry,
    _CATEGORIES,
    _parse_diary_sections,
    backfill_index,
    cosine_similarity,
    embed_text,
    embeddings_enabled,
    enrich_entry,
    find_knn,
    get_embed_model,
    get_entry,
    hybrid_search,
    importance_score,
    index_entry_from_diary_section,
    index_path,
    link_entry,
    load_index,
    maybe_evolve_neighbors,
    put_entry,
    rebuild_index_for_date,
    record_retrieval,
    resolve_prose,
    save_index,
)


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------


def _make_index(*entries: MemoryEntry) -> dict:
    idx = {"version": 1, "embed_model": None, "embed_dim": None, "entries": {}}
    for e in entries:
        put_entry(idx, e)
    return idx


def _entry(
    date_str: str = "2026-03-01",
    ts: str = "10:00",
    summary: str = "test summary",
    keywords: list[str] | None = None,
    tags: list[str] | None = None,
    category: str = "other",
    embedding: list[float] | None = None,
    retrieval_count: int = 0,
    last_retrieved: str | None = None,
    related: list[str] | None = None,
) -> MemoryEntry:
    return MemoryEntry(
        id=f"{date_str}:{ts}",
        date=date_str,
        timestamp=ts,
        summary=summary,
        keywords=keywords or [],
        tags=tags or [],
        category=category,
        embedding=embedding,
        retrieval_count=retrieval_count,
        last_retrieved=last_retrieved,
        related=related or [],
    )


# ---------------------------------------------------------------------------
# cosine_similarity
# ---------------------------------------------------------------------------


class TestCosineSimilarity:
    def test_identical_vectors(self):
        v = [1.0, 0.0, 0.0]
        assert cosine_similarity(v, v) == pytest.approx(1.0)

    def test_orthogonal_vectors(self):
        assert cosine_similarity([1.0, 0.0], [0.0, 1.0]) == pytest.approx(0.0)

    def test_opposite_vectors(self):
        # cosine of 180° is exactly -1
        result = cosine_similarity([1.0, 0.0], [-1.0, 0.0])
        assert result == pytest.approx(-1.0)

    def test_none_inputs(self):
        assert cosine_similarity(None, [1.0]) == 0.0
        assert cosine_similarity([1.0], None) == 0.0
        assert cosine_similarity(None, None) == 0.0

    def test_zero_vector(self):
        assert cosine_similarity([0.0, 0.0], [1.0, 0.0]) == 0.0

    def test_known_similarity(self):
        # [1, 1] vs [1, 0] → cos(45°) ≈ 0.707
        result = cosine_similarity([1.0, 1.0], [1.0, 0.0])
        assert result == pytest.approx(math.sqrt(2) / 2, abs=1e-4)


# ---------------------------------------------------------------------------
# find_knn
# ---------------------------------------------------------------------------


class TestFindKnn:
    def test_returns_sorted_descending(self):
        e1 = _entry("2026-03-01", "09:00", embedding=[1.0, 0.0])
        e2 = _entry("2026-03-01", "10:00", embedding=[0.9, 0.1])
        e3 = _entry("2026-03-01", "11:00", embedding=[0.0, 1.0])
        idx = _make_index(e1, e2, e3)
        results = find_knn([1.0, 0.0], idx, k=3)
        ids = [r[0] for r in results]
        scores = [r[1] for r in results]
        assert ids[0] == "2026-03-01:09:00"  # exact match
        assert scores[0] == pytest.approx(1.0)
        assert all(scores[i] >= scores[i + 1] for i in range(len(scores) - 1))

    def test_excludes_self(self):
        e1 = _entry("2026-03-01", "09:00", embedding=[1.0, 0.0])
        idx = _make_index(e1)
        results = find_knn([1.0, 0.0], idx, k=5, exclude_id="2026-03-01:09:00")
        assert results == []

    def test_skips_null_embeddings(self):
        e1 = _entry("2026-03-01", "09:00", embedding=None)
        e2 = _entry("2026-03-01", "10:00", embedding=[1.0, 0.0])
        idx = _make_index(e1, e2)
        results = find_knn([1.0, 0.0], idx, k=5)
        ids = [r[0] for r in results]
        assert "2026-03-01:09:00" not in ids
        assert "2026-03-01:10:00" in ids

    def test_respects_k(self):
        entries = [_entry("2026-03-01", f"0{i}:00", embedding=[float(i), 0.0]) for i in range(5)]
        idx = _make_index(*entries)
        results = find_knn([1.0, 0.0], idx, k=2)
        assert len(results) <= 2


# ---------------------------------------------------------------------------
# load_index / save_index (round-trip and atomic write)
# ---------------------------------------------------------------------------


class TestIndexIO:
    def test_round_trip(self, tmp_path, monkeypatch):
        monkeypatch.setattr(
            "framework.agents.queen.queen_memory_index._queen_memories_dir",
            lambda: tmp_path,
        )
        idx = _make_index(_entry())
        idx["embed_model"] = "test-model"
        save_index(idx)
        loaded = load_index()
        assert loaded["embed_model"] == "test-model"
        assert "2026-03-01:10:00" in loaded["entries"]

    def test_missing_file_returns_empty(self, tmp_path, monkeypatch):
        monkeypatch.setattr(
            "framework.agents.queen.queen_memory_index._queen_memories_dir",
            lambda: tmp_path,
        )
        idx = load_index()
        assert idx["entries"] == {}
        assert idx["version"] == 1

    def test_corrupt_file_returns_empty(self, tmp_path, monkeypatch):
        monkeypatch.setattr(
            "framework.agents.queen.queen_memory_index._queen_memories_dir",
            lambda: tmp_path,
        )
        (tmp_path / "index.json").write_text("not json at all", encoding="utf-8")
        idx = load_index()
        assert idx["entries"] == {}

    def test_atomic_write_uses_tmp_then_rename(self, tmp_path, monkeypatch):
        monkeypatch.setattr(
            "framework.agents.queen.queen_memory_index._queen_memories_dir",
            lambda: tmp_path,
        )
        idx = _make_index()
        save_index(idx)
        # tmp file should be gone after rename
        assert not (tmp_path / "index.json.tmp").exists()
        assert (tmp_path / "index.json").exists()


# ---------------------------------------------------------------------------
# get_entry / put_entry
# ---------------------------------------------------------------------------


class TestGetPutEntry:
    def test_put_and_get_roundtrip(self):
        e = _entry(keywords=["foo", "bar"], tags=["t1"], category="milestone")
        idx = _make_index()
        put_entry(idx, e)
        loaded = get_entry(idx, e.id)
        assert loaded is not None
        assert loaded.keywords == ["foo", "bar"]
        assert loaded.category == "milestone"

    def test_get_missing_returns_none(self):
        idx = _make_index()
        assert get_entry(idx, "no-such-id") is None

    def test_put_overwrites_existing(self):
        e = _entry(summary="original")
        idx = _make_index(e)
        e2 = _entry(summary="updated")
        put_entry(idx, e2)
        loaded = get_entry(idx, e.id)
        assert loaded.summary == "updated"


# ---------------------------------------------------------------------------
# index_entry_from_diary_section
# ---------------------------------------------------------------------------


class TestIndexEntryFromDiarySection:
    def test_id_format(self):
        e = index_entry_from_diary_section("2026-03-01", "14:30", "Some prose here.")
        assert e.id == "2026-03-01:14:30"
        assert e.date == "2026-03-01"
        assert e.timestamp == "14:30"

    def test_summary_truncated_to_300(self):
        prose = "x" * 500
        e = index_entry_from_diary_section("2026-03-01", "14:30", prose)
        assert len(e.summary) == 300

    def test_defaults_empty_enrichment(self):
        e = index_entry_from_diary_section("2026-03-01", "14:30", "text")
        assert e.keywords == []
        assert e.tags == []
        assert e.category == "other"
        assert e.embedding is None
        assert not e.enriched


# ---------------------------------------------------------------------------
# _parse_diary_sections
# ---------------------------------------------------------------------------


class TestParseDiarySections:
    def test_parses_two_sections(self):
        content = "# March 1, 2026\n\n### 09:00\n\nFirst entry.\n\n### 14:30\n\nSecond entry."
        sections = _parse_diary_sections(content)
        assert len(sections) == 2
        assert sections[0] == ("09:00", "First entry.")
        assert sections[1] == ("14:30", "Second entry.")

    def test_ignores_content_before_first_timestamp(self):
        content = "# Heading\n\nIntro text.\n\n### 10:00\n\nEntry."
        sections = _parse_diary_sections(content)
        assert len(sections) == 1
        assert sections[0][0] == "10:00"

    def test_empty_content(self):
        assert _parse_diary_sections("") == []

    def test_no_timestamp_sections(self):
        assert _parse_diary_sections("# Just a heading\n\nSome text.") == []


# ---------------------------------------------------------------------------
# record_retrieval
# ---------------------------------------------------------------------------


class TestRecordRetrieval:
    def test_increments_count(self, tmp_path, monkeypatch):
        monkeypatch.setattr(
            "framework.agents.queen.queen_memory_index._queen_memories_dir",
            lambda: tmp_path,
        )
        e = _entry(retrieval_count=2)
        idx = _make_index(e)
        record_retrieval(idx, [e.id], auto_save=False)
        assert idx["entries"][e.id]["retrieval_count"] == 3

    def test_sets_last_retrieved(self, tmp_path, monkeypatch):
        monkeypatch.setattr(
            "framework.agents.queen.queen_memory_index._queen_memories_dir",
            lambda: tmp_path,
        )
        e = _entry()
        idx = _make_index(e)
        record_retrieval(idx, [e.id], auto_save=False)
        assert idx["entries"][e.id]["last_retrieved"] is not None

    def test_ignores_missing_ids(self, tmp_path, monkeypatch):
        monkeypatch.setattr(
            "framework.agents.queen.queen_memory_index._queen_memories_dir",
            lambda: tmp_path,
        )
        idx = _make_index()
        # Should not raise
        record_retrieval(idx, ["nonexistent:00:00"], auto_save=False)


# ---------------------------------------------------------------------------
# importance_score
# ---------------------------------------------------------------------------


class TestImportanceScore:
    def test_zero_for_never_retrieved(self):
        e = _entry(retrieval_count=0)
        assert importance_score(e) == 0.0

    def test_positive_for_retrieved_recently(self):
        now = datetime.now()
        e = _entry(retrieval_count=5, last_retrieved=now.isoformat())
        score = importance_score(e, now=now)
        assert score > 0.0

    def test_decays_over_time(self):
        from datetime import timedelta

        now = datetime.now()
        recent = _entry("2026-03-01", "10:00", retrieval_count=5,
                        last_retrieved=now.isoformat())
        old = _entry("2026-03-01", "11:00", retrieval_count=5,
                     last_retrieved=(now - timedelta(days=60)).isoformat())
        assert importance_score(recent, now=now) > importance_score(old, now=now)

    def test_higher_count_higher_score(self):
        now = datetime.now()
        low = _entry("2026-03-01", "10:00", retrieval_count=1,
                     last_retrieved=now.isoformat())
        high = _entry("2026-03-01", "11:00", retrieval_count=10,
                      last_retrieved=now.isoformat())
        assert importance_score(high, now=now) > importance_score(low, now=now)


# ---------------------------------------------------------------------------
# link_entry (Phase 3)
# ---------------------------------------------------------------------------


class TestLinkEntry:
    def test_links_above_threshold(self):
        # Two nearly identical vectors should be linked
        e1 = _entry("2026-03-01", "09:00", embedding=[1.0, 0.0, 0.0])
        e2 = _entry("2026-03-01", "10:00", embedding=[0.99, 0.01, 0.0])
        idx = _make_index(e1, e2)
        linked = link_entry(idx, e1.id, similarity_threshold=0.90)
        assert e2.id in linked

    def test_bidirectional_links(self):
        e1 = _entry("2026-03-01", "09:00", embedding=[1.0, 0.0])
        e2 = _entry("2026-03-01", "10:00", embedding=[1.0, 0.0])
        idx = _make_index(e1, e2)
        link_entry(idx, e1.id, similarity_threshold=0.90)
        assert e2.id in idx["entries"][e1.id]["related"]
        assert e1.id in idx["entries"][e2.id]["related"]

    def test_does_not_link_below_threshold(self):
        e1 = _entry("2026-03-01", "09:00", embedding=[1.0, 0.0])
        e2 = _entry("2026-03-01", "10:00", embedding=[0.0, 1.0])
        idx = _make_index(e1, e2)
        linked = link_entry(idx, e1.id, similarity_threshold=0.90)
        assert linked == []

    def test_skips_entry_without_embedding(self):
        e1 = _entry("2026-03-01", "09:00", embedding=None)
        idx = _make_index(e1)
        linked = link_entry(idx, e1.id)
        assert linked == []


# ---------------------------------------------------------------------------
# hybrid_search (Phase 4)
# ---------------------------------------------------------------------------


class TestHybridSearch:
    def test_semantic_score_dominates(self):
        e_high = _entry("2026-03-01", "09:00", keywords=["unrelated"])
        e_low = _entry("2026-03-01", "10:00", keywords=["pipeline", "agent"])
        idx = _make_index(e_high, e_low)
        sem_scores = {e_high.id: 0.95, e_low.id: 0.40}
        ranked = hybrid_search("pipeline", idx, [e_high.id, e_low.id], sem_scores)
        # e_high has much higher semantic score, should still rank first
        assert ranked[0][0] == e_high.id

    def test_keyword_overlap_breaks_tie(self):
        e_kw = _entry("2026-03-01", "09:00", keywords=["pipeline", "agent", "workflow"])
        e_no_kw = _entry("2026-03-01", "10:00", keywords=["unrelated", "other"])
        idx = _make_index(e_kw, e_no_kw)
        # Equal semantic scores
        sem_scores = {e_kw.id: 0.80, e_no_kw.id: 0.80}
        ranked = hybrid_search("pipeline agent", idx, [e_kw.id, e_no_kw.id], sem_scores)
        assert ranked[0][0] == e_kw.id

    def test_returns_sorted_descending(self):
        entries = [_entry("2026-03-01", f"0{i}:00") for i in range(3)]
        idx = _make_index(*entries)
        sem_scores = {e.id: float(i) / 10 for i, e in enumerate(entries)}
        ids = [e.id for e in entries]
        ranked = hybrid_search("query", idx, ids, sem_scores)
        scores = [s for _, s in ranked]
        assert all(scores[i] >= scores[i + 1] for i in range(len(scores) - 1))


# ---------------------------------------------------------------------------
# embeddings_enabled / get_embed_model
# ---------------------------------------------------------------------------


class TestEmbeddingsEnabled:
    def test_disabled_when_env_unset(self, monkeypatch):
        monkeypatch.delenv("HIVE_EMBED_MODEL", raising=False)
        assert not embeddings_enabled()
        assert get_embed_model() is None

    def test_enabled_when_env_set(self, monkeypatch):
        monkeypatch.setenv("HIVE_EMBED_MODEL", "text-embedding-3-small")
        assert embeddings_enabled()
        assert get_embed_model() == "text-embedding-3-small"


# ---------------------------------------------------------------------------
# embed_text — mocked
# ---------------------------------------------------------------------------


@pytest.mark.asyncio
class TestEmbedText:
    async def test_returns_none_when_disabled(self, monkeypatch):
        monkeypatch.delenv("HIVE_EMBED_MODEL", raising=False)
        result = await embed_text("hello")
        assert result is None

    async def test_returns_vector_when_enabled(self, monkeypatch):
        monkeypatch.setenv("HIVE_EMBED_MODEL", "text-embedding-3-small")
        fake_vec = [0.1, 0.2, 0.3]
        mock_resp = MagicMock()
        mock_resp.data = [{"embedding": fake_vec}]
        with patch("litellm.aembedding", new=AsyncMock(return_value=mock_resp)):
            result = await embed_text("hello world")
        assert result == fake_vec

    async def test_returns_none_on_api_failure(self, monkeypatch):
        monkeypatch.setenv("HIVE_EMBED_MODEL", "text-embedding-3-small")
        with patch("litellm.aembedding", new=AsyncMock(side_effect=RuntimeError("API down"))):
            result = await embed_text("hello")
        assert result is None


# ---------------------------------------------------------------------------
# semantic_search — mocked embeddings
# ---------------------------------------------------------------------------


@pytest.mark.asyncio
class TestSemanticSearch:
    async def test_returns_empty_when_disabled(self, monkeypatch):
        monkeypatch.delenv("HIVE_EMBED_MODEL", raising=False)
        idx = _make_index(_entry(embedding=[1.0, 0.0]))
        results = await semantic_search("query", idx)
        assert results == []

    async def test_finds_nearest_neighbours(self, monkeypatch):
        monkeypatch.setenv("HIVE_EMBED_MODEL", "text-embedding-3-small")
        e1 = _entry("2026-03-01", "09:00", embedding=[1.0, 0.0])
        e2 = _entry("2026-03-01", "10:00", embedding=[0.0, 1.0])
        idx = _make_index(e1, e2)
        query_vec = [1.0, 0.0]
        mock_resp = MagicMock()
        mock_resp.data = [{"embedding": query_vec}]
        with patch("litellm.aembedding", new=AsyncMock(return_value=mock_resp)):
            results = await semantic_search("query", idx, k=2)
        assert results[0][0] == e1.id  # closest to [1.0, 0.0]

    async def test_date_range_filter(self, monkeypatch):
        monkeypatch.setenv("HIVE_EMBED_MODEL", "text-embedding-3-small")
        e_in = _entry("2026-03-15", "09:00", embedding=[1.0, 0.0])
        e_out = _entry("2026-02-01", "09:00", embedding=[1.0, 0.0])
        idx = _make_index(e_in, e_out)
        mock_resp = MagicMock()
        mock_resp.data = [{"embedding": [1.0, 0.0]}]
        with patch("litellm.aembedding", new=AsyncMock(return_value=mock_resp)):
            results = await semantic_search(
                "query", idx, k=10, date_range=("2026-03-01", "2026-03-31")
            )
        ids = [r[0] for r in results]
        assert e_in.id in ids
        assert e_out.id not in ids


# ---------------------------------------------------------------------------
# enrich_entry — mocked LLM
# ---------------------------------------------------------------------------


@pytest.mark.asyncio
class TestEnrichEntry:
    async def test_parses_llm_response(self):
        mock_llm = MagicMock()
        mock_resp = MagicMock()
        mock_resp.content = json.dumps(
            {"keywords": ["pipeline", "agent"], "category": "pipeline", "tags": ["build", "test"]}
        )
        mock_llm.acomplete = AsyncMock(return_value=mock_resp)
        kw, cat, tags = await enrich_entry("Some diary text", mock_llm)
        assert "pipeline" in kw
        assert cat == "pipeline"
        assert "build" in tags

    async def test_rejects_invalid_category(self):
        mock_llm = MagicMock()
        mock_resp = MagicMock()
        mock_resp.content = json.dumps(
            {"keywords": [], "category": "invented_category", "tags": []}
        )
        mock_llm.acomplete = AsyncMock(return_value=mock_resp)
        _, cat, _ = await enrich_entry("text", mock_llm)
        assert cat == "other"

    async def test_returns_defaults_on_failure(self):
        mock_llm = MagicMock()
        mock_llm.acomplete = AsyncMock(side_effect=RuntimeError("LLM down"))
        kw, cat, tags = await enrich_entry("text", mock_llm)
        assert kw == []
        assert cat == "other"
        assert tags == []


# ---------------------------------------------------------------------------
# maybe_evolve_neighbors — mocked LLM
# ---------------------------------------------------------------------------


@pytest.mark.asyncio
class TestMaybeEvolveNeighbors:
    async def test_updates_tags_on_non_empty_response(self):
        mock_llm = MagicMock()
        mock_resp = MagicMock()
        mock_resp.content = json.dumps({"tags": ["new_tag", "updated"]})
        mock_llm.acomplete = AsyncMock(return_value=mock_resp)

        new_e = _entry("2026-03-01", "10:00", keywords=["new"], tags=["tag_a"])
        old_e = _entry("2026-03-01", "09:00", keywords=["old"], tags=["old_tag"])
        idx = _make_index(new_e, old_e)

        await maybe_evolve_neighbors(new_e.id, [old_e.id], idx, mock_llm)
        assert "new_tag" in idx["entries"][old_e.id]["tags"]

    async def test_no_op_on_empty_response(self):
        mock_llm = MagicMock()
        mock_resp = MagicMock()
        mock_resp.content = json.dumps({})
        mock_llm.acomplete = AsyncMock(return_value=mock_resp)

        new_e = _entry("2026-03-01", "10:00")
        old_e = _entry("2026-03-01", "09:00", tags=["original"])
        idx = _make_index(new_e, old_e)

        await maybe_evolve_neighbors(new_e.id, [old_e.id], idx, mock_llm)
        # Tags unchanged
        assert idx["entries"][old_e.id]["tags"] == ["original"]

    async def test_silently_handles_llm_failure(self):
        mock_llm = MagicMock()
        mock_llm.acomplete = AsyncMock(side_effect=RuntimeError("down"))

        new_e = _entry("2026-03-01", "10:00")
        old_e = _entry("2026-03-01", "09:00")
        idx = _make_index(new_e, old_e)

        # Must not raise
        await maybe_evolve_neighbors(new_e.id, [old_e.id], idx, mock_llm)

    async def test_respects_max_neighbors_cap(self):
        mock_llm = MagicMock()
        mock_resp = MagicMock()
        mock_resp.content = json.dumps({})
        mock_llm.acomplete = AsyncMock(return_value=mock_resp)

        new_e = _entry("2026-03-01", "10:00")
        neighbors = [_entry("2026-03-01", f"0{i}:00") for i in range(5)]
        idx = _make_index(new_e, *neighbors)

        await maybe_evolve_neighbors(
            new_e.id, [n.id for n in neighbors], idx, mock_llm, max_neighbors_to_evolve=2
        )
        assert mock_llm.acomplete.call_count == 2


# ---------------------------------------------------------------------------
# recall_diary — semantic path and fallback (integration-style)
# ---------------------------------------------------------------------------


@pytest.mark.asyncio
class TestRecallDiary:
    async def test_substring_fallback_when_embeddings_disabled(
        self, tmp_path, monkeypatch
    ):
        """When HIVE_EMBED_MODEL is not set, recall_diary uses substring matching."""
        monkeypatch.delenv("HIVE_EMBED_MODEL", raising=False)

        # Write a fake diary file
        memories_dir = tmp_path / ".hive" / "queen" / "memories"
        memories_dir.mkdir(parents=True)
        today_str = "2026-03-24"
        (memories_dir / f"MEMORY-{today_str}.md").write_text(
            "# March 24, 2026\n\n### 09:00\n\nWorked on the pipeline agent today.\n",
            encoding="utf-8",
        )

        # Patch the path functions
        import framework.agents.queen.queen_memory as qm
        monkeypatch.setattr(qm, "episodic_memory_path", lambda d=None: memories_dir / f"MEMORY-{today_str}.md")

        from framework.tools.queen_memory_tools import recall_diary

        result = await recall_diary(query="pipeline", days_back=1)
        assert "pipeline agent" in result

    async def test_no_results_message(self, monkeypatch):
        """Returns a helpful message when nothing matches."""
        monkeypatch.delenv("HIVE_EMBED_MODEL", raising=False)

        import framework.agents.queen.queen_memory as qm
        # Point to a non-existent path
        monkeypatch.setattr(
            qm, "episodic_memory_path", lambda d=None: Path("/nonexistent/MEMORY.md")
        )

        from framework.tools.queen_memory_tools import recall_diary

        result = await recall_diary(query="nonexistent topic", days_back=1)
        assert "No diary entries" in result
@@ -1,8 +1,8 @@
"""Queen's ticket receiver entry point.

When the Worker Health Judge emits a WORKER_ESCALATION_TICKET event on the
shared EventBus, this entry point fires and routes to the ``ticket_triage``
node, where the Queen deliberates and decides whether to notify the operator.
When a WORKER_ESCALATION_TICKET event is emitted on the shared EventBus,
this entry point fires and routes to the ``ticket_triage`` node, where the
Queen deliberates and decides whether to notify the operator.

Isolation level is ``isolated`` — the queen's triage memory is kept separate
from the worker's shared memory. Each ticket triage runs in its own context.

@@ -0,0 +1,286 @@
"""Worker per-run digest (run diary).

Storage layout:
    ~/.hive/agents/{agent_name}/runs/{run_id}/digest.md

Each completed or failed worker run gets one digest file. The queen reads
these via get_worker_status(focus='diary') before digging into live runtime
logs — the diary is a cheap, persistent record that survives across sessions.
"""

from __future__ import annotations

import logging
import traceback
from collections import Counter
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from framework.runtime.event_bus import AgentEvent, EventBus

logger = logging.getLogger(__name__)


_DIGEST_SYSTEM = """\
You maintain run digests for a worker agent.
A run digest is a concise, factual record of a single task execution.

Write 3-6 sentences covering:
- What the worker was asked to do (the task/goal)
- What approach it took and what tools it used
- What the outcome was (success, partial, or failure — and why if relevant)
- Any notable issues, retries, or escalations to the queen

Write in third person past tense. Be direct and specific.
Omit routine tool invocations unless the result matters.
Output only the digest prose — no headings, no code fences.
"""


def _worker_runs_dir(agent_name: str) -> Path:
    return Path.home() / ".hive" / "agents" / agent_name / "runs"


def digest_path(agent_name: str, run_id: str) -> Path:
    return _worker_runs_dir(agent_name) / run_id / "digest.md"
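
# Illustrative result under the storage layout above (the agent and run
# names are invented for the example):
#   digest_path("inbox_guardian", "run-42")
#   -> ~/.hive/agents/inbox_guardian/runs/run-42/digest.md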


def _collect_run_events(bus: EventBus, run_id: str, limit: int = 2000) -> list[AgentEvent]:
    """Collect all events belonging to *run_id* from the bus history.

    Strategy: find the EXECUTION_STARTED event that carries ``run_id``,
    extract its ``execution_id``, then query the bus by that execution_id.
    This works because TOOL_CALL_*, EDGE_TRAVERSED, NODE_STALLED etc. carry
    execution_id but not run_id.

    Falls back to a full-scan run_id filter when EXECUTION_STARTED is not
    found (e.g. bus was rotated).
    """
    from framework.runtime.event_bus import EventType

    # Pass 1: find execution_id via EXECUTION_STARTED with matching run_id
    started = bus.get_history(event_type=EventType.EXECUTION_STARTED, limit=limit)
    exec_id: str | None = None
    for e in started:
        if getattr(e, "run_id", None) == run_id and e.execution_id:
            exec_id = e.execution_id
            break

    if exec_id:
        return bus.get_history(execution_id=exec_id, limit=limit)

    # Fallback: scan all events and match by run_id attribute
    return [e for e in bus.get_history(limit=limit) if getattr(e, "run_id", None) == run_id]


def _build_run_context(
    events: list[AgentEvent],
    outcome_event: AgentEvent | None,
) -> str:
    """Assemble a plain-text run context string for the digest LLM call."""
    from framework.runtime.event_bus import EventType

    # Reverse so events are in chronological order
    events_chron = list(reversed(events))

    lines: list[str] = []

    # Task input from EXECUTION_STARTED
    started = [e for e in events_chron if e.type == EventType.EXECUTION_STARTED]
    if started:
        inp = started[0].data.get("input", {})
        if inp:
            lines.append(f"Task input: {str(inp)[:400]}")

    # Duration (elapsed so far if no outcome yet)
    ref_ts = outcome_event.timestamp if outcome_event else datetime.utcnow()
    if started:
        elapsed = (ref_ts - started[0].timestamp).total_seconds()
        m, s = divmod(int(elapsed), 60)
        lines.append(f"Duration so far: {m}m {s}s" if m else f"Duration so far: {s}s")

    # Outcome
    if outcome_event is None:
        lines.append("Status: still running (mid-run snapshot)")
    elif outcome_event.type == EventType.EXECUTION_COMPLETED:
        out = outcome_event.data.get("output", {})
        out_str = f"Outcome: completed. Output: {str(out)[:300]}"
        lines.append(out_str if out else "Outcome: completed.")
    else:
        err = outcome_event.data.get("error", "")
        lines.append(f"Outcome: failed. Error: {str(err)[:300]}" if err else "Outcome: failed.")

    # Node path (edge traversals)
    edges = [e for e in events_chron if e.type == EventType.EDGE_TRAVERSED]
    if edges:
        parts = [
            f"{e.data.get('source_node', '?')}->{e.data.get('target_node', '?')}"
            for e in edges[-20:]
        ]
        lines.append(f"Node path: {', '.join(parts)}")

    # Tools used
    tool_events = [e for e in events_chron if e.type == EventType.TOOL_CALL_COMPLETED]
    if tool_events:
        names = [e.data.get("tool_name", "?") for e in tool_events]
        counts = Counter(names)
        summary = ", ".join(f"{name}×{n}" if n > 1 else name for name, n in counts.most_common())
        lines.append(f"Tools used: {summary}")
        # Note any tool errors
        errors = [e for e in tool_events if e.data.get("is_error")]
        if errors:
            err_names = Counter(e.data.get("tool_name", "?") for e in errors)
            lines.append(f"Tool errors: {dict(err_names)}")

    # Issues
    issue_map = {
        EventType.NODE_STALLED: "stall",
        EventType.NODE_TOOL_DOOM_LOOP: "doom loop",
        EventType.CONSTRAINT_VIOLATION: "constraint violation",
        EventType.NODE_RETRY: "retry",
    }
    issue_parts: list[str] = []
    for evt_type, label in issue_map.items():
        n = sum(1 for e in events_chron if e.type == evt_type)
        if n:
            issue_parts.append(f"{n} {label}(s)")
    if issue_parts:
        lines.append(f"Issues: {', '.join(issue_parts)}")

    # Escalations to queen
    escalations = [e for e in events_chron if e.type == EventType.ESCALATION_REQUESTED]
    if escalations:
        lines.append(f"Escalations to queen: {len(escalations)}")

    # Final LLM output snippet (last LLM_TEXT_DELTA snapshot)
    text_events = [e for e in reversed(events_chron) if e.type == EventType.LLM_TEXT_DELTA]
    if text_events:
        snapshot = text_events[0].data.get("snapshot", "") or ""
        if snapshot:
            lines.append(f"Final LLM output: {snapshot[-400:].strip()}")

    return "\n".join(lines)


async def consolidate_worker_run(
    agent_name: str,
    run_id: str,
    outcome_event: AgentEvent | None,
    bus: EventBus,
    llm: Any,
) -> None:
    """Write (or overwrite) the digest for a worker run.

    Called fire-and-forget either:
    - After EXECUTION_COMPLETED / EXECUTION_FAILED (outcome_event set, final write)
    - Periodically during a run on a cooldown timer (outcome_event=None, mid-run snapshot)

    The digest file is always overwritten so each call produces the freshest view.
    The final completion/failure call supersedes any mid-run snapshot.

    Args:
        agent_name: Worker agent directory name (determines storage path).
        run_id: The run ID.
        outcome_event: EXECUTION_COMPLETED or EXECUTION_FAILED event, or None for
            a mid-run snapshot.
        bus: The session EventBus (shared queen + worker).
        llm: LLMProvider with an acomplete() method.
    """
    try:
        events = _collect_run_events(bus, run_id)
        run_context = _build_run_context(events, outcome_event)
        if not run_context:
            logger.debug("worker_memory: no events for run %s, skipping digest", run_id)
            return

        is_final = outcome_event is not None
        logger.info(
            "worker_memory: generating %s digest for run %s ...",
            "final" if is_final else "mid-run",
            run_id,
        )

        from framework.agents.queen.config import default_config

        resp = await llm.acomplete(
            messages=[{"role": "user", "content": run_context}],
            system=_DIGEST_SYSTEM,
            max_tokens=min(default_config.max_tokens, 512),
        )
        digest_text = (resp.content or "").strip()
        if not digest_text:
            logger.warning("worker_memory: LLM returned empty digest for run %s", run_id)
            return

        path = digest_path(agent_name, run_id)
        path.parent.mkdir(parents=True, exist_ok=True)

        from framework.runtime.event_bus import EventType

        ts = (outcome_event.timestamp if outcome_event else datetime.utcnow()).strftime(
            "%Y-%m-%d %H:%M"
        )
        if outcome_event is None:
            status = "running"
        elif outcome_event.type == EventType.EXECUTION_COMPLETED:
            status = "completed"
        else:
            status = "failed"

        path.write_text(
            f"# {run_id}\n\n**{ts}** | {status}\n\n{digest_text}\n",
            encoding="utf-8",
        )
        logger.info(
            "worker_memory: %s digest written for run %s (%d chars)",
            status,
            run_id,
            len(digest_text),
        )

    except Exception:
        tb = traceback.format_exc()
        logger.exception("worker_memory: digest failed for run %s", run_id)
        # Persist the error so it's findable without log access
        error_path = _worker_runs_dir(agent_name) / run_id / "digest_error.txt"
        try:
            error_path.parent.mkdir(parents=True, exist_ok=True)
            error_path.write_text(
                f"run_id: {run_id}\ntime: {datetime.now().isoformat()}\n\n{tb}",
                encoding="utf-8",
            )
        except Exception:
            pass


def read_recent_digests(agent_name: str, max_runs: int = 5) -> list[tuple[str, str]]:
    """Return recent run digests as [(run_id, content), ...], newest first.

    Args:
        agent_name: Worker agent directory name.
        max_runs: Maximum number of digests to return.

    Returns:
        List of (run_id, digest_content) tuples, ordered newest first.
    """
    runs_dir = _worker_runs_dir(agent_name)
    if not runs_dir.exists():
        return []

    digest_files = sorted(
        runs_dir.glob("*/digest.md"),
        key=lambda p: p.stat().st_mtime,
        reverse=True,
    )[:max_runs]

    result: list[tuple[str, str]] = []
    for f in digest_files:
        try:
            content = f.read_text(encoding="utf-8").strip()
            if content:
                result.append((f.parent.name, content))
        except OSError:
            continue
    return result
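
# Usage sketch (the agent name is hypothetical): surface the newest digests.
#   for run_id, digest in read_recent_digests("inbox_guardian", max_runs=3):
#       print(run_id, digest.splitlines()[0])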

@@ -89,6 +89,16 @@ def main():

    register_testing_commands(subparsers)

    # Register skill commands (skill list, skill trust, ...)
    from framework.skills.cli import register_skill_commands

    register_skill_commands(subparsers)

    # Register debugger commands (debugger)
    from framework.debugger.cli import register_debugger_commands

    register_debugger_commands(subparsers)

    args = parser.parse_args()

    if hasattr(args, "func"):

+306 -3
@@ -19,6 +19,10 @@ from framework.graph.edge import DEFAULT_MAX_TOKENS
# ---------------------------------------------------------------------------

HIVE_CONFIG_FILE = Path.home() / ".hive" / "configuration.json"

# Hive LLM router endpoint (Anthropic-compatible).
# litellm's Anthropic handler appends /v1/messages, so this is just the base host.
HIVE_LLM_ENDPOINT = "https://api.adenhq.com"
logger = logging.getLogger(__name__)


@@ -47,16 +51,167 @@ def get_preferred_model() -> str:
    """Return the user's preferred LLM model string (e.g. 'anthropic/claude-sonnet-4-20250514')."""
    llm = get_hive_config().get("llm", {})
    if llm.get("provider") and llm.get("model"):
        return f"{llm['provider']}/{llm['model']}"
        provider = str(llm["provider"])
        model = str(llm["model"]).strip()
        # OpenRouter quickstart stores raw model IDs; tolerate pasted "openrouter/<id>" too.
        if provider.lower() == "openrouter" and model.lower().startswith("openrouter/"):
            model = model[len("openrouter/") :]
        if model:
            return f"{provider}/{model}"
    return "anthropic/claude-sonnet-4-20250514"


def get_preferred_worker_model() -> str | None:
    """Return the user's preferred worker LLM model, or None if not configured.

    Reads from the ``worker_llm`` section of ~/.hive/configuration.json.
    Returns None when no worker-specific model is set, so callers can
    fall back to the default (queen) model via ``get_preferred_model()``.
    """
    worker_llm = get_hive_config().get("worker_llm", {})
    if worker_llm.get("provider") and worker_llm.get("model"):
        provider = str(worker_llm["provider"])
        model = str(worker_llm["model"]).strip()
        if provider.lower() == "openrouter" and model.lower().startswith("openrouter/"):
            model = model[len("openrouter/") :]
        if model:
            return f"{provider}/{model}"
    return None
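
# Example ~/.hive/configuration.json snippet read by the worker_llm helpers
# below (the provider/model values are illustrative, not defaults):
#   {
#     "worker_llm": {
#       "provider": "openrouter",
#       "model": "some-org/some-model",
#       "api_key_env_var": "OPENROUTER_API_KEY",
#       "max_tokens": 8192
#     }
#   }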
|
||||
|
||||
|
||||
def get_worker_api_key() -> str | None:
|
||||
"""Return the API key for the worker LLM, falling back to the default key."""
|
||||
worker_llm = get_hive_config().get("worker_llm", {})
|
||||
if not worker_llm:
|
||||
return get_api_key()
|
||||
|
||||
# Worker-specific subscription / env var
|
||||
if worker_llm.get("use_claude_code_subscription"):
|
||||
try:
|
||||
from framework.runner.runner import get_claude_code_token
|
||||
|
||||
token = get_claude_code_token()
|
||||
if token:
|
||||
return token
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
if worker_llm.get("use_codex_subscription"):
|
||||
try:
|
||||
from framework.runner.runner import get_codex_token
|
||||
|
||||
token = get_codex_token()
|
||||
if token:
|
||||
return token
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
if worker_llm.get("use_kimi_code_subscription"):
|
||||
try:
|
||||
from framework.runner.runner import get_kimi_code_token
|
||||
|
||||
token = get_kimi_code_token()
|
||||
if token:
|
||||
return token
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
if worker_llm.get("use_antigravity_subscription"):
|
||||
try:
|
||||
from framework.runner.runner import get_antigravity_token
|
||||
|
||||
token = get_antigravity_token()
|
||||
if token:
|
||||
return token
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
api_key_env_var = worker_llm.get("api_key_env_var")
|
||||
if api_key_env_var:
|
||||
return os.environ.get(api_key_env_var)
|
||||
|
||||
# Fall back to default key
|
||||
return get_api_key()
|
||||
|
||||
|
||||
def get_worker_api_base() -> str | None:
|
||||
"""Return the api_base for the worker LLM, falling back to the default."""
|
||||
worker_llm = get_hive_config().get("worker_llm", {})
|
||||
if not worker_llm:
|
||||
return get_api_base()
|
||||
|
||||
if worker_llm.get("use_codex_subscription"):
|
||||
return "https://chatgpt.com/backend-api/codex"
|
||||
if worker_llm.get("use_kimi_code_subscription"):
|
||||
return "https://api.kimi.com/coding"
|
||||
if worker_llm.get("use_antigravity_subscription"):
|
||||
# Antigravity uses AntigravityProvider directly — no api_base needed.
|
||||
return None
|
||||
if worker_llm.get("api_base"):
|
||||
return worker_llm["api_base"]
|
||||
if str(worker_llm.get("provider", "")).lower() == "openrouter":
|
||||
return OPENROUTER_API_BASE
|
||||
return None
|
||||
|
||||
|
||||
def get_worker_llm_extra_kwargs() -> dict[str, Any]:
|
||||
"""Return extra kwargs for the worker LLM provider."""
|
||||
worker_llm = get_hive_config().get("worker_llm", {})
|
||||
if not worker_llm:
|
||||
return get_llm_extra_kwargs()
|
||||
|
||||
if worker_llm.get("use_claude_code_subscription"):
|
||||
api_key = get_worker_api_key()
|
||||
if api_key:
|
||||
return {
|
||||
"extra_headers": {"authorization": f"Bearer {api_key}"},
|
||||
}
|
||||
if worker_llm.get("use_codex_subscription"):
|
||||
api_key = get_worker_api_key()
|
||||
if api_key:
|
||||
headers: dict[str, str] = {
|
||||
"Authorization": f"Bearer {api_key}",
|
||||
"User-Agent": "CodexBar",
|
||||
}
|
||||
try:
|
||||
from framework.runner.runner import get_codex_account_id
|
||||
|
||||
account_id = get_codex_account_id()
|
||||
if account_id:
|
||||
headers["ChatGPT-Account-Id"] = account_id
|
||||
except ImportError:
|
||||
pass
|
||||
return {
|
||||
"extra_headers": headers,
|
||||
"store": False,
|
||||
"allowed_openai_params": ["store"],
|
||||
}
|
||||
return {}
|
||||
|
||||
|
||||
def get_worker_max_tokens() -> int:
|
||||
"""Return max_tokens for the worker LLM, falling back to default."""
|
||||
worker_llm = get_hive_config().get("worker_llm", {})
|
||||
if worker_llm and "max_tokens" in worker_llm:
|
||||
return worker_llm["max_tokens"]
|
||||
return get_max_tokens()
|
||||
|
||||
|
||||
def get_worker_max_context_tokens() -> int:
|
||||
"""Return max_context_tokens for the worker LLM, falling back to default."""
|
||||
worker_llm = get_hive_config().get("worker_llm", {})
|
||||
if worker_llm and "max_context_tokens" in worker_llm:
|
||||
return worker_llm["max_context_tokens"]
|
||||
return get_max_context_tokens()
|
||||
|
||||
|
||||
def get_max_tokens() -> int:
|
||||
"""Return the configured max_tokens, falling back to DEFAULT_MAX_TOKENS."""
|
||||
return get_hive_config().get("llm", {}).get("max_tokens", DEFAULT_MAX_TOKENS)
|
||||
|
||||
|
||||
DEFAULT_MAX_CONTEXT_TOKENS = 32_000
|
||||
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
|
||||
|
||||
|
||||
def get_max_context_tokens() -> int:
|
||||
@@ -109,6 +264,17 @@ def get_api_key() -> str | None:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# Antigravity subscription: read OAuth token from accounts JSON
|
||||
if llm.get("use_antigravity_subscription"):
|
||||
try:
|
||||
from framework.runner.runner import get_antigravity_token
|
||||
|
||||
token = get_antigravity_token()
|
||||
if token:
|
||||
return token
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# Standard env-var path (covers ZAI Code and all API-key providers)
|
||||
api_key_env_var = llm.get("api_key_env_var")
|
||||
if api_key_env_var:
|
||||
@@ -116,11 +282,141 @@ def get_api_key() -> str | None:
|
||||
return None
|
||||
|
||||
|
||||
# OAuth credentials for Antigravity are fetched from the opencode-antigravity-auth project.
|
||||
# This project reverse-engineered and published the public OAuth credentials
|
||||
# for Google's Antigravity/Cloud Code Assist API.
|
||||
# Source: https://github.com/NoeFabris/opencode-antigravity-auth
|
||||
_ANTIGRAVITY_CREDENTIALS_URL = (
|
||||
"https://raw.githubusercontent.com/NoeFabris/opencode-antigravity-auth/dev/src/constants.ts"
|
||||
)
|
||||
_antigravity_credentials_cache: tuple[str | None, str | None] = (None, None)
|
||||
|
||||
|
||||
def _fetch_antigravity_credentials() -> tuple[str | None, str | None]:
|
||||
"""Fetch OAuth client ID and secret from the public npm package source on GitHub."""
|
||||
global _antigravity_credentials_cache
|
||||
if _antigravity_credentials_cache[0] and _antigravity_credentials_cache[1]:
|
||||
return _antigravity_credentials_cache
|
||||
|
||||
import re
|
||||
import urllib.request
|
||||
|
||||
try:
|
||||
req = urllib.request.Request(
|
||||
_ANTIGRAVITY_CREDENTIALS_URL, headers={"User-Agent": "Hive/1.0"}
|
||||
)
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
content = resp.read().decode("utf-8")
|
||||
id_match = re.search(r'ANTIGRAVITY_CLIENT_ID\s*=\s*"([^"]+)"', content)
|
||||
secret_match = re.search(r'ANTIGRAVITY_CLIENT_SECRET\s*=\s*"([^"]+)"', content)
|
||||
client_id = id_match.group(1) if id_match else None
|
||||
client_secret = secret_match.group(1) if secret_match else None
|
||||
if client_id and client_secret:
|
||||
_antigravity_credentials_cache = (client_id, client_secret)
|
||||
return client_id, client_secret
|
||||
except Exception as e:
|
||||
logger.debug("Failed to fetch Antigravity credentials from public source: %s", e)
|
||||
return None, None
|
||||
|
||||
|
||||
def get_antigravity_client_id() -> str:
|
||||
"""Return the Antigravity OAuth application client ID.
|
||||
|
||||
Checked in order:
|
||||
1. ``ANTIGRAVITY_CLIENT_ID`` environment variable
|
||||
2. ``llm.antigravity_client_id`` in ~/.hive/configuration.json
|
||||
3. Fetch from public source (opencode-antigravity-auth project on GitHub)
|
||||
"""
|
||||
env = os.environ.get("ANTIGRAVITY_CLIENT_ID")
|
||||
if env:
|
||||
return env
|
||||
cfg_val = get_hive_config().get("llm", {}).get("antigravity_client_id")
|
||||
if cfg_val:
|
||||
return cfg_val
|
||||
# Fetch from public source
|
||||
client_id, _ = _fetch_antigravity_credentials()
|
||||
if client_id:
|
||||
return client_id
|
||||
raise RuntimeError("Could not obtain Antigravity OAuth client ID")
|
||||
|
||||
|
||||
def get_antigravity_client_secret() -> str | None:
|
||||
"""Return the Antigravity OAuth client secret.
|
||||
|
||||
Checked in order:
|
||||
1. ``ANTIGRAVITY_CLIENT_SECRET`` environment variable
|
||||
2. ``llm.antigravity_client_secret`` in ~/.hive/configuration.json
|
||||
3. Fetch from public source (opencode-antigravity-auth project on GitHub)
|
||||
|
||||
Returns None when not found — token refresh will be skipped and
|
||||
the caller must use whatever access token is already available.
|
||||
"""
|
||||
env = os.environ.get("ANTIGRAVITY_CLIENT_SECRET")
|
||||
if env:
|
||||
return env
|
||||
cfg_val = get_hive_config().get("llm", {}).get("antigravity_client_secret") or None
|
||||
if cfg_val:
|
||||
return cfg_val
|
||||
# Fetch from public source
|
||||
_, secret = _fetch_antigravity_credentials()
|
||||
return secret
|
||||
|
||||
|
||||
def get_embed_model() -> str | None:
|
||||
"""Return the configured embedding model string, or None if not set.
|
||||
|
||||
Reads from the ``embedding`` section of ~/.hive/configuration.json:
|
||||
|
||||
{
|
||||
"embedding": {
|
||||
"provider": "openai",
|
||||
"model": "text-embedding-3-small",
|
||||
"api_key_env_var": "OPENAI_API_KEY"
|
||||
}
|
||||
}
|
||||
|
||||
Returns a litellm-compatible ``"provider/model"`` string, e.g.
|
||||
``"openai/text-embedding-3-small"``.
|
||||
Falls back to the ``HIVE_EMBED_MODEL`` environment variable for
|
||||
backward compatibility.
|
||||
"""
|
||||
embed = get_hive_config().get("embedding", {})
|
||||
if embed.get("provider") and embed.get("model"):
|
||||
provider = str(embed["provider"]).strip()
|
||||
model = str(embed["model"]).strip()
|
||||
if provider and model:
|
||||
return f"{provider}/{model}"
|
||||
return os.environ.get("HIVE_EMBED_MODEL") or None
|
||||
|
||||
|
||||
def get_embed_api_key() -> str | None:
|
||||
"""Return the API key for the embedding provider, or None if not set."""
|
||||
embed = get_hive_config().get("embedding", {})
|
||||
api_key_env_var = embed.get("api_key_env_var")
|
||||
if api_key_env_var:
|
||||
return os.environ.get(api_key_env_var)
|
||||
return None
|
||||
|
||||
|
||||
def get_embed_api_base() -> str | None:
|
||||
"""Return a custom api_base for the embedding provider, or None."""
|
||||
embed = get_hive_config().get("embedding", {})
|
||||
return embed.get("api_base") or None
|
||||
|
||||
|
||||
def get_gcu_enabled() -> bool:
|
||||
"""Return whether GCU (browser automation) is enabled in user config."""
|
||||
return get_hive_config().get("gcu_enabled", True)
|
||||
|
||||
|
||||
def get_gcu_viewport_scale() -> float:
|
||||
"""Return GCU viewport scale factor (0.1-1.0), default 0.8."""
|
||||
scale = get_hive_config().get("gcu_viewport_scale", 0.8)
|
||||
if isinstance(scale, (int, float)) and 0.1 <= scale <= 1.0:
|
||||
return float(scale)
|
||||
return 0.8
|
||||
|
||||
|
||||
def get_api_base() -> str | None:
|
||||
"""Return the api_base URL for OpenAI-compatible endpoints, if configured."""
|
||||
llm = get_hive_config().get("llm", {})
|
||||
@@ -130,7 +426,14 @@ def get_api_base() -> str | None:
|
||||
if llm.get("use_kimi_code_subscription"):
|
||||
# Kimi Code uses an Anthropic-compatible endpoint (no /v1 suffix).
|
||||
return "https://api.kimi.com/coding"
|
||||
return llm.get("api_base")
|
||||
if llm.get("use_antigravity_subscription"):
|
||||
# Antigravity uses AntigravityProvider directly — no api_base needed.
|
||||
return None
|
||||
if llm.get("api_base"):
|
||||
return llm["api_base"]
|
||||
if str(llm.get("provider", "")).lower() == "openrouter":
|
||||
return OPENROUTER_API_BASE
|
||||
return None
|
||||
|
||||
|
||||
def get_llm_extra_kwargs() -> dict[str, Any]:
|
||||
@@ -175,7 +478,7 @@ def get_llm_extra_kwargs() -> dict[str, Any]:
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# RuntimeConfig – shared across agent templates
|
||||
# RuntimeConfig - shared across agent templates
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
|
||||
@@ -142,13 +142,17 @@ def save_aden_api_key(key: str) -> None:
|
||||
os.environ[ADEN_ENV_VAR] = key
|
||||
|
||||
|
||||
def delete_aden_api_key() -> None:
|
||||
"""Remove ADEN_API_KEY from the encrypted store and ``os.environ``."""
|
||||
def delete_aden_api_key() -> bool:
|
||||
"""Remove ADEN_API_KEY from the encrypted store and ``os.environ``.
|
||||
|
||||
Returns True if the key existed and was deleted, False otherwise.
|
||||
"""
|
||||
deleted = False
|
||||
try:
|
||||
from .storage import EncryptedFileStorage
|
||||
|
||||
storage = EncryptedFileStorage()
|
||||
storage.delete(ADEN_CREDENTIAL_ID)
|
||||
deleted = storage.delete(ADEN_CREDENTIAL_ID)
|
||||
except (FileNotFoundError, PermissionError) as e:
|
||||
logger.debug("Could not delete %s from encrypted store: %s", ADEN_CREDENTIAL_ID, e)
|
||||
except Exception:
|
||||
@@ -157,8 +161,8 @@ def delete_aden_api_key() -> None:
|
||||
ADEN_CREDENTIAL_ID,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
os.environ.pop(ADEN_ENV_VAR, None)
|
||||
return deleted
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@@ -27,6 +27,7 @@ from __future__ import annotations

import getpass
import json
import logging
import os
import sys
from collections.abc import Callable
@@ -37,6 +38,8 @@ from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
    from framework.graph import NodeSpec

logger = logging.getLogger(__name__)


# ANSI colors for terminal output
class Colors:
@@ -365,8 +368,11 @@ class CredentialSetupSession:
        self._print("")
        try:
            api_key = self.password_fn(f"Paste your {cred.env_var}: ").strip()
        except (EOFError, OSError) as exc:
            logger.debug("Password input unavailable, falling back to plain input: %s", exc)
            api_key = self._input(f"Paste your {cred.env_var}: ").strip()
        except Exception:
            # Fallback to regular input if password input fails
            logger.warning("Unexpected error reading password input", exc_info=True)
            api_key = self._input(f"Paste your {cred.env_var}: ").strip()

        if not api_key:
@@ -403,7 +409,11 @@ class CredentialSetupSession:

        try:
            aden_key = self.password_fn("Paste your ADEN_API_KEY: ").strip()
        except (EOFError, OSError) as exc:
            logger.debug("Password input unavailable for ADEN_API_KEY: %s", exc)
            aden_key = self._input("Paste your ADEN_API_KEY: ").strip()
        except Exception:
            logger.warning("Unexpected error reading ADEN_API_KEY input", exc_info=True)
            aden_key = self._input("Paste your ADEN_API_KEY: ").strip()

        if not aden_key:
@@ -433,8 +443,10 @@ class CredentialSetupSession:
                value = store.get_key(cred_id, cred.credential_key)
                if value:
                    os.environ[cred.env_var] = value
            except (KeyError, OSError) as exc:
                logger.debug("Could not export credential to env: %s", exc)
            except Exception:
                pass
                logger.warning("Unexpected error exporting credential to env", exc_info=True)
            return True
        else:
            self._print(
@@ -457,9 +469,12 @@ class CredentialSetupSession:
                "message": result.message,
                "details": result.details,
            }
        except Exception:
        except ImportError:
            # No health checker available
            return None
        except Exception:
            logger.warning("Health check failed for %s", cred.credential_name, exc_info=True)
            return None

    def _store_credential(self, cred: MissingCredential, value: str) -> None:
        """Store credential in encrypted store and export to env."""
@@ -561,7 +576,11 @@ def _load_nodes_from_python_agent(agent_path: Path) -> list:
        sys.modules[spec.name] = module
        spec.loader.exec_module(module)
        return getattr(module, "nodes", [])
    except (ImportError, OSError) as exc:
        logger.debug("Could not load agent module: %s", exc)
        return []
    except Exception:
        logger.warning("Unexpected error loading agent module", exc_info=True)
        return []


@@ -588,7 +607,11 @@ def _load_nodes_from_json_agent(agent_json: Path) -> list:
            )
        )
        return nodes
    except (json.JSONDecodeError, KeyError, OSError) as exc:
        logger.debug("Could not load JSON agent: %s", exc)
        return []
    except Exception:
        logger.warning("Unexpected error loading JSON agent", exc_info=True)
        return []


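The recurring pattern in these hunks is worth isolating: expected failure modes are caught first and logged at debug level, while a trailing broad handler logs at warning with a traceback instead of swallowing the error. A standalone sketch of the shape (names are illustrative, not from the diff):

```python
import json
import logging

logger = logging.getLogger(__name__)

def load_config(path: str) -> dict:
    """Illustrative two-tier handler: expected errors stay quiet, surprises are loud."""
    try:
        with open(path, encoding="utf-8") as fh:
            return json.load(fh)
    except (OSError, json.JSONDecodeError) as exc:
        logger.debug("Could not load config %s: %s", path, exc)  # expected -> debug
        return {}
    except Exception:
        logger.warning("Unexpected error loading %s", path, exc_info=True)  # traceback kept
        return {}
```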
@@ -51,6 +51,16 @@ def ensure_credential_key_env() -> None:
            if found and value:
                os.environ[var_name] = value
                logger.debug("Loaded %s from shell config", var_name)
        # Also load the currently configured LLM env var even if it's not in CREDENTIAL_SPECS.
        # This keeps quickstart-written keys available to fresh processes on Unix shells.
        from framework.config import get_hive_config

        llm_env_var = str(get_hive_config().get("llm", {}).get("api_key_env_var", "")).strip()
        if llm_env_var and not os.environ.get(llm_env_var):
            found, value = check_env_var_in_shell_config(llm_env_var)
            if found and value:
                os.environ[llm_env_var] = value
                logger.debug("Loaded configured LLM env var %s from shell config", llm_env_var)
    except ImportError:
        pass


@@ -0,0 +1,76 @@
"""CLI command for the LLM debug log viewer."""

import argparse
import subprocess
import sys
from pathlib import Path

_SCRIPT = Path(__file__).resolve().parents[3] / "scripts" / "llm_debug_log_visualizer.py"


def register_debugger_commands(subparsers: argparse._SubParsersAction) -> None:
    """Register the ``hive debugger`` command."""
    parser = subparsers.add_parser(
        "debugger",
        help="Open the LLM debug log viewer",
        description=(
            "Start a local server that lets you browse LLM debug sessions "
            "recorded in ~/.hive/llm_logs. Sessions are loaded on demand so "
            "the browser stays responsive."
        ),
    )
    parser.add_argument(
        "--session",
        help="Execution ID to select initially.",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=0,
        help="Port for the local server (0 = auto-pick a free port).",
    )
    parser.add_argument(
        "--logs-dir",
        help="Directory containing JSONL log files (default: ~/.hive/llm_logs).",
    )
    parser.add_argument(
        "--limit-files",
        type=int,
        default=None,
        help="Maximum number of newest log files to scan (default: 200).",
    )
    parser.add_argument(
        "--output",
        help="Write a static HTML file instead of starting a server.",
    )
    parser.add_argument(
        "--no-open",
        action="store_true",
        help="Start the server but do not open a browser.",
    )
    parser.add_argument(
        "--include-tests",
        action="store_true",
        help="Show test/mock sessions (hidden by default).",
    )
    parser.set_defaults(func=cmd_debugger)


def cmd_debugger(args: argparse.Namespace) -> int:
    """Launch the LLM debug log visualizer."""
    cmd: list[str] = [sys.executable, str(_SCRIPT)]
    if args.session:
        cmd += ["--session", args.session]
    if args.port:
        cmd += ["--port", str(args.port)]
    if args.logs_dir:
        cmd += ["--logs-dir", args.logs_dir]
    if args.limit_files is not None:
        cmd += ["--limit-files", str(args.limit_files)]
    if args.output:
        cmd += ["--output", args.output]
    if args.no_open:
        cmd.append("--no-open")
    if args.include_tests:
        cmd.append("--include-tests")
    return subprocess.call(cmd)
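Since `cmd_debugger` is a thin forwarder, the mapping from CLI flags to the spawned command is direct. A sketch of one invocation; the flag values are invented for illustration:

```python
import argparse

# `hive debugger --port 8765 --no-open` ends up spawning roughly:
#   [sys.executable, ".../scripts/llm_debug_log_visualizer.py",
#    "--port", "8765", "--no-open"]
ns = argparse.Namespace(
    session=None, port=8765, logs_dir=None, limit_files=None,
    output=None, no_open=True, include_tests=False,
)
# cmd_debugger(ns)  # returns the visualizer's exit code
```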
@@ -33,10 +33,20 @@ class Message:
    is_transition_marker: bool = False
    # True when this message is real human input (from /chat), not a system prompt
    is_client_input: bool = False
    # Optional image content blocks (e.g. from browser_screenshot)
    image_content: list[dict[str, Any]] | None = None
    # True when message contains an activated skill body (AS-10: never prune)
    is_skill_content: bool = False

    def to_llm_dict(self) -> dict[str, Any]:
        """Convert to OpenAI-format message dict."""
        if self.role == "user":
            if self.image_content:
                blocks: list[dict[str, Any]] = []
                if self.content:
                    blocks.append({"type": "text", "text": self.content})
                blocks.extend(self.image_content)
                return {"role": "user", "content": blocks}
            return {"role": "user", "content": self.content}

        if self.role == "assistant":
@@ -47,6 +57,15 @@ class Message:

        # role == "tool"
        content = f"ERROR: {self.content}" if self.is_error else self.content
        if self.image_content:
            # Multimodal tool result: text + image content blocks
            blocks: list[dict[str, Any]] = [{"type": "text", "text": content}]
            blocks.extend(self.image_content)
            return {
                "role": "tool",
                "tool_call_id": self.tool_use_id,
                "content": blocks,
            }
        return {
            "role": "tool",
            "tool_call_id": self.tool_use_id,
@@ -72,6 +91,8 @@ class Message:
            d["is_transition_marker"] = self.is_transition_marker
        if self.is_client_input:
            d["is_client_input"] = self.is_client_input
        if self.image_content is not None:
            d["image_content"] = self.image_content
        return d

    @classmethod
@@ -87,6 +108,7 @@ class Message:
            phase_id=data.get("phase_id"),
            is_transition_marker=data.get("is_transition_marker", False),
            is_client_input=data.get("is_client_input", False),
            image_content=data.get("image_content"),
        )


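The multimodal tool-result branch is easiest to see with a concrete value. A sketch of the shape `to_llm_dict()` produces; the constructor arguments and the image block payload are assumed, using only fields visible in this hunk:

```python
# Assumed construction — Message fields as declared above.
msg = Message(
    seq=0,
    role="tool",
    content="screenshot captured",
    tool_use_id="call_1",
    image_content=[{"type": "image_url",
                    "image_url": {"url": "data:image/png;base64,..."}}],
)
msg.to_llm_dict()
# -> {"role": "tool",
#     "tool_call_id": "call_1",
#     "content": [{"type": "text", "text": "screenshot captured"},
#                 {"type": "image_url", "image_url": {...}}]}
```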
@@ -373,6 +395,7 @@ class NodeConversation:
        *,
        is_transition_marker: bool = False,
        is_client_input: bool = False,
        image_content: list[dict[str, Any]] | None = None,
    ) -> Message:
        msg = Message(
            seq=self._next_seq,
@@ -381,6 +404,7 @@ class NodeConversation:
            phase_id=self._current_phase,
            is_transition_marker=is_transition_marker,
            is_client_input=is_client_input,
            image_content=image_content,
        )
        self._messages.append(msg)
        self._next_seq += 1
@@ -409,6 +433,8 @@ class NodeConversation:
        tool_use_id: str,
        content: str,
        is_error: bool = False,
        image_content: list[dict[str, Any]] | None = None,
        is_skill_content: bool = False,
    ) -> Message:
        msg = Message(
            seq=self._next_seq,
@@ -417,6 +443,8 @@ class NodeConversation:
            tool_use_id=tool_use_id,
            is_error=is_error,
            phase_id=self._current_phase,
            image_content=image_content,
            is_skill_content=is_skill_content,
        )
        self._messages.append(msg)
        self._next_seq += 1
@@ -610,8 +638,15 @@ class NodeConversation:
                continue
            if msg.is_error:
                continue  # never prune errors
            if msg.is_skill_content:
                continue  # never prune activated skill instructions (AS-10)
            if msg.content.startswith("[Pruned tool result"):
                continue  # already pruned
            # Tiny results (set_output acks, confirmations) — pruning
            # saves negligible space but makes the LLM think the call
            # failed, causing costly retries.
            if len(msg.content) < 100:
                continue

            # Phase-aware: protect current phase messages
            if self._current_phase and msg.phase_id == self._current_phase:
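Restating the pruning guards as a single predicate makes the policy easier to audit. A condensed sketch, not the framework's actual helper; it mirrors the guards in the hunk above:

```python
def is_prunable(msg, current_phase: str | None) -> bool:
    """True when a tool result may be pruned to reclaim context."""
    if msg.is_error or msg.is_skill_content:
        return False                 # never prune errors or skill bodies (AS-10)
    if msg.content.startswith("[Pruned tool result"):
        return False                 # already pruned
    if len(msg.content) < 100:
        return False                 # tiny acks: pruning them triggers retries
    if current_phase and msg.phase_id == current_phase:
        return False                 # protect the current phase
    return True
```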
@@ -901,8 +936,7 @@ class NodeConversation:
            full_path = str((spill_path / conv_filename).resolve())
            ref_parts.append(
                f"[Previous conversation saved to '{full_path}'. "
                f"Use load_data('{conv_filename}'), read_file('{full_path}'), "
                f"or run_command('cat \"{full_path}\"') to review if needed.]"
                f"Use load_data('{conv_filename}') to review if needed.]"
            )
        elif not collapsed_msgs:
            ref_parts.append("[Previous freeform messages compacted.]")

@@ -322,7 +322,11 @@ class AsyncEntryPointSpec(BaseModel):

    id: str = Field(description="Unique identifier for this entry point")
    name: str = Field(description="Human-readable name")
    entry_node: str = Field(description="Node ID to start execution from")
    entry_node: str = Field(
        default="",
        description="Deprecated: Node ID to start execution from. "
        "Triggers are graph-level; worker always enters at GraphSpec.entry_node.",
    )
    trigger_type: str = Field(
        default="manual",
        description="How this entry point is triggered: webhook, api, timer, event, manual",
@@ -331,6 +335,10 @@ class AsyncEntryPointSpec(BaseModel):
        default_factory=dict,
        description="Trigger-specific configuration (e.g., webhook URL, timer interval)",
    )
    task: str = Field(
        default="",
        description="Worker task string when this trigger fires autonomously",
    )
    isolation_level: str = Field(
        default="shared", description="State isolation: isolated, shared, or synchronized"
    )
@@ -368,28 +376,8 @@ class GraphSpec(BaseModel):
            edges=[...],
        )

        For multi-entry-point agents (concurrent streams):
            GraphSpec(
                id="support-agent-graph",
                goal_id="support-001",
                entry_node="process-webhook",  # Default entry
                async_entry_points=[
                    AsyncEntryPointSpec(
                        id="webhook",
                        name="Zendesk Webhook",
                        entry_node="process-webhook",
                        trigger_type="webhook",
                    ),
                    AsyncEntryPointSpec(
                        id="api",
                        name="API Handler",
                        entry_node="process-request",
                        trigger_type="api",
                    ),
                ],
                nodes=[...],
                edges=[...],
            )
    Triggers (timer, webhook, event) are now defined in ``triggers.json``
    alongside the agent directory, not embedded in the graph spec.
    """

    id: str
@@ -402,12 +390,6 @@ class GraphSpec(BaseModel):
        default_factory=dict,
        description="Named entry points for resuming execution. Format: {name: node_id}",
    )
    async_entry_points: list[AsyncEntryPointSpec] = Field(
        default_factory=list,
        description=(
            "Asynchronous entry points for concurrent execution streams (used with AgentRuntime)"
        ),
    )
    terminal_nodes: list[str] = Field(
        default_factory=list, description="IDs of nodes that end execution"
    )
@@ -486,17 +468,6 @@ class GraphSpec(BaseModel):
                return node
        return None

    def has_async_entry_points(self) -> bool:
        """Check if this graph uses async entry points (multi-stream execution)."""
        return len(self.async_entry_points) > 0

    def get_async_entry_point(self, entry_point_id: str) -> AsyncEntryPointSpec | None:
        """Get an async entry point by ID."""
        for ep in self.async_entry_points:
            if ep.id == entry_point_id:
                return ep
        return None

    def get_outgoing_edges(self, node_id: str) -> list[EdgeSpec]:
        """Get all edges leaving a node, sorted by priority."""
        edges = [e for e in self.edges if e.source == node_id]
@@ -587,37 +558,6 @@ class GraphSpec(BaseModel):
        if not self.get_node(self.entry_node):
            errors.append(f"Entry node '{self.entry_node}' not found")

        # Check async entry points
        seen_entry_ids = set()
        for entry_point in self.async_entry_points:
            # Check for duplicate IDs
            if entry_point.id in seen_entry_ids:
                errors.append(f"Duplicate async entry point ID: '{entry_point.id}'")
            seen_entry_ids.add(entry_point.id)

            # Check entry node exists
            if not self.get_node(entry_point.entry_node):
                errors.append(
                    f"Async entry point '{entry_point.id}' references "
                    f"missing node '{entry_point.entry_node}'"
                )

            # Validate isolation level
            valid_isolation = {"isolated", "shared", "synchronized"}
            if entry_point.isolation_level not in valid_isolation:
                errors.append(
                    f"Async entry point '{entry_point.id}' has invalid isolation_level "
                    f"'{entry_point.isolation_level}'. Valid: {valid_isolation}"
                )

            # Validate trigger type
            valid_triggers = {"webhook", "api", "timer", "event", "manual"}
            if entry_point.trigger_type not in valid_triggers:
                errors.append(
                    f"Async entry point '{entry_point.id}' has invalid trigger_type "
                    f"'{entry_point.trigger_type}'. Valid: {valid_triggers}"
                )

        # Check terminal nodes exist
        for term in self.terminal_nodes:
            if not self.get_node(term):
@@ -646,10 +586,6 @@ class GraphSpec(BaseModel):
        for entry_point_node in self.entry_points.values():
            to_visit.append(entry_point_node)

        # Add all async entry points as valid starting points
        for async_entry in self.async_entry_points:
            to_visit.append(async_entry.entry_node)

        # Traverse from all entry points
        while to_visit:
            current = to_visit.pop()
@@ -666,18 +602,10 @@ class GraphSpec(BaseModel):
        for sub_agent_id in sub_agents:
            reachable.add(sub_agent_id)

        # Build set of async entry point nodes for quick lookup
        async_entry_nodes = {ep.entry_node for ep in self.async_entry_points}

        for node in self.nodes:
            if node.id not in reachable:
                # Skip if node is a pause node, entry point target, or async entry
                # (pause/resume architecture and async entry points make reachable)
                if (
                    node.id in self.pause_nodes
                    or node.id in self.entry_points.values()
                    or node.id in async_entry_nodes
                ):
                # Skip if node is a pause node or entry point target
                if node.id in self.pause_nodes or node.id in self.entry_points.values():
                    continue
                errors.append(f"Node '{node.id}' is unreachable from entry")


+1063 −114 (file diff suppressed because it is too large)
@@ -27,11 +27,24 @@ from framework.graph.node import (
    SharedMemory,
)
from framework.graph.validator import OutputValidator
from framework.llm.provider import LLMProvider, Tool
from framework.llm.provider import LLMProvider, Tool, ToolUse
from framework.observability import set_trace_context
from framework.runtime.core import Runtime
from framework.schemas.checkpoint import Checkpoint
from framework.storage.checkpoint_store import CheckpointStore
from framework.utils.io import atomic_write

logger = logging.getLogger(__name__)


def _default_max_context_tokens() -> int:
    """Resolve max_context_tokens from global config, falling back to 32000."""
    try:
        from framework.config import get_max_context_tokens

        return get_max_context_tokens()
    except Exception:
        return 32_000


@dataclass
@@ -138,6 +151,10 @@ class GraphExecutor:
        tool_provider_map: dict[str, str] | None = None,
        dynamic_tools_provider: Callable | None = None,
        dynamic_prompt_provider: Callable | None = None,
        iteration_metadata_provider: Callable | None = None,
        skills_catalog_prompt: str = "",
        protocols_prompt: str = "",
        skill_dirs: list[str] | None = None,
    ):
        """
        Initialize the executor.
@@ -163,6 +180,9 @@ class GraphExecutor:
                tool list (for mode switching)
            dynamic_prompt_provider: Optional callback returning current
                system prompt (for phase switching)
            skills_catalog_prompt: Available skills catalog for system prompt
            protocols_prompt: Default skill operational protocols for system prompt
            skill_dirs: Skill base directories for Tier 3 resource access
        """
        self.runtime = runtime
        self.llm = llm
@@ -183,6 +203,22 @@ class GraphExecutor:
        self.tool_provider_map = tool_provider_map
        self.dynamic_tools_provider = dynamic_tools_provider
        self.dynamic_prompt_provider = dynamic_prompt_provider
        self.iteration_metadata_provider = iteration_metadata_provider
        self.skills_catalog_prompt = skills_catalog_prompt
        self.protocols_prompt = protocols_prompt
        self.skill_dirs: list[str] = skill_dirs or []

        if protocols_prompt:
            self.logger.info(
                "GraphExecutor[%s] received protocols_prompt (%d chars)",
                stream_id,
                len(protocols_prompt),
            )
        else:
            self.logger.warning(
                "GraphExecutor[%s] received EMPTY protocols_prompt",
                stream_id,
            )

        # Parallel execution settings
        self.enable_parallel_execution = enable_parallel_execution
@@ -212,11 +248,11 @@
        """
        if not self._storage_path:
            return
        state_path = self._storage_path / "state.json"
        try:
            import json as _json
            from datetime import datetime

            state_path = self._storage_path / "state.json"
            if state_path.exists():
                state_data = _json.loads(state_path.read_text(encoding="utf-8"))
            else:
@@ -239,9 +275,14 @@
            state_data["memory"] = memory_snapshot
            state_data["memory_keys"] = list(memory_snapshot.keys())

            state_path.write_text(_json.dumps(state_data, indent=2), encoding="utf-8")
            with atomic_write(state_path, encoding="utf-8") as f:
                _json.dump(state_data, f, indent=2)
        except Exception:
            pass  # Best-effort — never block execution
            logger.warning(
                "Failed to persist progress state to %s",
                state_path,
                exc_info=True,
            )

    def _validate_tools(self, graph: GraphSpec) -> list[str]:
        """
@@ -403,6 +444,14 @@ class GraphExecutor:
        )
        return s1 + "\n\n" + s2

    def _get_runtime_log_session_id(self) -> str:
        """Return the session-backed execution ID for runtime logging, if any."""
        if not self._storage_path:
            return ""
        if self._storage_path.parent.name != "sessions":
            return ""
        return self._storage_path.name

    async def execute(
        self,
        graph: GraphSpec,
@@ -696,10 +745,7 @@
        )

        if self.runtime_logger:
            # Extract session_id from storage_path if available (for unified sessions)
            session_id = ""
            if self._storage_path and self._storage_path.name.startswith("session_"):
                session_id = self._storage_path.name
            session_id = self._get_runtime_log_session_id()
            self.runtime_logger.start_run(goal_id=goal.id, session_id=session_id)

        self.logger.info(f"🚀 Starting execution: {goal.name}")
@@ -925,6 +971,33 @@
        self.logger.info(" Executing...")
        result = await node_impl.execute(ctx)

        # GCU tab cleanup: stop the browser profile after a top-level GCU node
        # finishes so tabs don't accumulate. Mirrors the subagent cleanup in
        # EventLoopNode._execute_subagent().
        if node_spec.node_type == "gcu" and self.tool_executor is not None:
            try:
                from gcu.browser.session import (
                    _active_profile as _gcu_profile_var,
                )

                _gcu_profile = _gcu_profile_var.get()
                _stop_use = ToolUse(
                    id="gcu-cleanup",
                    name="browser_stop",
                    input={"profile": _gcu_profile},
                )
                _stop_result = self.tool_executor(_stop_use)
                if asyncio.iscoroutine(_stop_result) or asyncio.isfuture(_stop_result):
                    await _stop_result
            except ImportError:
                pass  # GCU not installed
            except Exception as _gcu_exc:
                logger.warning(
                    "GCU browser_stop failed for profile %r: %s",
                    _gcu_profile,
                    _gcu_exc,
                )

        # Emit node-completed event (skip event_loop nodes)
        if self._event_bus and node_spec.node_type != "event_loop":
            await self._event_bus.emit_node_loop_completed(
@@ -1350,6 +1423,7 @@ class GraphExecutor:
            next_spec = graph.get_node(current_node_id)
            if next_spec and next_spec.node_type == "event_loop":
                from framework.graph.prompt_composer import (
                    EXECUTION_SCOPE_PREAMBLE,
                    build_accounts_prompt,
                    build_narrative,
                    build_transition_marker,
@@ -1389,9 +1463,14 @@
                )

                # Compose new system prompt (Layer 1 + 2 + 3 + accounts)
                # Prepend scope preamble to focus so the LLM stays
                # within this node's responsibility.
                _focus = next_spec.system_prompt
                if next_spec.output_keys and _focus:
                    _focus = f"{EXECUTION_SCOPE_PREAMBLE}\n\n{_focus}"
                new_system = compose_system_prompt(
                    identity_prompt=getattr(graph, "identity_prompt", None),
                    focus_prompt=next_spec.system_prompt,
                    focus_prompt=_focus,
                    narrative=narrative,
                    accounts_prompt=_node_accounts,
                )
@@ -1753,10 +1832,34 @@
        if node_spec.tools:
            available_tools = [t for t in self.tools if t.name in node_spec.tools]

        # Create scoped memory view
        # Create scoped memory view.
        # When permissions are restricted (non-empty key lists), auto-include
        # _-prefixed keys used by default skill protocols so agents can read/write
        # operational state (e.g. _working_notes, _batch_ledger) regardless of
        # what the node declares. When key lists are empty (unrestricted), leave
        # unchanged — empty means "allow all".
        read_keys = list(node_spec.input_keys)
        write_keys = list(node_spec.output_keys)
        # Only extend lists that were already restricted (non-empty).
        # Empty means "allow all" — adding keys would accidentally
        # activate the permission check and block legitimate reads/writes.
        if read_keys or write_keys:
            from framework.skills.defaults import SHARED_MEMORY_KEYS as _skill_keys

            existing_underscore = [k for k in memory._data if k.startswith("_")]
            extra_keys = set(_skill_keys) | set(existing_underscore)
            # Only inject into read_keys when it was already non-empty — an empty
            # read_keys means "allow all reads" and injecting skill keys would
            # inadvertently restrict reads to skill keys only.
            for k in extra_keys:
                if read_keys and k not in read_keys:
                    read_keys.append(k)
                if write_keys and k not in write_keys:
                    write_keys.append(k)

        scoped_memory = memory.with_permissions(
            read_keys=node_spec.input_keys,
            write_keys=node_spec.output_keys,
            read_keys=read_keys,
            write_keys=write_keys,
        )

        # Build per-node accounts prompt (filtered to this node's tools)
@@ -1799,6 +1902,10 @@
            shared_node_registry=self.node_registry,  # For subagent escalation routing
            dynamic_tools_provider=self.dynamic_tools_provider,
            dynamic_prompt_provider=self.dynamic_prompt_provider,
            iteration_metadata_provider=self.iteration_metadata_provider,
            skills_catalog_prompt=self.skills_catalog_prompt,
            protocols_prompt=self.protocols_prompt,
            skill_dirs=self.skill_dirs,
        )

VALID_NODE_TYPES = {
@@ -1872,7 +1979,7 @@
            max_tool_calls_per_turn=lc.get("max_tool_calls_per_turn", 30),
            tool_call_overflow_margin=lc.get("tool_call_overflow_margin", 0.5),
            stall_detection_threshold=lc.get("stall_detection_threshold", 3),
            max_context_tokens=lc.get("max_context_tokens", 32000),
            max_context_tokens=lc.get("max_context_tokens", _default_max_context_tokens()),
            max_tool_result_chars=lc.get("max_tool_result_chars", 30_000),
            spillover_dir=spillover,
            hooks=lc.get("hooks", {}),
@@ -2039,6 +2146,10 @@
            edge=edge,
        )

        # Track which branch wrote which key for memory conflict detection
        fanout_written_keys: dict[str, str] = {}  # key -> branch_id that wrote it
        fanout_keys_lock = asyncio.Lock()

        self.logger.info(f" ⑂ Fan-out: executing {len(branches)} branches in parallel")
        for branch in branches.values():
            target_spec = graph.get_node(branch.node_id)
@@ -2130,8 +2241,31 @@
                )

                if result.success:
                    # Write outputs to shared memory using async write
                    # Write outputs to shared memory with conflict detection
                    conflict_strategy = self._parallel_config.memory_conflict_strategy
                    for key, value in result.output.items():
                        async with fanout_keys_lock:
                            prior_branch = fanout_written_keys.get(key)
                            if prior_branch and prior_branch != branch.branch_id:
                                if conflict_strategy == "error":
                                    raise RuntimeError(
                                        f"Memory conflict: key '{key}' already written "
                                        f"by branch '{prior_branch}', "
                                        f"conflicting write from '{branch.branch_id}'"
                                    )
                                elif conflict_strategy == "first_wins":
                                    self.logger.debug(
                                        f" ⚠ Skipping write to '{key}' "
                                        f"(first_wins: already set by {prior_branch})"
                                    )
                                    continue
                                else:
                                    # last_wins (default): write and log
                                    self.logger.debug(
                                        f" ⚠ Key '{key}' overwritten "
                                        f"(last_wins: {prior_branch} -> {branch.branch_id})"
                                    )
                            fanout_written_keys[key] = branch.branch_id
                        await memory.write_async(key, value)

                branch.result = result
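The three `memory_conflict_strategy` values behave quite differently when two branches write the same key, which is easiest to see in isolation. A standalone sketch of the decision table; the strategy names are taken from the hunk above, everything else is illustrative:

```python
def resolve_conflict(strategy: str, key: str, prior: str | None, writer: str) -> bool:
    """Return True when the write should proceed. Sketch of the logic above."""
    if prior is None or prior == writer:
        return True                              # first write, or same branch
    if strategy == "error":
        raise RuntimeError(f"Memory conflict on '{key}': {prior} vs {writer}")
    if strategy == "first_wins":
        return False                             # keep the earlier value
    return True                                  # last_wins: overwrite

assert resolve_conflict("first_wins", "report", "branch-a", "branch-b") is False
assert resolve_conflict("last_wins", "report", "branch-a", "branch-b") is True
```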
@@ -2178,9 +2312,11 @@

                return branch, e

        # Execute all branches concurrently
        tasks = [execute_single_branch(b) for b in branches.values()]
        results = await asyncio.gather(*tasks, return_exceptions=False)
        # Execute all branches concurrently with per-branch timeout
        timeout = self._parallel_config.branch_timeout_seconds
        branch_list = list(branches.values())
        tasks = [asyncio.wait_for(execute_single_branch(b), timeout=timeout) for b in branch_list]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Process results
        total_tokens = 0
@@ -2188,17 +2324,33 @@
        branch_results: dict[str, NodeResult] = {}
        failed_branches: list[ParallelBranch] = []

        for branch, result in results:
            path.append(branch.node_id)
        for i, result in enumerate(results):
            branch = branch_list[i]

            if isinstance(result, Exception):
            if isinstance(result, asyncio.TimeoutError):
                # Branch timed out
                branch.status = "timed_out"
                branch.error = f"Branch timed out after {timeout}s"
                self.logger.warning(
                    f" ⏱ Branch {graph.get_node(branch.node_id).name}: "
                    f"timed out after {timeout}s"
                )
                path.append(branch.node_id)
                failed_branches.append(branch)
            elif result is None or not result.success:
            elif isinstance(result, Exception):
                path.append(branch.node_id)
                failed_branches.append(branch)
            else:
                total_tokens += result.tokens_used
                total_latency += result.latency_ms
                branch_results[branch.branch_id] = result
                returned_branch, node_result = result
                path.append(returned_branch.node_id)
                if node_result is None or isinstance(node_result, Exception):
                    failed_branches.append(returned_branch)
                elif not node_result.success:
                    failed_branches.append(returned_branch)
                else:
                    total_tokens += node_result.tokens_used
                    total_latency += node_result.latency_ms
                    branch_results[returned_branch.branch_id] = node_result

        # Handle failures based on config
        if failed_branches:

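The timeout mechanics here are standard asyncio: each branch coroutine is wrapped in `asyncio.wait_for`, and `gather(..., return_exceptions=True)` turns timeouts and failures into values instead of cancelling siblings. A self-contained sketch of the same pattern:

```python
import asyncio

async def branch(i: int) -> str:
    await asyncio.sleep(i)                     # branch 2 exceeds the timeout
    return f"branch-{i} done"

async def main() -> None:
    branches = [0, 1, 2]
    tasks = [asyncio.wait_for(branch(i), timeout=1.5) for i in branches]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    for i, result in zip(branches, results):
        if isinstance(result, asyncio.TimeoutError):
            print(f"branch-{i} timed out")     # handled; siblings unaffected
        elif isinstance(result, Exception):
            print(f"branch-{i} failed: {result!r}")
        else:
            print(result)

asyncio.run(main())
```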
+56
-13
@@ -37,24 +37,45 @@ Follow these rules for reliable, efficient browser interaction.
## Reading Pages
- ALWAYS prefer `browser_snapshot` over `browser_get_text("body")`
  — it returns a compact ~1-5 KB accessibility tree vs 100+ KB of raw HTML.
- Use `browser_snapshot_aria` when you need full ARIA properties
  for detailed element inspection.
- Do NOT use `browser_screenshot` for reading text content
  — it produces huge base64 images with no searchable text.
- Interaction tools (`browser_click`, `browser_type`, `browser_fill`,
  `browser_scroll`, etc.) return a page snapshot automatically in their
  result. Use it to decide your next action — do NOT call
  `browser_snapshot` separately after every action.
  Only call `browser_snapshot` when you need a fresh view without
  performing an action, or after setting `auto_snapshot=false`.
- Do NOT use `browser_screenshot` to read text — use
  `browser_snapshot` for that (compact, searchable, fast).
- DO use `browser_screenshot` when you need visual context:
  charts, images, canvas elements, layout verification, or when
  the snapshot doesn't capture what you need.
- Only fall back to `browser_get_text` for extracting specific
  small elements by CSS selector.

## Navigation & Waiting
- Always call `browser_wait` after navigation actions
  (`browser_open`, `browser_navigate`, `browser_click` on links)
  to let the page load.
- `browser_navigate` and `browser_open` already wait for the page to
  load (`domcontentloaded`). Do NOT call `browser_wait` with no
  arguments after navigation — it wastes time.
  Only use `browser_wait` when you need a *specific element* or *text*
  to appear (pass `selector` or `text`).
- NEVER re-navigate to the same URL after scrolling
  — this resets your scroll position and loses loaded content.

## Scrolling
- Use large scroll amounts (~2000) when loading more content
  — sites like Twitter and LinkedIn lazy-load new items as you page.
- After scrolling, take a new `browser_snapshot` to see updated content.
- The scroll result includes a snapshot automatically — no need to call
  `browser_snapshot` separately.

## Batching Actions
- You can call multiple tools in a single turn — they execute in parallel.
  ALWAYS batch independent actions together. Examples:
  - Fill multiple form fields in one turn.
  - Navigate + snapshot in one turn.
  - Click + scroll if targeting different elements.
- When batching, set `auto_snapshot=false` on all but the last action
  to avoid redundant snapshots.
- Aim for 3-5 tool calls per turn minimum. One tool call per turn is
  wasteful.

## Error Recovery
- If a tool fails, retry once with the same approach.
@@ -65,11 +86,33 @@ Follow these rules for reliable, efficient browser interaction.
  then `browser_start`, then retry.

## Tab Management
- Use `browser_tabs` to list open tabs when managing multiple pages.
- Pass `target_id` to tools when operating on a specific tab.
- Open background tabs with `browser_open(url=..., background=true)`
  to avoid losing your current context.
- Close tabs you no longer need with `browser_close` to free resources.

**Close tabs as soon as you are done with them** — not only at the end of the task.
After reading or extracting data from a tab, close it immediately.

**Decision rules:**
- Finished reading/extracting from a tab? → `browser_close(target_id=...)`
- Completed a multi-tab workflow? → `browser_close_finished()` to clean up all your tabs
- More than 3 tabs open? → stop and close finished ones before opening more
- Popup appeared that you didn't need? → close it immediately

**Origin awareness:** `browser_tabs` returns an `origin` field for each tab:
- `"agent"` — you opened it; you own it; close it when done
- `"popup"` — opened by a link or script; close after extracting what you need
- `"startup"` or `"user"` — leave these alone unless the task requires it

**Cleanup tools:**
- `browser_close(target_id=...)` — close one specific tab
- `browser_close_finished()` — close all your agent/popup tabs (safe: leaves startup/user tabs)
- `browser_close_all()` — close everything except the active tab (use only for full reset)

**Multi-tab workflow pattern:**
1. Open background tabs with `browser_open(url=..., background=true)` to stay on current tab
2. Process each tab and close it with `browser_close` when done
3. When the full workflow completes, call `browser_close_finished()` to confirm cleanup
4. Check `browser_tabs` at any point — it shows `origin` and `age_seconds` per tab

Never accumulate tabs. Treat every tab you open as a resource you must free.

## Login & Auth Walls
- If you see a "Log in" or "Sign up" prompt instead of expected

@@ -167,14 +167,6 @@ class Goal(BaseModel):

        return met_weight >= total_weight * 0.9  # 90% threshold

    def check_constraint(self, constraint_id: str, value: Any) -> bool:
        """Check if a specific constraint is satisfied."""
        for c in self.constraints:
            if c.id == constraint_id:
                # This would be expanded with actual evaluation logic
                return True
        return True

    def to_prompt_context(self) -> str:
        """Generate context string for LLM prompts.


@@ -565,6 +565,16 @@ class NodeContext:
    # staging / running) without restarting the conversation.
    dynamic_prompt_provider: Any = None  # Callable[[], str] | None

    # Skill system prompts — injected by the skill discovery pipeline
    skills_catalog_prompt: str = ""  # Available skills XML catalog
    protocols_prompt: str = ""  # Default skill operational protocols
    skill_dirs: list[str] = field(default_factory=list)  # Skill base dirs for resource access

    # Per-iteration metadata provider — when set, EventLoopNode merges
    # the returned dict into node_loop_iteration event data. Used by
    # the queen to record the current phase per iteration.
    iteration_metadata_provider: Any = None  # Callable[[], dict] | None


@dataclass
class NodeResult:

@@ -26,6 +26,16 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)

# Injected into every worker node's system prompt so the LLM understands
# it is one step in a multi-node pipeline and should not overreach.
EXECUTION_SCOPE_PREAMBLE = (
    "EXECUTION SCOPE: You are one node in a multi-step workflow graph. "
    "Focus ONLY on the task described in your instructions below. "
    "Call set_output() for each of your declared output keys, then stop. "
    "Do NOT attempt work that belongs to other nodes — the framework "
    "routes data between nodes automatically."
)


def _with_datetime(prompt: str) -> str:
    """Append current datetime with local timezone to a system prompt."""
@@ -140,14 +150,24 @@ def compose_system_prompt(
    focus_prompt: str | None,
    narrative: str | None = None,
    accounts_prompt: str | None = None,
    skills_catalog_prompt: str | None = None,
    protocols_prompt: str | None = None,
    execution_preamble: str | None = None,
    node_type_preamble: str | None = None,
) -> str:
    """Compose the three-layer system prompt.
    """Compose the multi-layer system prompt.

    Args:
        identity_prompt: Layer 1 — static agent identity (from GraphSpec).
        focus_prompt: Layer 3 — per-node focus directive (from NodeSpec.system_prompt).
        narrative: Layer 2 — auto-generated from conversation state.
        accounts_prompt: Connected accounts block (sits between identity and narrative).
        skills_catalog_prompt: Available skills catalog XML (Agent Skills standard).
        protocols_prompt: Default skill operational protocols section.
        execution_preamble: EXECUTION_SCOPE_PREAMBLE for worker nodes
            (prepended before focus so the LLM knows its pipeline scope).
        node_type_preamble: Node-type-specific preamble, e.g. GCU browser
            best-practices prompt (prepended before focus).

    Returns:
        Composed system prompt with all layers present, plus current datetime.
@@ -162,10 +182,27 @@
    if accounts_prompt:
        parts.append(f"\n{accounts_prompt}")

    # Skills catalog (discovered skills available for activation)
    if skills_catalog_prompt:
        parts.append(f"\n{skills_catalog_prompt}")

    # Operational protocols (default skill behavioral guidance)
    if protocols_prompt:
        parts.append(f"\n{protocols_prompt}")

    # Layer 2: Narrative (what's happened so far)
    if narrative:
        parts.append(f"\n--- Context (what has happened so far) ---\n{narrative}")

    # Execution scope preamble (worker nodes — tells the LLM it is one
    # step in a multi-node pipeline and should not overreach)
    if execution_preamble:
        parts.append(f"\n{execution_preamble}")

    # Node-type preamble (e.g. GCU browser best-practices)
    if node_type_preamble:
        parts.append(f"\n{node_type_preamble}")

    # Layer 3: Focus (current phase directive)
    if focus_prompt:
        parts.append(f"\n--- Current Focus ---\n{focus_prompt}")
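The resulting layer order (identity, accounts, skills catalog, protocols, narrative, scope preamble, node-type preamble, focus) is easiest to verify with a toy call. A sketch, assuming `compose_system_prompt` and `EXECUTION_SCOPE_PREAMBLE` from above are in scope; the argument values are invented:

```python
prompt = compose_system_prompt(
    identity_prompt="You are a research agent.",
    focus_prompt="Summarize the three sources in shared memory.",
    narrative="Sources were fetched by the previous node.",
    execution_preamble=EXECUTION_SCOPE_PREAMBLE,
)
# The "--- Current Focus ---" block lands last, after the scope preamble,
# so the final instruction the LLM reads is the node's own directive.
assert prompt.index("EXECUTION SCOPE") < prompt.index("--- Current Focus ---")
```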
@@ -255,7 +292,9 @@ def build_transition_marker(
    sections.append(f"\nCompleted: {previous_node.name}")
    sections.append(f" {previous_node.description}")

    # Outputs in memory
    # Outputs in memory — use file references for large values so the
    # next node loads full data from disk instead of seeing truncated
    # inline previews that look deceptively complete.
    all_memory = memory.read_all()
    if all_memory:
        memory_lines: list[str] = []
@@ -263,7 +302,29 @@
            if value is None:
                continue
            val_str = str(value)
            if len(val_str) > 300:
            if len(val_str) > 300 and data_dir:
                # Auto-spill large transition values to data files
                import json as _json

                data_path = Path(data_dir)
                data_path.mkdir(parents=True, exist_ok=True)
                ext = ".json" if isinstance(value, (dict, list)) else ".txt"
                filename = f"output_{key}{ext}"
                try:
                    write_content = (
                        _json.dumps(value, indent=2, ensure_ascii=False)
                        if isinstance(value, (dict, list))
                        else str(value)
                    )
                    (data_path / filename).write_text(write_content, encoding="utf-8")
                    file_size = (data_path / filename).stat().st_size
                    val_str = (
                        f"[Saved to '{filename}' ({file_size:,} bytes). "
                        f"Use load_data(filename='{filename}') to access.]"
                    )
                except Exception:
                    val_str = val_str[:300] + "..."
            elif len(val_str) > 300:
                val_str = val_str[:300] + "..."
            memory_lines.append(f" {key}: {val_str}")
        if memory_lines:
@@ -280,7 +341,7 @@ def build_transition_marker(
        ]
        if file_lines:
            sections.append(
                "\nData files (use read_file to access):\n" + "\n".join(file_lines)
                "\nData files (use load_data to access):\n" + "\n".join(file_lines)
            )

    # Agent working memory
@@ -294,6 +355,12 @@ def build_transition_marker(
    # Next phase
    sections.append(f"\nNow entering: {next_node.name}")
    sections.append(f" {next_node.description}")
    if next_node.output_keys:
        sections.append(
            f"\nYour ONLY job in this phase: complete the task above and call "
            f"set_output() for {next_node.output_keys}. Do NOT do work that "
            f"belongs to later phases."
        )

    # Reflection prompt (engineered metacognition)
    sections.append(

@@ -115,11 +115,23 @@ class SafeEvalVisitor(ast.NodeVisitor):
        return True

    def visit_BoolOp(self, node: ast.BoolOp) -> Any:
        values = [self.visit(v) for v in node.values]
        # Short-circuit evaluation to match Python semantics.
        # Previously all operands were eagerly evaluated, which broke
        # guard patterns like: ``x is not None and x.get("key")``
        if isinstance(node.op, ast.And):
            return all(values)
            result = True
            for v in node.values:
                result = self.visit(v)
                if not result:
                    return result
            return result
        elif isinstance(node.op, ast.Or):
            return any(values)
            result = False
            for v in node.values:
                result = self.visit(v)
                if result:
                    return result
            return result
        raise ValueError(f"Boolean operator {type(node.op).__name__} is not allowed")

    def visit_IfExp(self, node: ast.IfExp) -> Any:

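Why short-circuiting matters here: with the old eager list comprehension, every operand was visited before `all()`/`any()` ran, so a `None` guard could not protect the operand to its right. A standalone demonstration with plain Python values:

```python
x = None

def eager_and(*operands):
    # Old behavior: every operand is evaluated before combining.
    return all(operands)

try:
    eager_and(x is not None, x.get("key"))   # right side evaluates -> AttributeError
except AttributeError as exc:
    print("eager evaluation:", exc)

# Short-circuit (new behavior, matching Python's own `and`):
print("short-circuit:", x is not None and x.get("key"))   # False, no error
```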
@@ -0,0 +1,706 @@
"""Antigravity (Google internal Cloud Code Assist) LLM provider.

Antigravity is Google's unified gateway API that routes requests to Gemini,
Claude, and GPT-OSS models through a single Gemini-style interface. It is
NOT the public ``generativelanguage.googleapis.com`` API.

Authentication uses Google OAuth2. Token refresh is done directly with the
OAuth client secret — no local proxy required.

Credential sources (checked in order):
1. ``~/.hive/antigravity-accounts.json`` (native OAuth implementation)
2. Antigravity IDE SQLite state DB (macOS / Linux)
"""

from __future__ import annotations

import json
import logging
import re
import time
import uuid
from collections.abc import AsyncIterator, Callable, Iterator
from pathlib import Path
from typing import Any

from framework.llm.provider import LLMProvider, LLMResponse, Tool
from framework.llm.stream_events import (
    FinishEvent,
    StreamErrorEvent,
    StreamEvent,
    TextDeltaEvent,
    TextEndEvent,
    ToolCallEvent,
)

logger = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------

_TOKEN_URL = "https://oauth2.googleapis.com/token"

# Fallback order: daily sandbox → autopush sandbox → production
_ENDPOINTS = [
    "https://daily-cloudcode-pa.sandbox.googleapis.com",
    "https://autopush-cloudcode-pa.sandbox.googleapis.com",
    "https://cloudcode-pa.googleapis.com",
]
_DEFAULT_PROJECT_ID = "rising-fact-p41fc"
_TOKEN_REFRESH_BUFFER_SECS = 60

# Credentials file in ~/.hive/ (native implementation)
_ACCOUNTS_FILE = Path.home() / ".hive" / "antigravity-accounts.json"
_IDE_STATE_DB_MAC = (
    Path.home()
    / "Library"
    / "Application Support"
    / "Antigravity"
    / "User"
    / "globalStorage"
    / "state.vscdb"
)
_IDE_STATE_DB_LINUX = (
    Path.home() / ".config" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
)
_IDE_STATE_DB_KEY = "antigravityUnifiedStateSync.oauthToken"

_BASE_HEADERS: dict[str, str] = {
    # Mimic the Antigravity Electron app so the API accepts the request.
    "User-Agent": (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Antigravity/1.18.3 Chrome/138.0.7204.235 "
        "Electron/37.3.1 Safari/537.36"
    ),
    "X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
    "Client-Metadata": '{"ideType":"ANTIGRAVITY","platform":"MACOS","pluginType":"GEMINI"}',
}


# ---------------------------------------------------------------------------
# Credential loading helpers
# ---------------------------------------------------------------------------


def _load_from_json_file() -> tuple[str | None, str | None, str, float]:
    """Read credentials from JSON accounts file.

    Reads from ~/.hive/antigravity-accounts.json.

    Returns ``(access_token | None, refresh_token | None, project_id, expires_at)``.
    ``expires_at`` is a Unix timestamp (seconds); 0.0 means unknown.
    """
    if not _ACCOUNTS_FILE.exists():
        return None, None, _DEFAULT_PROJECT_ID, 0.0
    try:
        with open(_ACCOUNTS_FILE, encoding="utf-8") as fh:
            data = json.load(fh)
    except (OSError, json.JSONDecodeError) as exc:
        logger.debug("Failed to read Antigravity accounts file: %s", exc)
        return None, None, _DEFAULT_PROJECT_ID, 0.0

    accounts = data.get("accounts", [])
    if not accounts:
        return None, None, _DEFAULT_PROJECT_ID, 0.0

    account = next((a for a in accounts if a.get("enabled", True) is not False), accounts[0])
    schema_version = data.get("schemaVersion", 1)

    if schema_version >= 4:
        # V4 schema: refresh = "refreshToken|projectId[|managedProjectId]"
        refresh_str = account.get("refresh", "")
        parts = refresh_str.split("|") if refresh_str else []
        refresh_token: str | None = parts[0] if parts else None
        project_id = parts[1] if len(parts) >= 2 and parts[1] else _DEFAULT_PROJECT_ID

        access_token: str | None = account.get("access")
        expires_ms: int = account.get("expires", 0)
        expires_at = float(expires_ms) / 1000.0 if expires_ms else 0.0

        # Treat near-expiry tokens as absent so _ensure_token() triggers a refresh.
        if access_token and expires_at and time.time() >= expires_at - _TOKEN_REFRESH_BUFFER_SECS:
            access_token = None
            expires_at = 0.0

        return access_token, refresh_token, project_id, expires_at
    else:
        # V1–V3 schema: plain accessToken / refreshToken fields
        access_token = account.get("accessToken")
        refresh_token = account.get("refreshToken")
        # Estimate expiry from last_refresh + 1 h
        last_refresh_str: str | None = data.get("last_refresh")
        expires_at = 0.0
        if last_refresh_str:
            try:
                from datetime import datetime  # noqa: PLC0415

                ts = datetime.fromisoformat(last_refresh_str.replace("Z", "+00:00")).timestamp()
                expires_at = ts + 3600.0
                if time.time() >= expires_at - _TOKEN_REFRESH_BUFFER_SECS:
                    access_token = None
            except (ValueError, TypeError):
                pass
        return access_token, refresh_token, _DEFAULT_PROJECT_ID, expires_at

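The V4 `refresh` field packs two or three values into one pipe-delimited string. A tiny worked example of the split; the token values are invented:

```python
refresh = "1//0example-refresh-token|my-project-123"
parts = refresh.split("|")
refresh_token = parts[0]                      # "1//0example-refresh-token"
project_id = parts[1] if len(parts) >= 2 and parts[1] else "rising-fact-p41fc"
# A trailing "|managedProjectId" segment, when present, is simply ignored here.
```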
def _load_from_ide_db() -> tuple[str | None, str | None, float]:
    """Extract ``(access_token, refresh_token, expires_at)`` from the IDE SQLite DB."""
    import base64  # noqa: PLC0415
    import sqlite3  # noqa: PLC0415

    for db_path in (_IDE_STATE_DB_MAC, _IDE_STATE_DB_LINUX):
        if not db_path.exists():
            continue
        try:
            con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True)
            try:
                row = con.execute(
                    "SELECT value FROM ItemTable WHERE key = ?",
                    (_IDE_STATE_DB_KEY,),
                ).fetchone()
            finally:
                con.close()
            if not row:
                continue

            blob = base64.b64decode(row[0])
            candidates = re.findall(rb"[A-Za-z0-9+/=_\-]{40,}", blob)
            access_token: str | None = None
            refresh_token: str | None = None
            for candidate in candidates:
                try:
                    padded = candidate + b"=" * (-len(candidate) % 4)
                    inner = base64.urlsafe_b64decode(padded)
                except Exception:
                    continue
                if not access_token:
                    m = re.search(rb"ya29\.[A-Za-z0-9_\-\.]+", inner)
                    if m:
                        access_token = m.group(0).decode("ascii")
                if not refresh_token:
                    m = re.search(rb"1//[A-Za-z0-9_\-\.]+", inner)
                    if m:
                        refresh_token = m.group(0).decode("ascii")
                if access_token and refresh_token:
                    break

            if access_token:
                # Estimate expiry from DB mtime (IDE refreshes while running)
                mtime = db_path.stat().st_mtime
                expires_at = mtime + 3600.0
                return access_token, refresh_token, expires_at
        except Exception as exc:
            logger.debug("Failed to read Antigravity IDE state DB: %s", exc)
            continue
    return None, None, 0.0


def _do_token_refresh(refresh_token: str) -> tuple[str, float] | None:
    """POST to Google OAuth endpoint and return ``(new_access_token, expires_at)``.

    The client secret is sourced via ``get_antigravity_client_secret()`` (env var,
    config file, or npm package fallback). When unavailable the refresh is attempted
    without it — Google will reject it for web-app clients, but the npm fallback in
    ``get_antigravity_client_secret()`` should ensure the secret is found at runtime.

    Returns None when the HTTP request fails.
    """
    from framework.config import get_antigravity_client_secret  # noqa: PLC0415

    client_secret = get_antigravity_client_secret()
    if not client_secret:
        logger.debug(
            "Antigravity client secret not configured — attempting refresh without it. "
            "Set ANTIGRAVITY_CLIENT_SECRET or run quickstart to configure."
        )

    import urllib.error  # noqa: PLC0415
    import urllib.parse  # noqa: PLC0415
    import urllib.request  # noqa: PLC0415

    from framework.config import get_antigravity_client_id  # noqa: PLC0415

    params: dict[str, str] = {
        "grant_type": "refresh_token",
        "refresh_token": refresh_token,
        "client_id": get_antigravity_client_id(),
    }
    if client_secret:
        params["client_secret"] = client_secret
    body = urllib.parse.urlencode(params).encode("utf-8")

    req = urllib.request.Request(
        _TOKEN_URL,
        data=body,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(req, timeout=15) as resp:  # noqa: S310
            payload = json.loads(resp.read())
            access_token: str = payload["access_token"]
            expires_in: int = payload.get("expires_in", 3600)
            logger.debug("Antigravity token refreshed successfully")
            return access_token, time.time() + expires_in
    except Exception as exc:
        logger.debug("Antigravity token refresh failed: %s", exc)
        return None


# ---------------------------------------------------------------------------
# Message conversion helpers
# ---------------------------------------------------------------------------


def _clean_tool_name(name: str) -> str:
    """Sanitize a tool name for the Antigravity function-calling schema."""
    name = re.sub(r"[/\s]", "_", name)
    if name and not (name[0].isalpha() or name[0] == "_"):
        name = "_" + name
    return name[:64]

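A few doctest-style cases pin down what `_clean_tool_name` accepts and rewrites, assuming the helper above is in scope:

```python
assert _clean_tool_name("browser/click element") == "browser_click_element"
assert _clean_tool_name("9lives") == "_9lives"          # leading digit gets a "_"
assert len(_clean_tool_name("x" * 100)) == 64           # hard truncation at 64 chars
```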
def _to_gemini_contents(
    messages: list[dict[str, Any]],
    thought_sigs: dict[str, str] | None = None,
) -> list[dict[str, Any]]:
    """Convert OpenAI-format messages to Gemini-style ``contents`` array."""
    # Pre-build a map tool_call_id → function_name from assistant messages.
    # Tool result messages (role="tool") only carry tool_call_id, not the name,
    # but Gemini requires functionResponse.name to match the functionCall.name.
    tc_id_to_name: dict[str, str] = {}
    for msg in messages:
        if msg.get("role") == "assistant":
            for tc in msg.get("tool_calls") or []:
                tc_id = tc.get("id")
                fn_name = tc.get("function", {}).get("name", "")
                if tc_id and fn_name:
                    tc_id_to_name[tc_id] = fn_name

    contents: list[dict[str, Any]] = []
    # Consecutive tool-result messages must be batched into one user turn.
    pending_tool_parts: list[dict[str, Any]] = []

    def _flush_tool_results() -> None:
        if pending_tool_parts:
            contents.append({"role": "user", "parts": list(pending_tool_parts)})
            pending_tool_parts.clear()

    for msg in messages:
        role = msg.get("role", "user")
        content = msg.get("content")

        if role == "system":
            continue  # Handled via systemInstruction, not in contents.

        if role == "tool":
            # OpenAI tool result → Gemini functionResponse part.
            result_str = content if isinstance(content, str) else str(content or "")
            tc_id = msg.get("tool_call_id", "")
            # Look up function name from the pre-built map; fall back to msg.name.
            fn_name = tc_id_to_name.get(tc_id) or msg.get("name", "")
            pending_tool_parts.append(
                {
                    "functionResponse": {
                        "name": fn_name,
                        "id": tc_id,
                        "response": {"content": result_str},
                    }
                }
            )
            continue

        _flush_tool_results()

        gemini_role = "model" if role == "assistant" else "user"
        parts: list[dict[str, Any]] = []

        if isinstance(content, str) and content:
            parts.append({"text": content})
        elif isinstance(content, list):
            for block in content:
                if not isinstance(block, dict):
                    continue
                if block.get("type") == "text":
                    text = block.get("text", "")
                    if text:
                        parts.append({"text": text})
                # Other block types (image_url etc.) skipped.

        # Assistant messages may carry OpenAI-style tool_calls.
        for tc in msg.get("tool_calls") or []:
            fn = tc.get("function", {})
            try:
                args = json.loads(fn.get("arguments", "{}") or "{}")
            except (json.JSONDecodeError, TypeError):
                args = {}
            tc_id = tc.get("id", str(uuid.uuid4()))
            fc_part: dict[str, Any] = {
                "functionCall": {
                    "name": fn.get("name", ""),
                    "args": args,
                    "id": tc_id,
                }
            }
            if thought_sigs:
                sig = thought_sigs.get(tc_id, "")
                if sig:
                    fc_part["thoughtSignature"] = sig  # part-level, not inside functionCall
            parts.append(fc_part)

        if parts:
            contents.append({"role": gemini_role, "parts": parts})

    _flush_tool_results()

    # Gemini requires the first turn to be a user turn. Drop any leading
    # model messages so the API doesn't reject with a 400.
    while contents and contents[0].get("role") == "model":
        contents.pop(0)

    return contents

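End to end, one OpenAI-style exchange maps to Gemini `contents` like this. A sketch exercising only behaviors visible in the function above; the message values are invented:

```python
messages = [
    {"role": "system", "content": "be terse"},            # dropped: systemInstruction
    {"role": "user", "content": "list my tabs"},
    {"role": "assistant", "tool_calls": [{"id": "t1",
        "function": {"name": "browser_tabs", "arguments": "{}"}}]},
    {"role": "tool", "tool_call_id": "t1", "content": "2 tabs open"},
]
_to_gemini_contents(messages)
# -> [{"role": "user",  "parts": [{"text": "list my tabs"}]},
#     {"role": "model", "parts": [{"functionCall": {"name": "browser_tabs",
#                                                   "args": {}, "id": "t1"}}]},
#     {"role": "user",  "parts": [{"functionResponse": {"name": "browser_tabs",
#                                  "id": "t1", "response": {"content": "2 tabs open"}}}]}]
```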
# ---------------------------------------------------------------------------
# Response parsing helpers
# ---------------------------------------------------------------------------


def _map_finish_reason(reason: str) -> str:
    return {"STOP": "stop", "MAX_TOKENS": "max_tokens", "OTHER": "tool_use"}.get(
        (reason or "").upper(), "stop"
    )

def _parse_complete_response(raw: dict[str, Any], model: str) -> LLMResponse:
|
||||
"""Parse a non-streaming Antigravity response dict → LLMResponse."""
|
||||
payload: dict[str, Any] = raw.get("response", raw)
|
||||
candidates: list[dict[str, Any]] = payload.get("candidates", [])
|
||||
usage: dict[str, Any] = payload.get("usageMetadata", {})
|
||||
|
||||
text_parts: list[str] = []
|
||||
if candidates:
|
||||
for part in candidates[0].get("content", {}).get("parts", []):
|
||||
if "text" in part and not part.get("thought"):
|
||||
text_parts.append(part["text"])
|
||||
|
||||
return LLMResponse(
|
||||
content="".join(text_parts),
|
||||
model=payload.get("modelVersion", model),
|
||||
input_tokens=usage.get("promptTokenCount", 0),
|
||||
output_tokens=usage.get("candidatesTokenCount", 0),
|
||||
stop_reason=_map_finish_reason(candidates[0].get("finishReason", "") if candidates else ""),
|
||||
raw_response=raw,
|
||||
)

def _parse_sse_stream(
    response: Any,
    model: str,
    on_thought_signature: Callable[[str, str], None] | None = None,
) -> Iterator[StreamEvent]:
    """Parse Antigravity SSE response line-by-line → StreamEvents.

    Each SSE line looks like::

        data: {"response": {"candidates": [...], "usageMetadata": {...}}, "traceId": "..."}
    """
    accumulated = ""
    input_tokens = 0
    output_tokens = 0
    finish_reason = ""

    for raw_line in response:
        line: str = raw_line.decode("utf-8", errors="replace").rstrip("\r\n")
        if not line.startswith("data:"):
            continue
        data_str = line[5:].strip()
        if not data_str or data_str == "[DONE]":
            continue
        try:
            data: dict[str, Any] = json.loads(data_str)
        except json.JSONDecodeError:
            continue

        # The outer envelope is {"response": {...}, "traceId": "..."}.
        payload: dict[str, Any] = data.get("response", data)

        usage = payload.get("usageMetadata", {})
        if usage:
            input_tokens = usage.get("promptTokenCount", input_tokens)
            output_tokens = usage.get("candidatesTokenCount", output_tokens)

        for candidate in payload.get("candidates", []):
            fr = candidate.get("finishReason", "")
            if fr:
                finish_reason = fr

            for part in candidate.get("content", {}).get("parts", []):
                if "text" in part and not part.get("thought"):
                    delta: str = part["text"]
                    accumulated += delta
                    yield TextDeltaEvent(content=delta, snapshot=accumulated)
                elif "functionCall" in part:
                    fc: dict[str, Any] = part["functionCall"]
                    tool_use_id = fc.get("id") or str(uuid.uuid4())
                    thought_sig = part.get("thoughtSignature", "")  # sibling of functionCall
                    if thought_sig and on_thought_signature:
                        on_thought_signature(tool_use_id, thought_sig)
                    args = fc.get("args", {})
                    if isinstance(args, str):
                        try:
                            args = json.loads(args)
                        except json.JSONDecodeError:
                            args = {}
                    yield ToolCallEvent(
                        tool_use_id=tool_use_id,
                        tool_name=fc.get("name", ""),
                        tool_input=args,
                    )

    if accumulated:
        yield TextEndEvent(full_text=accumulated)
    yield FinishEvent(
        stop_reason=_map_finish_reason(finish_reason),
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        model=model,
    )
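
# Illustration (not part of the diff): _parse_sse_stream only needs an iterable of
# byte lines, so a canned payload can exercise it without a live connection.
_FAKE_SSE_LINES = [
    b'data: {"response": {"candidates": [{"content": {"parts": [{"text": "Hi"}]},'
    b' "finishReason": "STOP"}], "usageMetadata": {"promptTokenCount": 3,'
    b' "candidatesTokenCount": 1}}}',
]
# list(_parse_sse_stream(_FAKE_SSE_LINES, "gemini-3-flash")) yields, in order:
#   TextDeltaEvent(content="Hi", snapshot="Hi")
#   TextEndEvent(full_text="Hi")
#   FinishEvent(stop_reason="stop", input_tokens=3, output_tokens=1, model="gemini-3-flash")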

# ---------------------------------------------------------------------------
# Provider
# ---------------------------------------------------------------------------


class AntigravityProvider(LLMProvider):
    """LLM provider for Google's internal Antigravity Code Assist gateway.

    No local proxy required. Handles OAuth token refresh, Gemini-format
    request/response conversion, and SSE streaming directly.
    """

    def __init__(self, model: str = "gemini-3-flash") -> None:
        # Strip any provider prefix ("openai/gemini-3-flash" → "gemini-3-flash").
        if "/" in model:
            model = model.split("/", 1)[1]
        self.model = model

        self._access_token: str | None = None
        self._refresh_token: str | None = None
        self._project_id: str = _DEFAULT_PROJECT_ID
        self._token_expires_at: float = 0.0
        self._thought_sigs: dict[str, str] = {}  # tool_use_id → thoughtSignature

        self._init_credentials()

    # --- Credential management -------------------------------------------- #

    def _init_credentials(self) -> None:
        """Load credentials from the best available source."""
        access, refresh, project_id, expires_at = _load_from_json_file()
        if refresh:
            self._refresh_token = refresh
            self._project_id = project_id
            self._access_token = access
            self._token_expires_at = expires_at
            return

        # Fall back to IDE state DB.
        access, refresh, expires_at = _load_from_ide_db()
        if access:
            self._access_token = access
            self._refresh_token = refresh
            self._token_expires_at = expires_at

    def has_credentials(self) -> bool:
        """Return True if any credential is available."""
        return bool(self._access_token or self._refresh_token)

    def _ensure_token(self) -> str:
        """Return a valid access token, refreshing via OAuth if needed."""
        if (
            self._access_token
            and self._token_expires_at
            and time.time() < self._token_expires_at - _TOKEN_REFRESH_BUFFER_SECS
        ):
            return self._access_token

        if self._refresh_token:
            result = _do_token_refresh(self._refresh_token)
            if result:
                self._access_token, self._token_expires_at = result
                return self._access_token

        if self._access_token:
            logger.warning("Using potentially stale Antigravity access token")
            return self._access_token

        raise RuntimeError(
            "No valid Antigravity credentials. "
            "Run: uv run python core/antigravity_auth.py auth account add"
        )

    # --- Request building -------------------------------------------------- #

    def _build_body(
        self,
        messages: list[dict[str, Any]],
        system: str,
        tools: list[Tool] | None,
        max_tokens: int,
    ) -> dict[str, Any]:
        contents = _to_gemini_contents(messages, self._thought_sigs)
        inner: dict[str, Any] = {
            "contents": contents,
            "generationConfig": {"maxOutputTokens": max_tokens},
        }
        if system:
            inner["systemInstruction"] = {"parts": [{"text": system}]}
        if tools:
            inner["tools"] = [
                {
                    "functionDeclarations": [
                        {
                            "name": _clean_tool_name(t.name),
                            "description": t.description,
                            "parameters": t.parameters
                            or {
                                "type": "object",
                                "properties": {},
                            },
                        }
                        for t in tools
                    ]
                }
            ]
        return {
            "project": self._project_id,
            "model": self.model,
            "request": inner,
            "requestType": "agent",
            "userAgent": "antigravity",
            "requestId": f"agent-{uuid.uuid4()}",
        }
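
    # Illustration (not part of the diff): for a single user message and no tools,
    # _build_body returns an envelope of this shape (placeholder values):
    #     {
    #         "project": "<project-id>",
    #         "model": "gemini-3-flash",
    #         "request": {
    #             "contents": [{"role": "user", "parts": [{"text": "hello"}]}],
    #             "generationConfig": {"maxOutputTokens": 1024},
    #         },
    #         "requestType": "agent",
    #         "userAgent": "antigravity",
    #         "requestId": "agent-<uuid4>",
    #     }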

    # --- HTTP transport ---------------------------------------------------- #

    def _post(self, body: dict[str, Any], *, streaming: bool) -> Any:
        """POST to the Antigravity endpoint, falling back through the endpoint list."""
        import urllib.error  # noqa: PLC0415
        import urllib.request  # noqa: PLC0415

        token = self._ensure_token()
        body_bytes = json.dumps(body).encode("utf-8")
        path = (
            "/v1internal:streamGenerateContent?alt=sse"
            if streaming
            else "/v1internal:generateContent"
        )
        headers = {
            **_BASE_HEADERS,
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        }
        if streaming:
            headers["Accept"] = "text/event-stream"

        last_exc: Exception | None = None
        for base_url in _ENDPOINTS:
            url = f"{base_url}{path}"
            req = urllib.request.Request(url, data=body_bytes, headers=headers, method="POST")
            try:
                return urllib.request.urlopen(req, timeout=120)  # noqa: S310
            except urllib.error.HTTPError as exc:
                if exc.code in (401, 403) and self._refresh_token:
                    # Token rejected — refresh once and retry this endpoint.
                    result = _do_token_refresh(self._refresh_token)
                    if result:
                        self._access_token, self._token_expires_at = result
                        headers["Authorization"] = f"Bearer {self._access_token}"
                        req2 = urllib.request.Request(
                            url, data=body_bytes, headers=headers, method="POST"
                        )
                        try:
                            return urllib.request.urlopen(req2, timeout=120)  # noqa: S310
                        except urllib.error.HTTPError as exc2:
                            last_exc = exc2
                            continue
                    last_exc = exc
                    continue
                elif exc.code >= 500:
                    last_exc = exc
                    continue
                # Include the API response body in the exception for easier debugging.
                try:
                    err_body = exc.read().decode("utf-8", errors="replace")
                except Exception:
                    err_body = "(unreadable)"
                raise RuntimeError(f"Antigravity HTTP {exc.code} from {url}: {err_body}") from exc
            except (urllib.error.URLError, OSError) as exc:
                last_exc = exc
                continue

        raise RuntimeError(
            f"All Antigravity endpoints failed. Last error: {last_exc}"
        ) from last_exc

    # --- LLMProvider interface --------------------------------------------- #

    def complete(
        self,
        messages: list[dict[str, Any]],
        system: str = "",
        tools: list[Tool] | None = None,
        max_tokens: int = 1024,
        response_format: dict[str, Any] | None = None,
        json_mode: bool = False,
        max_retries: int | None = None,
    ) -> LLMResponse:
        if json_mode:
            suffix = "\n\nPlease respond with a valid JSON object."
            system = (system + suffix) if system else suffix.strip()

        body = self._build_body(messages, system, tools, max_tokens)
        resp = self._post(body, streaming=False)
        return _parse_complete_response(json.loads(resp.read()), self.model)

    async def stream(
        self,
        messages: list[dict[str, Any]],
        system: str = "",
        tools: list[Tool] | None = None,
        max_tokens: int = 4096,
    ) -> AsyncIterator[StreamEvent]:
        import asyncio  # noqa: PLC0415
        import concurrent.futures  # noqa: PLC0415

        loop = asyncio.get_running_loop()
        queue: asyncio.Queue[StreamEvent | None] = asyncio.Queue()

        def _blocking_work() -> None:
            try:
                body = self._build_body(messages, system, tools, max_tokens)
                http_resp = self._post(body, streaming=True)
                for event in _parse_sse_stream(
                    http_resp, self.model, self._thought_sigs.__setitem__
                ):
                    loop.call_soon_threadsafe(queue.put_nowait, event)
            except Exception as exc:
                logger.error("Antigravity stream error: %s", exc)
                loop.call_soon_threadsafe(queue.put_nowait, StreamErrorEvent(error=str(exc)))
            finally:
                loop.call_soon_threadsafe(queue.put_nowait, None)  # sentinel

        executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
        fut = loop.run_in_executor(executor, _blocking_work)
        try:
            while True:
                event = await queue.get()
                if event is None:
                    break
                yield event
        finally:
            await fut
            executor.shutdown(wait=False)
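
# Usage sketch (illustration, not part of the diff; assumes credentials already
# exist on disk, per _init_credentials above):
#
#     provider = AntigravityProvider(model="gemini-3-flash")
#     if provider.has_credentials():
#         resp = provider.complete([{"role": "user", "content": "ping"}], max_tokens=64)
#         print(resp.content, resp.stop_reason)
#
# stream() is an async generator, so it is consumed with "async for event in ...".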
@@ -0,0 +1,106 @@
"""Model capability checks for LLM providers.

Vision support rules are derived from official vendor documentation:
- ZAI (z.ai): docs.z.ai/guides/vlm — GLM-4.6V variants are vision; GLM-5/4.6/4.7 are text-only
- MiniMax: platform.minimax.io/docs — minimax-vl-01 is vision; M2.x are text-only
- DeepSeek: api-docs.deepseek.com — deepseek-vl2 is vision; chat/reasoner are text-only
- Cerebras: inference-docs.cerebras.ai — no vision models at all
- Groq: console.groq.com/docs/vision — vision capable; treat as supported by default
- Ollama/LM Studio/vLLM/llama.cpp: local runners denied by default; model names
  don't reliably indicate vision support, so users must configure explicitly
"""

from __future__ import annotations


def _model_name(model: str) -> str:
    """Return the bare model name after stripping any 'provider/' prefix."""
    if "/" in model:
        return model.split("/", 1)[1]
    return model


# Step 1: explicit vision allow-list — these always support images regardless
# of what the provider-level rules say. Checked first so that e.g. glm-4.6v
# is allowed even though glm-4.6 is denied.
_VISION_ALLOW_BARE_PREFIXES: tuple[str, ...] = (
    # ZAI/GLM vision models (docs.z.ai/guides/vlm)
    "glm-4v",  # GLM-4V series (legacy)
    "glm-4.6v",  # GLM-4.6V, GLM-4.6V-flash, GLM-4.6V-flashx
    # DeepSeek vision models
    "deepseek-vl",  # deepseek-vl2, deepseek-vl2-small, deepseek-vl2-tiny
    # MiniMax vision model
    "minimax-vl",  # minimax-vl-01
)

# Step 2: provider-level deny — every model from this provider is text-only.
_TEXT_ONLY_PROVIDER_PREFIXES: tuple[str, ...] = (
    # Cerebras: inference-docs.cerebras.ai lists only text models
    "cerebras/",
    # Local runners: model names don't reliably indicate vision support
    "ollama/",
    "ollama_chat/",
    "lm_studio/",
    "vllm/",
    "llamacpp/",
)

# Step 3: per-model deny — text-only models within otherwise mixed providers.
# Matched against the bare model name (provider prefix stripped, lower-cased).
# The vision allow-list above is checked first, so vision variants of the same
# family are already handled before these deny patterns are reached.
_TEXT_ONLY_MODEL_BARE_PREFIXES: tuple[str, ...] = (
    # --- ZAI / GLM family ---
    # text-only: glm-5, glm-4.6, glm-4.7, glm-4.5, zai-glm-*
    # vision: glm-4v, glm-4.6v (caught by allow-list above)
    "glm-5",
    "glm-4.6",  # bare glm-4.6 is text-only; glm-4.6v is caught by allow-list
    "glm-4.7",
    "glm-4.5",
    "zai-glm",
    # --- DeepSeek ---
    # text-only: deepseek-chat, deepseek-coder, deepseek-reasoner
    # vision: deepseek-vl2 (caught by allow-list above)
    # Note: LiteLLM's deepseek handler may flatten content lists for some models;
    # VL models are allowed through and rely on LiteLLM's native VL support.
    "deepseek-chat",
    "deepseek-coder",
    "deepseek-reasoner",
    # --- MiniMax ---
    # text-only: minimax-m2.*, minimax-text-*, abab* (legacy)
    # vision: minimax-vl-01 (caught by allow-list above)
    "minimax-m2",
    "minimax-text",
    "abab",
)


def supports_image_tool_results(model: str) -> bool:
    """Return whether *model* can receive image content in messages.

    Used to gate both user-message images and tool-result image blocks.

    Logic (checked in order):
    1. Vision allow-list → True (known vision model, skip all denies)
    2. Provider deny → False (entire provider is text-only)
    3. Model deny → False (specific text-only model within a mixed provider)
    4. Default → True (assume capable; unknown providers and models)
    """
    model_lower = model.lower()
    bare = _model_name(model_lower)

    # 1. Explicit vision allow — takes priority over all denies
    if any(bare.startswith(p) for p in _VISION_ALLOW_BARE_PREFIXES):
        return True

    # 2. Provider-level deny (all models from this provider are text-only)
    if any(model_lower.startswith(p) for p in _TEXT_ONLY_PROVIDER_PREFIXES):
        return False

    # 3. Per-model deny (text-only variants within mixed-capability families)
    if any(bare.startswith(p) for p in _TEXT_ONLY_MODEL_BARE_PREFIXES):
        return False

    # 4. Default: assume vision capable
    # Covers: OpenAI, Anthropic, Google, Mistral, Kimi, and other hosted providers
    return True
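
# Illustration (not part of the diff; provider-prefixed names are examples, and
# "some-model" is hypothetical):
#     supports_image_tool_results("zai/glm-4.6v")           -> True   (1. allow-list)
#     supports_image_tool_results("cerebras/some-model")    -> False  (2. provider deny)
#     supports_image_tool_results("deepseek/deepseek-chat") -> False  (3. model deny)
#     supports_image_tool_results("openai/gpt-4o")          -> True   (4. default)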
+710 -14
@@ -7,9 +7,13 @@ Groq, and local models.
See: https://docs.litellm.ai/docs/providers
"""

import ast
import asyncio
import hashlib
import json
import logging
import os
import re
import time
from collections.abc import AsyncIterator
from datetime import datetime
@@ -23,6 +27,7 @@ except ImportError:
    litellm = None  # type: ignore[assignment]
    RateLimitError = Exception  # type: ignore[assignment, misc]

from framework.config import HIVE_LLM_ENDPOINT as HIVE_API_BASE
from framework.llm.provider import LLMProvider, LLMResponse, Tool
from framework.llm.stream_events import StreamEvent

@@ -43,8 +48,17 @@ def _patch_litellm_anthropic_oauth() -> None:
    """
    try:
        from litellm.llms.anthropic.common_utils import AnthropicModelInfo
        from litellm.types.llms.anthropic import ANTHROPIC_OAUTH_TOKEN_PREFIX
        from litellm.types.llms.anthropic import (
            ANTHROPIC_OAUTH_BETA_HEADER,
            ANTHROPIC_OAUTH_TOKEN_PREFIX,
        )
    except ImportError:
        logger.warning(
            "Could not apply litellm Anthropic OAuth patch — litellm internals may have "
            "changed. Anthropic OAuth tokens (Claude Code subscriptions) may fail with 401. "
            "See BerriAI/litellm#19618. Current litellm version: %s",
            getattr(litellm, "__version__", "unknown"),
        )
        return

    original = AnthropicModelInfo.validate_environment

@@ -62,9 +76,27 @@ def _patch_litellm_anthropic_oauth() -> None:
            api_key=api_key,
            api_base=api_base,
        )
        # Check both authorization header and x-api-key for OAuth tokens.
        # litellm's optionally_handle_anthropic_oauth only checks headers["authorization"],
        # but hive passes OAuth tokens via api_key — so litellm puts them into x-api-key.
        # Anthropic rejects OAuth tokens in x-api-key; they must go in Authorization: Bearer.
        auth = result.get("authorization", "")
        if auth.startswith(f"Bearer {ANTHROPIC_OAUTH_TOKEN_PREFIX}"):
        x_api_key = result.get("x-api-key", "")
        oauth_prefix = f"Bearer {ANTHROPIC_OAUTH_TOKEN_PREFIX}"
        auth_is_oauth = auth.startswith(oauth_prefix)
        key_is_oauth = x_api_key.startswith(ANTHROPIC_OAUTH_TOKEN_PREFIX)
        if auth_is_oauth or key_is_oauth:
            token = x_api_key if key_is_oauth else auth.removeprefix("Bearer ").strip()
            result.pop("x-api-key", None)
            result["authorization"] = f"Bearer {token}"
            # Merge the OAuth beta header with any existing beta headers.
            existing_beta = result.get("anthropic-beta", "")
            beta_parts = (
                [b.strip() for b in existing_beta.split(",") if b.strip()] if existing_beta else []
            )
            if ANTHROPIC_OAUTH_BETA_HEADER not in beta_parts:
                beta_parts.append(ANTHROPIC_OAUTH_BETA_HEADER)
            result["anthropic-beta"] = ",".join(beta_parts)
        return result

    AnthropicModelInfo.validate_environment = _patched_validate_environment
@@ -86,10 +118,12 @@ def _patch_litellm_metadata_nonetype() -> None:
    """
    import functools

    patched_count = 0
    for fn_name in ("completion", "acompletion", "responses", "aresponses"):
        original = getattr(litellm, fn_name, None)
        if original is None:
            continue
        patched_count += 1
        if asyncio.iscoroutinefunction(original):

            @functools.wraps(original)
@@ -109,37 +143,122 @@ def _patch_litellm_metadata_nonetype() -> None:

        setattr(litellm, fn_name, _sync_wrapper)

    if patched_count == 0:
        logger.warning(
            "Could not apply litellm metadata=None patch — none of the expected entry "
            "points (completion, acompletion, responses, aresponses) were found. "
            "metadata=None TypeError may occur. Current litellm version: %s",
            getattr(litellm, "__version__", "unknown"),
        )


if litellm is not None:
    _patch_litellm_anthropic_oauth()
    _patch_litellm_metadata_nonetype()
    # Let litellm silently drop params unsupported by the target provider
    # (e.g. stream_options for Anthropic) instead of forwarding them verbatim.
    litellm.drop_params = True

RATE_LIMIT_MAX_RETRIES = 10
RATE_LIMIT_BACKOFF_BASE = 2  # seconds
RATE_LIMIT_MAX_DELAY = 120  # seconds - cap to prevent absurd waits
MINIMAX_API_BASE = "https://api.minimax.io/v1"
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"

# Providers that accept cache_control on message content blocks.
# Anthropic: native ephemeral caching. MiniMax & Z-AI/GLM: pass-through to their APIs.
# (OpenAI caches automatically server-side; Groq/Gemini/etc. strip the header.)
_CACHE_CONTROL_PREFIXES = ("anthropic/", "claude-", "minimax/", "minimax-", "MiniMax-", "zai-glm", "glm-")
_CACHE_CONTROL_PREFIXES = (
    "anthropic/",
    "claude-",
    "minimax/",
    "minimax-",
    "MiniMax-",
    "zai-glm",
    "glm-",
)


def _model_supports_cache_control(model: str) -> bool:
    return any(model.startswith(p) for p in _CACHE_CONTROL_PREFIXES)


# Kimi For Coding uses an Anthropic-compatible endpoint (no /v1 suffix).
# Claude Code integration uses this format; the /v1 OpenAI-compatible endpoint
# enforces a coding-agent whitelist that blocks unknown User-Agents.
KIMI_API_BASE = "https://api.kimi.com/coding"

# Claude Code OAuth subscription: the Anthropic API requires a specific
# User-Agent and a billing integrity header for OAuth-authenticated requests.
CLAUDE_CODE_VERSION = "2.1.76"
CLAUDE_CODE_USER_AGENT = f"claude-code/{CLAUDE_CODE_VERSION}"
_CLAUDE_CODE_BILLING_SALT = "59cf53e54c78"


def _sample_js_code_unit(text: str, idx: int) -> str:
    """Return the character at UTF-16 code unit index *idx*, matching JS semantics."""
    encoded = text.encode("utf-16-le")
    unit_offset = idx * 2
    if unit_offset + 2 > len(encoded):
        return "0"
    code_unit = int.from_bytes(encoded[unit_offset : unit_offset + 2], "little")
    return chr(code_unit)
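
# Worked example (not part of the diff): for ASCII text each character is one
# UTF-16 code unit, so _sample_js_code_unit("hello world", 4) == "o", and an
# out-of-range index returns "0". Non-BMP characters occupy two code units,
# matching JavaScript string indexing: _sample_js_code_unit("\U0001F600a", 2) == "a".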

def _claude_code_billing_header(messages: list[dict[str, Any]]) -> str:
    """Build the billing integrity system block required by Anthropic's OAuth path."""
    # Find the first user message text
    first_text = ""
    for msg in messages:
        if msg.get("role") != "user":
            continue
        content = msg.get("content")
        if isinstance(content, str):
            first_text = content
            break
        if isinstance(content, list):
            for block in content:
                if isinstance(block, dict) and block.get("type") == "text" and block.get("text"):
                    first_text = block["text"]
                    break
            if first_text:
                break

    sampled = "".join(_sample_js_code_unit(first_text, i) for i in (4, 7, 20))
    version_hash = hashlib.sha256(
        f"{_CLAUDE_CODE_BILLING_SALT}{sampled}{CLAUDE_CODE_VERSION}".encode()
    ).hexdigest()
    entrypoint = os.environ.get("CLAUDE_CODE_ENTRYPOINT", "").strip() or "cli"
    return (
        f"x-anthropic-billing-header: cc_version={CLAUDE_CODE_VERSION}.{version_hash[:3]}; "
        f"cc_entrypoint={entrypoint}; cch=00000;"
    )
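
# Illustration (not part of the diff): with CLAUDE_CODE_ENTRYPOINT unset, the
# returned block looks like (hash digits hypothetical):
#     x-anthropic-billing-header: cc_version=2.1.76.a1b; cc_entrypoint=cli; cch=00000;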

# Empty-stream retries use a short fixed delay, not the rate-limit backoff.
# Conversation-structure issues are deterministic — long waits don't help.
EMPTY_STREAM_MAX_RETRIES = 3
EMPTY_STREAM_RETRY_DELAY = 1.0  # seconds
OPENROUTER_TOOL_COMPAT_ERROR_SNIPPETS = (
    "no endpoints found that support tool use",
    "no endpoints available that support tool use",
    "provider routing",
)
OPENROUTER_TOOL_CALL_RE = re.compile(
    r"<\|tool_call_start\|>\s*(.*?)\s*<\|tool_call_end\|>",
    re.DOTALL,
)
OPENROUTER_TOOL_COMPAT_CACHE_TTL_SECONDS = 3600
# OpenRouter routing can change over time, so tool-compat caching must expire.
OPENROUTER_TOOL_COMPAT_MODEL_CACHE: dict[str, float] = {}

# Directory for dumping failed requests
FAILED_REQUESTS_DIR = Path.home() / ".hive" / "failed_requests"

# Maximum number of dump files to retain in ~/.hive/failed_requests/.
# Older files are pruned automatically to prevent unbounded disk growth.
MAX_FAILED_REQUEST_DUMPS = 50

def _estimate_tokens(model: str, messages: list[dict]) -> tuple[int, str]:
    """Estimate token count for messages. Returns (token_count, method)."""
@@ -156,6 +275,42 @@
    return total_chars // 4, "estimate"


def _prune_failed_request_dumps(max_files: int = MAX_FAILED_REQUEST_DUMPS) -> None:
    """Remove oldest dump files when the count exceeds *max_files*.

    Best-effort: never raises — a pruning failure must not break retry logic.
    """
    try:
        all_dumps = sorted(
            FAILED_REQUESTS_DIR.glob("*.json"),
            key=lambda f: f.stat().st_mtime,
        )
        excess = len(all_dumps) - max_files
        if excess > 0:
            for old_file in all_dumps[:excess]:
                old_file.unlink(missing_ok=True)
    except Exception:
        pass  # Best-effort — never block the caller


def _remember_openrouter_tool_compat_model(model: str) -> None:
    """Cache OpenRouter tool-compat fallback for a bounded time window."""
    OPENROUTER_TOOL_COMPAT_MODEL_CACHE[model] = (
        time.monotonic() + OPENROUTER_TOOL_COMPAT_CACHE_TTL_SECONDS
    )


def _is_openrouter_tool_compat_cached(model: str) -> bool:
    """Return True when the cached OpenRouter compat entry is still fresh."""
    expires_at = OPENROUTER_TOOL_COMPAT_MODEL_CACHE.get(model)
    if expires_at is None:
        return False
    if expires_at <= time.monotonic():
        OPENROUTER_TOOL_COMPAT_MODEL_CACHE.pop(model, None)
        return False
    return True


def _dump_failed_request(
    model: str,
    kwargs: dict[str, Any],
@@ -187,6 +342,9 @@
    with open(filepath, "w", encoding="utf-8") as f:
        json.dump(dump_data, f, indent=2, default=str)

    # Prune old dumps to prevent unbounded disk growth
    _prune_failed_request_dumps()

    return str(filepath)

@@ -348,15 +506,27 @@ class LiteLLMProvider(LLMProvider):
            # Strip a trailing /v1 in case the user's saved config has the old value.
            if api_base and api_base.rstrip("/").endswith("/v1"):
                api_base = api_base.rstrip("/")[:-3]
        elif model.lower().startswith("hive/"):
            model = "anthropic/" + model[len("hive/") :]
            if api_base and api_base.rstrip("/").endswith("/v1"):
                api_base = api_base.rstrip("/")[:-3]
        self.model = model
        self.api_key = api_key
        self.api_base = api_base or self._default_api_base_for_model(_original_model)
        self.extra_kwargs = kwargs
        # Detect Claude Code OAuth subscription by checking the api_key prefix.
        self._claude_code_oauth = bool(api_key and api_key.startswith("sk-ant-oat"))
        if self._claude_code_oauth:
            # Anthropic requires a specific User-Agent for OAuth requests.
            eh = self.extra_kwargs.setdefault("extra_headers", {})
            eh.setdefault("user-agent", CLAUDE_CODE_USER_AGENT)
        # The Codex ChatGPT backend (chatgpt.com/backend-api/codex) rejects
        # several standard OpenAI params: max_output_tokens, stream_options.
        self._codex_backend = bool(
            self.api_base and "chatgpt.com/backend-api/codex" in self.api_base
        )
        # Antigravity routes through a local OpenAI-compatible proxy — no patches needed.
        self._antigravity = bool(self.api_base and "localhost:8069" in self.api_base)

        if litellm is None:
            raise ImportError(
@@ -375,8 +545,12 @@ class LiteLLMProvider(LLMProvider):
        model_lower = model.lower()
        if model_lower.startswith("minimax/") or model_lower.startswith("minimax-"):
            return MINIMAX_API_BASE
        if model_lower.startswith("openrouter/"):
            return OPENROUTER_API_BASE
        if model_lower.startswith("kimi/"):
            return KIMI_API_BASE
        if model_lower.startswith("hive/"):
            return HIVE_API_BASE
        return None

    def _completion_with_rate_limit_retry(
@@ -715,6 +889,9 @@ class LiteLLMProvider(LLMProvider):
            return await self._collect_stream_to_response(stream_iter)

        full_messages: list[dict[str, Any]] = []
        if self._claude_code_oauth:
            billing = _claude_code_billing_header(messages)
            full_messages.append({"role": "system", "content": billing})
        if system:
            sys_msg: dict[str, Any] = {"role": "system", "content": system}
            if _model_supports_cache_control(self.model):
@@ -776,11 +953,504 @@ class LiteLLMProvider(LLMProvider):
                },
            }

    def _is_anthropic_model(self) -> bool:
        """Return True when the configured model targets Anthropic."""
        model = (self.model or "").lower()
        return model.startswith("anthropic/") or model.startswith("claude-")

    def _is_minimax_model(self) -> bool:
        """Return True when the configured model targets MiniMax."""
        model = (self.model or "").lower()
        return model.startswith("minimax/") or model.startswith("minimax-")

    def _is_openrouter_model(self) -> bool:
        """Return True when the configured model targets OpenRouter."""
        model = (self.model or "").lower()
        if model.startswith("openrouter/"):
            return True
        api_base = (self.api_base or "").lower()
        return "openrouter.ai/api/v1" in api_base

    def _should_use_openrouter_tool_compat(
        self,
        error: BaseException,
        tools: list[Tool] | None,
    ) -> bool:
        """Return True when OpenRouter rejects native tool use for the model."""
        if not tools or not self._is_openrouter_model():
            return False
        error_text = str(error).lower()
        return "openrouter" in error_text and any(
            snippet in error_text for snippet in OPENROUTER_TOOL_COMPAT_ERROR_SNIPPETS
        )

    @staticmethod
    def _extract_json_object(text: str) -> dict[str, Any] | None:
        """Extract the first JSON object from a model response."""
        candidates = [text.strip()]

        stripped = text.strip()
        if stripped.startswith("```"):
            fence_lines = stripped.splitlines()
            if len(fence_lines) >= 3:
                candidates.append("\n".join(fence_lines[1:-1]).strip())

        decoder = json.JSONDecoder()
        for candidate in candidates:
            if not candidate:
                continue
            try:
                parsed = json.loads(candidate)
            except json.JSONDecodeError:
                parsed = None
            if isinstance(parsed, dict):
                return parsed

            for start_idx, char in enumerate(candidate):
                if char != "{":
                    continue
                try:
                    parsed, _ = decoder.raw_decode(candidate[start_idx:])
                except json.JSONDecodeError:
                    continue
                if isinstance(parsed, dict):
                    return parsed
        return None
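
    # Illustration (not part of the diff): both fenced and prose-wrapped replies parse.
    #     _extract_json_object('```json\n{"a": 1}\n```')           -> {"a": 1}
    #     _extract_json_object('Sure: {"a": 1}, hope that helps')  -> {"a": 1}
    #     _extract_json_object('no json here')                     -> None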

    def _parse_openrouter_tool_compat_response(
        self,
        content: str,
        tools: list[Tool],
    ) -> tuple[str, list[dict[str, Any]]]:
        """Parse JSON tool-compat output into assistant text and tool calls."""
        payload = self._extract_json_object(content)
        if payload is None:
            text_tool_content, text_tool_calls = self._parse_openrouter_text_tool_calls(
                content,
                tools,
            )
            if text_tool_calls:
                logger.info(
                    "[openrouter-tool-compat] Parsed textual tool-call markers for %s",
                    self.model,
                )
                return text_tool_content, text_tool_calls
            logger.info(
                "[openrouter-tool-compat] %s returned non-JSON fallback content; "
                "treating it as plain text.",
                self.model,
            )
            return content.strip(), []

        assistant_text = payload.get("assistant_response")
        if not isinstance(assistant_text, str):
            assistant_text = payload.get("content")
        if not isinstance(assistant_text, str):
            assistant_text = payload.get("response")
        if not isinstance(assistant_text, str):
            assistant_text = ""

        tool_calls_raw = payload.get("tool_calls")
        if not tool_calls_raw and {"name", "arguments"} <= payload.keys():
            tool_calls_raw = [payload]
        elif isinstance(payload.get("tool_call"), dict):
            tool_calls_raw = [payload["tool_call"]]

        if not isinstance(tool_calls_raw, list):
            tool_calls_raw = []

        allowed_tool_names = {tool.name for tool in tools}
        tool_calls: list[dict[str, Any]] = []
        compat_prefix = f"openrouter_compat_{time.time_ns()}"

        for idx, raw_call in enumerate(tool_calls_raw):
            if not isinstance(raw_call, dict):
                continue

            function_block = raw_call.get("function")
            function_name = (
                raw_call.get("name")
                or raw_call.get("tool_name")
                or (function_block.get("name") if isinstance(function_block, dict) else None)
            )
            if not isinstance(function_name, str) or function_name not in allowed_tool_names:
                if function_name:
                    logger.warning(
                        "[openrouter-tool-compat] Ignoring unknown tool '%s' for model %s",
                        function_name,
                        self.model,
                    )
                continue

            arguments = raw_call.get("arguments")
            if arguments is None:
                arguments = raw_call.get("tool_input")
            if arguments is None:
                arguments = raw_call.get("input")
            if arguments is None and isinstance(function_block, dict):
                arguments = function_block.get("arguments")
            if arguments is None:
                arguments = {}

            if isinstance(arguments, str):
                try:
                    arguments = json.loads(arguments)
                except json.JSONDecodeError:
                    arguments = {"_raw": arguments}
            elif not isinstance(arguments, dict):
                arguments = {"value": arguments}

            tool_calls.append(
                {
                    "id": f"{compat_prefix}_{idx}",
                    "name": function_name,
                    "input": arguments,
                }
            )

        return assistant_text.strip(), tool_calls

    @staticmethod
    def _close_truncated_json_fragment(fragment: str) -> str:
        """Close a truncated JSON fragment by balancing quotes/brackets."""
        stack: list[str] = []
        in_string = False
        escaped = False
        normalized = fragment.rstrip()

        while normalized and normalized[-1] in ",:{[":
            normalized = normalized[:-1].rstrip()

        for char in normalized:
            if in_string:
                if escaped:
                    escaped = False
                elif char == "\\":
                    escaped = True
                elif char == '"':
                    in_string = False
                continue

            if char == '"':
                in_string = True
            elif char in "{[":
                stack.append(char)
            elif char == "}" and stack and stack[-1] == "{":
                stack.pop()
            elif char == "]" and stack and stack[-1] == "[":
                stack.pop()

        if in_string:
            if escaped:
                normalized = normalized[:-1]
            normalized += '"'

        for opener in reversed(stack):
            normalized += "}" if opener == "{" else "]"

        return normalized
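
    # Worked examples (not part of the diff):
    #     _close_truncated_json_fragment('{"a": [1, 2')  -> '{"a": [1, 2]}'
    #     _close_truncated_json_fragment('{"a": "tru')   -> '{"a": "tru"}'
    #     _close_truncated_json_fragment('{"a": 1,')     -> '{"a": 1}'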

    def _repair_truncated_tool_arguments(self, raw_arguments: str) -> dict[str, Any] | None:
        """Try to recover a truncated JSON object from tool-call arguments."""
        stripped = raw_arguments.strip()
        if not stripped or stripped[0] != "{":
            return None

        max_trim = min(len(stripped), 256)
        for trim in range(max_trim + 1):
            candidate = stripped[: len(stripped) - trim].rstrip()
            if not candidate:
                break
            candidate = self._close_truncated_json_fragment(candidate)
            try:
                parsed = json.loads(candidate)
            except json.JSONDecodeError:
                continue
            if isinstance(parsed, dict):
                return parsed
        return None

    def _parse_tool_call_arguments(self, raw_arguments: str, tool_name: str) -> dict[str, Any]:
        """Parse streamed tool arguments, repairing truncation when possible."""
        try:
            parsed = json.loads(raw_arguments) if raw_arguments else {}
        except json.JSONDecodeError:
            parsed = None

        if isinstance(parsed, dict):
            return parsed

        repaired = self._repair_truncated_tool_arguments(raw_arguments)
        if repaired is not None:
            logger.warning(
                "[tool-args] Recovered truncated arguments for %s on %s",
                tool_name,
                self.model,
            )
            return repaired

        raise ValueError(
            f"Failed to parse tool call arguments for '{tool_name}' (likely truncated JSON)."
        )

    def _parse_openrouter_text_tool_calls(
        self,
        content: str,
        tools: list[Tool],
    ) -> tuple[str, list[dict[str, Any]]]:
        """Parse textual OpenRouter tool calls into synthetic tool calls.

        Supports both:
        - Marker-wrapped payloads: <|tool_call_start|>...<|tool_call_end|>
        - Plain one-line tool calls: ask_user("...", ["..."])
        """
        tools_by_name = {tool.name: tool for tool in tools}
        compat_prefix = f"openrouter_compat_{time.time_ns()}"
        tool_calls: list[dict[str, Any]] = []
        segment_index = 0

        for match in OPENROUTER_TOOL_CALL_RE.finditer(content):
            parsed_calls = self._parse_openrouter_text_tool_call_block(
                block=match.group(1),
                tools_by_name=tools_by_name,
                compat_prefix=f"{compat_prefix}_{segment_index}",
            )
            if parsed_calls:
                segment_index += 1
                tool_calls.extend(parsed_calls)

        stripped_content = OPENROUTER_TOOL_CALL_RE.sub("", content)
        retained_lines: list[str] = []
        for line in stripped_content.splitlines():
            stripped_line = line.strip()
            if not stripped_line:
                retained_lines.append(line)
                continue

            candidate = stripped_line
            if candidate.startswith("`") and candidate.endswith("`") and len(candidate) > 1:
                candidate = candidate[1:-1].strip()

            parsed_calls = self._parse_openrouter_text_tool_call_block(
                block=candidate,
                tools_by_name=tools_by_name,
                compat_prefix=f"{compat_prefix}_{segment_index}",
            )
            if parsed_calls:
                segment_index += 1
                tool_calls.extend(parsed_calls)
                continue

            retained_lines.append(line)

        stripped_text = "\n".join(retained_lines).strip()
        return stripped_text, tool_calls

    def _parse_openrouter_text_tool_call_block(
        self,
        block: str,
        tools_by_name: dict[str, Tool],
        compat_prefix: str,
    ) -> list[dict[str, Any]]:
        """Parse a single textual tool-call block like [tool(arg='x')]."""
        try:
            parsed = ast.parse(block.strip(), mode="eval").body
        except SyntaxError:
            return []

        call_nodes = parsed.elts if isinstance(parsed, ast.List) else [parsed]
        tool_calls: list[dict[str, Any]] = []

        for call_index, call_node in enumerate(call_nodes):
            if not isinstance(call_node, ast.Call) or not isinstance(call_node.func, ast.Name):
                continue

            tool_name = call_node.func.id
            tool = tools_by_name.get(tool_name)
            if tool is None:
                continue

            try:
                tool_input = self._parse_openrouter_text_tool_call_arguments(
                    call_node=call_node,
                    tool=tool,
                )
            except (ValueError, SyntaxError):
                continue

            tool_calls.append(
                {
                    "id": f"{compat_prefix}_{call_index}",
                    "name": tool_name,
                    "input": tool_input,
                }
            )

        return tool_calls

    @staticmethod
    def _parse_openrouter_text_tool_call_arguments(
        call_node: ast.Call,
        tool: Tool,
    ) -> dict[str, Any]:
        """Parse positional/keyword args from a textual tool call."""
        properties = tool.parameters.get("properties", {})
        positional_keys = list(properties.keys())
        tool_input: dict[str, Any] = {}

        if len(call_node.args) > len(positional_keys):
            raise ValueError("Too many positional args for textual tool call")

        for idx, arg_node in enumerate(call_node.args):
            tool_input[positional_keys[idx]] = ast.literal_eval(arg_node)

        for kwarg in call_node.keywords:
            if kwarg.arg is None:
                raise ValueError("Star args are not supported in textual tool calls")
            tool_input[kwarg.arg] = ast.literal_eval(kwarg.value)

        return tool_input
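
    # Illustration (not part of the diff): for a hypothetical Tool whose JSON schema
    # declares properties {"question", "options"} in that order, the textual call
    #     ask_user("Deploy now?", options=["yes", "no"])
    # parses to {"question": "Deploy now?", "options": ["yes", "no"]}.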

    def _build_openrouter_tool_compat_messages(
        self,
        messages: list[dict[str, Any]],
        system: str,
        tools: list[Tool],
    ) -> list[dict[str, Any]]:
        """Build a JSON-only prompt for models without native tool support."""
        tool_specs = [
            {
                "name": tool.name,
                "description": tool.description,
                "parameters": tool.parameters,
            }
            for tool in tools
        ]
        compat_instruction = (
            "Tool compatibility mode is active because this OpenRouter model does not support "
            "native function calling on the routed provider.\n"
            "Return exactly one JSON object and nothing else.\n"
            'Schema: {"assistant_response": string, '
            '"tool_calls": [{"name": string, "arguments": object}]}\n'
            "Rules:\n"
            "- If a tool is required, put one or more entries in tool_calls "
            "and do not invent tool results.\n"
            "- If no tool is required, set tool_calls to [] and put the full "
            "answer in assistant_response.\n"
            "- Only use tool names from the allowed tool list.\n"
            "- arguments must always be valid JSON objects.\n"
            f"Allowed tools:\n{json.dumps(tool_specs, ensure_ascii=True)}"
        )
        compat_system = compat_instruction if not system else f"{system}\n\n{compat_instruction}"

        full_messages: list[dict[str, Any]] = [{"role": "system", "content": compat_system}]
        full_messages.extend(messages)
        return [
            message
            for message in full_messages
            if not (
                message.get("role") == "assistant"
                and not message.get("content")
                and not message.get("tool_calls")
            )
        ]
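
    # Illustration (not part of the diff): a compliant model reply under the schema
    # above is a single JSON object ("read_file" is a placeholder tool name):
    #     {"assistant_response": "Checking the file now.",
    #      "tool_calls": [{"name": "read_file", "arguments": {"path": "a.py"}}]}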

    async def _acomplete_via_openrouter_tool_compat(
        self,
        messages: list[dict[str, Any]],
        system: str,
        tools: list[Tool],
        max_tokens: int,
    ) -> LLMResponse:
        """Emulate tool calling via JSON when OpenRouter rejects native tools."""
        full_messages = self._build_openrouter_tool_compat_messages(messages, system, tools)
        kwargs: dict[str, Any] = {
            "model": self.model,
            "messages": full_messages,
            "max_tokens": max_tokens,
            **self.extra_kwargs,
        }
        if self.api_key:
            kwargs["api_key"] = self.api_key
        if self.api_base:
            kwargs["api_base"] = self.api_base

        response = await self._acompletion_with_rate_limit_retry(**kwargs)
        raw_content = response.choices[0].message.content or ""
        assistant_text, tool_calls = self._parse_openrouter_tool_compat_response(
            raw_content,
            tools,
        )
        usage = response.usage
        input_tokens = usage.prompt_tokens if usage else 0
        output_tokens = usage.completion_tokens if usage else 0
        stop_reason = "tool_calls" if tool_calls else (response.choices[0].finish_reason or "stop")

        return LLMResponse(
            content=assistant_text,
            model=response.model or self.model,
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            stop_reason=stop_reason,
            raw_response={
                "compat_mode": "openrouter_tool_emulation",
                "tool_calls": tool_calls,
                "response": response,
            },
        )

    async def _stream_via_openrouter_tool_compat(
        self,
        messages: list[dict[str, Any]],
        system: str,
        tools: list[Tool],
        max_tokens: int,
    ) -> AsyncIterator[StreamEvent]:
        """Fallback stream for OpenRouter models without native tool support."""
        from framework.llm.stream_events import (
            FinishEvent,
            StreamErrorEvent,
            TextDeltaEvent,
            TextEndEvent,
            ToolCallEvent,
        )

        logger.info(
            "[openrouter-tool-compat] Using compatibility mode for %s",
            self.model,
        )
        try:
            response = await self._acomplete_via_openrouter_tool_compat(
                messages=messages,
                system=system,
                tools=tools,
                max_tokens=max_tokens,
            )
        except Exception as e:
            yield StreamErrorEvent(error=str(e), recoverable=False)
            return

        raw_response = response.raw_response if isinstance(response.raw_response, dict) else {}
        tool_calls = raw_response.get("tool_calls", [])

        if response.content:
            yield TextDeltaEvent(content=response.content, snapshot=response.content)
            yield TextEndEvent(full_text=response.content)

        for tool_call in tool_calls:
            yield ToolCallEvent(
                tool_use_id=tool_call["id"],
                tool_name=tool_call["name"],
                tool_input=tool_call["input"],
            )

        yield FinishEvent(
            stop_reason=response.stop_reason,
            input_tokens=response.input_tokens,
            output_tokens=response.output_tokens,
            model=response.model,
        )

    async def _stream_via_nonstream_completion(
        self,
        messages: list[dict[str, Any]],
@@ -824,12 +1494,11 @@ class LiteLLMProvider(LLMProvider):
        tool_calls = msg.tool_calls or []

        for tc in tool_calls:
            parsed_args: Any
            args = tc.function.arguments if tc.function else ""
            try:
                parsed_args = json.loads(args) if args else {}
            except json.JSONDecodeError:
                parsed_args = {"_raw": args}
            parsed_args = self._parse_tool_call_arguments(
                args,
                tc.function.name if tc.function else "",
            )
            yield ToolCallEvent(
                tool_use_id=getattr(tc, "id", ""),
                tool_name=tc.function.name if tc.function else "",
@@ -888,7 +1557,20 @@ class LiteLLMProvider(LLMProvider):
                yield event
            return

        if tools and self._is_openrouter_model() and _is_openrouter_tool_compat_cached(self.model):
            async for event in self._stream_via_openrouter_tool_compat(
                messages=messages,
                system=system,
                tools=tools,
                max_tokens=max_tokens,
            ):
                yield event
            return

        full_messages: list[dict[str, Any]] = []
        if self._claude_code_oauth:
            billing = _claude_code_billing_header(messages)
            full_messages.append({"role": "system", "content": billing})
        if system:
            sys_msg: dict[str, Any] = {"role": "system", "content": system}
            if _model_supports_cache_control(self.model):
@@ -926,9 +1608,12 @@ class LiteLLMProvider(LLMProvider):
            "messages": full_messages,
            "max_tokens": max_tokens,
            "stream": True,
            "stream_options": {"include_usage": True},
            **self.extra_kwargs,
        }
        # stream_options is OpenAI-specific; Anthropic rejects it with 400.
        # Only include it for providers that support it.
        if not self._is_anthropic_model():
            kwargs["stream_options"] = {"include_usage": True}
        if self.api_key:
            kwargs["api_key"] = self.api_key
        if self.api_base:
@@ -1034,10 +1719,10 @@ class LiteLLMProvider(LLMProvider):
            if choice.finish_reason:
                stream_finish_reason = choice.finish_reason
        for _idx, tc_data in sorted(tool_calls_acc.items()):
            try:
                parsed_args = json.loads(tc_data["arguments"])
            except (json.JSONDecodeError, KeyError):
                parsed_args = {"_raw": tc_data.get("arguments", "")}
            parsed_args = self._parse_tool_call_arguments(
                tc_data.get("arguments", ""),
                tc_data.get("name", ""),
            )
            tail_events.append(
                ToolCallEvent(
                    tool_use_id=tc_data["id"],
@@ -1066,7 +1751,8 @@ class LiteLLMProvider(LLMProvider):
                else getattr(usage, "cache_read_input_tokens", 0) or 0
            )
            logger.debug(
                "[tokens] finish-chunk usage: input=%d output=%d cached=%d model=%s",
                "[tokens] finish-chunk usage: "
                "input=%d output=%d cached=%d model=%s",
                input_tokens,
                output_tokens,
                cached_tokens,
@@ -1217,6 +1903,16 @@ class LiteLLMProvider(LLMProvider):
                return

        except Exception as e:
            if self._should_use_openrouter_tool_compat(e, tools):
                _remember_openrouter_tool_compat_model(self.model)
                async for event in self._stream_via_openrouter_tool_compat(
                    messages=messages,
                    system=system,
                    tools=tools or [],
                    max_tokens=max_tokens,
                ):
                    yield event
                return
            if _is_stream_transient_error(e) and attempt < RATE_LIMIT_MAX_RETRIES:
                wait = _compute_retry_delay(attempt, exception=e)
                logger.warning(

@@ -45,6 +45,8 @@ class ToolResult:
    tool_use_id: str
    content: str
    is_error: bool = False
    image_content: list[dict[str, Any]] | None = None
    is_skill_content: bool = False  # AS-10: marks activated skill body, protected from pruning


class LLMProvider(ABC):

@@ -1,33 +1 @@
"""Framework-level worker monitoring package.

Provides the Worker Health Judge: a reusable secondary graph that attaches to
any worker agent runtime and monitors its execution health via periodic log
inspection. Emits structured EscalationTickets when degradation is detected.

Usage::

    from framework.monitoring import HEALTH_JUDGE_ENTRY_POINT, judge_goal, judge_graph
    from framework.tools.worker_monitoring_tools import register_worker_monitoring_tools

    # Register tools bound to the worker runtime's EventBus
    monitoring_registry = ToolRegistry()
    register_worker_monitoring_tools(monitoring_registry, worker_runtime._event_bus, storage_path)

    # Load judge as secondary graph on the worker runtime
    await worker_runtime.add_graph(
        graph_id="judge",
        graph=judge_graph,
        goal=judge_goal,
        entry_points={"health_check": HEALTH_JUDGE_ENTRY_POINT},
        storage_subpath="graphs/judge",
    )
"""

from .judge import HEALTH_JUDGE_ENTRY_POINT, judge_goal, judge_graph, judge_node

__all__ = [
    "HEALTH_JUDGE_ENTRY_POINT",
    "judge_goal",
    "judge_graph",
    "judge_node",
]
"""Framework-level worker monitoring package."""
|
||||
|
||||
@@ -1,258 +0,0 @@
|
||||
"""Worker Health Judge — framework-level reusable monitoring graph.
|
||||
|
||||
Attaches to any worker agent runtime as a secondary graph. Fires on a
|
||||
2-minute timer, reads the worker's session logs via ``get_worker_health_summary``,
|
||||
accumulates observations in a continuous conversation context, and emits a
|
||||
structured ``EscalationTicket`` when it detects a degradation pattern.
|
||||
|
||||
Usage::
|
||||
|
||||
from framework.monitoring import judge_graph, judge_goal, HEALTH_JUDGE_ENTRY_POINT
|
||||
from framework.tools.worker_monitoring_tools import register_worker_monitoring_tools
|
||||
|
||||
# Register tools bound to the worker runtime's event bus
|
||||
monitoring_registry = ToolRegistry()
|
||||
register_worker_monitoring_tools(
|
||||
monitoring_registry, worker_runtime._event_bus, storage_path
|
||||
)
|
||||
monitoring_tools = list(monitoring_registry.get_tools().values())
|
||||
monitoring_executor = monitoring_registry.get_executor()
|
||||
|
||||
# Load judge as secondary graph on the worker runtime
|
||||
await worker_runtime.add_graph(
|
||||
graph_id="judge",
|
||||
graph=judge_graph,
|
||||
goal=judge_goal,
|
||||
entry_points={"health_check": HEALTH_JUDGE_ENTRY_POINT},
|
||||
storage_subpath="graphs/judge",
|
||||
)
|
||||
|
||||
Design:
|
||||
- ``isolation_level="isolated"`` — the judge has its own memory, not
|
||||
polluting the worker's shared memory namespace.
|
||||
- ``conversation_mode="continuous"`` — the judge's conversation carries
|
||||
across timer ticks. The conversation IS the judge's memory. It tracks
|
||||
trends by referring to its own prior messages ("Last check I saw 47
|
||||
steps; now 52; 5 new steps, 3 RETRY").
|
||||
- No shared memory keys. No external state files.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from framework.graph import Constraint, Goal, NodeSpec, SuccessCriterion
|
||||
from framework.graph.edge import AsyncEntryPointSpec, GraphSpec
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Goal
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
judge_goal = Goal(
|
||||
id="worker-health-monitor",
|
||||
name="Worker Health Monitor",
|
||||
description=(
|
||||
"Periodically assess the health of the worker agent by reading its "
|
||||
"execution logs. Detect degradation patterns (excessive retries, "
|
||||
"stalls, doom loops) and emit structured EscalationTickets when the "
|
||||
"worker needs attention."
|
||||
),
|
||||
success_criteria=[
|
||||
SuccessCriterion(
|
||||
id="accurate-detection",
|
||||
description="Only escalates genuine degradation, not normal retry cycles",
|
||||
metric="false_positive_rate",
|
||||
target="low",
|
||||
weight=0.5,
|
||||
),
|
||||
SuccessCriterion(
|
||||
id="timely-detection",
|
||||
description="Detects genuine stalls within 2 timer ticks (≤4 minutes)",
|
||||
metric="detection_latency_minutes",
|
||||
target="<=4",
|
||||
weight=0.5,
|
||||
),
|
||||
],
|
||||
constraints=[
|
||||
Constraint(
|
||||
id="conservative-escalation",
|
||||
description=(
|
||||
"Do not escalate on a single bad verdict or a brief stall. "
|
||||
"Require clear patterns (10+ consecutive bad verdicts or 4+ minute stall) "
|
||||
"before creating a ticket."
|
||||
),
|
||||
constraint_type="hard",
|
||||
category="quality",
|
||||
),
|
||||
Constraint(
|
||||
id="complete-ticket",
|
||||
description=(
|
||||
"Every EscalationTicket must have all required fields filled. "
|
||||
"Do not emit partial or placeholder tickets."
|
||||
),
|
||||
constraint_type="hard",
|
||||
category="correctness",
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
# ---------------------------------------------------------------------------
# Node
# ---------------------------------------------------------------------------

judge_node = NodeSpec(
    id="judge",
    name="Worker Health Judge",
    description=(
        "Autonomous health monitor for worker agents. Reads execution logs "
        "on each timer tick, compares to prior observations (via conversation "
        "history), and emits a structured EscalationTicket when a genuine "
        "degradation pattern is detected."
    ),
    node_type="event_loop",
    client_facing=False,  # Autonomous monitor, not interactive
    max_node_visits=0,  # Unbounded — runs on every timer tick
    input_keys=[],
    output_keys=["health_verdict"],
    nullable_output_keys=["health_verdict"],
    success_criteria=(
        "A clear health verdict is produced each check: either 'healthy' with "
        "a brief observation, or a complete EscalationTicket is emitted via "
        "emit_escalation_ticket and health_verdict describes the issue."
    ),
    tools=[
        "get_worker_health_summary",
        "emit_escalation_ticket",
    ],
    system_prompt="""\
You are the Worker Health Judge. You run every 2 minutes alongside a worker \
agent to monitor its execution health.

# Your Role

You observe the worker's iteration patterns over time and escalate only when \
you see genuine degradation — not normal retry cycles. Your conversation history \
IS your memory. On each check, refer to your previous observations to track trends.

# Check Procedure

On each timer tick (every 2 minutes):

## Step 1: Read health snapshot
Call get_worker_health_summary() with no arguments to auto-discover the active \
session. This returns:
- worker_agent_id: the worker's agent name — use this for ticket identity fields
- worker_graph_id: the worker's primary graph ID — use this for ticket identity fields
- session_id: the session being monitored — use this for worker_session_id in tickets
- total_steps: how many log steps have been recorded
- recent_verdicts: list of recent ACCEPT/RETRY/CONTINUE verdicts
- steps_since_last_accept: consecutive non-ACCEPT steps
- stall_minutes: wall-clock since last step (null if active)
- evidence_snippet: recent LLM output

## Step 2: Compare to prior check
Look at your conversation history. What was total_steps last time?
- If total_steps is UNCHANGED from prior check AND prior check was also unchanged:
  → STALL confirmed (worker has produced no new iterations in 4+ minutes).
  → Escalate with severity="high" or "critical" depending on stall duration.
- If total_steps increased: worker is making progress. Examine verdicts.

## Step 3: Analyze verdict pattern
- Healthy: Mix of ACCEPT and RETRY, steps_since_last_accept < 5. No action.
- Warning: steps_since_last_accept is 5-9. Note it, no escalation yet.
- Degraded: steps_since_last_accept >= 10. Examine evidence_snippet.
  - If evidence shows the agent is making real progress (complex reasoning,
    exploring solutions, productive tool use): may be a hard problem. Note it.
  - If evidence shows a loop (same error, same tool call, no new information):
    → Escalate with severity="medium" or "high".
- Critical: steps_since_last_accept >= 20, OR stall_minutes >= 4.
  → Escalate with severity="critical".

## Step 4: Decide

### If healthy:
set_output("health_verdict", "healthy: <brief observation>")
Done.

### If escalating:
Build an EscalationTicket JSON string with ALL required fields:
{
  "worker_agent_id": "<worker_agent_id from get_worker_health_summary>",
  "worker_session_id": "<session_id from get_worker_health_summary>",
  "worker_node_id": "<worker_graph_id from get_worker_health_summary>",
  "worker_graph_id": "<worker_graph_id from get_worker_health_summary>",
  "severity": "<low|medium|high|critical>",
  "cause": "<what you observed — concrete, specific>",
  "judge_reasoning": "<why you decided to escalate, not just dismiss>",
  "suggested_action": "<what you recommend: restart, human review, etc.>",
  "recent_verdicts": [<list from get_worker_health_summary>],
  "total_steps_checked": <int>,
  "steps_since_last_accept": <int>,
  "stall_minutes": <float or null>,
  "evidence_snippet": "<from get_worker_health_summary>"
}

Call: emit_escalation_ticket(ticket_json=<the JSON string above>)
Then: set_output("health_verdict", "escalated: <one-line summary>")

# Severity Guide

- low: Mild concern, worth noting. 5-9 consecutive bad verdicts.
- medium: Clear degradation pattern. 10-15 bad verdicts or brief stall (1-2 min).
- high: Serious issue. 15+ bad verdicts or stall 2-4 minutes or clear doom loop.
- critical: Worker is definitively stuck. 20+ bad verdicts or stall > 4 minutes.

# Conservative Bias

You MUST resist the urge to escalate prematurely. Worker agents naturally retry.
A node may legitimately need 5-8 retries before succeeding. Do not escalate unless:
1. The pattern is clear and sustained across your observation window, AND
2. The evidence shows no genuine progress

One missed escalation is less costly than two false alarms. The Queen will filter \
further. But do not be passive — genuine stalls and doom loops must be caught.

# Rules
- Never escalate on the FIRST check unless stall_minutes > 4
- Always call get_worker_health_summary FIRST before deciding anything
- All ticket fields are REQUIRED — do not submit partial tickets
- After any emit_escalation_ticket call, always set_output to complete the check
""",
)

# ---------------------------------------------------------------------------
# Entry Point
# ---------------------------------------------------------------------------

HEALTH_JUDGE_ENTRY_POINT = AsyncEntryPointSpec(
    id="health_check",
    name="Worker Health Check",
    entry_node="judge",
    trigger_type="timer",
    trigger_config={
        "interval_minutes": 2,
        "run_immediately": True,  # Fire immediately to establish a baseline
    },
    isolation_level="isolated",  # Own memory namespace, not polluting worker's
)

# ---------------------------------------------------------------------------
# Graph
# ---------------------------------------------------------------------------

judge_graph = GraphSpec(
    id="judge-graph",
    goal_id=judge_goal.id,
    version="1.0.0",
    entry_node="judge",
    entry_points={"health_check": "judge"},
    terminal_nodes=["judge"],  # Judge node can terminate after each check
    pause_nodes=[],
    nodes=[judge_node],
    edges=[],
    conversation_mode="continuous",  # Conversation persists across timer ticks
    async_entry_points=[HEALTH_JUDGE_ENTRY_POINT],
    loop_config={
        "max_iterations": 10,  # One check shouldn't take many turns
        "max_tool_calls_per_turn": 3,  # get_summary + optionally emit_ticket
        "max_context_tokens": 16000,  # Compact — judge only needs recent context
    },
)
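
For reference, a ticket that satisfies the schema in the system prompt above might look like this; all field values are illustrative, not from a real run:

```python
import json

# Illustrative only: schema from the judge prompt above, values hypothetical.
ticket = {
    "worker_agent_id": "sales-agent",
    "worker_session_id": "sess-0042",
    "worker_node_id": "worker-graph",
    "worker_graph_id": "worker-graph",
    "severity": "high",
    "cause": "12 consecutive RETRY verdicts with the same tool error repeating",
    "judge_reasoning": "total_steps rose 47 -> 52 but no ACCEPT; evidence shows no new information",
    "suggested_action": "restart the worker node and review the failing tool",
    "recent_verdicts": ["RETRY", "RETRY", "RETRY", "RETRY", "RETRY"],
    "total_steps_checked": 52,
    "steps_since_last_accept": 12,
    "stall_minutes": None,
    "evidence_snippet": "ToolError: connection refused (repeated)",
}
ticket_json = json.dumps(ticket)
# The judge would then call emit_escalation_ticket(ticket_json=ticket_json)
# and set_output("health_verdict", "escalated: worker stuck in retry loop").
```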
@@ -83,18 +83,18 @@ configure_logging(level="INFO", format="auto")
- Compact single-line format (easy to stream/parse)
- All trace context fields included automatically

### Human-Readable Format (Development)
### Human-Readable Format (Development / Terminal)

```
[INFO ] [trace:12345678 | exec:a1b2c3d4 | agent:sales-agent] Starting agent execution
[INFO ] [trace:12345678 | exec:a1b2c3d4 | agent:sales-agent] Processing input data [node_id:input-processor]
[INFO ] [trace:12345678 | exec:a1b2c3d4 | agent:sales-agent] LLM call completed [latency_ms:1250] [tokens_used:450]
[INFO ] [agent:sales-agent] Starting agent execution
[INFO ] [agent:sales-agent] Processing input data [node_id:input-processor]
[INFO ] [agent:sales-agent] LLM call completed [latency_ms:1250] [tokens_used:450]
```

**Features:**
- Color-coded log levels
- Shortened IDs for readability (first 8 chars)
- Context prefix shows trace correlation
- Terminal output omits trace_id and execution_id for readability
- For full traceability (e.g. debugging), use `ENV=production` to get JSON file logs with trace_id and execution_id

## Trace Context Fields

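A minimal usage sketch, assuming the `configure_logging` entry point shown in the hunk header above and the `extra=...` passthrough added to `StructuredFormatter` below:

```python
import logging

from framework.observability import configure_logging

# format="auto" is taken from the hunk header above; per the docs, JSON is used
# in production (ENV=production) and the human-readable form on a terminal.
configure_logging(level="INFO", format="auto")

logger = logging.getLogger(__name__)
# Arbitrary extra fields survive into the JSON output
# (see the StructuredFormatter change further down).
logger.info("LLM call completed", extra={"latency_ms": 1250, "tokens_used": 450})
```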
@@ -4,8 +4,9 @@ Structured logging with automatic trace context propagation.
Key Features:
- Zero developer friction: Standard logger.info() calls get automatic context
- ContextVar-based propagation: Thread-safe and async-safe
- Dual output modes: JSON for production, human-readable for development
- Correlation IDs: trace_id follows entire request flow automatically
- Dual output modes: JSON for production (full trace_id/execution_id), human-readable for terminal
- Terminal omits trace_id/execution_id for readability
- Use ENV=production for file logs with full traceability

Architecture:
    Runtime.start_run() → Generates trace_id, sets context once
@@ -29,6 +30,8 @@ from typing import Any
# ContextVar is thread-safe and async-safe - perfect for concurrent agent execution
trace_context: ContextVar[dict[str, Any] | None] = ContextVar("trace_context", default=None)

_STANDARD_LOG_RECORD_FIELDS = set(logging.makeLogRecord({}).__dict__)

# ANSI escape code pattern (matches \033[...m or \x1b[...m)
ANSI_ESCAPE_PATTERN = re.compile(r"\x1b\[[0-9;]*m|\033\[[0-9;]*m")
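
To illustrate the propagation the docstring describes, a toy sketch of the ContextVar pattern (names mirror the module; in practice the runtime sets the context at start_run(), not user code):

```python
from contextvars import ContextVar
from typing import Any

trace_context: ContextVar[dict[str, Any] | None] = ContextVar("trace_context", default=None)

# The runtime sets the context once; formatters read it on every log call.
trace_context.set({"trace_id": "12345678abcd", "execution_id": "exec-a1b2c3d4", "agent_id": "sales-agent"})

ctx = trace_context.get() or {}
print(ctx["agent_id"])  # -> sales-agent, following the current execution context, async tasks included
```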
@@ -91,6 +94,14 @@ class StructuredFormatter(logging.Formatter):
        if model is not None:
            log_entry["model"] = model

        # Preserve arbitrary structured fields passed via ``extra=...``.
        for key, value in record.__dict__.items():
            if key in _STANDARD_LOG_RECORD_FIELDS or key.startswith("_"):
                continue
            if key in log_entry:
                continue
            log_entry[key] = value

        # Add exception info if present (strip ANSI codes from exception text too)
        if record.exc_info:
            exception_text = self.formatException(record.exc_info)
@@ -101,10 +112,11 @@

class HumanReadableFormatter(logging.Formatter):
    """
    Human-readable formatter for development.
    Human-readable formatter for development (terminal output).

    Provides colorized logs with trace context for local debugging.
    Includes trace_id prefix for correlation - AUTOMATIC!
    Provides colorized logs for local debugging. Omits trace_id and execution_id
    from the terminal for readability; use ENV=production (JSON file logs) when
    traceability is needed.
    """

    COLORS = {
@@ -118,18 +130,11 @@ class HumanReadableFormatter(logging.Formatter):

    def format(self, record: logging.LogRecord) -> str:
        """Format log record as human-readable string."""
        # Get trace context - AUTOMATIC!
        # Get trace context; omit trace_id and execution_id in terminal for readability
        context = trace_context.get() or {}
        trace_id = context.get("trace_id", "")
        execution_id = context.get("execution_id", "")
        agent_id = context.get("agent_id", "")

        # Build context prefix
        prefix_parts = []
        if trace_id:
            prefix_parts.append(f"trace:{trace_id[:8]}")
        if execution_id:
            prefix_parts.append(f"exec:{execution_id[-8:]}")
        if agent_id:
            prefix_parts.append(f"agent:{agent_id}")

@@ -211,6 +216,15 @@ def configure_logging(
    root_logger.addHandler(handler)
    root_logger.setLevel(level.upper())

    # Suppress noisy LiteLLM INFO logs (model/provider line + Provider List URL
    # printed on every single completion call). Warnings and errors still show.
    # Honour LITELLM_LOG env var so users can opt-in to debug output.
    _litellm_level = os.getenv("LITELLM_LOG", "").upper()
    if _litellm_level and hasattr(logging, _litellm_level):
        logging.getLogger("LiteLLM").setLevel(getattr(logging, _litellm_level))
    else:
        logging.getLogger("LiteLLM").setLevel(logging.WARNING)

    # When in JSON mode, configure known third-party loggers to use JSON formatter
    # This ensures libraries like LiteLLM, httpcore also output clean JSON
    if format == "json":

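A hedged sketch of opting back in to LiteLLM debug output via the env var this new code honours; the variable must be set before `configure_logging` runs, since the level is read once at configuration time:

```python
import os

# Opt back in to LiteLLM debug logs (read once by configure_logging below).
os.environ["LITELLM_LOG"] = "DEBUG"

from framework.observability import configure_logging

configure_logging(level="INFO", format="auto")
```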
@@ -1,5 +1,6 @@
"""Agent Runner - load and run exported agents."""

from framework.runner.mcp_registry import MCPRegistry
from framework.runner.orchestrator import AgentOrchestrator
from framework.runner.protocol import (
    AgentMessage,
@@ -17,6 +18,7 @@ __all__ = [
    "AgentInfo",
    "ValidationResult",
    "ToolRegistry",
    "MCPRegistry",
    "tool",
    # Multi-agent
    "AgentOrchestrator",

@@ -243,12 +243,8 @@ def register_commands(subparsers: argparse._SubParsersAction) -> None:
        action="store_true",
        help="Open dashboard in browser after server starts",
    )
    serve_parser.add_argument(
        "--verbose", "-v", action="store_true", help="Enable INFO log level"
    )
    serve_parser.add_argument(
        "--debug", action="store_true", help="Enable DEBUG log level"
    )
    serve_parser.add_argument("--verbose", "-v", action="store_true", help="Enable INFO log level")
    serve_parser.add_argument("--debug", action="store_true", help="Enable DEBUG log level")
    serve_parser.set_defaults(func=cmd_serve)

    # open command (serve + auto-open browser)
@@ -286,12 +282,8 @@ def register_commands(subparsers: argparse._SubParsersAction) -> None:
        default=None,
        help="LLM model for preloaded agents",
    )
    open_parser.add_argument(
        "--verbose", "-v", action="store_true", help="Enable INFO log level"
    )
    open_parser.add_argument(
        "--debug", action="store_true", help="Enable DEBUG log level"
    )
    open_parser.add_argument("--verbose", "-v", action="store_true", help="Enable INFO log level")
    open_parser.add_argument("--debug", action="store_true", help="Enable DEBUG log level")
    open_parser.set_defaults(func=cmd_open)

@@ -387,12 +379,10 @@ def _prompt_before_start(agent_path: str, runner, model: str | None = None):

def cmd_run(args: argparse.Namespace) -> int:
    """Run an exported agent."""
    import logging

    from framework.credentials.models import CredentialError
    from framework.runner import AgentRunner

    from framework.observability import configure_logging
    from framework.runner import AgentRunner

    # Set logging level (quiet by default for cleaner output)
    if args.quiet:
@@ -932,12 +922,10 @@ def _format_natural_language_to_json(

def cmd_shell(args: argparse.Namespace) -> int:
    """Start an interactive agent session."""
    import logging

    from framework.credentials.models import CredentialError
    from framework.runner import AgentRunner

    from framework.observability import configure_logging
    from framework.runner import AgentRunner

    configure_logging(level="INFO")

@@ -1573,6 +1561,22 @@ def _open_browser(url: str) -> None:
        pass  # Best-effort — don't crash if browser can't open


def _format_subprocess_output(output: str | bytes | None, limit: int = 2000) -> str:
    """Return subprocess output as trimmed text safe for console logging."""
    if not output:
        return ""

    if isinstance(output, bytes):
        text = output.decode(errors="replace")
    else:
        text = output

    text = text.strip()
    if len(text) <= limit:
        return text
    return text[-limit:]


def _build_frontend() -> bool:
    """Build the frontend if source is newer than dist. Returns True if dist exists."""
    import subprocess
@@ -1608,18 +1612,25 @@ def _build_frontend() -> bool:

    # Need to build
    print("Building frontend...")
    npm_cmd = "npm.cmd" if sys.platform == "win32" else "npm"
    try:
        # Incremental tsc caches can drift across branch changes and block builds.
        for cache_file in frontend_dir.glob("tsconfig*.tsbuildinfo"):
            cache_file.unlink(missing_ok=True)

        # Ensure deps are installed
        subprocess.run(
            ["npm", "install", "--no-fund", "--no-audit"],
            [npm_cmd, "install", "--no-fund", "--no-audit"],
            encoding="utf-8",
            errors="replace",
            cwd=frontend_dir,
            check=True,
            capture_output=True,
        )
        subprocess.run(
            ["npm", "run", "build"],
            [npm_cmd, "run", "build"],
            encoding="utf-8",
            errors="replace",
            cwd=frontend_dir,
            check=True,
            capture_output=True,
@@ -1630,22 +1641,26 @@ def _build_frontend() -> bool:
        print("Node.js not found — skipping frontend build.")
        return dist_dir.is_dir()
    except subprocess.CalledProcessError as exc:
        stderr = exc.stderr.decode(errors="replace") if exc.stderr else ""
        print(f"Frontend build failed: {stderr[:500]}")
        stdout = _format_subprocess_output(exc.stdout)
        stderr = _format_subprocess_output(exc.stderr)
        cmd = " ".join(exc.cmd) if isinstance(exc.cmd, (list, tuple)) else str(exc.cmd)
        details = "\n".join(part for part in [stdout, stderr] if part).strip()
        if details:
            print(f"Frontend build failed while running {cmd}:\n{details}")
        else:
            print(f"Frontend build failed while running {cmd} (exit {exc.returncode}).")
        return dist_dir.is_dir()


def cmd_serve(args: argparse.Namespace) -> int:
    """Start the HTTP API server."""
    import logging

    from aiohttp import web

    _build_frontend()

    from framework.server.app import create_app

    from framework.observability import configure_logging
    from framework.server.app import create_app

    if getattr(args, "debug", False):
        configure_logging(level="DEBUG")

@@ -1,7 +1,7 @@
"""MCP Client for connecting to Model Context Protocol servers.

This module provides a client for connecting to MCP servers and invoking their tools.
Supports both STDIO and HTTP transports using the official MCP Python SDK.
Supports STDIO, HTTP, UNIX socket, and SSE transports using the official MCP Python SDK.
"""

import asyncio
@@ -22,7 +22,7 @@ class MCPServerConfig:
    """Configuration for an MCP server connection."""

    name: str
    transport: Literal["stdio", "http"]
    transport: Literal["stdio", "http", "unix", "sse"]

    # For STDIO transport
    command: str | None = None
@@ -33,6 +33,7 @@ class MCPServerConfig:
    # For HTTP transport
    url: str | None = None
    headers: dict[str, str] = field(default_factory=dict)
    socket_path: str | None = None

    # Optional metadata
    description: str = ""
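
Example configs for the two new transports, using only the fields shown in the dataclass above (URLs and paths are hypothetical):

```python
from framework.runner.mcp_client import MCPClient, MCPServerConfig

# SSE: only a URL is needed.
sse_cfg = MCPServerConfig(name="docs", transport="sse", url="http://localhost:8900/sse")

# UNIX socket: HTTP semantics over a local socket, so both url and socket_path apply.
unix_cfg = MCPServerConfig(
    name="local-tools",
    transport="unix",
    url="http://localhost",
    socket_path="/tmp/mcp.sock",
)

client = MCPClient(sse_cfg)
client.connect()
print(client.list_tools())
```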
@@ -52,7 +53,7 @@ class MCPClient:
    """
    Client for communicating with MCP servers.

    Supports both STDIO and HTTP transports using the official MCP SDK.
    Supports STDIO, HTTP, UNIX socket, and SSE transports using the official MCP SDK.
    Manages the connection lifecycle and provides methods to list and invoke tools.
    """

@@ -68,6 +69,7 @@ class MCPClient:
        self._read_stream = None
        self._write_stream = None
        self._stdio_context = None  # Context manager for stdio_client
        self._sse_context = None  # Context manager for sse_client
        self._errlog_handle = None  # Track errlog file handle for cleanup
        self._http_client: httpx.Client | None = None
        self._tools: dict[str, MCPTool] = {}
@@ -141,6 +143,10 @@ class MCPClient:
            self._connect_stdio()
        elif self.config.transport == "http":
            self._connect_http()
        elif self.config.transport == "unix":
            self._connect_unix()
        elif self.config.transport == "sse":
            self._connect_sse()
        else:
            raise ValueError(f"Unsupported transport: {self.config.transport}")

@@ -266,10 +272,94 @@ class MCPClient:
            logger.warning(f"Health check failed for MCP server '{self.config.name}': {e}")
            # Continue anyway, server might not have health endpoint

    def _connect_unix(self) -> None:
        """Connect to MCP server via UNIX domain socket transport."""
        if not self.config.url:
            raise ValueError("url is required for UNIX transport")
        if not self.config.socket_path:
            raise ValueError("socket_path is required for UNIX transport")

        self._http_client = httpx.Client(
            base_url=self.config.url,
            headers=self.config.headers,
            timeout=30.0,
            transport=httpx.HTTPTransport(uds=self.config.socket_path),
        )

        try:
            response = self._http_client.get("/health")
            response.raise_for_status()
            logger.info(
                "Connected to MCP server '%s' via UNIX socket at %s",
                self.config.name,
                self.config.socket_path,
            )
        except Exception as e:
            logger.warning(f"Health check failed for MCP server '{self.config.name}': {e}")
            # Continue anyway, server might not have health endpoint

    def _connect_sse(self) -> None:
        """Connect to MCP server via SSE transport using MCP SDK with persistent session."""
        if not self.config.url:
            raise ValueError("url is required for SSE transport")

        try:
            loop_started = threading.Event()
            connection_ready = threading.Event()
            connection_error = []

            def run_event_loop():
                """Run event loop in background thread."""
                self._loop = asyncio.new_event_loop()
                asyncio.set_event_loop(self._loop)
                loop_started.set()

                async def init_connection():
                    try:
                        from mcp import ClientSession
                        from mcp.client.sse import sse_client

                        self._sse_context = sse_client(
                            self.config.url,
                            headers=self.config.headers,
                            timeout=30.0,
                        )
                        (
                            self._read_stream,
                            self._write_stream,
                        ) = await self._sse_context.__aenter__()

                        self._session = ClientSession(self._read_stream, self._write_stream)
                        await self._session.__aenter__()
                        await self._session.initialize()

                        connection_ready.set()
                    except Exception as e:
                        connection_error.append(e)
                        connection_ready.set()

                self._loop.create_task(init_connection())
                self._loop.run_forever()

            self._loop_thread = threading.Thread(target=run_event_loop, daemon=True)
            self._loop_thread.start()

            loop_started.wait(timeout=5)
            if not loop_started.is_set():
                raise RuntimeError("Event loop failed to start")

            connection_ready.wait(timeout=10)
            if connection_error:
                raise connection_error[0]

            logger.info(f"Connected to MCP server '{self.config.name}' via SSE")
        except Exception as e:
            raise RuntimeError(f"Failed to connect to MCP server: {e}") from e

    def _discover_tools(self) -> None:
        """Discover available tools from the MCP server."""
        try:
            if self.config.transport == "stdio":
            if self.config.transport in {"stdio", "sse"}:
                tools_list = self._run_async(self._list_tools_stdio_async())
            else:
                tools_list = self._list_tools_http()
@@ -371,9 +461,37 @@ class MCPClient:
        if self.config.transport == "stdio":
            with self._stdio_call_lock:
                return self._run_async(self._call_tool_stdio_async(tool_name, arguments))
        elif self.config.transport == "sse":
            return self._call_tool_with_retry(
                lambda: self._run_async(self._call_tool_stdio_async(tool_name, arguments))
            )
        elif self.config.transport == "unix":
            return self._call_tool_with_retry(lambda: self._call_tool_http(tool_name, arguments))
        else:
            return self._call_tool_http(tool_name, arguments)

    def _call_tool_with_retry(self, call: Any) -> Any:
        """Retry transient MCP transport failures once after reconnecting."""
        if self.config.transport == "stdio":
            return call()

        if self.config.transport not in {"unix", "sse"}:
            return call()

        try:
            return call()
        except (httpx.ConnectError, httpx.ReadTimeout) as original_error:
            logger.warning(
                "Retrying MCP tool call after transport error from '%s': %s",
                self.config.name,
                original_error,
            )
            self._reconnect()
            try:
                return call()
            except (httpx.ConnectError, httpx.ReadTimeout) as retry_error:
                raise original_error from retry_error

    async def _call_tool_stdio_async(self, tool_name: str, arguments: dict[str, Any]) -> Any:
        """Call tool via STDIO protocol using persistent session."""
        if not self._session:
@@ -391,17 +509,30 @@
                    error_text = content_item.text
            raise RuntimeError(f"MCP tool '{tool_name}' failed: {error_text}")

        # Extract content
        # Extract content — preserve image blocks alongside text
        if result.content:
            # MCP returns content as a list of content items
            if len(result.content) > 0:
                content_item = result.content[0]
                # Check if it's a text content item
                if hasattr(content_item, "text"):
                    return content_item.text
                elif hasattr(content_item, "data"):
                    return content_item.data
            return result.content
            text_parts: list[str] = []
            image_parts: list[dict[str, Any]] = []
            for item in result.content:
                if hasattr(item, "text"):
                    text_parts.append(item.text)
                elif hasattr(item, "data") and hasattr(item, "mimeType"):
                    # MCP ImageContent — preserve as structured image block
                    image_parts.append(
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:{item.mimeType};base64,{item.data}",
                            },
                        }
                    )
                elif hasattr(item, "data"):
                    text_parts.append(str(item.data))

            text = "\n".join(text_parts) if text_parts else ""
            if image_parts:
                return {"_text": text, "_images": image_parts}
            return text if text else None

        return None
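
A consumer sketch for the new return shape, reusing the client from the earlier sketch; the public wrapper name `call_tool` is assumed from the dispatch hunk above, and the tool name is hypothetical:

```python
# Assumes a connected MCPClient instance `client` and a hypothetical tool.
result = client.call_tool("take_screenshot", {"url": "https://example.com"})

if isinstance(result, dict) and "_images" in result:
    text, images = result["_text"], result["_images"]
    # images are OpenAI-style image_url blocks, ready to attach to an LLM message
else:
    text = result  # plain text, or None when the server returned nothing
```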
@@ -433,18 +564,24 @@
        except Exception as e:
            raise RuntimeError(f"Failed to call tool via HTTP: {e}") from e

    def _reconnect(self) -> None:
        """Reconnect to the configured MCP server."""
        logger.info(f"Reconnecting to MCP server '{self.config.name}'...")
        self.disconnect()
        self.connect()

    _CLEANUP_TIMEOUT = 10
    _THREAD_JOIN_TIMEOUT = 12

    async def _cleanup_stdio_async(self) -> None:
        """Async cleanup for STDIO session and context managers.
        """Async cleanup for persistent MCP session and context managers.

        Cleanup order is critical:
        - The session must be closed BEFORE the stdio_context because the session
          depends on the streams provided by stdio_context.
        - This mirrors the initialization order in _connect_stdio(), where
          stdio_context is entered first (providing streams), then the session is
          created with those streams and entered.
        - The session must be closed BEFORE the transport context manager because the
          session depends on the streams provided by that context.
        - This mirrors the initialization order in _connect_stdio() / _connect_sse(),
          where the transport context is entered first (providing streams), then the
          session is created with those streams and entered.
        - Do not change this ordering without carefully considering these dependencies.
        """
        # First: close session (depends on stdio_context streams)
@@ -477,6 +614,16 @@
        finally:
            self._stdio_context = None

        try:
            if self._sse_context:
                await self._sse_context.__aexit__(None, None, None)
        except asyncio.CancelledError:
            logger.debug("SSE context cleanup was cancelled; proceeding with best-effort shutdown")
        except Exception as e:
            logger.warning(f"Error closing SSE context: {e}")
        finally:
            self._sse_context = None

        # Third: close errlog file handle if we opened one
        if self._errlog_handle is not None:
            try:
@@ -552,6 +699,7 @@
        # Setting None to None is safe and ensures clean state.
        self._session = None
        self._stdio_context = None
        self._sse_context = None
        self._read_stream = None
        self._write_stream = None
        self._loop = None

@@ -0,0 +1,409 @@
"""Shared MCP client connection management."""

import logging
import threading

import httpx

from framework.runner.mcp_client import MCPClient, MCPServerConfig

logger = logging.getLogger(__name__)

_TRANSITION_TIMEOUT = 30.0


class MCPConnectionManager:
    """Process-wide MCP client pool keyed by server name."""

    _instance = None
    _lock = threading.Lock()

    def __init__(self) -> None:
        self._pool: dict[str, MCPClient] = {}
        self._refcounts: dict[str, int] = {}
        self._configs: dict[str, MCPServerConfig] = {}
        self._pool_lock = threading.Lock()
        self._transitions: dict[str, threading.Event] = {}

    @classmethod
    def get_instance(cls) -> "MCPConnectionManager":
        """Return the process-level singleton instance."""
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = cls()
        return cls._instance

    @staticmethod
    def _is_connected(client: MCPClient | None) -> bool:
        return bool(client and getattr(client, "_connected", False))

    def has_connection(self, server_name: str) -> bool:
        """Return True when a live pooled connection exists for ``server_name``."""
        with self._pool_lock:
            return self._is_connected(self._pool.get(server_name))

    def acquire(self, config: MCPServerConfig) -> MCPClient:
        """Get or create a shared connection and increment its refcount."""
        server_name = config.name

        while True:
            should_connect = False
            transition_event: threading.Event | None = None

            with self._pool_lock:
                client = self._pool.get(server_name)
                if self._is_connected(client) and server_name not in self._transitions:
                    new_refcount = self._refcounts.get(server_name, 0) + 1
                    self._refcounts[server_name] = new_refcount
                    self._configs[server_name] = config
                    logger.debug(
                        "Reusing pooled connection for MCP server '%s' (refcount=%d)",
                        server_name,
                        new_refcount,
                    )
                    return client

                transition_event = self._transitions.get(server_name)
                if transition_event is None:
                    transition_event = threading.Event()
                    self._transitions[server_name] = transition_event
                    self._configs[server_name] = config
                    should_connect = True

            if not should_connect:
                if not transition_event.wait(timeout=_TRANSITION_TIMEOUT):
                    logger.warning(
                        "Timed out waiting for transition on MCP server '%s', "
                        "forcing cleanup and retrying",
                        server_name,
                    )
                    with self._pool_lock:
                        stuck = self._transitions.get(server_name)
                        if stuck is transition_event:
                            self._transitions.pop(server_name, None)
                            transition_event.set()
                continue

            logger.info("Connecting to MCP server '%s'", server_name)
            client = MCPClient(config)
            try:
                client.connect()
            except Exception:
                logger.warning(
                    "Failed to connect to MCP server '%s'",
                    server_name,
                    exc_info=True,
                )
                with self._pool_lock:
                    current = self._transitions.get(server_name)
                    if current is transition_event:
                        self._transitions.pop(server_name, None)
                        if (
                            server_name not in self._pool
                            and self._refcounts.get(server_name, 0) <= 0
                        ):
                            self._configs.pop(server_name, None)
                transition_event.set()
                raise

            with self._pool_lock:
                current = self._transitions.get(server_name)
                if current is transition_event:
                    self._pool[server_name] = client
                    self._refcounts[server_name] = self._refcounts.get(server_name, 0) + 1
                    self._configs[server_name] = config
                    self._transitions.pop(server_name, None)
                    transition_event.set()
                    logger.info(
                        "Connected to MCP server '%s' (refcount=1)",
                        server_name,
                    )
                    return client

            # Lost the transition race, clean up and retry
            try:
                client.disconnect()
            except Exception:
                logger.debug(
                    "Error disconnecting stale client for '%s'",
                    server_name,
                    exc_info=True,
                )

    def release(self, server_name: str) -> None:
        """Decrement refcount and disconnect when the last user releases."""
        while True:
            disconnect_client: MCPClient | None = None
            transition_event: threading.Event | None = None
            should_disconnect = False

            with self._pool_lock:
                transition_event = self._transitions.get(server_name)
                if transition_event is None:
                    refcount = self._refcounts.get(server_name, 0)
                    if refcount <= 0:
                        return
                    if refcount > 1:
                        self._refcounts[server_name] = refcount - 1
                        logger.debug(
                            "Released MCP server '%s' (refcount=%d)",
                            server_name,
                            refcount - 1,
                        )
                        return

                    disconnect_client = self._pool.pop(server_name, None)
                    self._refcounts.pop(server_name, None)
                    self._configs.pop(server_name, None)
                    transition_event = threading.Event()
                    self._transitions[server_name] = transition_event
                    should_disconnect = True

            if not should_disconnect:
                if not transition_event.wait(timeout=_TRANSITION_TIMEOUT):
                    logger.warning(
                        "Timed out waiting for transition on '%s' during release, forcing cleanup",
                        server_name,
                    )
                    with self._pool_lock:
                        stuck = self._transitions.get(server_name)
                        if stuck is transition_event:
                            self._transitions.pop(server_name, None)
                            transition_event.set()
                continue

            try:
                if disconnect_client is not None:
                    disconnect_client.disconnect()
                    logger.info(
                        "Disconnected MCP server '%s' (last reference released)",
                        server_name,
                    )
            except Exception:
                logger.warning(
                    "Error disconnecting MCP server '%s' during release",
                    server_name,
                    exc_info=True,
                )
            finally:
                with self._pool_lock:
                    current = self._transitions.get(server_name)
                    if current is transition_event:
                        self._transitions.pop(server_name, None)
                    transition_event.set()
                return

    def health_check(self, server_name: str) -> bool:
        """Return True when the pooled connection appears healthy."""
        while True:
            with self._pool_lock:
                transition_event = self._transitions.get(server_name)
                if transition_event is None:
                    client = self._pool.get(server_name)
                    config = self._configs.get(server_name)
                    break

            if not transition_event.wait(timeout=_TRANSITION_TIMEOUT):
                logger.warning(
                    "Timed out waiting for transition on '%s' during health check",
                    server_name,
                )
                return False

        if client is None or config is None:
            return False

        try:
            match config.transport:
                case "stdio":
                    client.list_tools()
                    return True
                case "http":
                    if not config.url:
                        return False
                    with httpx.Client(
                        base_url=config.url,
                        headers=config.headers,
                        timeout=5.0,
                    ) as http_client:
                        response = http_client.get("/health")
                        response.raise_for_status()
                    return True
                case "sse":
                    client.list_tools()
                    return True
                case "unix":
                    if not config.socket_path:
                        return False
                    with httpx.Client(
                        base_url=config.url or "http://localhost",
                        headers=config.headers,
                        timeout=5.0,
                        transport=httpx.HTTPTransport(uds=config.socket_path),
                    ) as http_client:
                        response = http_client.get("/health")
                        response.raise_for_status()
                    return True
                case _:
                    logger.warning(
                        "Unknown transport '%s' for health check on '%s'",
                        config.transport,
                        server_name,
                    )
                    return False
        except Exception:
            logger.debug(
                "Health check failed for MCP server '%s'",
                server_name,
                exc_info=True,
            )
            return False

    def reconnect(self, server_name: str) -> MCPClient:
        """Force a disconnect and replace the pooled client with a fresh one."""
        while True:
            transition_event: threading.Event | None = None
            old_client: MCPClient | None = None

            with self._pool_lock:
                transition_event = self._transitions.get(server_name)
                if transition_event is None:
                    config = self._configs.get(server_name)
                    if config is None:
                        raise KeyError(f"Unknown MCP server: {server_name}")
                    old_client = self._pool.get(server_name)
                    transition_event = threading.Event()
                    self._transitions[server_name] = transition_event
                    break

            if not transition_event.wait(timeout=_TRANSITION_TIMEOUT):
                logger.warning(
                    "Timed out waiting for transition on '%s' during reconnect, forcing cleanup",
                    server_name,
                )
                with self._pool_lock:
                    stuck = self._transitions.get(server_name)
                    if stuck is transition_event:
                        self._transitions.pop(server_name, None)
                        transition_event.set()

        # Disconnect old client safely
        if old_client is not None:
            try:
                old_client.disconnect()
                logger.info("Disconnected old client for '%s'", server_name)
            except Exception:
                logger.warning(
                    "Error disconnecting old client for '%s' during reconnect",
                    server_name,
                    exc_info=True,
                )

        logger.info("Reconnecting MCP server '%s'", server_name)
        new_client = MCPClient(config)
        try:
            new_client.connect()
        except Exception:
            with self._pool_lock:
                current = self._transitions.get(server_name)
                if current is transition_event:
                    self._pool.pop(server_name, None)
                    self._transitions.pop(server_name, None)
            transition_event.set()
            raise

        with self._pool_lock:
            current = self._transitions.get(server_name)
            if current is transition_event:
                current_refcount = self._refcounts.get(server_name, 0)
                if current_refcount <= 0:
                    # All holders released during reconnect. Discard the
                    # new client instead of creating a phantom reference.
                    # Caller should acquire() fresh if needed.
                    self._transitions.pop(server_name, None)
                    transition_event.set()
                    logger.info(
                        "Reconnected MCP server '%s' but refcount dropped to 0, "
                        "discarding new client",
                        server_name,
                    )
                    try:
                        new_client.disconnect()
                    except Exception:
                        logger.debug(
                            "Error disconnecting discarded client for '%s'",
                            server_name,
                            exc_info=True,
                        )
                    raise KeyError(
                        f"MCP server '{server_name}' was fully released during reconnect"
                    )

                self._pool[server_name] = new_client
                self._configs[server_name] = config
                self._refcounts[server_name] = current_refcount
                self._transitions.pop(server_name, None)
                transition_event.set()
                logger.info(
                    "Reconnected MCP server '%s' (refcount=%d)",
                    server_name,
                    current_refcount,
                )
                return new_client

        try:
            new_client.disconnect()
        except Exception:
            logger.debug(
                "Error disconnecting stale client for '%s' after reconnect race",
                server_name,
                exc_info=True,
            )
        return self.acquire(config)

    def cleanup_all(self) -> None:
        """Disconnect all pooled clients and clear manager state."""
        while True:
            with self._pool_lock:
                if self._transitions:
                    pending = list(self._transitions.values())
                else:
                    cleanup_events = {name: threading.Event() for name in self._pool}
                    clients = list(self._pool.items())
                    self._transitions.update(cleanup_events)
                    self._pool.clear()
                    self._refcounts.clear()
                    self._configs.clear()
                    break

            all_resolved = all(event.wait(timeout=_TRANSITION_TIMEOUT) for event in pending)
            if not all_resolved:
                logger.warning(
                    "Timed out waiting for pending transitions during cleanup, "
                    "forcing cleanup of stuck transitions",
                )
                with self._pool_lock:
                    for sn, evt in list(self._transitions.items()):
                        if not evt.is_set():
                            self._transitions.pop(sn, None)
                            evt.set()

        logger.info("Cleaning up %d pooled MCP connections", len(clients))
        for server_name, client in clients:
            try:
                client.disconnect()
                logger.debug("Disconnected MCP server '%s' during cleanup", server_name)
            except Exception:
                logger.warning(
                    "Error disconnecting MCP server '%s' during cleanup",
                    server_name,
                    exc_info=True,
                )

        with self._pool_lock:
            for server_name, event in cleanup_events.items():
                current = self._transitions.get(server_name)
                if current is event:
                    self._transitions.pop(server_name, None)
                event.set()
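
Typical pooled usage, based on the acquire/release contract above (server details hypothetical):

```python
from framework.runner.mcp_client import MCPServerConfig
from framework.runner.mcp_connection_manager import MCPConnectionManager

manager = MCPConnectionManager.get_instance()
cfg = MCPServerConfig(name="docs", transport="http", url="http://localhost:8900")

client = manager.acquire(cfg)       # refcount 1: connects
try:
    client.list_tools()
    same = manager.acquire(cfg)     # refcount 2: reuses the pooled client
    assert same is client
    manager.release(cfg.name)       # refcount 1
finally:
    manager.release(cfg.name)       # refcount 0: disconnects
```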
@@ -0,0 +1,815 @@
"""MCP Server Registry: local state management for installed MCP servers."""

from __future__ import annotations

import json
import logging
import os
import tempfile
import tomllib
from datetime import UTC, datetime
from importlib.metadata import PackageNotFoundError, version
from pathlib import Path
from typing import Any, Literal

import httpx

from framework.runner.mcp_client import MCPClient, MCPServerConfig
from framework.runner.mcp_connection_manager import MCPConnectionManager

logger = logging.getLogger(__name__)

DEFAULT_INDEX_URL = (
    "https://raw.githubusercontent.com/aden-hive/hive-mcp-registry/main/registry_index.json"
)
DEFAULT_REFRESH_INTERVAL_HOURS = 24
_LAST_FETCHED_FILENAME = "last_fetched"
_LEGACY_LAST_FETCHED_FILENAME = "last_fetched.json"

_DEFAULT_CONFIG = {
    "index_url": DEFAULT_INDEX_URL,
    "refresh_interval_hours": DEFAULT_REFRESH_INTERVAL_HOURS,
}


class MCPRegistry:
    """Manages local MCP server state in ~/.hive/mcp_registry/."""

    def __init__(self, base_path: Path | None = None):
        self._base = base_path or Path.home() / ".hive" / "mcp_registry"
        self._installed_path = self._base / "installed.json"
        self._config_path = self._base / "config.json"
        self._cache_dir = self._base / "cache"

    # ── Initialization ──────────────────────────────────────────────

    def initialize(self) -> None:
        """Create directory structure and default files if missing."""
        self._base.mkdir(parents=True, exist_ok=True)
        self._cache_dir.mkdir(parents=True, exist_ok=True)

        if not self._config_path.exists():
            self._write_json(self._config_path, _DEFAULT_CONFIG)

        if not self._installed_path.exists():
            self._write_json(self._installed_path, {"servers": {}})

    # ── Internal I/O ────────────────────────────────────────────────

    def _read_installed(self) -> dict:
        """Read installed.json, initializing if needed."""
        if not self._installed_path.exists():
            self.initialize()
        return json.loads(self._installed_path.read_text(encoding="utf-8"))

    def _write_installed(self, data: dict) -> None:
        """Write installed.json."""
        self._write_json(self._installed_path, data)

    def _read_config(self) -> dict:
        """Read config.json."""
        if not self._config_path.exists():
            self.initialize()
        return json.loads(self._config_path.read_text(encoding="utf-8"))

    def _read_cached_index(self) -> dict:
        """Read cached registry_index.json."""
        index_path = self._cache_dir / "registry_index.json"
        if not index_path.exists():
            return {"servers": {}}
        return json.loads(index_path.read_text(encoding="utf-8"))

    def _get_effective_manifest(
        self,
        name: str,
        entry: dict,
        cached_index: dict | None = None,
    ) -> dict:
        """Return the manifest currently in effect for an installed entry."""
        manifest = entry.get("manifest", {})
        if entry.get("source") != "registry":
            return manifest

        index = cached_index or self._read_cached_index()
        cached_manifest = index.get("servers", {}).get(name)
        if cached_manifest is not None:
            return cached_manifest

        # Fall back to persisted manifest data when the cache is unavailable.
        if isinstance(manifest, dict) and manifest:
            return manifest
        return {}

    @staticmethod
    def _write_json(path: Path, data: dict) -> None:
        """Write JSON to file atomically (write to temp, fsync, rename)."""
        content = json.dumps(data, indent=2) + "\n"
        fd, tmp_path = tempfile.mkstemp(dir=path.parent, suffix=".tmp")
        try:
            with os.fdopen(fd, "w", encoding="utf-8") as f:
                f.write(content)
                f.flush()
                os.fsync(f.fileno())
            os.replace(tmp_path, path)
        except BaseException:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass
            raise

    # ── add_local ───────────────────────────────────────────────────

    def add_local(
        self,
        name: str,
        transport: str | None = None,
        manifest: dict | None = None,
        url: str | None = None,
        command: str | None = None,
        args: list[str] | None = None,
        env: dict[str, str] | None = None,
        headers: dict[str, str] | None = None,
        cwd: str | None = None,
        socket_path: str | None = None,
        description: str = "",
    ) -> dict:
        """Register a local/running MCP server.

        Can be called with an inline manifest dict, or with individual
        transport/url/command params that build a manifest automatically.
        """
        data = self._read_installed()
        if name in data["servers"]:
            raise ValueError(f"Server '{name}' already exists. Use remove first.")

        if manifest is not None:
            # Inline manifest provided directly
            manifest = {**manifest, "name": name}
            transport_config = manifest.get("transport", {})
            transport = transport or transport_config.get("default", "stdio")
            if "transport" not in manifest:
                manifest["transport"] = {"supported": [transport], "default": transport}
        else:
            # Build manifest from individual params
            if not transport:
                raise ValueError("transport is required when manifest is not provided")
            manifest = {
                "name": name,
                "description": description,
                "transport": {"supported": [transport], "default": transport},
            }
            match transport:
                case "http":
                    if not url:
                        raise ValueError("url is required for http transport")
                    manifest["http"] = {"url": url, "headers": headers or {}}
                case "stdio":
                    if not command:
                        raise ValueError("command is required for stdio transport")
                    manifest["stdio"] = {
                        "command": command,
                        "args": args or [],
                        "env": env or {},
                        "cwd": cwd,
                    }
                case "unix":
                    if not socket_path:
                        raise ValueError("socket_path is required for unix transport")
                    manifest["unix"] = {"socket_path": socket_path}
                    manifest["http"] = {"url": url or "http://localhost"}
                case "sse":
                    if not url:
                        raise ValueError("url is required for sse transport")
                    manifest["sse"] = {"url": url}
                case _:
                    raise ValueError(f"Unsupported transport: {transport}")

        entry = self._make_entry(
            source="local",
            manifest=manifest,
            transport=transport,
            installed_by="hive mcp add",
        )

        data["servers"][name] = entry
        self._write_installed(data)
        logger.info("Registered local MCP server '%s' (%s)", name, transport)
        return entry
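
Both calling modes of `add_local`, with illustrative values:

```python
from framework.runner.mcp_registry import MCPRegistry

reg = MCPRegistry()
reg.initialize()

# Mode 1: individual params; add_local builds the manifest itself.
reg.add_local("my-tools", transport="stdio", command="python", args=["-m", "my_tools.server"])

# Mode 2: inline manifest dict.
reg.add_local(
    "remote-docs",
    manifest={
        "description": "Docs search",
        "transport": {"supported": ["http"], "default": "http"},
        "http": {"url": "http://localhost:8900", "headers": {}},
    },
)
```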
# ── install ─────────────────────────────────────────────────────
|
||||
|
||||
def install(self, name: str, transport: str | None = None, version: str | None = None) -> dict:
|
||||
"""Install a server from the cached remote registry index."""
|
||||
data = self._read_installed()
|
||||
if name in data["servers"]:
|
||||
raise ValueError(f"Server '{name}' already exists. Remove it first or use update.")
|
||||
|
||||
index = self._read_cached_index()
|
||||
manifest = index.get("servers", {}).get(name)
|
||||
if manifest is None:
|
||||
raise ValueError(
|
||||
f"Server '{name}' not found in registry index. "
|
||||
"Run 'hive mcp update' to refresh the index."
|
||||
)
|
||||
|
||||
# Validate version if specified
|
||||
if version is not None:
|
||||
index_version = manifest.get("version")
|
||||
if index_version is None:
|
||||
raise ValueError(f"Cannot pin version for '{name}': manifest has no version field.")
|
||||
if index_version != version:
|
||||
raise ValueError(
|
||||
f"Version mismatch for '{name}': requested {version}, "
|
||||
f"index has {index_version}. "
|
||||
"Run 'hive mcp update' to refresh the index."
|
||||
)
|
||||
|
||||
transport_config = manifest.get("transport", {})
|
||||
supported = transport_config.get("supported", [])
|
||||
if transport is not None:
|
||||
if supported and transport not in supported:
|
||||
raise ValueError(
|
||||
f"Transport '{transport}' not supported by '{name}'. Supported: {supported}"
|
||||
)
|
||||
resolved_transport = transport
|
||||
else:
|
||||
resolved_transport = transport_config.get("default", "stdio")
|
||||
|
||||
entry = self._make_entry(
|
||||
source="registry",
|
||||
manifest=self._make_registry_manifest_snapshot(name, manifest),
|
||||
transport=resolved_transport,
|
||||
installed_by="hive mcp install",
|
||||
pinned=version is not None,
|
||||
auto_update=version is None,
|
||||
resolved_package_version=manifest.get("version"),
|
||||
)
|
||||
|
||||
data["servers"][name] = entry
|
||||
self._write_installed(data)
|
||||
logger.info(
|
||||
"Installed MCP server '%s' v%s from registry",
|
||||
name,
|
||||
entry["manifest_version"],
|
||||
)
|
||||
return entry
|
||||
|
||||
# ── remove / enable / disable ───────────────────────────────────
|
||||
|
||||
def remove(self, name: str) -> None:
|
||||
"""Remove a server from the registry."""
|
||||
data = self._read_installed()
|
||||
if name not in data["servers"]:
|
||||
raise ValueError(f"Server '{name}' is not installed.")
|
||||
del data["servers"][name]
|
||||
self._write_installed(data)
|
||||
logger.info("Removed MCP server '%s'", name)
|
||||
|
||||
def enable(self, name: str) -> None:
|
||||
"""Enable a disabled server."""
|
||||
self._set_enabled(name, enabled=True)
|
||||
|
||||
def disable(self, name: str) -> None:
|
||||
"""Disable a server without removing it."""
|
||||
self._set_enabled(name, enabled=False)
|
||||
|
||||
def _set_enabled(self, name: str, *, enabled: bool) -> None:
|
||||
data = self._read_installed()
|
||||
if name not in data["servers"]:
|
||||
raise ValueError(f"Server '{name}' is not installed.")
|
||||
data["servers"][name]["enabled"] = enabled
|
||||
self._write_installed(data)
|
||||
logger.info("%s MCP server '%s'", "Enabled" if enabled else "Disabled", name)
|
||||
|
||||
# ── list / get ──────────────────────────────────────────────────
|
||||
|
||||
def list_installed(self) -> list[dict]:
|
||||
"""Return all installed servers as a list of dicts with name included."""
|
||||
data = self._read_installed()
|
||||
return [{"name": name, **entry} for name, entry in data["servers"].items()]
|
||||
|
||||
def get_server(self, name: str) -> dict | None:
|
||||
"""Get a single installed server entry by name, or None if not found."""
|
||||
data = self._read_installed()
|
||||
entry = data["servers"].get(name)
|
||||
if entry is None:
|
||||
return None
|
||||
return {"name": name, **entry}
|
||||
|
||||
def list_available(self) -> list[dict]:
|
||||
"""List all servers from cached remote index."""
|
||||
index = self._read_cached_index()
|
||||
return [{"name": name, **m} for name, m in index.get("servers", {}).items()]
|
||||
|
||||
# ── set_override ────────────────────────────────────────────────
|
||||
|
||||
def set_override(
|
||||
self,
|
||||
name: str,
|
||||
key: str,
|
||||
value: str,
|
||||
override_type: Literal["env", "headers"] = "env",
|
||||
) -> None:
|
||||
"""Set an env or header override for a server."""
|
||||
data = self._read_installed()
|
||||
if name not in data["servers"]:
|
||||
raise ValueError(f"Server '{name}' is not installed.")
|
||||
if override_type not in ("env", "headers"):
|
||||
raise ValueError(f"Invalid override type: {override_type}")
|
||||
data["servers"][name]["overrides"][override_type][key] = value
|
||||
self._write_installed(data)
|
||||
logger.info("Set %s override %s for MCP server '%s'", override_type, key, name)
|
||||
|
||||
# ── search ──────────────────────────────────────────────────────
|
||||
|
||||
def search(self, query: str) -> list[dict]:
|
||||
"""Search registry index by name, tag, description, or tool name."""
|
||||
query_lower = query.lower()
|
||||
index = self._read_cached_index()
|
||||
matches = []
|
||||
|
||||
for name, manifest in index.get("servers", {}).items():
|
||||
if self._matches_query(name, manifest, query_lower):
|
||||
matches.append({"name": name, **manifest})
|
||||
|
||||
return matches
|
||||
|
||||
@staticmethod
|
||||
def _matches_query(name: str, manifest: dict, query: str) -> bool:
|
||||
"""Check if a manifest matches a search query."""
|
||||
if query in name.lower():
|
||||
return True
|
||||
|
||||
description = manifest.get("description", "")
|
||||
if query in description.lower():
|
||||
return True
|
||||
|
||||
for tag in manifest.get("tags", []):
|
||||
if query in tag.lower():
|
||||
return True
|
||||
|
||||
for tool in manifest.get("tools", []):
|
||||
tool_name = tool.get("name", "") if isinstance(tool, dict) else str(tool)
|
||||
if query in tool_name.lower():
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
    # ── update_index ────────────────────────────────────────────────

    def is_index_stale(self) -> bool:
        """Check if the cached registry index needs refreshing."""
        last_fetched_path = self._cache_dir / _LAST_FETCHED_FILENAME
        legacy_path = self._cache_dir / _LEGACY_LAST_FETCHED_FILENAME
        if not last_fetched_path.exists() and not legacy_path.exists():
            return True

        try:
            path = last_fetched_path if last_fetched_path.exists() else legacy_path
            data = json.loads(path.read_text(encoding="utf-8"))
            last_fetched = datetime.fromisoformat(data["timestamp"])
            config = self._read_config()
            interval_hours = config.get("refresh_interval_hours", DEFAULT_REFRESH_INTERVAL_HOURS)
            age_hours = (datetime.now(UTC) - last_fetched).total_seconds() / 3600
            return age_hours >= interval_hours
        except (KeyError, ValueError, OSError):
            return True

    def update_index(self) -> int:
        """Fetch the latest registry index from remote and cache it.

        Returns the number of servers in the index.
        """
        config = self._read_config()
        url = config.get("index_url", DEFAULT_INDEX_URL)

        response = httpx.get(url, timeout=10.0)
        response.raise_for_status()
        index = response.json()

        self._write_json(self._cache_dir / "registry_index.json", index)
        # Write last_fetched atomically too
        self._write_json(
            self._cache_dir / _LAST_FETCHED_FILENAME,
            {"timestamp": datetime.now(UTC).isoformat()},
        )

        server_count = len(index.get("servers", {}))
        logger.info("Updated registry index: %d servers available", server_count)
        return server_count
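    # Usage sketch: refresh the cached index only when it has gone stale.
    # The config field names match those read above; the URL value is
    # illustrative and the config location is whatever _read_config() resolves.
    #
    #     # config.json: {"index_url": "https://example.com/index.json",
    #     #               "refresh_interval_hours": 24}
    #     registry = MCPRegistry()
    #     if registry.is_index_stale():
    #         count = registry.update_index()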
    # ── load_agent_selection ────────────────────────────────────────

    def load_agent_selection(self, agent_path: Path) -> list[dict[str, Any]]:
        """Load mcp_registry.json from an agent directory and resolve servers.

        Returns list of plain dicts compatible with ToolRegistry.register_mcp_server().
        """
        registry_json_path = agent_path / "mcp_registry.json"
        if not registry_json_path.exists():
            return []

        selection = json.loads(registry_json_path.read_text(encoding="utf-8"))

        # Validate types at the JSON boundary. Bad fields are dropped with a
        # warning so the agent still starts (graceful degradation).
        expected_types: dict[str, type] = {
            "include": list,
            "tags": list,
            "exclude": list,
            "profile": str,
            "max_tools": int,
            "versions": dict,
        }
        validated: dict[str, Any] = {}
        for field, expected in expected_types.items():
            value = selection.get(field)
            if value is None:
                continue
            if not isinstance(value, expected):
                logger.warning(
                    "mcp_registry.json: '%s' must be %s, got %s; ignoring",
                    field,
                    expected.__name__,
                    type(value).__name__,
                )
                continue
            validated[field] = value

        configs = self.resolve_for_agent(
            include=validated.get("include"),
            tags=validated.get("tags"),
            exclude=validated.get("exclude"),
            profile=validated.get("profile"),
            max_tools=validated.get("max_tools"),
            versions=validated.get("versions"),
        )
        return [self._server_config_to_dict(c) for c in configs]
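    # Illustrative mcp_registry.json (hypothetical server names) covering the
    # fields validated above; any other field, or a wrongly typed value, is
    # dropped with a warning:
    #
    #     {
    #       "profile": "research",
    #       "include": ["github", "fetch"],
    #       "tags": ["search"],
    #       "exclude": ["slack"],
    #       "max_tools": 40,
    #       "versions": {"github": "1.2.0"}
    #     }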
    # ── resolve_for_agent ───────────────────────────────────────────

    def resolve_for_agent(
        self,
        include: list[str] | None = None,
        tags: list[str] | None = None,
        exclude: list[str] | None = None,
        profile: str | None = None,
        max_tools: int | None = None,
        versions: dict[str, str] | None = None,
    ) -> list[MCPServerConfig]:
        """Resolve installed servers matching agent selection criteria.

        Selection precedence per PRD section 7.2:
        1. profile expands to server names (union with include + tags)
        2. include adds explicit servers
        3. tags adds servers whose tags overlap
        4. exclude removes (always wins)
        5. Load order: include-order first, then alphabetical for tag/profile matches

        Returns list of MCPServerConfig objects ready for ToolRegistry.
        """
        data = self._read_installed()
        servers = data.get("servers", {})
        cached_index = self._read_cached_index()
        exclude_set = set(exclude or [])

        # Phase 1: collect profile-matched servers (alphabetical)
        profile_matched: list[str] = []
        if profile:
            for name, entry in sorted(servers.items()):
                if name in exclude_set:
                    continue
                if profile == "all":
                    profile_matched.append(name)
                else:
                    manifest = self._get_effective_manifest(name, entry, cached_index)
                    profiles = manifest.get("hive", {}).get("profiles", [])
                    if profile in profiles:
                        profile_matched.append(name)

        # Phase 2: collect tag-matched servers (alphabetical)
        tag_matched: list[str] = []
        if tags:
            tag_set = set(tags)
            for name, entry in sorted(servers.items()):
                if name in exclude_set:
                    continue
                manifest = self._get_effective_manifest(name, entry, cached_index)
                server_tags = set(manifest.get("tags", []))
                if tag_set & server_tags:
                    tag_matched.append(name)

        # Phase 3: build final ordered list
        # include-order first, then alphabetical for profile/tag matches
        selected: list[str] = []
        seen: set[str] = set()

        for name in include or []:
            if name not in seen and name not in exclude_set:
                selected.append(name)
                seen.add(name)

        for name in profile_matched:
            if name not in seen:
                selected.append(name)
                seen.add(name)

        for name in tag_matched:
            if name not in seen:
                selected.append(name)
                seen.add(name)

        # Build configs, tracking aggregate tool count for max_tools cap (FR-56)
        configs: list[MCPServerConfig] = []
        total_tools = 0
        for name in selected:
            entry = servers.get(name)
            if entry is None:
                logger.warning(
                    "Server '%s' requested but not installed. Run: hive mcp install %s",
                    name,
                    name,
                )
                continue
            if not entry.get("enabled", True):
                continue

            manifest = self._get_effective_manifest(name, entry, cached_index)

            # Check version pin (VC-6)
            if versions and name in versions:
                installed_version = entry.get("manifest_version", "0.0.0")
                pinned_version = versions[name]
                if installed_version != pinned_version:
                    logger.warning(
                        "Server '%s' version mismatch: installed=%s, pinned=%s. "
                        "Run: hive mcp update %s",
                        name,
                        installed_version,
                        pinned_version,
                        name,
                    )
                    continue

            # Check tool count cap before adding (FR-56)
            manifest_tools = manifest.get("tools", [])
            server_tool_count = len(manifest_tools)
            if max_tools is not None and server_tool_count == 0:
                logger.debug(
                    "Server '%s' has no declared tools in manifest, skipping max_tools check",
                    name,
                )
            elif max_tools is not None and total_tools + server_tool_count > max_tools:
                logger.info(
                    "Skipping server '%s' (%d tools): would exceed max_tools=%d",
                    name,
                    server_tool_count,
                    max_tools,
                )
                continue

            config = self._manifest_to_server_config(
                name,
                manifest,
                entry.get("overrides", {}),
                transport_override=entry.get("transport"),
            )
            if config is not None:
                configs.append(config)
                total_tools += server_tool_count

        return configs
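    # Worked example of the precedence rules (hypothetical names): with
    # include=["b"], a "web" tag matching installed servers "a" and "c", and
    # exclude=["c"], the resolved order is ["b", "a"]; "b" comes first by
    # include order, "a" follows alphabetically, and "c" is dropped because
    # exclude always wins.
    #
    #     configs = registry.resolve_for_agent(include=["b"], tags=["web"], exclude=["c"])
    #     [c.name for c in configs]  # -> ["b", "a"]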
    def _manifest_to_server_config(
        self,
        name: str,
        manifest: dict,
        overrides: dict | None = None,
        transport_override: str | None = None,
    ) -> MCPServerConfig | None:
        """Convert a manifest and overrides to MCPServerConfig."""
        overrides = overrides or {}
        transport_config = manifest.get("transport", {})
        transport = transport_override or transport_config.get("default", "stdio")
        description = manifest.get("description", "")

        match transport:
            case "stdio":
                stdio_config = manifest.get("stdio", {})
                merged_env = {
                    **stdio_config.get("env", {}),
                    **overrides.get("env", {}),
                }
                return MCPServerConfig(
                    name=name,
                    transport="stdio",
                    command=stdio_config.get("command"),
                    args=stdio_config.get("args", []),
                    env=merged_env,
                    cwd=stdio_config.get("cwd"),
                    description=description,
                )
            case "http":
                http_config = manifest.get("http", {})
                url = http_config.get("url", "")
                merged_headers = {
                    **http_config.get("headers", {}),
                    **overrides.get("headers", {}),
                }
                return MCPServerConfig(
                    name=name,
                    transport="http",
                    url=url,
                    headers=merged_headers,
                    description=description,
                )
            case "unix":
                unix_config = manifest.get("unix", {})
                http_config = manifest.get("http", {})
                merged_headers = {
                    **http_config.get("headers", {}),
                    **overrides.get("headers", {}),
                }
                return MCPServerConfig(
                    name=name,
                    transport="unix",
                    socket_path=unix_config.get("socket_path"),
                    url=http_config.get("url") or "http://localhost",
                    headers=merged_headers,
                    description=description,
                )
            case "sse":
                sse_config = manifest.get("sse", {})
                merged_headers = {
                    **sse_config.get("headers", {}),
                    **overrides.get("headers", {}),
                }
                return MCPServerConfig(
                    name=name,
                    transport="sse",
                    url=sse_config.get("url", ""),
                    headers=merged_headers,
                    description=description,
                )
            case _:
                logger.warning(
                    "Unsupported transport '%s' for server '%s'",
                    transport,
                    name,
                )
                return None

    @staticmethod
    def _server_config_to_dict(config: MCPServerConfig) -> dict[str, Any]:
        """Convert MCPServerConfig to plain dict for ToolRegistry.register_mcp_server()."""
        return {
            "name": config.name,
            "transport": config.transport,
            "command": config.command,
            "args": config.args,
            "env": config.env,
            "cwd": config.cwd,
            "url": config.url,
            "headers": config.headers,
            "socket_path": config.socket_path,
            "description": config.description,
        }
    # ── run_health_check ────────────────────────────────────────────

    def health_check(self, name: str | None = None) -> dict | dict[str, dict]:
        """Check health of installed server(s). Updates telemetry fields.

        If name is None, checks all installed servers and returns
        a dict mapping server names to their health results.
        """
        if name is None:
            results = {}
            for server in self.list_installed():
                results[server["name"]] = self.health_check(server["name"])
            return results

        data = self._read_installed()
        if name not in data["servers"]:
            raise ValueError(f"Server '{name}' is not installed.")

        entry = data["servers"][name]
        manifest = self._get_effective_manifest(name, entry)
        config = self._manifest_to_server_config(
            name,
            manifest,
            entry.get("overrides", {}),
            transport_override=entry.get("transport"),
        )
        now = datetime.now(UTC).isoformat()

        result: dict[str, Any] = {
            "name": name,
            "status": "unknown",
            "tools": 0,
            "error": None,
        }

        if config is None:
            transport = entry.get("transport", "unknown")
            result["status"] = "unhealthy"
            result["error"] = f"Unsupported transport '{transport}'"
            entry["last_health_status"] = "unhealthy"
            entry["last_error"] = result["error"]
            entry["last_health_check_at"] = now
            self._write_installed(data)
            return result

        manager = MCPConnectionManager.get_instance()

        try:
            if manager.has_connection(name):
                is_healthy = manager.health_check(name)
                if not is_healthy:
                    raise RuntimeError("Shared MCP connection health check failed")
                pooled_client = manager.acquire(config)
                try:
                    tools = pooled_client.list_tools()
                finally:
                    manager.release(name)
            else:
                with MCPClient(config) as client:
                    tools = client.list_tools()

            result["status"] = "healthy"
            result["tools"] = len(tools)
            entry["last_health_status"] = "healthy"
            entry["last_error"] = None
            entry["last_validated_with_hive_version"] = self._get_hive_version()
        except Exception as exc:
            result["status"] = "unhealthy"
            result["error"] = str(exc)
            entry["last_health_status"] = "unhealthy"
            entry["last_error"] = str(exc)

        entry["last_health_check_at"] = now
        self._write_installed(data)
        return result

    def run_health_check(self, name: str | None = None) -> dict | dict[str, dict]:
        """Backward-compatible wrapper for the public health_check API."""
        return self.health_check(name)
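    # Shape of a single health_check() result as built above (values are
    # illustrative):
    #
    #     {"name": "github", "status": "healthy", "tools": 12, "error": None}
    #
    # For a failing server, status becomes "unhealthy" and error carries the
    # exception text; the installed entry's telemetry fields are updated
    # either way.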
    @staticmethod
    def _get_hive_version() -> str:
        """Get the current Hive version."""
        try:
            return version("framework")
        except PackageNotFoundError:
            project_toml = Path(__file__).resolve().parents[2] / "pyproject.toml"
            if not project_toml.exists():
                return "unknown"
            try:
                with project_toml.open("rb") as f:
                    data = tomllib.load(f)
                return data.get("project", {}).get("version", "unknown")
            except (tomllib.TOMLDecodeError, OSError):
                return "unknown"

    # ── helpers ──────────────────────────────────────────────────────

    @staticmethod
    def _make_entry(
        *,
        source: str,
        manifest: dict,
        transport: str,
        installed_by: str,
        pinned: bool = False,
        auto_update: bool = False,
        resolved_package_version: str | None = None,
    ) -> dict:
        """Build a standard installed server entry."""
        now = datetime.now(UTC).isoformat()
        return {
            "source": source,
            "manifest_version": manifest.get("version", "0.0.0"),
            "manifest": manifest,
            "installed_at": now,
            "installed_by": installed_by,
            "transport": transport,
            "enabled": True,
            "pinned": pinned,
            "auto_update": auto_update,
            "resolved_package_version": resolved_package_version,
            "overrides": {"env": {}, "headers": {}},
            "last_health_check_at": None,
            "last_health_status": None,
            "last_error": None,
            "last_used_at": None,
            "last_validated_with_hive_version": None,
        }

    @staticmethod
    def _make_registry_manifest_snapshot(name: str, manifest: dict) -> dict[str, Any]:
        """Persist a full manifest snapshot for registry-installed servers."""
        manifest_snapshot = dict(manifest)
        manifest_snapshot["name"] = name
        return manifest_snapshot
@@ -1,6 +1,6 @@
 """Pre-load validation for agent graphs.

-Runs structural and credential checks before MCP servers are spawned.
+Runs structural, credential, and skill-trust checks before MCP servers are spawned.
 Fails fast with actionable error messages.
 """

@@ -169,6 +169,9 @@ def run_preload_validation(
     1. Graph structure (includes GCU subagent-only checks) — non-recoverable
     2. Credentials — potentially recoverable via interactive setup

+    Skill discovery and trust gating (AS-13) happen later in runner._setup()
+    so they have access to agent-level skill configuration.
+
     Raises PreloadValidationError for structural issues.
     Raises CredentialError for credential issues.
     """
+466 -88
@@ -16,7 +16,6 @@ from framework.credentials.validation import (
from framework.graph import Goal
from framework.graph.edge import (
    DEFAULT_MAX_TOKENS,
    AsyncEntryPointSpec,
    EdgeCondition,
    EdgeSpec,
    GraphSpec,
@@ -29,6 +28,7 @@ from framework.runner.tool_registry import ToolRegistry
from framework.runtime.agent_runtime import AgentRuntime, AgentRuntimeConfig, create_agent_runtime
from framework.runtime.execution_stream import EntryPointSpec
from framework.runtime.runtime_log_store import RuntimeLogStore
+from framework.tools.flowchart_utils import generate_fallback_flowchart

if TYPE_CHECKING:
    from framework.runner.protocol import AgentMessage, CapabilityResponse
@@ -552,6 +552,319 @@ def get_kimi_code_token() -> str | None:
    return None


# ---------------------------------------------------------------------------
# Antigravity subscription token helpers
# ---------------------------------------------------------------------------

# Antigravity IDE (native macOS/Linux app) stores OAuth tokens in its
# VSCode-style SQLite state database under the key
# "antigravityUnifiedStateSync.oauthToken" as a base64-encoded protobuf blob.
ANTIGRAVITY_IDE_STATE_DB = (
    Path.home()
    / "Library"
    / "Application Support"
    / "Antigravity"
    / "User"
    / "globalStorage"
    / "state.vscdb"
)
# Linux fallback for the IDE state DB
ANTIGRAVITY_IDE_STATE_DB_LINUX = (
    Path.home() / ".config" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
)
# Antigravity credentials stored by native OAuth implementation
ANTIGRAVITY_AUTH_FILE = Path.home() / ".hive" / "antigravity-accounts.json"

ANTIGRAVITY_OAUTH_TOKEN_URL = "https://oauth2.googleapis.com/token"
_ANTIGRAVITY_TOKEN_LIFETIME_SECS = 3600  # Google access tokens expire in 1 hour
_ANTIGRAVITY_IDE_STATE_DB_KEY = "antigravityUnifiedStateSync.oauthToken"


def _read_antigravity_ide_credentials() -> dict | None:
    """Read credentials from the Antigravity IDE's SQLite state database.

    The Antigravity desktop IDE (VSCode-based) stores its OAuth token as a
    base64-encoded protobuf blob in a SQLite database. The access token is
    a standard Google OAuth ``ya29.*`` bearer token.

    Returns:
        Dict with ``accessToken`` and optionally ``refreshToken`` keys,
        plus ``_source: "ide"`` to skip file-based save on refresh.
        Returns None if the database is absent or the key is not found.
    """
    import re
    import sqlite3

    for db_path in (ANTIGRAVITY_IDE_STATE_DB, ANTIGRAVITY_IDE_STATE_DB_LINUX):
        if not db_path.exists():
            continue
        try:
            con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True)
            try:
                row = con.execute(
                    "SELECT value FROM ItemTable WHERE key = ?",
                    (_ANTIGRAVITY_IDE_STATE_DB_KEY,),
                ).fetchone()
            finally:
                con.close()

            if not row:
                continue

            import base64

            blob = base64.b64decode(row[0])

            # The protobuf blob contains the access token (ya29.*) and
            # refresh token (1//*) as length-prefixed UTF-8 strings.
            # Decode the inner base64 layer and extract with regex.
            inner_b64_candidates = re.findall(rb"[A-Za-z0-9+/=_\-]{40,}", blob)
            access_token: str | None = None
            refresh_token: str | None = None
            for candidate in inner_b64_candidates:
                try:
                    padded = candidate + b"=" * (-len(candidate) % 4)
                    inner = base64.urlsafe_b64decode(padded)
                except Exception:
                    continue
                if not access_token:
                    m = re.search(rb"ya29\.[A-Za-z0-9_\-\.]+", inner)
                    if m:
                        access_token = m.group(0).decode("ascii")
                if not refresh_token:
                    m = re.search(rb"1//[A-Za-z0-9_\-\.]+", inner)
                    if m:
                        refresh_token = m.group(0).decode("ascii")
                if access_token and refresh_token:
                    break

            if access_token:
                return {
                    "accounts": [
                        {
                            "accessToken": access_token,
                            "refreshToken": refresh_token or "",
                        }
                    ],
                    "_source": "ide",
                    "_db_path": str(db_path),
                }
        except Exception as exc:
            logger.debug("Failed to read Antigravity IDE state DB: %s", exc)
            continue

    return None


def _read_antigravity_credentials() -> dict | None:
    """Read Antigravity auth data from all supported credential sources.

    Checks in order:
    1. Antigravity IDE SQLite state database (native macOS/Linux app)
    2. Native OAuth credentials file (~/.hive/antigravity-accounts.json)

    Returns:
        Auth data dict with an ``accounts`` list on success, None otherwise.
    """
    # 1. Native Antigravity IDE (primary on macOS)
    ide_creds = _read_antigravity_ide_credentials()
    if ide_creds:
        return ide_creds

    # 2. Native OAuth credentials file
    if ANTIGRAVITY_AUTH_FILE.exists():
        try:
            with open(ANTIGRAVITY_AUTH_FILE, encoding="utf-8") as f:
                data = json.load(f)
            accounts = data.get("accounts", [])
            if accounts and isinstance(accounts[0], dict):
                return data
        except (json.JSONDecodeError, OSError):
            pass
    return None

def _is_antigravity_token_expired(auth_data: dict) -> bool:
    """Check whether the Antigravity access token is expired or near expiry.

    For IDE-sourced credentials: uses the state DB's mtime as last_refresh
    since the IDE keeps the DB fresh while it's running.
    For JSON-sourced credentials: uses the ``last_refresh`` field or file mtime.
    """
    import time
    from datetime import datetime

    now = time.time()

    if auth_data.get("_source") == "ide":
        # The IDE refreshes tokens automatically while running.
        # Use the DB file's mtime as a proxy for when the token was last updated.
        try:
            db_path = Path(auth_data.get("_db_path", str(ANTIGRAVITY_IDE_STATE_DB)))
            last_refresh: float = db_path.stat().st_mtime
        except OSError:
            return True
        expires_at = last_refresh + _ANTIGRAVITY_TOKEN_LIFETIME_SECS
        return now >= (expires_at - _TOKEN_REFRESH_BUFFER_SECS)

    last_refresh_val: float | str | None = auth_data.get("last_refresh")
    if last_refresh_val is None:
        try:
            last_refresh_val = ANTIGRAVITY_AUTH_FILE.stat().st_mtime
        except OSError:
            return True
    elif isinstance(last_refresh_val, str):
        try:
            last_refresh_val = datetime.fromisoformat(
                last_refresh_val.replace("Z", "+00:00")
            ).timestamp()
        except (ValueError, TypeError):
            return True

    expires_at = float(last_refresh_val) + _ANTIGRAVITY_TOKEN_LIFETIME_SECS
    return now >= (expires_at - _TOKEN_REFRESH_BUFFER_SECS)

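# Worked example of the expiry window above: with the 3600 s lifetime and
# assuming _TOKEN_REFRESH_BUFFER_SECS (defined elsewhere in this module) is
# 300 s, a token last refreshed at t=0 is treated as expired from
# t = 3600 - 300 = 3300 s onward, i.e. a refresh is attempted five minutes
# before the real expiry.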
def _refresh_antigravity_token(refresh_token: str) -> dict | None:
    """Refresh the Antigravity access token via Google OAuth.

    POSTs form-encoded ``grant_type=refresh_token`` to the Google token
    endpoint using Antigravity's public OAuth client ID.

    Returns:
        Parsed response dict (containing ``access_token``) on success,
        None on any error.
    """
    import urllib.error
    import urllib.parse
    import urllib.request

    from framework.config import get_antigravity_client_id, get_antigravity_client_secret

    client_id = get_antigravity_client_id()
    client_secret = get_antigravity_client_secret()
    params: dict = {
        "grant_type": "refresh_token",
        "refresh_token": refresh_token,
        "client_id": client_id,
    }
    if client_secret:
        params["client_secret"] = client_secret

    data = urllib.parse.urlencode(params).encode("utf-8")

    req = urllib.request.Request(
        ANTIGRAVITY_OAUTH_TOKEN_URL,
        data=data,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        method="POST",
    )

    try:
        with urllib.request.urlopen(req, timeout=15) as resp:  # noqa: S310
            return json.loads(resp.read())
    except (urllib.error.URLError, json.JSONDecodeError, TimeoutError, OSError) as exc:
        logger.debug("Antigravity token refresh failed: %s", exc)
        return None

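# On success the Google token endpoint returns standard OAuth JSON; an
# illustrative (not literal) response as consumed by callers of this helper:
#
#     {"access_token": "ya29.a0...", "expires_in": 3599, "token_type": "Bearer"}
#
# Only "access_token" (plus "refresh_token" when the server rotates it) is
# read from this dict.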
def _save_refreshed_antigravity_credentials(auth_data: dict, token_data: dict) -> None:
    """Write refreshed tokens back to the Antigravity JSON credentials file.

    Skipped for IDE-sourced credentials (the IDE manages its own DB).
    Updates ``accounts[0].accessToken`` (and ``refreshToken`` if present),
    then persists ``last_refresh`` as an ISO-8601 UTC string.
    """
    from datetime import datetime

    # IDE manages its own state — we do not write back to its SQLite DB
    if auth_data.get("_source") == "ide":
        return

    try:
        accounts = auth_data.get("accounts", [])
        if not accounts:
            return
        account = accounts[0]
        account["accessToken"] = token_data["access_token"]
        if "refresh_token" in token_data:
            account["refreshToken"] = token_data["refresh_token"]
        auth_data["accounts"] = accounts
        auth_data["last_refresh"] = datetime.now(UTC).isoformat()

        ANTIGRAVITY_AUTH_FILE.parent.mkdir(parents=True, exist_ok=True)
        fd = os.open(ANTIGRAVITY_AUTH_FILE, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        with os.fdopen(fd, "w", encoding="utf-8") as f:
            json.dump(auth_data, f, indent=2)
        logger.debug("Antigravity credentials refreshed and saved")
    except (OSError, KeyError) as exc:
        logger.debug("Failed to save refreshed Antigravity credentials: %s", exc)

def get_antigravity_token() -> str | None:
    """Get the OAuth access token from an Antigravity subscription.

    Credential sources checked in order:
    1. Antigravity IDE SQLite state DB (native app, macOS/Linux)
    2. antigravity-auth CLI JSON file

    For IDE credentials the token is read directly (the IDE refreshes it
    automatically while running). For JSON credentials an automatic OAuth
    refresh is attempted when the token is near expiry.

    Returns:
        The ``ya29.*`` Google OAuth access token, or None if unavailable.
    """
    auth_data = _read_antigravity_credentials()
    if not auth_data:
        return None

    accounts = auth_data.get("accounts", [])
    if not accounts:
        return None
    account = accounts[0]

    access_token = account.get("accessToken")
    if not access_token:
        return None

    if not _is_antigravity_token_expired(auth_data):
        return access_token

    # Token is expired or near expiry — attempt a refresh
    refresh_token = account.get("refreshToken")
    if not refresh_token:
        logger.warning(
            "Antigravity token expired and no refresh token available. "
            "Re-open the Antigravity IDE to refresh, or run 'antigravity-auth accounts add'."
        )
        return access_token  # return stale token; proxy may still accept it briefly

    logger.info("Antigravity token expired or near expiry, refreshing...")
    token_data = _refresh_antigravity_token(refresh_token)

    if token_data and "access_token" in token_data:
        _save_refreshed_antigravity_credentials(auth_data, token_data)
        return token_data["access_token"]

    logger.warning(
        "Antigravity token refresh failed. "
        "Re-open the Antigravity IDE or run 'antigravity-auth accounts add'."
    )
    return access_token

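# Usage sketch: resolve a bearer token for an outbound request (the header
# assembly is hypothetical; the provider integration elsewhere in this
# codebase does this internally):
#
#     token = get_antigravity_token()
#     if token:
#         headers = {"Authorization": f"Bearer {token}"}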
def _is_antigravity_proxy_available() -> bool:
    """Return True if antigravity-auth serve is running on localhost:8069."""
    import socket

    try:
        with socket.create_connection(("localhost", 8069), timeout=0.5):
            return True
    except (OSError, TimeoutError):
        return False

@dataclass
class AgentInfo:
    """Information about an exported agent."""
@@ -570,9 +883,6 @@ class AgentInfo:
    constraints: list[dict]
    required_tools: list[str]
    has_tools_module: bool
-    # Multi-entry-point support
-    async_entry_points: list[dict] = field(default_factory=list)
-    is_multi_entry_point: bool = False


@dataclass
@@ -630,22 +940,6 @@ def load_agent_export(data: str | dict) -> tuple[GraphSpec, Goal]:
        )
        edges.append(edge)

-    # Build AsyncEntryPointSpec objects for multi-entry-point support
-    async_entry_points = []
-    for aep_data in graph_data.get("async_entry_points", []):
-        async_entry_points.append(
-            AsyncEntryPointSpec(
-                id=aep_data["id"],
-                name=aep_data.get("name", aep_data["id"]),
-                entry_node=aep_data["entry_node"],
-                trigger_type=aep_data.get("trigger_type", "manual"),
-                trigger_config=aep_data.get("trigger_config", {}),
-                isolation_level=aep_data.get("isolation_level", "shared"),
-                priority=aep_data.get("priority", 0),
-                max_concurrent=aep_data.get("max_concurrent", 10),
-            )
-        )

    # Build GraphSpec
    graph = GraphSpec(
        id=graph_data.get("id", "agent-graph"),
@@ -653,7 +947,6 @@
        version=graph_data.get("version", "1.0.0"),
        entry_node=graph_data.get("entry_node", ""),
        entry_points=graph_data.get("entry_points", {}),  # Support pause/resume architecture
-        async_entry_points=async_entry_points,  # Support multi-entry-point agents
        terminal_nodes=graph_data.get("terminal_nodes", []),
        pause_nodes=graph_data.get("pause_nodes", []),  # Support pause/resume architecture
        nodes=nodes,
@@ -805,8 +1098,6 @@ class AgentRunner:

        # AgentRuntime — unified execution path for all agents
        self._agent_runtime: AgentRuntime | None = None
-        self._uses_async_entry_points = self.graph.has_async_entry_points()

        # Pre-load validation: structural checks + credentials.
        # Fails fast with actionable guidance — no MCP noise on screen.
        run_preload_validation(
@@ -830,6 +1121,9 @@ class AgentRunner:
        if mcp_config_path.exists():
            self._load_mcp_servers_from_config(mcp_config_path)

        # Auto-discover registry-selected MCP servers from mcp_registry.json
        self._load_registry_mcp_servers(agent_path)

    @staticmethod
    def _import_agent_module(agent_path: Path):
        """Import an agent package from its directory path.
@@ -927,7 +1221,8 @@ class AgentRunner:
        if agent_config and hasattr(agent_config, "max_tokens"):
            max_tokens = agent_config.max_tokens
            logger.info(
-                "Agent default_config overrides max_tokens: %d (configuration.json value ignored)",
+                "Agent default_config overrides max_tokens: %d "
+                "(configuration.json value ignored)",
                max_tokens,
            )
        else:
@@ -964,7 +1259,6 @@ class AgentRunner:
            "version": "1.0.0",
            "entry_node": getattr(agent_module, "entry_node", nodes[0].id),
            "entry_points": getattr(agent_module, "entry_points", {}),
-            "async_entry_points": getattr(agent_module, "async_entry_points", []),
            "terminal_nodes": getattr(agent_module, "terminal_nodes", []),
            "pause_nodes": getattr(agent_module, "pause_nodes", []),
            "nodes": nodes,
@@ -982,6 +1276,12 @@ class AgentRunner:

        graph = GraphSpec(**graph_kwargs)

        # Generate flowchart.json if missing (for template/legacy agents)
        generate_fallback_flowchart(graph, goal, agent_path)
        # Read skill configuration from agent module
        agent_default_skills = getattr(agent_module, "default_skills", None)
        agent_skills = getattr(agent_module, "skills", None)

        # Read runtime config (webhook settings, etc.) if defined
        agent_runtime_config = getattr(agent_module, "runtime_config", None)

@@ -993,7 +1293,7 @@ class AgentRunner:
        configure_fn = getattr(agent_module, "configure_for_account", None)
        list_accts_fn = getattr(agent_module, "list_connected_accounts", None)

-        return cls(
+        runner = cls(
            agent_path=agent_path,
            graph=graph,
            goal=goal,
@@ -1009,6 +1309,10 @@ class AgentRunner:
            list_accounts=list_accts_fn,
            credential_store=credential_store,
        )
        # Stash skill config for use in _setup()
        runner._agent_default_skills = agent_default_skills
        runner._agent_skills = agent_skills
        return runner

        # Fallback: load from agent.json (legacy JSON-based agents)
        agent_json_path = agent_path / "agent.json"
@@ -1026,7 +1330,10 @@ class AgentRunner:
        except json.JSONDecodeError as exc:
            raise ValueError(f"Invalid JSON in agent export file: {agent_json_path}") from exc

-        return cls(
+        # Generate flowchart.json if missing (for legacy JSON-based agents)
+        generate_fallback_flowchart(graph, goal, agent_path)
+
+        runner = cls(
            agent_path=agent_path,
            graph=graph,
            goal=goal,
@@ -1037,6 +1344,9 @@ class AgentRunner:
            skip_credential_validation=skip_credential_validation or False,
            credential_store=credential_store,
        )
        runner._agent_default_skills = None
        runner._agent_skills = None
        return runner

    def register_tool(
        self,
@@ -1117,6 +1427,45 @@ class AgentRunner:
        """Load and register MCP servers from a configuration file."""
        self._tool_registry.load_mcp_config(config_path)

    def _load_registry_mcp_servers(self, agent_path: Path) -> None:
        """Load and register MCP servers selected via ``mcp_registry.json``."""
        from framework.runner.mcp_registry import MCPRegistry

        try:
            registry = MCPRegistry()
            registry.initialize()
            server_configs = registry.load_agent_selection(agent_path)
        except Exception as exc:
            logger.warning(
                "Failed to load MCP registry servers for '%s': %s",
                agent_path.name,
                exc,
            )
            return

        if not server_configs:
            return

        results = self._tool_registry.load_registry_servers(server_configs)
        loaded = [result for result in results if result["status"] == "loaded"]
        skipped = [result for result in results if result["status"] != "loaded"]

        logger.info(
            "Loaded %d/%d MCP registry server(s) for agent '%s'",
            len(loaded),
            len(results),
            agent_path.name,
        )
        if skipped:
            logger.info(
                "Skipped MCP registry servers for agent '%s': %s",
                agent_path.name,
                [
                    {"server": result["server"], "reason": result["skipped_reason"]}
                    for result in skipped
                ],
            )

    def set_approval_callback(self, callback: Callable) -> None:
        """
        Set a callback for human-in-the-loop approval during execution.
@@ -1147,7 +1496,10 @@ class AgentRunner:

        # Create LLM provider
        # Uses LiteLLM which auto-detects the provider from model name
-        if self.mock_mode:
+        # Skip if already injected (e.g. worker agents with a pre-built LLM)
+        if self._llm is not None:
+            pass  # LLM already configured externally
+        elif self.mock_mode:
            # Use mock LLM for testing without real API calls
            from framework.llm.mock import MockLLMProvider

@@ -1161,6 +1513,7 @@ class AgentRunner:
        use_claude_code = llm_config.get("use_claude_code_subscription", False)
        use_codex = llm_config.get("use_codex_subscription", False)
        use_kimi_code = llm_config.get("use_kimi_code_subscription", False)
        use_antigravity = llm_config.get("use_antigravity_subscription", False)
        api_base = llm_config.get("api_base")

        api_key = None
@@ -1168,20 +1521,28 @@ class AgentRunner:
            # Get OAuth token from Claude Code subscription
            api_key = get_claude_code_token()
            if not api_key:
-                print("Warning: Claude Code subscription configured but no token found.")
-                print("Run 'claude' to authenticate, then try again.")
+                logger.warning(
+                    "Claude Code subscription configured but no token found. "
+                    "Run 'claude' to authenticate, then try again."
+                )
        elif use_codex:
            # Get OAuth token from Codex subscription
            api_key = get_codex_token()
            if not api_key:
-                print("Warning: Codex subscription configured but no token found.")
-                print("Run 'codex' to authenticate, then try again.")
+                logger.warning(
+                    "Codex subscription configured but no token found. "
+                    "Run 'codex' to authenticate, then try again."
+                )
        elif use_kimi_code:
            # Get API key from Kimi Code CLI config (~/.kimi/config.toml)
            api_key = get_kimi_code_token()
            if not api_key:
-                print("Warning: Kimi Code subscription configured but no key found.")
-                print("Run 'kimi /login' to authenticate, then try again.")
+                logger.warning(
+                    "Kimi Code subscription configured but no key found. "
+                    "Run 'kimi /login' to authenticate, then try again."
+                )
        elif use_antigravity:
            pass  # AntigravityProvider handles credentials internally

        if api_key and use_claude_code:
            # Use litellm's built-in Anthropic OAuth support.
@@ -1220,6 +1581,19 @@ class AgentRunner:
                api_key=api_key,
                api_base=api_base,
            )
        elif use_antigravity:
            # Direct OAuth to Google's internal Cloud Code Assist gateway.
            # No local proxy required — AntigravityProvider handles token
            # refresh and Gemini-format request/response conversion natively.
            from framework.llm.antigravity import AntigravityProvider  # noqa: PLC0415

            provider = AntigravityProvider(model=self.model)
            if not provider.has_credentials():
                print(
                    "Warning: Antigravity credentials not found. "
                    "Run: uv run python core/antigravity_auth.py auth account add"
                )
            self._llm = provider
        else:
            # Local models (e.g. Ollama) don't need an API key
            if self._is_local_model(self.model):
@@ -1251,8 +1625,12 @@ class AgentRunner:
            if api_key_env:
                os.environ[api_key_env] = api_key
        elif api_key_env:
-            print(f"Warning: {api_key_env} not set. LLM calls will fail.")
-            print(f"Set it with: export {api_key_env}=your-api-key")
+            logger.warning(
+                "%s not set. LLM calls will fail. "
+                "Set it with: export %s=your-api-key",
+                api_key_env,
+                api_key_env,
+            )

        # Fail fast if the agent needs an LLM but none was configured
        if self._llm is None:
@@ -1346,6 +1724,20 @@ class AgentRunner:
        except Exception:
            pass  # Best-effort — agent works without account info

        # Skill configuration — the runtime handles discovery, loading, trust-gating and
        # prompt rendering. The runner just builds the config.
        from framework.skills.config import SkillsConfig
        from framework.skills.manager import SkillsManagerConfig

        skills_manager_config = SkillsManagerConfig(
            skills_config=SkillsConfig.from_agent_vars(
                default_skills=getattr(self, "_agent_default_skills", None),
                skills=getattr(self, "_agent_skills", None),
            ),
            project_root=self.agent_path,
            interactive=self._interactive,
        )

        self._setup_agent_runtime(
            tools,
            tool_executor,
@@ -1353,6 +1745,7 @@ class AgentRunner:
            accounts_data=accounts_data,
            tool_provider_map=tool_provider_map,
            event_bus=event_bus,
            skills_manager_config=skills_manager_config,
        )

    def _get_api_key_env_var(self, model: str) -> str | None:
@@ -1373,6 +1766,8 @@ class AgentRunner:
            return "MISTRAL_API_KEY"
        elif model_lower.startswith("groq/"):
            return "GROQ_API_KEY"
        elif model_lower.startswith("openrouter/"):
            return "OPENROUTER_API_KEY"
        elif self._is_local_model(model_lower):
            return None  # Local models don't need an API key
        elif model_lower.startswith("azure/"):
@@ -1387,6 +1782,8 @@ class AgentRunner:
            return "MINIMAX_API_KEY"
        elif model_lower.startswith("kimi/"):
            return "KIMI_API_KEY"
        elif model_lower.startswith("hive/"):
            return "HIVE_API_KEY"
        else:
            # Default: assume OpenAI-compatible
            return "OPENAI_API_KEY"
@@ -1409,6 +1806,8 @@ class AgentRunner:
            cred_id = "minimax"
        elif model_lower.startswith("kimi/"):
            cred_id = "kimi"
        elif model_lower.startswith("hive/"):
            cred_id = "hive"
        # Add more mappings as providers are added to LLM_CREDENTIALS

        if cred_id is None:
@@ -1448,23 +1847,13 @@ class AgentRunner:
        accounts_data: list[dict] | None = None,
        tool_provider_map: dict[str, str] | None = None,
        event_bus=None,
-        skills_catalog_prompt: str = "",
-        protocols_prompt: str = "",
-        skill_dirs: list[str] | None = None,
+        skills_manager_config=None,
    ) -> None:
        """Set up multi-entry-point execution using AgentRuntime."""
        # Convert AsyncEntryPointSpec to EntryPointSpec for AgentRuntime
        entry_points = []
        for async_ep in self.graph.async_entry_points:
            ep = EntryPointSpec(
                id=async_ep.id,
                name=async_ep.name,
                entry_node=async_ep.entry_node,
                trigger_type=async_ep.trigger_type,
                trigger_config=async_ep.trigger_config,
                isolation_level=async_ep.isolation_level,
                priority=async_ep.priority,
                max_concurrent=async_ep.max_concurrent,
                max_resurrections=async_ep.max_resurrections,
            )
            entry_points.append(ep)

        # Always create a primary entry point for the graph's entry node.
        # For multi-entry-point agents this ensures the primary path (e.g.
@@ -1521,26 +1910,37 @@ class AgentRunner:
            accounts_data=accounts_data,
            tool_provider_map=tool_provider_map,
            event_bus=event_bus,
            skills_manager_config=skills_manager_config,
        )

        # Pass intro_message through for TUI display
        self._agent_runtime.intro_message = self.intro_message

    # ------------------------------------------------------------------
    # Execution modes
    #
    # run()               – One-shot, blocking execution for worker agents
    #                       (headless CLI via ``hive run``). Validates, runs
    #                       the graph to completion, and returns the result.
    #
    # start() / trigger() – Long-lived runtime for the frontend (queen).
    #                       start() boots the runtime; trigger() sends
    #                       non-blocking execution requests. Used by the
    #                       server session manager and API routes.
    # ------------------------------------------------------------------
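    # Usage sketch of the two modes (input values are illustrative and the
    # entry-point id is hypothetical):
    #
    #     # Worker, one-shot:
    #     result = await runner.run({"lead_id": "123"})
    #
    #     # Queen, long-lived; trigger() returns an execution id string
    #     # (await it if it is defined as a coroutine in this build):
    #     await runner.start()
    #     execution_id = runner.trigger("webhook_intake", {"payload": {}})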
    async def run(
        self,
        input_data: dict | None = None,
        session_state: dict | None = None,
        entry_point_id: str | None = None,
    ) -> ExecutionResult:
-        """
-        Execute the agent with given input data.
+        """One-shot execution for worker agents (headless CLI).

-        Validates credentials before execution. If any required credentials
-        are missing, returns an error result with instructions on how to
-        provide them.
+        Validates credentials, runs the graph to completion, and returns
+        the result. Used by ``hive run`` and programmatic callers.

        For single-entry-point agents, this is the standard execution path.
-        For multi-entry-point agents, you can optionally specify which entry point to use.
+        For the frontend (queen), use start() + trigger() instead.

        Args:
            input_data: Input data for the agent (e.g., {"lead_id": "123"})
@@ -1666,7 +2066,12 @@ class AgentRunner:
    # === Runtime API ===

    async def start(self) -> None:
-        """Start the agent runtime."""
+        """Boot the agent runtime for the frontend (queen).
+
+        Pair with trigger() to send execution requests. Used by the
+        server session manager. For headless worker agents, use run()
+        instead.
+        """
        if self._agent_runtime is None:
            self._setup()

@@ -1683,10 +2088,10 @@ class AgentRunner:
        input_data: dict[str, Any],
        correlation_id: str | None = None,
    ) -> str:
-        """
-        Trigger execution at a specific entry point (non-blocking).
+        """Send a non-blocking execution request to a running runtime.

-        Returns execution ID for tracking.
+        Used by the server API routes after start(). For headless
+        worker agents, use run() instead.

        Args:
            entry_point_id: Which entry point to trigger
@@ -1771,19 +2176,6 @@ class AgentRunner:
            for edge in self.graph.edges
        ]

-        # Build async entry points info
-        async_entry_points_info = [
-            {
-                "id": ep.id,
-                "name": ep.name,
-                "entry_node": ep.entry_node,
-                "trigger_type": ep.trigger_type,
-                "isolation_level": ep.isolation_level,
-                "max_concurrent": ep.max_concurrent,
-            }
-            for ep in self.graph.async_entry_points
-        ]

        return AgentInfo(
            name=self.graph.id,
            description=self.graph.description,
@@ -1810,8 +2202,6 @@ class AgentRunner:
            ],
            required_tools=sorted(required_tools),
            has_tools_module=(self.agent_path / "tools.py").exists(),
-            async_entry_points=async_entry_points_info,
-            is_multi_entry_point=self._uses_async_entry_points,
        )

    def validate(self) -> ValidationResult:
@@ -2126,18 +2516,6 @@ Respond with JSON only:
            trigger_type="manual",
            isolation_level="shared",
        )
-        for aep in runner.graph.async_entry_points:
-            entry_points[aep.id] = EntryPointSpec(
-                id=aep.id,
-                name=aep.name,
-                entry_node=aep.entry_node,
-                trigger_type=aep.trigger_type,
-                trigger_config=aep.trigger_config,
-                isolation_level=aep.isolation_level,
-                priority=aep.priority,
-                max_concurrent=aep.max_concurrent,
-            )

        await runtime.add_graph(
            graph_id=gid,
            graph=runner.graph,
@@ -16,6 +16,8 @@ from framework.llm.provider import Tool, ToolResult, ToolUse

logger = logging.getLogger(__name__)

_INPUT_LOG_MAX_LEN = 500

# Per-execution context overrides. Each asyncio task (and thus each
# concurrent graph execution) gets its own copy, so there are no races
# when multiple ExecutionStreams run in parallel.
@@ -54,6 +56,8 @@ class ToolRegistry:
    def __init__(self):
        self._tools: dict[str, RegisteredTool] = {}
        self._mcp_clients: list[Any] = []  # List of MCPClient instances
        self._mcp_client_servers: dict[int, str] = {}  # client id -> server name
        self._mcp_managed_clients: set[int] = set()  # client ids acquired from the manager
        self._session_context: dict[str, Any] = {}  # Auto-injected context for tools
        self._provider_index: dict[str, set[str]] = {}  # provider -> tool names
        # MCP resync tracking
@@ -243,6 +247,13 @@ class ToolRegistry:
    def _wrap_result(tool_use_id: str, result: Any) -> ToolResult:
        if isinstance(result, ToolResult):
            return result
        # MCP client returns dict with _images when image content is present
        if isinstance(result, dict) and "_images" in result:
            return ToolResult(
                tool_use_id=tool_use_id,
                content=result.get("_text", ""),
                image_content=result["_images"],
            )
        return ToolResult(
            tool_use_id=tool_use_id,
            content=json.dumps(result) if not isinstance(result, str) else result,
@@ -269,6 +280,17 @@ class ToolRegistry:
                r = await result
                return _wrap_result(tool_use.id, r)
            except Exception as exc:
                inputs_str = json.dumps(tool_use.input, default=str)
                if len(inputs_str) > _INPUT_LOG_MAX_LEN:
                    inputs_str = inputs_str[:_INPUT_LOG_MAX_LEN] + "...(truncated)"
                logger.error(
                    "Async tool '%s' failed (tool_use_id=%s): %s\nInputs: %s",
                    tool_use.name,
                    tool_use.id,
                    exc,
                    inputs_str,
                    exc_info=True,
                )
                return ToolResult(
                    tool_use_id=tool_use.id,
                    content=json.dumps({"error": str(exc)}),
@@ -279,6 +301,17 @@ class ToolRegistry:

            return _wrap_result(tool_use.id, result)
        except Exception as e:
            inputs_str = json.dumps(tool_use.input, default=str)
            if len(inputs_str) > _INPUT_LOG_MAX_LEN:
                inputs_str = inputs_str[:_INPUT_LOG_MAX_LEN] + "...(truncated)"
            logger.error(
                "Tool '%s' execution failed for tool_use_id=%s: %s\nInputs: %s",
                tool_use.name,
                tool_use.id,
                e,
                inputs_str,
                exc_info=True,
            )
            return ToolResult(
                tool_use_id=tool_use.id,
                content=json.dumps({"error": str(e)}),
@@ -453,21 +486,85 @@ class ToolRegistry:
            # Treat top-level keys as server names
            server_list = [{"name": name, **cfg} for name, cfg in config.items()]

-        for server_config in server_list:
-            server_config = self._resolve_mcp_server_config(server_config, base_dir)
-            try:
-                self.register_mcp_server(server_config)
-            except Exception as e:
-                name = server_config.get("name", "unknown")
-                logger.warning(f"Failed to register MCP server '{name}': {e}")
+        resolved_server_list = [
+            self._resolve_mcp_server_config(server_config, base_dir)
+            for server_config in server_list
+        ]
+        self.load_registry_servers(resolved_server_list, log_summary=False)

        # Snapshot credential files and ADEN_API_KEY so we can detect mid-session changes
        self._mcp_cred_snapshot = self._snapshot_credentials()
        self._mcp_aden_key_snapshot = os.environ.get("ADEN_API_KEY")

    def _register_mcp_server_with_retry(
        self,
        server_config: dict[str, Any],
    ) -> tuple[bool, int, str | None]:
        """Register a single MCP server with one retry for transient failures."""
        name = server_config.get("name", "unknown")
        last_error: str | None = None

        for attempt in range(2):
            try:
                count = self.register_mcp_server(server_config)
                if count > 0:
                    return True, count, None
                last_error = "registered 0 tools"
            except Exception as exc:
                last_error = str(exc)

            if attempt == 0:
                logger.warning(
                    "MCP server '%s' failed to register, retrying in 2s: %s",
                    name,
                    last_error,
                )
                import time

                time.sleep(2)
            else:
                logger.warning("MCP server '%s' failed after retry: %s", name, last_error)

        return False, 0, last_error

    def load_registry_servers(
        self,
        server_list: list[dict[str, Any]],
        *,
        log_summary: bool = True,
    ) -> list[dict[str, Any]]:
        """Register resolved registry-selected MCP servers with retry and status tracking."""
        results: list[dict[str, Any]] = []

        for server_config in server_list:
            name = server_config.get("name", "unknown")
            success, tools_loaded, error = self._register_mcp_server_with_retry(server_config)
            result = {
                "server": name,
                "status": "loaded" if success else "skipped",
                "tools_loaded": tools_loaded,
                "skipped_reason": None if success else (error or "unknown error"),
            }
            results.append(result)

            if log_summary:
                logger.info(
                    "MCP registry server resolution",
                    extra={
                        "event": "mcp_registry_server_resolution",
                        "server": result["server"],
                        "status": result["status"],
                        "tools_loaded": result["tools_loaded"],
                        "skipped_reason": result["skipped_reason"],
                    },
                )

        return results
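    # Illustrative return value (hypothetical servers): one status record per
    # requested server, consumed by the runner's summary logging.
    #
    #     [
    #         {"server": "github", "status": "loaded",
    #          "tools_loaded": 12, "skipped_reason": None},
    #         {"server": "slack", "status": "skipped",
    #          "tools_loaded": 0, "skipped_reason": "registered 0 tools"},
    #     ]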
    def register_mcp_server(
        self,
        server_config: dict[str, Any],
        use_connection_manager: bool = True,
    ) -> int:
        """
        Register an MCP server and discover its tools.
@@ -483,12 +580,14 @@ class ToolRegistry:
                - url: Server URL (for http)
                - headers: HTTP headers (for http)
                - description: Server description (optional)
            use_connection_manager: When True, reuse a shared client keyed by server name

        Returns:
            Number of tools registered from this server
        """
        try:
            from framework.runner.mcp_client import MCPClient, MCPServerConfig
            from framework.runner.mcp_connection_manager import MCPConnectionManager

            # Build config object
            config = MCPServerConfig(
@@ -500,15 +599,23 @@ class ToolRegistry:
                cwd=server_config.get("cwd"),
                url=server_config.get("url"),
                headers=server_config.get("headers", {}),
                socket_path=server_config.get("socket_path"),
                description=server_config.get("description", ""),
            )

            # Create and connect client
-            client = MCPClient(config)
-            client.connect()
+            if use_connection_manager:
+                client = MCPConnectionManager.get_instance().acquire(config)
+            else:
+                client = MCPClient(config)
+                client.connect()

            # Store client for cleanup
            self._mcp_clients.append(client)
            client_id = id(client)
            self._mcp_client_servers[client_id] = config.name
            if use_connection_manager:
                self._mcp_managed_clients.add(client_id)

            # Register each tool
            server_name = server_config["name"]
@@ -548,14 +655,25 @@ class ToolRegistry:
                    }
                    merged_inputs = {**clean_inputs, **filtered_context}
                    result = client_ref.call_tool(tool_name, merged_inputs)
-                    # MCP tools return content array, extract the result
+                    # MCP client already extracts content (returns str
+                    # or {"_text": ..., "_images": ...} for image results).
+                    # Handle legacy list format from HTTP transport.
                    if isinstance(result, list) and len(result) > 0:
                        if isinstance(result[0], dict) and "text" in result[0]:
                            return result[0]["text"]
                        return result[0]
                    return result
                except Exception as e:
-                    logger.error(f"MCP tool '{tool_name}' execution failed: {e}")
+                    inputs_str = json.dumps(inputs, default=str)
+                    if len(inputs_str) > _INPUT_LOG_MAX_LEN:
+                        inputs_str = inputs_str[:_INPUT_LOG_MAX_LEN] + "...(truncated)"
+                    logger.error(
+                        "MCP tool '%s' execution failed: %s\nInputs: %s",
+                        tool_name,
+                        e,
+                        inputs_str,
+                        exc_info=True,
+                    )
                    return {"error": str(e)}

            return executor
@@ -708,12 +826,7 @@ class ToolRegistry:
        logger.info("%s — resyncing MCP servers", reason)

        # 1. Disconnect existing MCP clients
-        for client in self._mcp_clients:
-            try:
-                client.disconnect()
-            except Exception as e:
-                logger.warning(f"Error disconnecting MCP client during resync: {e}")
-        self._mcp_clients.clear()
+        self._cleanup_mcp_clients("during resync")

        # 2. Remove MCP-registered tools
        for name in self._mcp_tool_names:
@@ -728,12 +841,28 @@ class ToolRegistry:

    def cleanup(self) -> None:
        """Clean up all MCP client connections."""
        self._cleanup_mcp_clients()

    def _cleanup_mcp_clients(self, context: str = "") -> None:
        """Disconnect or release all tracked MCP clients for this registry."""
        if context:
            context = f" {context}"

        for client in self._mcp_clients:
            client_id = id(client)
            server_name = self._mcp_client_servers.get(client_id, client.config.name)
            try:
-                client.disconnect()
                if client_id in self._mcp_managed_clients:
                    from framework.runner.mcp_connection_manager import MCPConnectionManager

                    MCPConnectionManager.get_instance().release(server_name)
                else:
                    client.disconnect()
            except Exception as e:
-                logger.warning(f"Error disconnecting MCP client: {e}")
+                logger.warning(f"Error disconnecting MCP client{context}: {e}")
        self._mcp_clients.clear()
        self._mcp_client_servers.clear()
        self._mcp_managed_clients.clear()

    def __del__(self):
        """Destructor to ensure cleanup."""
@@ -454,11 +454,11 @@ An agent has requested handoff to the Hive Coder (via the `escalate` synthetic t

## Worker Health Monitoring

-These events form the **judge → queen → operator** escalation pipeline.
+These events form the **queen → operator** escalation pipeline.

### `worker_escalation_ticket`

-The Worker Health Judge has detected a degradation pattern and is escalating to the Queen.
+A worker degradation pattern has been detected and is being escalated to the Queen.

| Data Field | Type | Description |
| ---------- | ------ | ------------------------------------ |
@@ -8,6 +8,7 @@ while preserving the goal-driven approach.

import asyncio
import logging
import time
import uuid
from collections.abc import Callable
from dataclasses import dataclass, field
from datetime import datetime

@@ -28,6 +29,7 @@ if TYPE_CHECKING:
    from framework.graph.edge import GraphSpec
    from framework.graph.goal import Goal
    from framework.llm.provider import LLMProvider, Tool
    from framework.skills.manager import SkillsManagerConfig

logger = logging.getLogger(__name__)

@@ -131,6 +133,11 @@ class AgentRuntime:
        accounts_data: list[dict] | None = None,
        tool_provider_map: dict[str, str] | None = None,
        event_bus: "EventBus | None" = None,
        skills_manager_config: "SkillsManagerConfig | None" = None,
        # Deprecated — pass skills_manager_config instead.
        skills_catalog_prompt: str = "",
        protocols_prompt: str = "",
        skill_dirs: list[str] | None = None,
    ):
        """
        Initialize agent runtime.

@@ -152,7 +159,16 @@ class AgentRuntime:
            event_bus: Optional external EventBus. If provided, the runtime shares
                this bus instead of creating its own. Used by SessionManager to
                share a single bus between queen, worker, and judge.
            skills_catalog_prompt: Available skills catalog for system prompt
            protocols_prompt: Default skill operational protocols for system prompt
            skill_dirs: Skill base directories for Tier 3 resource access
            skills_manager_config: Skill configuration — the runtime owns
                discovery, loading, and prompt rendering internally.
            skills_catalog_prompt: Deprecated. Pre-rendered skills catalog.
            protocols_prompt: Deprecated. Pre-rendered operational protocols.
        """
        from framework.skills.manager import SkillsManager

        self.graph = graph
        self.goal = goal
        self._config = config or AgentRuntimeConfig()

@@ -160,6 +176,31 @@ class AgentRuntime:
        self._checkpoint_config = checkpoint_config
        self.accounts_prompt = accounts_prompt

        # --- Skill lifecycle: runtime owns the SkillsManager ---
        if skills_manager_config is not None:
            # New path: config-driven, runtime handles loading
            self._skills_manager = SkillsManager(skills_manager_config)
            self._skills_manager.load()
        elif skills_catalog_prompt or protocols_prompt:
            # Legacy path: caller passed pre-rendered strings
            import warnings

            warnings.warn(
                "Passing pre-rendered skills_catalog_prompt/protocols_prompt "
                "is deprecated. Pass skills_manager_config instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            self._skills_manager = SkillsManager.from_precomputed(
                skills_catalog_prompt, protocols_prompt
            )
        else:
            # Bare constructor: auto-load defaults
            self._skills_manager = SkillsManager()
            self._skills_manager.load()

        self.skill_dirs: list[str] = self._skills_manager.allowlisted_dirs

        # Primary graph identity
        self._graph_id: str = graph_id or "primary"
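The three construction paths above, condensed into a hedged sketch; `SkillsManager`, `from_precomputed`, and `load` appear in this diff, while constructing `SkillsManagerConfig` with no arguments is an assumption:

```python
from framework.skills.manager import SkillsManager, SkillsManagerConfig

# 1. Config-driven (preferred): the runtime discovers and loads skills itself.
mgr = SkillsManager(SkillsManagerConfig())  # default config assumed
mgr.load()

# 2. Legacy: caller already rendered the prompt strings (deprecated path).
mgr = SkillsManager.from_precomputed("catalog text", "protocol text")

# 3. Bare default: auto-load whatever the default configuration finds.
mgr = SkillsManager()
mgr.load()
```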
@@ -215,6 +256,18 @@ class AgentRuntime:
        # Optional greeting shown to user on TUI load (set by AgentRunner)
        self.intro_message: str = ""

    # ------------------------------------------------------------------
    # Skill prompt accessors (read by ExecutionStream constructors)
    # ------------------------------------------------------------------

    @property
    def skills_catalog_prompt(self) -> str:
        return self._skills_manager.skills_catalog_prompt

    @property
    def protocols_prompt(self) -> str:
        return self._skills_manager.protocols_prompt

    def register_entry_point(self, spec: EntryPointSpec) -> None:
        """
        Register a named entry point for the agent.

@@ -292,6 +345,9 @@ class AgentRuntime:
                accounts_prompt=self._accounts_prompt,
                accounts_data=self._accounts_data,
                tool_provider_map=self._tool_provider_map,
                skills_catalog_prompt=self.skills_catalog_prompt,
                protocols_prompt=self.protocols_prompt,
                skill_dirs=self.skill_dirs,
            )
            await stream.start()
            self._streams[ep_id] = stream
@@ -392,18 +448,24 @@ class AgentRuntime:

        tc = spec.trigger_config
        cron_expr = tc.get("cron")
        interval = tc.get("interval_minutes")
        _raw_interval = tc.get("interval_minutes")
        interval = float(_raw_interval) if _raw_interval is not None else None
        run_immediately = tc.get("run_immediately", False)

        if cron_expr:
            # Cron expression mode — takes priority over interval_minutes
            try:
                from croniter import croniter
            except ImportError as e:
                raise RuntimeError(
                    "croniter is required for cron-based entry points. "
                    "Install it with: uv pip install croniter"
                ) from e

            # Validate the expression upfront
            try:
                if not croniter.is_valid(cron_expr):
                    raise ValueError(f"Invalid cron expression: {cron_expr}")
            except (ImportError, ValueError) as e:
            except ValueError as e:
                logger.warning(
                    "Entry point '%s' has invalid cron config: %s",
                    ep_id,

@@ -543,7 +605,7 @@ class AgentRuntime:
                    ep_id,
                    cron_expr,
                    run_immediately,
                    idle_timeout=tc.get("idle_timeout_seconds", 300),
                    idle_timeout=float(tc.get("idle_timeout_seconds", 300)),
                )()
            )
            self._timer_tasks.append(task)
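The float() coercion and the upfront validation both guard config parsed from JSON/YAML, where numbers may arrive as int, float, or string; a small sketch using croniter's real `is_valid` helper:

```python
from croniter import croniter

# Numeric coercion: all of these normalize to the same float interval.
for raw in (5, 5.0, "5"):
    interval = float(raw) if raw is not None else None
    assert interval == 5.0

# Upfront cron validation, as done before scheduling the entry point.
assert croniter.is_valid("*/15 * * * *") is True
assert croniter.is_valid("not a cron") is False
```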
@@ -673,7 +735,7 @@ class AgentRuntime:
                    ep_id,
                    interval,
                    run_immediately,
                    idle_timeout=tc.get("idle_timeout_seconds", 300),
                    idle_timeout=float(tc.get("idle_timeout_seconds", 300)),
                )()
            )
            self._timer_tasks.append(task)

@@ -822,7 +884,8 @@ class AgentRuntime:
        if stream is None:
            raise ValueError(f"Entry point '{entry_point_id}' not found")

        return await stream.execute(input_data, correlation_id, session_state)
        run_id = uuid.uuid4().hex[:12]
        return await stream.execute(input_data, correlation_id, session_state, run_id=run_id)

    async def trigger_and_wait(
        self,

@@ -919,6 +982,9 @@ class AgentRuntime:
            accounts_prompt=self._accounts_prompt,
            accounts_data=self._accounts_data,
            tool_provider_map=self._tool_provider_map,
            skills_catalog_prompt=self.skills_catalog_prompt,
            protocols_prompt=self.protocols_prompt,
            skill_dirs=self.skill_dirs,
        )
        if self._running:
            await stream.start()

@@ -997,7 +1063,8 @@ class AgentRuntime:
            if spec.trigger_type != "timer":
                continue
            tc = spec.trigger_config
            interval = tc.get("interval_minutes")
            _raw_interval = tc.get("interval_minutes")
            interval = float(_raw_interval) if _raw_interval is not None else None
            run_immediately = tc.get("run_immediately", False)

            if interval and interval > 0 and self._running:

@@ -1142,7 +1209,7 @@ class AgentRuntime:
                    ep_id,
                    interval,
                    run_immediately,
                    idle_timeout=tc.get("idle_timeout_seconds", 300),
                    idle_timeout=float(tc.get("idle_timeout_seconds", 300)),
                )()
            )
            timer_tasks.append(task)

@@ -1359,8 +1426,8 @@ class AgentRuntime:
        allowed_keys = set(entry_node.input_keys)

        # Search primary graph's streams for an active session.
        # Skip isolated streams (e.g. health judge) — they have their own
        # session directories and must never be used as a shared session.
        # Skip isolated streams — they have their own session directories
        # and must never be used as a shared session.
        all_streams: list[tuple[str, ExecutionStream]] = []
        for _gid, reg in self._graphs.items():
            for ep_id, stream in reg.streams.items():

@@ -1407,6 +1474,7 @@ class AgentRuntime:
        graph_id: str | None = None,
        *,
        is_client_input: bool = False,
        image_content: list[dict[str, Any]] | None = None,
    ) -> bool:
        """Inject user input into a running client-facing node.

@@ -1419,6 +1487,8 @@ class AgentRuntime:
            graph_id: Optional graph to search first (defaults to active graph)
            is_client_input: True when the message originates from a real
                human user (e.g. /chat endpoint), False for external events.
            image_content: Optional list of image content blocks (OpenAI
                image_url format) to include alongside the text.

        Returns:
            True if input was delivered, False if no matching node found

@@ -1430,7 +1500,9 @@ class AgentRuntime:
        target = graph_id or self._active_graph_id
        if target in self._graphs:
            for stream in self._graphs[target].streams.values():
                if await stream.inject_input(node_id, content, is_client_input=is_client_input):
                if await stream.inject_input(
                    node_id, content, is_client_input=is_client_input, image_content=image_content
                ):
                    return True

        # Then search all other graphs

@@ -1438,7 +1510,9 @@ class AgentRuntime:
            if gid == target:
                continue
            for stream in reg.streams.values():
                if await stream.inject_input(node_id, content, is_client_input=is_client_input):
                if await stream.inject_input(
                    node_id, content, is_client_input=is_client_input, image_content=image_content
                ):
                    return True
        return False

@@ -1697,6 +1771,11 @@ def create_agent_runtime(
    accounts_data: list[dict] | None = None,
    tool_provider_map: dict[str, str] | None = None,
    event_bus: "EventBus | None" = None,
    skills_manager_config: "SkillsManagerConfig | None" = None,
    # Deprecated — pass skills_manager_config instead.
    skills_catalog_prompt: str = "",
    protocols_prompt: str = "",
    skill_dirs: list[str] | None = None,
) -> AgentRuntime:
    """
    Create and configure an AgentRuntime with entry points.

@@ -1723,6 +1802,13 @@ def create_agent_runtime(
        accounts_data: Raw account data for per-node prompt generation.
        tool_provider_map: Tool name to provider name mapping for account routing.
        event_bus: Optional external EventBus to share with other components.
        skills_catalog_prompt: Available skills catalog for system prompt.
        protocols_prompt: Default skill operational protocols for system prompt.
        skill_dirs: Skill base directories for Tier 3 resource access.
        skills_manager_config: Skill configuration — the runtime owns
            discovery, loading, and prompt rendering internally.
        skills_catalog_prompt: Deprecated. Pre-rendered skills catalog.
        protocols_prompt: Deprecated. Pre-rendered operational protocols.

    Returns:
        Configured AgentRuntime (not yet started)

@@ -1749,6 +1835,10 @@ def create_agent_runtime(
        accounts_data=accounts_data,
        tool_provider_map=tool_provider_map,
        event_bus=event_bus,
        skills_manager_config=skills_manager_config,
        skills_catalog_prompt=skills_catalog_prompt,
        protocols_prompt=protocols_prompt,
        skill_dirs=skill_dirs,
    )

    for spec in entry_points:
@@ -1,4 +1,4 @@
"""EscalationTicket — structured schema for worker health judge escalations."""
"""EscalationTicket — structured schema for worker health escalations."""

from __future__ import annotations

@@ -10,10 +10,10 @@ from pydantic import BaseModel, Field


class EscalationTicket(BaseModel):
    """Structured escalation report emitted by the Worker Health Judge.
    """Structured escalation report for worker health monitoring.

    The judge must fill every field before calling emit_escalation_ticket.
    Pydantic validation rejects partial tickets, preventing impulsive escalation.
    All fields must be filled before calling emit_escalation_ticket.
    Pydantic validation rejects partial tickets.
    """

    ticket_id: str = Field(default_factory=lambda: str(uuid4()))

@@ -25,7 +25,7 @@ class EscalationTicket(BaseModel):
    worker_node_id: str
    worker_graph_id: str

    # Problem characterization (filled by judge via LLM deliberation)
    # Problem characterization
    severity: Literal["low", "medium", "high", "critical"]
    cause: str  # Human-readable: "Node has produced 18 RETRY verdicts..."
    judge_reasoning: str  # Judge's own deliberation chain
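A hedged sketch of the all-or-nothing validation: constructing a ticket with a missing required field raises, so partial escalations cannot be emitted. The hunk is truncated, so the real model may require more fields than shown here:

```python
from pydantic import ValidationError

try:
    EscalationTicket(
        worker_node_id="worker-1",
        worker_graph_id="primary",
        severity="high",
        cause="Node has produced 18 RETRY verdicts in a row",
        # judge_reasoning intentionally omitted
    )
except ValidationError as e:
    # At least judge_reasoning is reported missing; a real ticket may
    # have additional required fields beyond this truncated hunk.
    print(e.error_count(), "missing field(s)")
```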
@@ -97,6 +97,7 @@ class EventType(StrEnum):
    # Client I/O (client_facing=True nodes only)
    CLIENT_OUTPUT_DELTA = "client_output_delta"
    CLIENT_INPUT_REQUESTED = "client_input_requested"
    CLIENT_INPUT_RECEIVED = "client_input_received"

    # Internal node observability (client_facing=False nodes)
    NODE_INTERNAL_OUTPUT = "node_internal_output"

@@ -104,7 +105,7 @@ class EventType(StrEnum):
    NODE_STALLED = "node_stalled"
    NODE_TOOL_DOOM_LOOP = "node_tool_doom_loop"

    # Judge decisions
    # Judge decisions (implicit judge in event loop nodes)
    JUDGE_VERDICT = "judge_verdict"

    # Output tracking

@@ -116,6 +117,7 @@ class EventType(StrEnum):

    # Context management
    CONTEXT_COMPACTED = "context_compacted"
    CONTEXT_USAGE_UPDATED = "context_usage_updated"

    # External triggers
    WEBHOOK_RECEIVED = "webhook_received"

@@ -126,7 +128,7 @@ class EventType(StrEnum):
    # Escalation (agent requests handoff to queen)
    ESCALATION_REQUESTED = "escalation_requested"

    # Worker health monitoring (judge → queen → operator)
    # Worker health monitoring
    WORKER_ESCALATION_TICKET = "worker_escalation_ticket"
    QUEEN_INTERVENTION_REQUESTED = "queen_intervention_requested"

@@ -152,6 +154,14 @@ class EventType(StrEnum):
    # Subagent reports (one-way progress updates from sub-agents)
    SUBAGENT_REPORT = "subagent_report"

    # Trigger lifecycle (queen-level triggers / heartbeats)
    TRIGGER_AVAILABLE = "trigger_available"
    TRIGGER_ACTIVATED = "trigger_activated"
    TRIGGER_DEACTIVATED = "trigger_deactivated"
    TRIGGER_FIRED = "trigger_fired"
    TRIGGER_REMOVED = "trigger_removed"
    TRIGGER_UPDATED = "trigger_updated"


@dataclass
class AgentEvent:

@@ -165,10 +175,11 @@ class AgentEvent:
    timestamp: datetime = field(default_factory=datetime.now)
    correlation_id: str | None = None  # For tracking related events
    graph_id: str | None = None  # Which graph emitted this event (multi-graph sessions)
    run_id: str | None = None  # Unique ID per trigger() invocation — used for run dividers

    def to_dict(self) -> dict:
        """Convert to dictionary for serialization."""
        return {
        d = {
            "type": self.type.value,
            "stream_id": self.stream_id,
            "node_id": self.node_id,

@@ -178,6 +189,9 @@ class AgentEvent:
            "correlation_id": self.correlation_id,
            "graph_id": self.graph_id,
        }
        if self.run_id is not None:
            d["run_id"] = self.run_id
        return d
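The conditional write keeps pre-run_id event logs byte-compatible: `run_id` only appears once a run actually sets it. A hedged sketch using the dataclass fields shown in this diff (other constructor defaults are assumed):

```python
evt = AgentEvent(
    type=EventType.NODE_LOOP_STARTED,
    stream_id="s1",
    node_id="n1",
)
assert "run_id" not in evt.to_dict()  # legacy serialization shape preserved

evt.run_id = "ab12cd34ef56"  # e.g. uuid.uuid4().hex[:12] from trigger()
assert evt.to_dict()["run_id"] == "ab12cd34ef56"
```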
# Type for event handlers

@@ -246,6 +260,128 @@ class EventBus:
        self._semaphore = asyncio.Semaphore(max_concurrent_handlers)
        self._subscription_counter = 0
        self._lock = asyncio.Lock()
        # Per-session persistent event log (always-on, survives restarts)
        self._session_log: IO[str] | None = None
        self._session_log_iteration_offset: int = 0
        # Accumulator for client_output_delta snapshots — flushed on llm_turn_complete.
        # Key: (stream_id, node_id, execution_id, iteration, inner_turn) → latest AgentEvent
        self._pending_output_snapshots: dict[tuple, AgentEvent] = {}

    def set_session_log(self, path: Path, *, iteration_offset: int = 0) -> None:
        """Enable per-session event persistence to a JSONL file.

        Called once when the queen starts so that all events survive server
        restarts and can be replayed to reconstruct the frontend state.

        ``iteration_offset`` is added to the ``iteration`` field in logged
        events so that cold-resumed sessions produce monotonically increasing
        iteration values — preventing frontend message ID collisions between
        the original run and resumed runs.
        """
        if self._session_log is not None:
            try:
                self._session_log.close()
            except Exception:
                pass
        path.parent.mkdir(parents=True, exist_ok=True)
        self._session_log = open(path, "a", encoding="utf-8")  # noqa: SIM115
        self._session_log_iteration_offset = iteration_offset
        logger.info("Session event log → %s (iteration_offset=%d)", path, iteration_offset)

    def close_session_log(self) -> None:
        """Close the per-session event log file."""
        # Flush any pending output snapshots before closing
        self._flush_pending_snapshots()
        if self._session_log is not None:
            try:
                self._session_log.close()
            except Exception:
                pass
            self._session_log = None
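A quick illustration of the offset arithmetic the docstring above describes; this is a sketch, not the framework's own test:

```python
# A session that previously reached iteration 41 is cold-resumed with
# iteration_offset=42, so raw iterations 0, 1, 2 are logged (and streamed)
# as 42, 43, 44, avoiding frontend message ID collisions with replayed
# events from the original run.
offset = 42
raw_iterations = [0, 1, 2]
assert [i + offset for i in raw_iterations] == [42, 43, 44]
```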
    # Event types that are high-frequency streaming deltas — accumulated rather
    # than written individually to the session log.
    _STREAMING_DELTA_TYPES = frozenset(
        {
            EventType.CLIENT_OUTPUT_DELTA,
            EventType.LLM_TEXT_DELTA,
            EventType.LLM_REASONING_DELTA,
        }
    )

    def _write_session_log_event(self, event: AgentEvent) -> None:
        """Write an event to the per-session log with streaming coalescing.

        Streaming deltas (client_output_delta, llm_text_delta) are accumulated
        in memory. When llm_turn_complete fires, any pending snapshots for that
        (stream_id, node_id, execution_id) are flushed as single consolidated
        events before the turn-complete event itself is written.

        Note: iteration offset is already applied in publish() before this is
        called, so events here already have correct iteration values.
        """
        if self._session_log is None:
            return

        if event.type in self._STREAMING_DELTA_TYPES:
            # Accumulate — keep only the latest event (which carries the full snapshot)
            key = (
                event.stream_id,
                event.node_id,
                event.execution_id,
                event.data.get("iteration"),
                event.data.get("inner_turn", 0),
            )
            self._pending_output_snapshots[key] = event
            return

        # On turn-complete, flush accumulated snapshots for this stream first
        if event.type == EventType.LLM_TURN_COMPLETE:
            self._flush_pending_snapshots(
                stream_id=event.stream_id,
                node_id=event.node_id,
                execution_id=event.execution_id,
            )

        line = json.dumps(event.to_dict(), default=str)
        self._session_log.write(line + "\n")
        self._session_log.flush()

    def _flush_pending_snapshots(
        self,
        stream_id: str | None = None,
        node_id: str | None = None,
        execution_id: str | None = None,
    ) -> None:
        """Flush accumulated streaming snapshots to the session log.

        When called with filters, only matching entries are flushed.
        When called without filters (e.g. on close), everything is flushed.
        """
        if self._session_log is None or not self._pending_output_snapshots:
            return

        to_flush: list[tuple] = []
        for key, _evt in self._pending_output_snapshots.items():
            if stream_id is not None:
                k_stream, k_node, k_exec, _, _ = key
                if k_stream != stream_id or k_node != node_id or k_exec != execution_id:
                    continue
            to_flush.append(key)

        for key in to_flush:
            evt = self._pending_output_snapshots.pop(key)
            try:
                line = json.dumps(evt.to_dict(), default=str)
                self._session_log.write(line + "\n")
            except Exception:
                pass

        if to_flush:
            try:
                self._session_log.flush()
            except Exception:
                pass
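To make the coalescing concrete, a hedged sketch assuming a bus that already has a session log attached via `set_session_log`: three deltas for one (stream, node, execution, iteration, inner_turn) key collapse into a single JSONL line, written when the matching turn-complete arrives.

```python
async def demo(bus: EventBus) -> None:
    # Three streaming snapshots for the same slot: only the last survives
    # in the accumulator, since each delta carries the full snapshot.
    for chunk in ("Hel", "Hello", "Hello, world"):
        await bus.publish(AgentEvent(
            type=EventType.CLIENT_OUTPUT_DELTA,
            stream_id="queen", node_id="queen", execution_id="e1",
            data={"content": chunk, "snapshot": chunk,
                  "iteration": 0, "inner_turn": 0},
        ))
    # Turn completion flushes the single consolidated snapshot, then the
    # llm_turn_complete event itself is written to events.jsonl.
    await bus.publish(AgentEvent(
        type=EventType.LLM_TURN_COMPLETE,
        stream_id="queen", node_id="queen", execution_id="e1", data={},
    ))
```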
    def subscribe(
        self,

@@ -311,6 +447,19 @@ class EventBus:
        Args:
            event: Event to publish
        """
        # Apply iteration offset at the source so ALL consumers (SSE subscribers,
        # event history, session log) see the same monotonically increasing
        # iteration values. Without this, live SSE would use raw iterations
        # while events.jsonl would use offset iterations, causing ID collisions
        # on the frontend when replaying after cold resume.
        if (
            self._session_log_iteration_offset
            and isinstance(event.data, dict)
            and "iteration" in event.data
        ):
            offset = self._session_log_iteration_offset
            event.data = {**event.data, "iteration": event.data["iteration"] + offset}

        # Add to history
        async with self._lock:
            self._event_history.append(event)

@@ -331,6 +480,15 @@ class EventBus:
            except Exception:
                pass  # never break event delivery

        # Per-session persistent log (always-on when set_session_log was called).
        # Streaming deltas are coalesced: client_output_delta and llm_text_delta
        # are accumulated and flushed as a single snapshot event on llm_turn_complete.
        if self._session_log is not None:
            try:
                self._write_session_log_event(event)
            except Exception:
                pass  # never break event delivery

        # Find matching subscriptions
        matching_handlers: list[EventHandler] = []

@@ -377,8 +535,8 @@ class EventBus:
            async with self._semaphore:
                try:
                    await handler(event)
                except Exception as e:
                    logger.error(f"Handler error for {event.type}: {e}")
                except Exception:
                    logger.exception(f"Handler error for {event.type}")

        # Run all handlers concurrently
        await asyncio.gather(*[run_handler(h) for h in handlers], return_exceptions=True)

@@ -391,6 +549,7 @@ class EventBus:
        execution_id: str,
        input_data: dict[str, Any] | None = None,
        correlation_id: str | None = None,
        run_id: str | None = None,
    ) -> None:
        """Emit execution started event."""
        await self.publish(

@@ -400,6 +559,7 @@ class EventBus:
                execution_id=execution_id,
                data={"input": input_data or {}},
                correlation_id=correlation_id,
                run_id=run_id,
            )
        )

@@ -409,6 +569,7 @@ class EventBus:
        execution_id: str,
        output: dict[str, Any] | None = None,
        correlation_id: str | None = None,
        run_id: str | None = None,
    ) -> None:
        """Emit execution completed event."""
        await self.publish(

@@ -418,6 +579,7 @@ class EventBus:
                execution_id=execution_id,
                data={"output": output or {}},
                correlation_id=correlation_id,
                run_id=run_id,
            )
        )

@@ -427,6 +589,7 @@ class EventBus:
        execution_id: str,
        error: str,
        correlation_id: str | None = None,
        run_id: str | None = None,
    ) -> None:
        """Emit execution failed event."""
        await self.publish(

@@ -436,6 +599,7 @@ class EventBus:
                execution_id=execution_id,
                data={"error": error},
                correlation_id=correlation_id,
                run_id=run_id,
            )
        )

@@ -527,15 +691,19 @@ class EventBus:
        node_id: str,
        iteration: int,
        execution_id: str | None = None,
        extra_data: dict[str, Any] | None = None,
    ) -> None:
        """Emit node loop iteration event."""
        data: dict[str, Any] = {"iteration": iteration}
        if extra_data:
            data.update(extra_data)
        await self.publish(
            AgentEvent(
                type=EventType.NODE_LOOP_ITERATION,
                stream_id=stream_id,
                node_id=node_id,
                execution_id=execution_id,
                data={"iteration": iteration},
                data=data,
            )
        )

@@ -584,6 +752,7 @@ class EventBus:
        content: str,
        snapshot: str,
        execution_id: str | None = None,
        inner_turn: int = 0,
    ) -> None:
        """Emit LLM text delta event."""
        await self.publish(

@@ -592,7 +761,7 @@ class EventBus:
                stream_id=stream_id,
                node_id=node_id,
                execution_id=execution_id,
                data={"content": content, "snapshot": snapshot},
                data={"content": content, "snapshot": snapshot, "inner_turn": inner_turn},
            )
        )

@@ -708,9 +877,10 @@ class EventBus:
        snapshot: str,
        execution_id: str | None = None,
        iteration: int | None = None,
        inner_turn: int = 0,
    ) -> None:
        """Emit client output delta event (client_facing=True nodes)."""
        data: dict = {"content": content, "snapshot": snapshot}
        data: dict = {"content": content, "snapshot": snapshot, "inner_turn": inner_turn}
        if iteration is not None:
            data["iteration"] = iteration
        await self.publish(

@@ -1009,7 +1179,7 @@ class EventBus:
        ticket: dict,
        execution_id: str | None = None,
    ) -> None:
        """Emitted by health judge when worker shows a degradation pattern."""
        """Emitted when worker shows a degradation pattern."""
        await self.publish(
            AgentEvent(
                type=EventType.WORKER_ESCALATION_TICKET,
@@ -127,6 +127,7 @@ class ExecutionContext:
    input_data: dict[str, Any]
    isolation_level: IsolationLevel
    session_state: dict[str, Any] | None = None  # For resuming from pause
    run_id: str | None = None  # Unique ID per trigger() invocation
    started_at: datetime = field(default_factory=datetime.now)
    completed_at: datetime | None = None
    status: str = "pending"  # pending, running, completed, failed, paused

@@ -185,6 +186,9 @@ class ExecutionStream:
        accounts_prompt: str = "",
        accounts_data: list[dict] | None = None,
        tool_provider_map: dict[str, str] | None = None,
        skills_catalog_prompt: str = "",
        protocols_prompt: str = "",
        skill_dirs: list[str] | None = None,
    ):
        """
        Initialize execution stream.

@@ -208,6 +212,9 @@ class ExecutionStream:
            accounts_prompt: Connected accounts block for system prompt injection
            accounts_data: Raw account data for per-node prompt generation
            tool_provider_map: Tool name to provider name mapping for account routing
            skills_catalog_prompt: Available skills catalog for system prompt
            protocols_prompt: Default skill operational protocols for system prompt
            skill_dirs: Skill base directories for Tier 3 resource access
        """
        self.stream_id = stream_id
        self.entry_spec = entry_spec

@@ -229,6 +236,22 @@ class ExecutionStream:
        self._accounts_prompt = accounts_prompt
        self._accounts_data = accounts_data
        self._tool_provider_map = tool_provider_map
        self._skills_catalog_prompt = skills_catalog_prompt
        self._protocols_prompt = protocols_prompt
        self._skill_dirs: list[str] = skill_dirs or []

        _es_logger = logging.getLogger(__name__)
        if protocols_prompt:
            _es_logger.info(
                "ExecutionStream[%s] received protocols_prompt (%d chars)",
                stream_id,
                len(protocols_prompt),
            )
        else:
            _es_logger.warning(
                "ExecutionStream[%s] received EMPTY protocols_prompt",
                stream_id,
            )

        # Create stream-scoped runtime
        self._runtime = StreamRuntime(

@@ -410,6 +433,7 @@ class ExecutionStream:
        content: str,
        *,
        is_client_input: bool = False,
        image_content: list[dict[str, Any]] | None = None,
    ) -> bool:
        """Inject user input into a running client-facing EventLoopNode.

@@ -421,7 +445,33 @@ class ExecutionStream:
        for executor in self._active_executors.values():
            node = executor.node_registry.get(node_id)
            if node is not None and hasattr(node, "inject_event"):
                await node.inject_event(content, is_client_input=is_client_input)
                await node.inject_event(
                    content, is_client_input=is_client_input, image_content=image_content
                )
                return True
        return False

    async def inject_trigger(
        self,
        node_id: str,
        trigger: Any,
    ) -> bool:
        """Inject a trigger event into a running queen EventLoopNode.

        Searches active executors for a node matching ``node_id`` and calls
        its ``inject_trigger()`` method to wake the queen.

        Args:
            node_id: The queen EventLoopNode ID.
            trigger: A ``TriggerEvent`` instance (typed as Any to avoid
                circular imports with graph layer).

        Returns True if the trigger was delivered, False otherwise.
        """
        for executor in self._active_executors.values():
            node = executor.node_registry.get(node_id)
            if node is not None and hasattr(node, "inject_trigger"):
                await node.inject_trigger(trigger)
                return True
        return False
@@ -430,6 +480,7 @@ class ExecutionStream:
        input_data: dict[str, Any],
        correlation_id: str | None = None,
        session_state: dict[str, Any] | None = None,
        run_id: str | None = None,
    ) -> str:
        """
        Queue an execution and return its ID.

@@ -440,6 +491,7 @@ class ExecutionStream:
            input_data: Input data for this execution
            correlation_id: Optional ID to correlate related executions
            session_state: Optional session state to resume from (with paused_at, memory)
            run_id: Unique ID for this trigger invocation (for run dividers)

        Returns:
            Execution ID for tracking

@@ -500,6 +552,7 @@ class ExecutionStream:
            input_data=input_data,
            isolation_level=self.entry_spec.get_isolation_level(),
            session_state=session_state,
            run_id=run_id,
        )

        async with self._lock:

@@ -575,7 +628,9 @@ class ExecutionStream:
            execution_id=execution_id,
            input_data=ctx.input_data,
            correlation_id=ctx.correlation_id,
            run_id=ctx.run_id,
        )
        self._write_run_event(execution_id, ctx.run_id, "run_started")

        # Create execution-scoped memory
        self._state_manager.create_memory(

@@ -645,6 +700,9 @@ class ExecutionStream:
            accounts_prompt=self._accounts_prompt,
            accounts_data=self._accounts_data,
            tool_provider_map=self._tool_provider_map,
            skills_catalog_prompt=self._skills_catalog_prompt,
            protocols_prompt=self._protocols_prompt,
            skill_dirs=self._skill_dirs,
        )
        # Track executor so inject_input() can reach EventLoopNode instances
        self._active_executors[execution_id] = executor

@@ -740,6 +798,7 @@ class ExecutionStream:
                    execution_id=execution_id,
                    output=result.output,
                    correlation_id=ctx.correlation_id,
                    run_id=ctx.run_id,
                )
            elif result.paused_at:
                # The executor returns paused_at on CancelledError but

@@ -757,8 +816,22 @@ class ExecutionStream:
                    execution_id=execution_id,
                    error=result.error or "Unknown error",
                    correlation_id=ctx.correlation_id,
                    run_id=ctx.run_id,
                )

            # Write run event for historical restoration
            if result.success:
                self._write_run_event(execution_id, ctx.run_id, "run_completed")
            elif result.paused_at:
                self._write_run_event(execution_id, ctx.run_id, "run_paused")
            else:
                self._write_run_event(
                    execution_id,
                    ctx.run_id,
                    "run_failed",
                    {"error": result.error or "Unknown error"},
                )

            logger.debug(f"Execution {execution_id} completed: success={result.success}")

        except asyncio.CancelledError:

@@ -818,8 +891,10 @@ class ExecutionStream:
                execution_id=execution_id,
                error=cancel_reason,
                correlation_id=ctx.correlation_id,
                run_id=ctx.run_id,
            )

            self._write_run_event(execution_id, ctx.run_id, "run_cancelled")
            # Don't re-raise - we've handled it and saved state

        except Exception as e:

@@ -856,7 +931,9 @@ class ExecutionStream:
                execution_id=execution_id,
                error=str(e),
                correlation_id=ctx.correlation_id,
                run_id=ctx.run_id,
            )
            self._write_run_event(execution_id, ctx.run_id, "run_failed", {"error": str(e)})

        finally:
            # Clean up state

@@ -872,6 +949,36 @@ class ExecutionStream:
            self._completion_events.pop(execution_id, None)
            self._execution_tasks.pop(execution_id, None)

    def _write_run_event(
        self,
        execution_id: str,
        run_id: str | None,
        event: str,
        extra: dict[str, Any] | None = None,
    ) -> None:
        """Append a run lifecycle event to runs.jsonl for historical restoration."""
        if not self._session_store or not run_id:
            return
        import json as _json

        session_dir = self._session_store.get_session_path(execution_id)
        runs_file = session_dir / "runs.jsonl"
        now = datetime.now()
        record = {
            "run_id": run_id,
            "event": event,
            "timestamp": now.isoformat(),
            "created_at": now.timestamp(),
        }
        if extra:
            record.update(extra)
        try:
            runs_file.parent.mkdir(parents=True, exist_ok=True)
            with open(runs_file, "a", encoding="utf-8") as f:
                f.write(_json.dumps(record) + "\n")
        except OSError:
            pass  # Non-critical — don't break execution
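A companion sketch for reading the file back; the record shape mirrors `_write_run_event` above, while the helper name and path handling are illustrative:

```python
import json
from pathlib import Path

def read_run_events(session_dir: Path) -> list[dict]:
    """Read runs.jsonl back to reconstruct run history (illustrative helper)."""
    runs_file = session_dir / "runs.jsonl"
    if not runs_file.exists():
        return []
    events = []
    for line in runs_file.read_text(encoding="utf-8").splitlines():
        if line.strip():
            events.append(json.loads(line))
    return events

# e.g. [{"run_id": "...", "event": "run_started", "timestamp": "...", ...},
#       {"run_id": "...", "event": "run_completed", ...}]
```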
    async def _write_session_state(
        self,
        execution_id: str,

@@ -978,8 +1085,8 @@ class ExecutionStream:
    def _create_modified_graph(self) -> "GraphSpec":
        """Create a graph with the entry point overridden.

        Preserves the original graph's entry_points and async_entry_points
        so that validation correctly considers ALL entry nodes reachable.
        Preserves the original graph's entry_points so that validation
        correctly considers ALL entry nodes reachable.
        Each stream only executes from its own entry_node, but the full
        graph must validate with all entry points accounted for.
        """

@@ -1004,7 +1111,6 @@ class ExecutionStream:
            version=self.graph.version,
            entry_node=self.entry_spec.entry_node,  # Use our entry point
            entry_points=merged_entry_points,
            async_entry_points=self.graph.async_entry_points,
            terminal_nodes=self.graph.terminal_nodes,
            pause_nodes=self.graph.pause_nodes,
            nodes=self.graph.nodes,

@@ -8,6 +8,7 @@ write. Errors are silently swallowed — this must never break the agent.

import json
import logging
import os
from datetime import datetime
from pathlib import Path
from typing import IO, Any

@@ -47,6 +48,9 @@ def log_llm_turn(
    Never raises.
    """
    try:
        # Skip logging during test runs to avoid polluting real logs.
        if os.environ.get("PYTEST_CURRENT_TEST") or os.environ.get("HIVE_DISABLE_LLM_LOGS"):
            return
        global _log_file, _log_ready  # noqa: PLW0603
        if not _log_ready:
            _log_file = _open_log()

@@ -47,25 +47,34 @@ class RuntimeLogStore:
        self._base_path = base_path
        # Note: _runs_dir is determined per-run_id by _get_run_dir()

    def _session_logs_dir(self, run_id: str) -> Path:
        """Return the unified session-backed logs directory for a run ID."""
        is_runtime_logs = self._base_path.name == "runtime_logs"
        root = self._base_path.parent if is_runtime_logs else self._base_path
        return root / "sessions" / run_id / "logs"

    def _legacy_run_dir(self, run_id: str) -> Path:
        """Return the deprecated standalone runs directory for a run ID."""
        return self._base_path / "runs" / run_id

    def _get_run_dir(self, run_id: str) -> Path:
        """Determine run directory path based on run_id format.

        - New format (session_*): {storage_root}/sessions/{run_id}/logs/
        - Session-backed runs: {storage_root}/sessions/{run_id}/logs/
        - Old format (anything else): {base_path}/runs/{run_id}/ (deprecated)
        """
        if run_id.startswith("session_"):
            is_runtime_logs = self._base_path.name == "runtime_logs"
            root = self._base_path.parent if is_runtime_logs else self._base_path
            return root / "sessions" / run_id / "logs"
        session_run_dir = self._session_logs_dir(run_id)
        if session_run_dir.exists() or run_id.startswith("session_"):
            return session_run_dir
        import warnings

        warnings.warn(
            f"Reading logs from deprecated location for run_id={run_id}. "
            "New sessions use unified storage at sessions/session_*/logs/",
            "New sessions use unified storage at sessions/<session_id>/logs/",
            DeprecationWarning,
            stacklevel=3,
        )
        return self._base_path / "runs" / run_id
        return self._legacy_run_dir(run_id)
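A hedged walk-through of the resolution order, exercising the private helpers above on an illustrative base path:

```python
from pathlib import Path

from framework.runtime.runtime_log_store import RuntimeLogStore

store = RuntimeLogStore(Path.home() / ".hive" / "agents" / "demo" / "runtime_logs")

# Session-backed run id: resolves to the unified location beside
# runtime_logs, i.e. .../agents/demo/sessions/session_abc/logs
assert store._get_run_dir("session_abc").name == "logs"

# Unknown legacy id with no session dir on disk: falls back to the
# deprecated runs/ path (and emits a DeprecationWarning).
legacy = store._get_run_dir("20240101T000000_deadbeef")
assert legacy.parts[-2:] == ("runs", "20240101T000000_deadbeef")
```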
    # -------------------------------------------------------------------
    # Incremental write (sync — called from locked sections)

@@ -76,6 +85,10 @@ class RuntimeLogStore:
        run_dir = self._get_run_dir(run_id)
        run_dir.mkdir(parents=True, exist_ok=True)

    def ensure_session_run_dir(self, run_id: str) -> None:
        """Create the unified session-backed log directory immediately."""
        self._session_logs_dir(run_id).mkdir(parents=True, exist_ok=True)

    def append_step(self, run_id: str, step: NodeStepLog) -> None:
        """Append one JSONL line to tool_logs.jsonl. Sync."""
        path = self._get_run_dir(run_id) / "tool_logs.jsonl"

@@ -200,17 +213,17 @@ class RuntimeLogStore:
        run_ids = []

        # Scan new location: base_path/sessions/{session_id}/logs/
        # Determine the correct base path for sessions
        is_runtime_logs = self._base_path.name == "runtime_logs"
        root = self._base_path.parent if is_runtime_logs else self._base_path
        sessions_dir = root / "sessions"

        if sessions_dir.exists():
            for session_dir in sessions_dir.iterdir():
                if session_dir.is_dir() and session_dir.name.startswith("session_"):
                    logs_dir = session_dir / "logs"
                    if logs_dir.exists() and logs_dir.is_dir():
                        run_ids.append(session_dir.name)
                if not session_dir.is_dir():
                    continue
                logs_dir = session_dir / "logs"
                if logs_dir.exists() and logs_dir.is_dir():
                    run_ids.append(session_dir.name)

        # Scan old location: base_path/runs/ (deprecated)
        old_runs_dir = self._base_path / "runs"

@@ -66,15 +66,16 @@ class RuntimeLogger:
        """
        if session_id:
            self._run_id = session_id
            self._store.ensure_session_run_dir(self._run_id)
        else:
            ts = datetime.now(UTC).strftime("%Y%m%dT%H%M%S")
            short_uuid = uuid.uuid4().hex[:8]
            self._run_id = f"{ts}_{short_uuid}"
            self._store.ensure_run_dir(self._run_id)

        self._goal_id = goal_id
        self._started_at = datetime.now(UTC).isoformat()
        self._logged_node_ids = set()
        self._store.ensure_run_dir(self._run_id)
        return self._run_id

    def log_step(

@@ -17,7 +17,7 @@ from pathlib import Path

import pytest

from framework.graph import Goal
from framework.graph.edge import AsyncEntryPointSpec, EdgeCondition, EdgeSpec, GraphSpec
from framework.graph.edge import EdgeCondition, EdgeSpec, GraphSpec
from framework.graph.goal import Constraint, SuccessCriterion
from framework.graph.node import NodeSpec
from framework.runtime.agent_runtime import AgentRuntime, create_agent_runtime

@@ -101,30 +101,12 @@ def sample_graph():
        ),
    ]

    async_entry_points = [
        AsyncEntryPointSpec(
            id="webhook",
            name="Webhook Handler",
            entry_node="process-webhook",
            trigger_type="webhook",
            isolation_level="shared",
        ),
        AsyncEntryPointSpec(
            id="api",
            name="API Handler",
            entry_node="process-api",
            trigger_type="api",
            isolation_level="shared",
        ),
    ]

    return GraphSpec(
        id="test-graph",
        goal_id="test-goal",
        version="1.0.0",
        entry_node="process-webhook",
        entry_points={"start": "process-webhook"},
        async_entry_points=async_entry_points,
        terminal_nodes=["complete"],
        pause_nodes=[],
        nodes=nodes,
@@ -504,108 +486,6 @@ class TestAgentRuntime:
    # === GraphSpec Validation Tests ===


class TestGraphSpecValidation:
    """Tests for GraphSpec with async_entry_points."""

    def test_has_async_entry_points(self, sample_graph):
        """Test checking for async entry points."""
        assert sample_graph.has_async_entry_points() is True

        # Graph without async entry points
        simple_graph = GraphSpec(
            id="simple",
            goal_id="goal",
            entry_node="start",
            nodes=[],
            edges=[],
        )
        assert simple_graph.has_async_entry_points() is False

    def test_get_async_entry_point(self, sample_graph):
        """Test getting async entry point by ID."""
        ep = sample_graph.get_async_entry_point("webhook")
        assert ep is not None
        assert ep.id == "webhook"
        assert ep.entry_node == "process-webhook"

        ep_not_found = sample_graph.get_async_entry_point("nonexistent")
        assert ep_not_found is None

    def test_validate_async_entry_points(self):
        """Test validation catches async entry point errors."""
        nodes = [
            NodeSpec(
                id="valid-node",
                name="Valid Node",
                description="A valid node",
                node_type="event_loop",
                input_keys=[],
                output_keys=[],
            ),
        ]

        # Invalid entry node
        graph = GraphSpec(
            id="test",
            goal_id="goal",
            entry_node="valid-node",
            async_entry_points=[
                AsyncEntryPointSpec(
                    id="invalid",
                    name="Invalid",
                    entry_node="nonexistent-node",
                    trigger_type="webhook",
                ),
            ],
            nodes=nodes,
            edges=[],
        )

        errors = graph.validate()["errors"]
        assert any("nonexistent-node" in e for e in errors)

        # Invalid isolation level
        graph2 = GraphSpec(
            id="test",
            goal_id="goal",
            entry_node="valid-node",
            async_entry_points=[
                AsyncEntryPointSpec(
                    id="bad-isolation",
                    name="Bad Isolation",
                    entry_node="valid-node",
                    trigger_type="webhook",
                    isolation_level="invalid",
                ),
            ],
            nodes=nodes,
            edges=[],
        )

        errors2 = graph2.validate()["errors"]
        assert any("isolation_level" in e for e in errors2)

        # Invalid trigger type
        graph3 = GraphSpec(
            id="test",
            goal_id="goal",
            entry_node="valid-node",
            async_entry_points=[
                AsyncEntryPointSpec(
                    id="bad-trigger",
                    name="Bad Trigger",
                    entry_node="valid-node",
                    trigger_type="invalid_trigger",
                ),
            ],
            nodes=nodes,
            edges=[],
        )

        errors3 = graph3.validate()["errors"]
        assert any("trigger_type" in e for e in errors3)


# === Integration Tests ===


@@ -0,0 +1,29 @@
"""Tests for custom session-backed runtime logging paths."""

from pathlib import Path
from unittest.mock import MagicMock

from framework.graph.executor import GraphExecutor
from framework.runtime.runtime_log_store import RuntimeLogStore
from framework.runtime.runtime_logger import RuntimeLogger


def test_graph_executor_uses_custom_session_dir_name_for_runtime_logs():
    executor = GraphExecutor(
        runtime=MagicMock(),
        storage_path=Path("/tmp/test-agent/sessions/my-custom-session"),
    )

    assert executor._get_runtime_log_session_id() == "my-custom-session"


def test_runtime_logger_creates_session_log_dir_for_custom_session_id(tmp_path):
    base = tmp_path / ".hive" / "agents" / "test_agent"
    base.mkdir(parents=True)
    store = RuntimeLogStore(base)
    logger = RuntimeLogger(store=store, agent_id="test-agent")

    run_id = logger.start_run(goal_id="goal-1", session_id="my-custom-session")

    assert run_id == "my-custom-session"
    assert (base / "sessions" / "my-custom-session" / "logs").is_dir()
@@ -483,7 +483,6 @@ class TestEventDrivenEntryPoints:
            version="1.0.0",
            entry_node="process-event",
            entry_points={"start": "process-event"},
            async_entry_points=[],
            terminal_nodes=[],
            pause_nodes=[],
            nodes=nodes,

@@ -0,0 +1,22 @@
"""Trigger definitions for queen-level heartbeats (timers, webhooks)."""

from __future__ import annotations

from dataclasses import dataclass, field
from typing import Any


@dataclass
class TriggerDefinition:
    """A registered trigger that can be activated on the queen runtime.

    Trigger *definitions* come from the worker's ``triggers.json``.
    Activation state is per-session (persisted in ``SessionState.active_triggers``).
    """

    id: str
    trigger_type: str  # "timer" | "webhook"
    trigger_config: dict[str, Any] = field(default_factory=dict)
    description: str = ""
    task: str = ""
    active: bool = False
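As a hedged example, a definition like the following might be parsed out of a worker's ``triggers.json``; the field values are invented for illustration, and activation flips per-session rather than here:

```python
heartbeat = TriggerDefinition(
    id="daily-digest",
    trigger_type="timer",
    trigger_config={"cron": "0 9 * * *", "run_immediately": False},
    description="Morning digest heartbeat",
    task="Summarize overnight activity",
)
assert heartbeat.active is False  # definitions always start inactive
```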
@@ -144,6 +144,13 @@ class SessionState(BaseModel):
    checkpoint_enabled: bool = False
    latest_checkpoint_id: str | None = None

    # Trigger activation state (IDs of triggers the queen/user turned on)
    active_triggers: list[str] = Field(default_factory=list)
    # Per-trigger task strings (user overrides, keyed by trigger ID)
    trigger_tasks: dict[str, str] = Field(default_factory=dict)
    # True after first successful worker execution (gates trigger delivery on restart)
    worker_configured: bool = Field(default=False)

    model_config = {"extra": "allow"}

    @computed_field

@@ -94,6 +94,29 @@ def sessions_dir(session: Session) -> Path:
    return Path.home() / ".hive" / "agents" / agent_name / "sessions"


def cold_sessions_dir(session_id: str) -> Path | None:
    """Resolve the worker sessions directory from disk for a cold/stopped session.

    Reads agent_path from the queen session's meta.json to find the agent name,
    then returns ~/.hive/agents/{agent_name}/sessions/.
    Returns None if meta.json is missing or has no agent_path.
    """
    import json

    meta_path = Path.home() / ".hive" / "queen" / "session" / session_id / "meta.json"
    if not meta_path.exists():
        return None
    try:
        meta = json.loads(meta_path.read_text(encoding="utf-8"))
        agent_path = meta.get("agent_path")
        if not agent_path:
            return None
        agent_name = Path(agent_path).name
        return Path.home() / ".hive" / "agents" / agent_name / "sessions"
    except (json.JSONDecodeError, OSError):
        return None
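Usage sketch for the helper above; the session id is illustrative:

```python
sessions = cold_sessions_dir("session_abc123")
if sessions is None:
    # No meta.json or no agent_path: the session was never bound to an agent.
    print("no worker sessions to restore")
else:
    cold = sorted(p.name for p in sessions.iterdir() if p.is_dir())
    print(f"{len(cold)} worker session(s) on disk")
```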
# Allowed CORS origins (localhost on any port)
_CORS_ORIGINS = {"http://localhost", "http://127.0.0.1"}

@@ -62,6 +62,7 @@ async def create_queen(
    from framework.agents.queen.nodes.thinking_hook import select_expert_persona
    from framework.graph.event_loop_node import HookContext, HookResult
    from framework.graph.executor import GraphExecutor
    from framework.runner.mcp_registry import MCPRegistry
    from framework.runner.tool_registry import ToolRegistry
    from framework.runtime.core import Runtime
    from framework.runtime.event_bus import AgentEvent, EventType

@@ -69,6 +70,7 @@ async def create_queen(
        QueenPhaseState,
        register_queen_lifecycle_tools,
    )
    from framework.tools.queen_memory_tools import register_queen_memory_tools

    hive_home = Path.home() / ".hive"

@@ -85,11 +87,43 @@ async def create_queen(
    except Exception:
        logger.warning("Queen: MCP config failed to load", exc_info=True)

    try:
        registry = MCPRegistry()
        registry.initialize()
        registry_configs = registry.load_agent_selection(queen_pkg_dir)
        if registry_configs:
            results = queen_registry.load_registry_servers(registry_configs)
            logger.info("Queen: loaded MCP registry servers: %s", results)
    except Exception:
        logger.warning("Queen: MCP registry config failed to load", exc_info=True)

    # ---- Phase state --------------------------------------------------
    initial_phase = "staging" if worker_identity else "planning"
    phase_state = QueenPhaseState(phase=initial_phase, event_bus=session.event_bus)
    session.phase_state = phase_state

    # ---- Track ask rounds during planning ----------------------------
    # Increment planning_ask_rounds each time the queen requests user
    # input (ask_user or ask_user_multiple) while in the planning phase.
    async def _track_planning_asks(event: AgentEvent) -> None:
        if phase_state.phase != "planning":
            return
        # Only count explicit ask_user / ask_user_multiple calls, not
        # auto-block (text-only turns emit CLIENT_INPUT_REQUESTED with
        # an empty prompt and no options/questions).
        data = event.data or {}
        has_prompt = bool(data.get("prompt"))
        has_questions = bool(data.get("questions"))
        has_options = bool(data.get("options"))
        if has_prompt or has_questions or has_options:
            phase_state.planning_ask_rounds += 1

    session.event_bus.subscribe(
        [EventType.CLIENT_INPUT_REQUESTED],
        _track_planning_asks,
        filter_stream="queen",
    )

    # ---- Lifecycle tools (always registered) --------------------------
    register_queen_lifecycle_tools(
        queen_registry,

@@ -100,6 +134,9 @@ async def create_queen(
        phase_state=phase_state,
    )

    # ---- Episodic memory tools (always registered) ---------------------
    register_queen_memory_tools(queen_registry)

    # ---- Monitoring tools (only when worker is loaded) ----------------
    if session.worker_runtime:
        from framework.tools.worker_monitoring_tools import register_worker_monitoring_tools

@@ -110,6 +147,7 @@ async def create_queen(
            session.worker_path,
            stream_id="queen",
            worker_graph_id=session.worker_runtime._graph_id,
            default_session_id=session.id,
        )

    queen_tools = list(queen_registry.get_tools().values())

@@ -149,7 +187,8 @@ async def create_queen(
        worker_identity = (
            "\n\n# Worker Profile\n"
            "No worker agent loaded. You are operating independently.\n"
            "Handle all tasks directly using your coding tools."
            "Design or build the agent to solve the user's problem "
            "according to your current phase."
        )

    _planning_body = (

@@ -192,6 +231,16 @@ async def create_queen(
        + worker_identity
    )

    # ---- Default skill protocols -------------------------------------
    try:
        from framework.skills.manager import SkillsManager

        _queen_skills_mgr = SkillsManager()
        _queen_skills_mgr.load()
        phase_state.protocols_prompt = _queen_skills_mgr.protocols_prompt
    except Exception:
        logger.debug("Queen skill loading failed (non-fatal)", exc_info=True)

    # ---- Persona hook ------------------------------------------------
    _session_llm = session.llm
    _session_event_bus = session.event_bus

@@ -252,6 +301,7 @@ async def create_queen(
        execution_id=session.id,
        dynamic_tools_provider=phase_state.get_current_tools,
        dynamic_prompt_provider=phase_state.get_current_prompt,
        iteration_metadata_provider=lambda: {"phase": phase_state.phase},
    )
    session.queen_executor = executor

@@ -269,6 +319,8 @@ async def create_queen(
            return
        if phase_state.phase == "running":
            if event.type == EventType.EXECUTION_COMPLETED:
                # Mark worker as configured after first successful run
                session.worker_configured = True
                output = event.data.get("output", {})
                output_summary = ""
                if output:
@@ -103,7 +103,9 @@ async def handle_delete_credential(request: web.Request) -> web.Response:
    if credential_id == "aden_api_key":
        from framework.credentials.key_storage import delete_aden_api_key

        delete_aden_api_key()
        deleted = delete_aden_api_key()
        if not deleted:
            return web.json_response({"error": "Credential 'aden_api_key' not found"}, status=404)
        return web.json_response({"deleted": True})

    store = _get_store(request)

@@ -178,7 +180,10 @@ async def handle_check_agent(request: web.Request) -> web.Response:
        )
    except Exception as e:
        logger.exception(f"Error checking agent credentials: {e}")
        return web.json_response({"error": str(e)}, status=500)
        return web.json_response(
            {"error": "Internal server error while checking credentials"},
            status=500,
        )


def _status_to_dict(c) -> dict:

@@ -6,7 +6,7 @@ import logging
from aiohttp import web
from aiohttp.client_exceptions import ClientConnectionResetError as _AiohttpConnReset

from framework.runtime.event_bus import EventType
from framework.runtime.event_bus import AgentEvent, EventType
from framework.server.app import resolve_session

logger = logging.getLogger(__name__)

@@ -15,6 +15,7 @@ logger = logging.getLogger(__name__)
DEFAULT_EVENT_TYPES = [
    EventType.CLIENT_OUTPUT_DELTA,
    EventType.CLIENT_INPUT_REQUESTED,
    EventType.CLIENT_INPUT_RECEIVED,
    EventType.LLM_TEXT_DELTA,
    EventType.TOOL_CALL_STARTED,
    EventType.TOOL_CALL_COMPLETED,

@@ -36,10 +37,17 @@ DEFAULT_EVENT_TYPES = [
    EventType.NODE_RETRY,
    EventType.NODE_TOOL_DOOM_LOOP,
    EventType.CONTEXT_COMPACTED,
    EventType.CONTEXT_USAGE_UPDATED,
    EventType.WORKER_LOADED,
    EventType.CREDENTIALS_REQUIRED,
    EventType.SUBAGENT_REPORT,
    EventType.QUEEN_PHASE_CHANGED,
    EventType.TRIGGER_AVAILABLE,
    EventType.TRIGGER_ACTIVATED,
    EventType.TRIGGER_DEACTIVATED,
    EventType.TRIGGER_FIRED,
    EventType.TRIGGER_REMOVED,
    EventType.TRIGGER_UPDATED,
    EventType.DRAFT_GRAPH_UPDATED,
]

@@ -90,6 +98,7 @@ async def handle_events(request: web.Request) -> web.StreamResponse:
    "execution_failed",
    "execution_paused",
    "client_input_requested",
    "client_input_received",
    "node_loop_iteration",
    "node_loop_started",
    "credentials_required",
@@ -143,6 +152,7 @@ async def handle_events(request: web.Request) -> web.StreamResponse:
|
||||
EventType.CLIENT_OUTPUT_DELTA.value,
|
||||
EventType.EXECUTION_STARTED.value,
|
||||
EventType.CLIENT_INPUT_REQUESTED.value,
|
||||
EventType.CLIENT_INPUT_RECEIVED.value,
|
||||
}
|
||||
event_type_values = {et.value for et in event_types}
|
||||
replay_types = _REPLAY_TYPES & event_type_values
|
||||
@@ -157,6 +167,54 @@ async def handle_events(request: web.Request) -> web.StreamResponse:
    if replayed:
        logger.info("SSE replayed %d buffered events for session='%s'", replayed, session.id)

    # Inject a live-status snapshot so the frontend knows which nodes are
    # currently running. This covers the case where the user navigated away
    # and back — the localStorage snapshot is stale, and the ring-buffer
    # replay may not include the original node_loop_started events.
    worker_runtime = getattr(session, "worker_runtime", None)
    if worker_runtime and getattr(worker_runtime, "is_running", False):
        try:
            for stream_info in worker_runtime.get_active_streams():
                graph_id = stream_info.get("graph_id")
                stream_id = stream_info.get("stream_id", "default")
                for exec_id in stream_info.get("active_execution_ids", []):
                    # Synthesize execution_started so frontend sets workerRunState
                    synth_exec = AgentEvent(
                        type=EventType.EXECUTION_STARTED,
                        stream_id=stream_id,
                        execution_id=exec_id,
                        graph_id=graph_id,
                        data={"synthetic": True},
                    ).to_dict()
                    try:
                        queue.put_nowait(synth_exec)
                    except asyncio.QueueFull:
                        pass

                # Find the currently executing node via the executor
                for _gid, reg in worker_runtime._graphs.items():
                    if _gid != graph_id:
                        continue
                    for _ep_id, stream in reg.streams.items():
                        for exec_id, executor in stream._active_executors.items():
                            current = getattr(executor, "current_node_id", None)
                            if current:
                                synth_node = AgentEvent(
                                    type=EventType.NODE_LOOP_STARTED,
                                    stream_id=stream_id,
                                    node_id=current,
                                    execution_id=exec_id,
                                    graph_id=graph_id,
                                    data={"synthetic": True},
                                ).to_dict()
                                try:
                                    queue.put_nowait(synth_node)
                                except asyncio.QueueFull:
                                    pass
            logger.info("SSE injected live-status snapshot for session='%s'", session.id)
        except Exception:
            logger.debug("Failed to inject live-status snapshot", exc_info=True)

    event_count = 0
    close_reason = "unknown"
    try:

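Consumers can tell these snapshot events apart from live ones because their payload carries `"synthetic": True`. A minimal sketch of a reconnecting client that treats synthetic events as state restoration rather than new activity; the endpoint path and the `data:` SSE framing are assumptions for illustration, not confirmed by this diff:

```python
# Hedged sketch: SSE consumer that separates synthetic snapshot events
# (restore "already running" state) from live events (append activity).
import json

import aiohttp


async def watch(base_url: str, session_id: str) -> None:
    async with aiohttp.ClientSession() as http:
        # Path assumed from the handler name; adjust to the real route.
        async with http.get(f"{base_url}/api/sessions/{session_id}/events") as resp:
            async for raw in resp.content:
                line = raw.decode("utf-8").strip()
                if not line.startswith("data:"):
                    continue
                event = json.loads(line[len("data:"):])
                if event.get("data", {}).get("synthetic"):
                    # Snapshot: mark the node as running, no new log entry.
                    print("restored:", event.get("node_id") or event.get("execution_id"))
                else:
                    print("live:", event.get("type"))
```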
@@ -108,7 +108,10 @@ async def handle_chat(request: web.Request) -> web.Response:
    The input box is permanently connected to the queen agent.
    Worker input is handled separately via /worker-input.

    Body: {"message": "hello"}
    Body: {"message": "hello", "images": [{"type": "image_url", "image_url": {"url": "data:..."}}]}

    The optional ``images`` field accepts a list of OpenAI-format image_url
    content blocks. The frontend encodes images as base64 data URIs.
    """
    session, err = resolve_session(request)
    if err:
@@ -116,15 +119,31 @@ async def handle_chat(request: web.Request) -> web.Response:

    body = await request.json()
    message = body.get("message", "")
    image_content = body.get("images") or None  # list[dict] | None

    if not message:
    if not message and not image_content:
        return web.json_response({"error": "message is required"}, status=400)

    queen_executor = session.queen_executor
    if queen_executor is not None:
        node = queen_executor.node_registry.get("queen")
        if node is not None and hasattr(node, "inject_event"):
            await node.inject_event(message, is_client_input=True)
            await node.inject_event(message, is_client_input=True, image_content=image_content)
            # Publish to EventBus so the session event log captures user messages
            from framework.runtime.event_bus import AgentEvent, EventType

            await session.event_bus.publish(
                AgentEvent(
                    type=EventType.CLIENT_INPUT_RECEIVED,
                    stream_id="queen",
                    node_id="queen",
                    execution_id=session.id,
                    data={
                        "content": message,
                        "image_count": len(image_content) if image_content else 0,
                    },
                )
            )
            return web.json_response(
                {
                    "status": "queen",

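A hypothetical client call exercising the new `images` field; host, port, and session ID are placeholders, and the endpoint path is inferred from the handler's docstring:

```python
# Hedged sketch: posting a chat message with an image attached as a
# base64 data URI, matching the OpenAI image_url block format above.
import base64

import requests

with open("screenshot.png", "rb") as fh:
    png_b64 = base64.b64encode(fh.read()).decode("ascii")

resp = requests.post(
    "http://localhost:8080/api/sessions/my-session/chat",  # path assumed
    json={
        "message": "What does this screenshot show?",
        "images": [
            {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{png_b64}"}}
        ],
    },
)
print(resp.json())  # expected shape: {"status": "queen", ...}
```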
@@ -2,6 +2,7 @@

import json
import logging
import time

from aiohttp import web

@@ -116,6 +117,20 @@ async def handle_list_nodes(request: web.Request) -> web.Response:
        }
        for ep in reg.entry_points.values()
    ]
    # Append triggers from triggers.json (stored on session)
    for t in getattr(session, "available_triggers", {}).values():
        entry = {
            "id": t.id,
            "name": t.description or t.id,
            "entry_node": graph.entry_node,
            "trigger_type": t.trigger_type,
            "trigger_config": t.trigger_config,
            "task": t.task,
        }
        mono = getattr(session, "trigger_next_fire", {}).get(t.id)
        if mono is not None:
            entry["next_fire_in"] = max(0.0, mono - time.monotonic())
        entry_points.append(entry)
    return web.json_response(
        {
            "nodes": nodes,
@@ -261,10 +276,12 @@ async def handle_flowchart_map(request: web.Request) -> web.Response:

    # Fast path: already in memory
    if phase_state is not None and phase_state.original_draft_graph is not None:
        return web.json_response({
            "map": phase_state.flowchart_map,
            "original_draft": phase_state.original_draft_graph,
        })
        return web.json_response(
            {
                "map": phase_state.flowchart_map,
                "original_draft": phase_state.original_draft_graph,
            }
        )

    # Try loading from flowchart.json in the agent folder
    worker_path = getattr(session, "worker_path", None)
@@ -281,10 +298,12 @@ async def handle_flowchart_map(request: web.Request) -> web.Response:
            if phase_state is not None and original_draft:
                phase_state.original_draft_graph = original_draft
                phase_state.flowchart_map = fmap
            return web.json_response({
                "map": fmap,
                "original_draft": original_draft,
            })
            return web.json_response(
                {
                    "map": fmap,
                    "original_draft": original_draft,
                }
            )
        except Exception:
            logger.warning("Failed to read flowchart.json from %s", worker_path)

@@ -9,8 +9,9 @@ Session-primary routes:
- DELETE /api/sessions/{session_id}/worker — unload worker from session
- GET /api/sessions/{session_id}/stats — runtime statistics
- GET /api/sessions/{session_id}/entry-points — list entry points
- PATCH /api/sessions/{session_id}/triggers/{id} — update trigger task
- GET /api/sessions/{session_id}/graphs — list graph IDs
- GET /api/sessions/{session_id}/queen-messages — queen conversation history
- GET /api/sessions/{session_id}/events/history — persisted eventbus log (for replay)

Worker session browsing (persisted execution runs on disk):
- GET /api/sessions/{session_id}/worker-sessions — list
@@ -22,15 +23,20 @@ Worker session browsing (persisted execution runs on disk):

"""

import asyncio
import contextlib
import json
import logging
import shutil
import subprocess
import sys
import time
from pathlib import Path

from aiohttp import web

from framework.server.app import (
    cold_sessions_dir,
    resolve_session,
    safe_path_segment,
    sessions_dir,
@@ -47,8 +53,11 @@ def _get_manager(request: web.Request) -> SessionManager:

def _session_to_live_dict(session) -> dict:
    """Serialize a live Session to the session-primary JSON shape."""
    from framework.llm.capabilities import supports_image_tool_results

    info = session.worker_info
    phase_state = getattr(session, "phase_state", None)
    queen_model: str = getattr(getattr(session, "runner", None), "model", "") or ""
    return {
        "session_id": session.id,
        "worker_id": session.worker_id,
@@ -61,7 +70,10 @@ def _session_to_live_dict(session) -> dict:
        "loaded_at": session.loaded_at,
        "uptime_seconds": round(time.time() - session.loaded_at, 1),
        "intro_message": getattr(session.runner, "intro_message", "") or "",
        "queen_phase": phase_state.phase if phase_state else "planning",
        "queen_phase": phase_state.phase
        if phase_state
        else ("staging" if session.worker_runtime else "planning"),
        "queen_supports_images": supports_image_tool_results(queen_model) if queen_model else True,
    }


@@ -140,6 +152,7 @@ async def handle_create_session(request: web.Request) -> web.Response:
    session = await manager.create_session_with_worker(
        agent_path,
        agent_id=agent_id,
        session_id=session_id,
        model=model,
        initial_prompt=initial_prompt,
        queen_resume_from=queen_resume_from,
@@ -228,6 +241,22 @@ async def handle_get_live_session(request: web.Request) -> web.Response:
            }
            for ep in rt.get_entry_points()
        ]
        # Append triggers from triggers.json (stored on session)
        runner = getattr(session, "runner", None)
        graph_entry = runner.graph.entry_node if runner else ""
        for t in getattr(session, "available_triggers", {}).values():
            entry = {
                "id": t.id,
                "name": t.description or t.id,
                "entry_node": graph_entry,
                "trigger_type": t.trigger_type,
                "trigger_config": t.trigger_config,
                "task": t.task,
            }
            mono = getattr(session, "trigger_next_fire", {}).get(t.id)
            if mono is not None:
                entry["next_fire_in"] = max(0.0, mono - time.monotonic())
            data["entry_points"].append(entry)
        data["graphs"] = session.worker_runtime.list_graphs()

    return web.json_response(data)
@@ -351,23 +380,190 @@ async def handle_session_entry_points(request: web.Request) -> web.Response:

    rt = session.worker_runtime
    eps = rt.get_entry_points() if rt else []
    entry_points = [
        {
            "id": ep.id,
            "name": ep.name,
            "entry_node": ep.entry_node,
            "trigger_type": ep.trigger_type,
            "trigger_config": ep.trigger_config,
            **(
                {"next_fire_in": nf}
                if rt and (nf := rt.get_timer_next_fire_in(ep.id)) is not None
                else {}
            ),
        }
        for ep in eps
    ]
    # Append triggers from triggers.json (stored on session)
    runner = getattr(session, "runner", None)
    graph_entry = runner.graph.entry_node if runner else ""
    for t in getattr(session, "available_triggers", {}).values():
        entry = {
            "id": t.id,
            "name": t.description or t.id,
            "entry_node": graph_entry,
            "trigger_type": t.trigger_type,
            "trigger_config": t.trigger_config,
            "task": t.task,
        }
        mono = getattr(session, "trigger_next_fire", {}).get(t.id)
        if mono is not None:
            entry["next_fire_in"] = max(0.0, mono - time.monotonic())
        entry_points.append(entry)
    return web.json_response({"entry_points": entry_points})


async def handle_update_trigger_task(request: web.Request) -> web.Response:
    """PATCH /api/sessions/{session_id}/triggers/{trigger_id} — update trigger fields."""
    session, err = resolve_session(request)
    if err:
        return err

    trigger_id = request.match_info["trigger_id"]
    available = getattr(session, "available_triggers", {})
    tdef = available.get(trigger_id)
    if tdef is None:
        return web.json_response(
            {"error": f"Trigger '{trigger_id}' not found"},
            status=404,
        )

    try:
        body = await request.json()
    except Exception:
        return web.json_response({"error": "Invalid JSON body"}, status=400)

    updates: dict[str, object] = {}

    if "task" in body:
        task = body.get("task")
        if not isinstance(task, str):
            return web.json_response({"error": "'task' must be a string"}, status=400)
        tdef.task = task
        updates["task"] = tdef.task

    trigger_config_update = body.get("trigger_config")
    if trigger_config_update is not None:
        if not isinstance(trigger_config_update, dict):
            return web.json_response(
                {"error": "'trigger_config' must be an object"},
                status=400,
            )
        merged_trigger_config = dict(tdef.trigger_config)
        merged_trigger_config.update(trigger_config_update)

        if tdef.trigger_type == "timer":
            cron_expr = merged_trigger_config.get("cron")
            interval = merged_trigger_config.get("interval_minutes")
            if cron_expr is not None and not isinstance(cron_expr, str):
                return web.json_response(
                    {"error": "'trigger_config.cron' must be a string"},
                    status=400,
                )
            if cron_expr:
                try:
                    from croniter import croniter

                    if not croniter.is_valid(cron_expr):
                        return web.json_response(
                            {"error": f"Invalid cron expression: {cron_expr}"},
                            status=400,
                        )
                except ImportError:
                    return web.json_response(
                        {
                            "error": (
                                "croniter package not installed — cannot validate cron expression."
                            )
                        },
                        status=500,
                    )
                merged_trigger_config.pop("interval_minutes", None)
            elif interval is None:
                return web.json_response(
                    {
                        "error": (
                            "Timer trigger needs 'cron' or 'interval_minutes' in trigger_config."
                        )
                    },
                    status=400,
                )
            elif not isinstance(interval, (int, float)) or interval <= 0:
                return web.json_response(
                    {"error": "'trigger_config.interval_minutes' must be > 0"},
                    status=400,
                )
        tdef.trigger_config = merged_trigger_config
        updates["trigger_config"] = tdef.trigger_config

    if not updates:
        return web.json_response(
            {"error": "Provide at least one of 'task' or 'trigger_config'"},
            status=400,
        )

    # Persist to session state and agent definition
    from framework.tools.queen_lifecycle_tools import (
        _persist_active_triggers,
        _save_trigger_to_agent,
        _start_trigger_timer,
        _start_trigger_webhook,
    )

    if "trigger_config" in updates and trigger_id in getattr(session, "active_trigger_ids", set()):
        task = session.active_timer_tasks.pop(trigger_id, None)
        if task and not task.done():
            task.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await task
        getattr(session, "trigger_next_fire", {}).pop(trigger_id, None)

        webhook_subs = getattr(session, "active_webhook_subs", {})
        if sub_id := webhook_subs.pop(trigger_id, None):
            with contextlib.suppress(Exception):
                session.event_bus.unsubscribe(sub_id)

        if tdef.trigger_type == "timer":
            await _start_trigger_timer(session, trigger_id, tdef)
        elif tdef.trigger_type == "webhook":
            await _start_trigger_webhook(session, trigger_id, tdef)

    if trigger_id in getattr(session, "active_trigger_ids", set()):
        session_id = request.match_info["session_id"]
        await _persist_active_triggers(session, session_id)

    _save_trigger_to_agent(session, trigger_id, tdef)

    # Emit SSE event so the frontend updates the graph and detail panel
    bus = getattr(session, "event_bus", None)
    if bus:
        from framework.runtime.event_bus import AgentEvent, EventType

        await bus.publish(
            AgentEvent(
                type=EventType.TRIGGER_UPDATED,
                stream_id="queen",
                data={
                    "trigger_id": trigger_id,
                    "task": tdef.task,
                    "trigger_config": tdef.trigger_config,
                    "trigger_type": tdef.trigger_type,
                    "name": tdef.description or trigger_id,
                    "entry_node": getattr(
                        getattr(getattr(session, "runner", None), "graph", None),
                        "entry_node",
                        None,
                    ),
                },
            )
        )

    return web.json_response(
        {
            "entry_points": [
                {
                    "id": ep.id,
                    "name": ep.name,
                    "entry_node": ep.entry_node,
                    "trigger_type": ep.trigger_type,
                    "trigger_config": ep.trigger_config,
                    **(
                        {"next_fire_in": nf}
                        if rt and (nf := rt.get_timer_next_fire_in(ep.id)) is not None
                        else {}
                    ),
                }
                for ep in eps
            ]
            "trigger_id": trigger_id,
            "task": tdef.task,
            "trigger_config": tdef.trigger_config,
        }
    )

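A hypothetical call against the PATCH endpoint above; host, port, and IDs are placeholders:

```python
# Hedged sketch: updating an active timer trigger's schedule and task.
# Per the handler above, a successful response echoes the merged
# trigger_config, and an active timer is restarted so the new cron
# takes effect immediately.
import requests

resp = requests.patch(
    "http://localhost:8080/api/sessions/my-session/triggers/daily",
    json={
        "task": "Summarize overnight runs",
        "trigger_config": {"cron": "0 6 * * *"},
    },
)
resp.raise_for_status()
print(resp.json()["trigger_config"])  # {"cron": "0 6 * * *"}
```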
@@ -397,23 +593,28 @@ async def handle_list_worker_sessions(request: web.Request) -> web.Response:
    """List worker sessions on disk."""
    session, err = resolve_session(request)
    if err:
        return err

    if not session.worker_path:
        return web.json_response({"sessions": []})

    sess_dir = sessions_dir(session)
        # Fall back to cold session lookup from disk
        sid = request.match_info["session_id"]
        sess_dir = cold_sessions_dir(sid)
        if sess_dir is None:
            return err
    else:
        if not session.worker_path:
            return web.json_response({"sessions": []})
        sess_dir = sessions_dir(session)
    if not sess_dir.exists():
        return web.json_response({"sessions": []})

    sessions = []
    for d in sorted(sess_dir.iterdir(), reverse=True):
        if not d.is_dir() or not d.name.startswith("session_"):
        if not d.is_dir():
            continue
        state_path = d / "state.json"
        if not d.name.startswith("session_") and not state_path.exists():
            continue

        entry: dict = {"session_id": d.name}

        state_path = d / "state.json"
        if state_path.exists():
            try:
                state = json.loads(state_path.read_text(encoding="utf-8"))
@@ -564,48 +765,85 @@ async def handle_messages(request: web.Request) -> web.Response:
    """Get messages for a worker session."""
    session, err = resolve_session(request)
    if err:
        return err

    if not session.worker_path:
        return web.json_response({"error": "No worker loaded"}, status=503)
        # Fall back to cold session lookup from disk
        sid = request.match_info["session_id"]
        sess_dir = cold_sessions_dir(sid)
        if sess_dir is None:
            return err
    else:
        if not session.worker_path:
            return web.json_response({"error": "No worker loaded"}, status=503)
        sess_dir = sessions_dir(session)

    ws_id = request.match_info.get("ws_id") or request.match_info.get("session_id", "")
    ws_id = safe_path_segment(ws_id)

    convs_dir = sessions_dir(session) / ws_id / "conversations"
    convs_dir = sess_dir / ws_id / "conversations"
    if not convs_dir.exists():
        return web.json_response({"messages": []})

    filter_node = request.query.get("node_id")
    all_messages = []

    for node_dir in convs_dir.iterdir():
        if not node_dir.is_dir():
            continue
        if filter_node and node_dir.name != filter_node:
            continue

        parts_dir = node_dir / "parts"
    def _collect_msg_parts(parts_dir: Path, node_id: str) -> None:
        if not parts_dir.exists():
            continue

            return
        for part_file in sorted(parts_dir.iterdir()):
            if part_file.suffix != ".json":
                continue
            try:
                part = json.loads(part_file.read_text(encoding="utf-8"))
                part["_node_id"] = node_dir.name
                part["_node_id"] = node_id
                part.setdefault("created_at", part_file.stat().st_mtime)
                all_messages.append(part)
            except (json.JSONDecodeError, OSError):
                continue

    # Flat layout: conversations/parts/*.json
    if not filter_node:
        _collect_msg_parts(convs_dir / "parts", "worker")

    # Node-based layout: conversations/<node_id>/parts/*.json
    for node_dir in convs_dir.iterdir():
        if not node_dir.is_dir() or node_dir.name == "parts":
            continue
        if filter_node and node_dir.name != filter_node:
            continue
        _collect_msg_parts(node_dir / "parts", node_dir.name)

    # Merge run lifecycle markers from runs.jsonl (for historical dividers)
    runs_file = sess_dir / ws_id / "runs.jsonl"
    if runs_file.exists():
        try:
            for line in runs_file.read_text(encoding="utf-8").splitlines():
                line = line.strip()
                if not line:
                    continue
                try:
                    record = json.loads(line)
                    all_messages.append(
                        {
                            "seq": -1,
                            "role": "system",
                            "content": "",
                            "_node_id": "_run_marker",
                            "is_run_marker": True,
                            "run_id": record.get("run_id"),
                            "run_event": record.get("event"),
                            "created_at": record.get("created_at", 0),
                        }
                    )
                except json.JSONDecodeError:
                    continue
        except OSError:
            pass

    all_messages.sort(key=lambda m: m.get("created_at", m.get("seq", 0)))

    client_only = request.query.get("client_only", "").lower() in ("true", "1")
    if client_only:
        client_facing_nodes: set[str] = set()
        if session.runner and hasattr(session.runner, "graph"):
        if session and session.runner and hasattr(session.runner, "graph"):
            for node in session.runner.graph.nodes:
                if node.client_facing:
                    client_facing_nodes.add(node.id)
@@ -614,63 +852,51 @@ async def handle_messages(request: web.Request) -> web.Response:
        all_messages = [
            m
            for m in all_messages
            if not m.get("is_transition_marker")
            and m["role"] != "tool"
            and not (m["role"] == "assistant" and m.get("tool_calls"))
            and (
                (m["role"] == "user" and m.get("is_client_input"))
                or (m["role"] == "assistant" and m.get("_node_id") in client_facing_nodes)
            if m.get("is_run_marker")
            or (
                not m.get("is_transition_marker")
                and m["role"] != "tool"
                and not (m["role"] == "assistant" and m.get("tool_calls"))
                and (
                    (m["role"] == "user" and m.get("is_client_input"))
                    or (m["role"] == "assistant" and m.get("_node_id") in client_facing_nodes)
                )
            )
        ]

    return web.json_response({"messages": all_messages})


async def handle_queen_messages(request: web.Request) -> web.Response:
    """GET /api/sessions/{session_id}/queen-messages — get queen conversation.
async def handle_session_events_history(request: web.Request) -> web.Response:
    """GET /api/sessions/{session_id}/events/history — persisted eventbus log.

    Reads directly from disk so it works for both live sessions and cold
    (post-server-restart) sessions — no live session required.
    Reads ``events.jsonl`` from the session directory on disk so it works for
    both live sessions and cold (post-server-restart) sessions. The frontend
    replays these events through ``sseEventToChatMessage`` to fully reconstruct
    the UI state on resume.
    """
    session_id = request.match_info["session_id"]

    queen_dir = Path.home() / ".hive" / "queen" / "session" / session_id
    convs_dir = queen_dir / "conversations"
    if not convs_dir.exists():
        return web.json_response({"messages": [], "session_id": session_id})
    events_path = queen_dir / "events.jsonl"
    if not events_path.exists():
        return web.json_response({"events": [], "session_id": session_id})

    all_messages: list[dict] = []
    for node_dir in convs_dir.iterdir():
        if not node_dir.is_dir():
            continue
        parts_dir = node_dir / "parts"
        if not parts_dir.exists():
            continue
        for part_file in sorted(parts_dir.iterdir()):
            if part_file.suffix != ".json":
                continue
            try:
                part = json.loads(part_file.read_text(encoding="utf-8"))
                part["_node_id"] = node_dir.name
                # Use file mtime as created_at so frontend can order
                # queen and worker messages chronologically.
                part.setdefault("created_at", part_file.stat().st_mtime)
                all_messages.append(part)
            except (json.JSONDecodeError, OSError):
                continue
    events: list[dict] = []
    try:
        with open(events_path, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                try:
                    events.append(json.loads(line))
                except json.JSONDecodeError:
                    continue
    except OSError:
        return web.json_response({"events": [], "session_id": session_id})

    all_messages.sort(key=lambda m: m.get("created_at", m.get("seq", 0)))

    # Filter to client-facing messages only
    all_messages = [
        m
        for m in all_messages
        if not m.get("is_transition_marker")
        and m["role"] != "tool"
        and not (m["role"] == "assistant" and m.get("tool_calls"))
    ]

    return web.json_response({"messages": all_messages, "session_id": session_id})
    return web.json_response({"events": events, "session_id": session_id})

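The resume flow this enables, seen from the client side; a hedged sketch with host, port, and session ID as placeholders (the route itself appears in the registration table below):

```python
# Hedged sketch: fetch the persisted event log after a server restart
# and walk it to rebuild UI state, as the docstring above describes.
import requests

history = requests.get(
    "http://localhost:8080/api/sessions/my-session/events/history"
).json()

for event in history["events"]:
    # Each record is one persisted event dict from events.jsonl.
    print(event.get("type"), event.get("node_id", ""), event.get("execution_id", ""))
```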
async def handle_session_history(request: web.Request) -> web.Response:
@@ -746,6 +972,7 @@ async def handle_discover(request: web.Request) -> web.Response:
            "description": entry.description,
            "category": entry.category,
            "session_count": entry.session_count,
            "run_count": entry.run_count,
            "node_count": entry.node_count,
            "tool_count": entry.tool_count,
            "tags": entry.tags,
@@ -757,6 +984,29 @@ async def handle_discover(request: web.Request) -> web.Response:
    return web.json_response(result)


async def handle_reveal_session_folder(request: web.Request) -> web.Response:
    """POST /api/sessions/{session_id}/reveal — open session data folder in the OS file manager."""
    manager: SessionManager = request.app["manager"]
    session_id = request.match_info["session_id"]

    session = manager.get_session(session_id)
    storage_session_id = (session.queen_resume_from or session.id) if session else session_id
    folder = Path.home() / ".hive" / "queen" / "session" / storage_session_id
    folder.mkdir(parents=True, exist_ok=True)

    try:
        if sys.platform == "darwin":
            subprocess.Popen(["open", str(folder)])
        elif sys.platform == "win32":
            subprocess.Popen(["explorer", str(folder)])
        else:
            subprocess.Popen(["xdg-open", str(folder)])
    except Exception as exc:
        return web.json_response({"error": str(exc)}, status=500)

    return web.json_response({"path": str(folder)})


# ------------------------------------------------------------------
# Route registration
# ------------------------------------------------------------------
@@ -781,10 +1031,15 @@ def register_routes(app: web.Application) -> None:
    app.router.add_delete("/api/sessions/{session_id}/worker", handle_unload_worker)

    # Session info
    app.router.add_post("/api/sessions/{session_id}/reveal", handle_reveal_session_folder)
    app.router.add_get("/api/sessions/{session_id}/stats", handle_session_stats)
    app.router.add_get("/api/sessions/{session_id}/entry-points", handle_session_entry_points)
    app.router.add_patch(
        "/api/sessions/{session_id}/triggers/{trigger_id}", handle_update_trigger_task
    )
    app.router.add_get("/api/sessions/{session_id}/graphs", handle_session_graphs)
    app.router.add_get("/api/sessions/{session_id}/queen-messages", handle_queen_messages)

    app.router.add_get("/api/sessions/{session_id}/events/history", handle_session_events_history)

    # Worker session browsing (session-primary)
    app.router.add_get("/api/sessions/{session_id}/worker-sessions", handle_list_worker_sessions)

File diff suppressed because it is too large
@@ -5,6 +5,7 @@ Uses aiohttp TestClient with mocked sessions to test all endpoints
without requiring actual LLM calls or agent loading.
"""

import asyncio
import json
from dataclasses import dataclass, field
from pathlib import Path
@@ -13,9 +14,13 @@ from unittest.mock import AsyncMock, MagicMock
import pytest
from aiohttp.test_utils import TestClient, TestServer

from framework.runtime.triggers import TriggerDefinition
from framework.server.app import create_app
from framework.server.session_manager import Session

REPO_ROOT = Path(__file__).resolve().parents[4]
EXAMPLE_AGENT_PATH = REPO_ROOT / "examples" / "templates" / "deep_research_agent"

# ---------------------------------------------------------------------------
# Mock helpers
# ---------------------------------------------------------------------------
@@ -169,6 +174,7 @@ def _make_session(
    runner.intro_message = "Test intro"

    mock_event_bus = MagicMock()
    mock_event_bus.publish = AsyncMock()
    mock_llm = MagicMock()

    queen_executor = _make_queen_executor() if with_queen else None
@@ -207,11 +213,8 @@ def tmp_agent_dir(tmp_path, monkeypatch):
    return tmp_path, agent_name, base


@pytest.fixture
def sample_session(tmp_agent_dir):
    """Create a sample session with state.json, checkpoints, and conversations."""
    tmp_path, agent_name, base = tmp_agent_dir
    session_id = "session_20260220_120000_abc12345"
def _write_sample_session(base: Path, session_id: str):
    """Create a sample worker session on disk."""
    session_dir = base / "sessions" / session_id

    # state.json
@@ -292,6 +295,20 @@
    return session_id, session_dir, state


@pytest.fixture
def sample_session(tmp_agent_dir):
    """Create a sample session with state.json, checkpoints, and conversations."""
    _tmp_path, _agent_name, base = tmp_agent_dir
    return _write_sample_session(base, "session_20260220_120000_abc12345")


@pytest.fixture
def custom_id_session(tmp_agent_dir):
    """Create a sample session that uses a custom non-session_* ID."""
    _tmp_path, _agent_name, base = tmp_agent_dir
    return _write_sample_session(base, "my-custom-session")


def _make_app_with_session(session):
    """Create an aiohttp app with a pre-loaded session."""
    app = create_app()
@@ -347,6 +364,35 @@ class TestHealth:


class TestSessionCRUD:
    @pytest.mark.asyncio
    async def test_create_session_with_worker_forwards_session_id(self):
        app = create_app()
        manager = app["manager"]
        manager.create_session_with_worker = AsyncMock(
            return_value=_make_session(agent_id="my-custom-session")
        )

        async with TestClient(TestServer(app)) as client:
            resp = await client.post(
                "/api/sessions",
                json={
                    "session_id": "my-custom-session",
                    "agent_path": str(EXAMPLE_AGENT_PATH),
                },
            )
            data = await resp.json()

        assert resp.status == 201
        assert data["session_id"] == "my-custom-session"
        manager.create_session_with_worker.assert_awaited_once_with(
            str(EXAMPLE_AGENT_PATH.resolve()),
            agent_id=None,
            session_id="my-custom-session",
            model=None,
            initial_prompt=None,
            queen_resume_from=None,
        )

    @pytest.mark.asyncio
    async def test_list_sessions_empty(self):
        app = create_app()
@@ -441,6 +487,70 @@ class TestSessionCRUD:
        data = await resp.json()
        assert "primary" in data["graphs"]

    @pytest.mark.asyncio
    async def test_update_trigger_task(self, tmp_path):
        session = _make_session(tmp_dir=tmp_path)
        session.available_triggers["daily"] = TriggerDefinition(
            id="daily",
            trigger_type="timer",
            trigger_config={"cron": "0 5 * * *"},
            task="Old task",
        )
        app = _make_app_with_session(session)
        async with TestClient(TestServer(app)) as client:
            resp = await client.patch(
                "/api/sessions/test_agent/triggers/daily",
                json={"task": "New task"},
            )
            assert resp.status == 200
            data = await resp.json()
            assert data["task"] == "New task"
            assert data["trigger_config"]["cron"] == "0 5 * * *"
            assert session.available_triggers["daily"].task == "New task"

    @pytest.mark.asyncio
    async def test_update_trigger_cron_restarts_active_timer(self, tmp_path):
        session = _make_session(tmp_dir=tmp_path)
        session.available_triggers["daily"] = TriggerDefinition(
            id="daily",
            trigger_type="timer",
            trigger_config={"cron": "0 5 * * *"},
            task="Run task",
            active=True,
        )
        session.active_trigger_ids.add("daily")
        session.active_timer_tasks["daily"] = asyncio.create_task(asyncio.sleep(60))
        app = _make_app_with_session(session)
        async with TestClient(TestServer(app)) as client:
            resp = await client.patch(
                "/api/sessions/test_agent/triggers/daily",
                json={"trigger_config": {"cron": "0 6 * * *"}},
            )
            assert resp.status == 200
            data = await resp.json()
            assert data["trigger_config"]["cron"] == "0 6 * * *"
            assert "daily" in session.active_timer_tasks
            assert session.active_timer_tasks["daily"] is not None
            assert session.available_triggers["daily"].trigger_config["cron"] == "0 6 * * *"
            session.active_timer_tasks["daily"].cancel()

    @pytest.mark.asyncio
    async def test_update_trigger_cron_rejects_invalid_expression(self, tmp_path):
        session = _make_session(tmp_dir=tmp_path)
        session.available_triggers["daily"] = TriggerDefinition(
            id="daily",
            trigger_type="timer",
            trigger_config={"cron": "0 5 * * *"},
            task="Run task",
        )
        app = _make_app_with_session(session)
        async with TestClient(TestServer(app)) as client:
            resp = await client.patch(
                "/api/sessions/test_agent/triggers/daily",
                json={"trigger_config": {"cron": "not a cron"}},
            )
            assert resp.status == 400


class TestExecution:
    @pytest.mark.asyncio
@@ -767,6 +877,22 @@ class TestWorkerSessions:
        assert data["sessions"][0]["status"] == "paused"
        assert data["sessions"][0]["steps"] == 5

    @pytest.mark.asyncio
    async def test_list_sessions_includes_custom_id(self, custom_id_session, tmp_agent_dir):
        session_id, session_dir, state = custom_id_session
        tmp_path, agent_name, base = tmp_agent_dir

        session = _make_session(tmp_dir=tmp_path / ".hive" / "agents" / agent_name)
        app = _make_app_with_session(session)

        async with TestClient(TestServer(app)) as client:
            resp = await client.get("/api/sessions/test_agent/worker-sessions")
            assert resp.status == 200
            data = await resp.json()
            assert len(data["sessions"]) == 1
            assert data["sessions"][0]["session_id"] == session_id
            assert data["sessions"][0]["status"] == "paused"

    @pytest.mark.asyncio
    async def test_list_sessions_empty(self, tmp_agent_dir):
        tmp_path, agent_name, base = tmp_agent_dir
@@ -1284,6 +1410,28 @@ class TestLogs:
        assert len(data["logs"]) >= 1
        assert data["logs"][0]["run_id"] == session_id

    @pytest.mark.asyncio
    async def test_logs_list_summaries_with_custom_id(self, custom_id_session, tmp_agent_dir):
        session_id, session_dir, state = custom_id_session
        tmp_path, agent_name, base = tmp_agent_dir

        from framework.runtime.runtime_log_store import RuntimeLogStore

        log_store = RuntimeLogStore(base)
        session = _make_session(
            tmp_dir=tmp_path / ".hive" / "agents" / agent_name,
            log_store=log_store,
        )
        app = _make_app_with_session(session)

        async with TestClient(TestServer(app)) as client:
            resp = await client.get("/api/sessions/test_agent/logs")
            assert resp.status == 200
            data = await resp.json()
            assert "logs" in data
            assert len(data["logs"]) >= 1
            assert data["logs"][0]["run_id"] == session_id

    @pytest.mark.asyncio
    async def test_logs_session_summary(self, sample_session, tmp_agent_dir):
        session_id, session_dir, state = sample_session

@@ -0,0 +1,35 @@
"""Hive Agent Skills — discovery, parsing, trust gating, and injection of SKILL.md packages.

Implements the open Agent Skills standard (agentskills.io) for portable
skill discovery and activation, plus built-in default skills for runtime
operational discipline, and AS-13 trust gating for project-scope skills.
"""

from framework.skills.catalog import SkillCatalog
from framework.skills.config import DefaultSkillConfig, SkillsConfig
from framework.skills.defaults import DefaultSkillManager
from framework.skills.discovery import DiscoveryConfig, SkillDiscovery
from framework.skills.manager import SkillsManager, SkillsManagerConfig
from framework.skills.models import TrustStatus
from framework.skills.parser import ParsedSkill, parse_skill_md
from framework.skills.skill_errors import SkillError, SkillErrorCode, log_skill_error
from framework.skills.trust import TrustedRepoStore, TrustGate

__all__ = [
    "DefaultSkillConfig",
    "DefaultSkillManager",
    "DiscoveryConfig",
    "ParsedSkill",
    "SkillCatalog",
    "SkillDiscovery",
    "SkillsConfig",
    "SkillsManager",
    "SkillsManagerConfig",
    "TrustGate",
    "TrustedRepoStore",
    "TrustStatus",
    "parse_skill_md",
    "SkillError",
    "SkillErrorCode",
    "log_skill_error",
]
@@ -0,0 +1,24 @@
---
name: hive.batch-ledger
description: Track per-item status when processing collections to prevent skipped or duplicated items.
metadata:
  author: hive
  type: default-skill
---

## Operational Protocol: Batch Progress Ledger

When processing a collection of items, maintain a batch ledger in `_batch_ledger`.

Initialize when you identify the batch:
- `_batch_total`: total item count
- `_batch_ledger`: JSON with per-item status

Per-item statuses: pending → in_progress → completed|failed|skipped

- Set `in_progress` BEFORE processing
- Set final status AFTER processing with a 1-line result_summary
- Include the error reason for failed/skipped items
- Update aggregate counts after each item
- NEVER remove items from the ledger
- If resuming, skip items already marked completed
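To make the ledger shape concrete, an illustrative value is sketched below; only the statuses and the `result_summary`/error fields come from the protocol text, and the item IDs are invented:

```python
# Illustrative _batch_ledger contents mid-run (hypothetical item IDs).
_batch_total = 3
_batch_ledger = {
    "item-001": {"status": "completed", "result_summary": "Parsed and stored 42 rows"},
    "item-002": {"status": "failed", "error": "HTTP 500 from upstream API"},
    "item-003": {"status": "in_progress"},
}
```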
@@ -0,0 +1,22 @@
---
name: hive.context-preservation
description: Proactively preserve critical information before automatic context pruning destroys it.
metadata:
  author: hive
  type: default-skill
---

## Operational Protocol: Context Preservation

You operate under a finite context window. Important information WILL be pruned.

Save-As-You-Go: After any tool call producing information you'll need later,
immediately extract the key data into `_working_notes` or `_preserved_data`.
Do NOT rely on referring back to old tool results.

What to extract: URLs and key snippets (not full pages), relevant API fields
(not raw JSON), specific lines/values (not entire files), analysis results
(not raw data).

Before transitioning to the next phase/node, write a handoff summary to
`_handoff_context` with everything the next phase needs to know.
@@ -0,0 +1,18 @@
---
name: hive.error-recovery
description: Follow a structured recovery protocol when tool calls fail instead of blindly retrying or giving up.
metadata:
  author: hive
  type: default-skill
---

## Operational Protocol: Error Recovery

When a tool call fails:

1. Diagnose — record the error in notes, classify it as transient or structural
2. Decide — transient: retry once. Structural but fixable: fix and retry.
   Structural and unfixable: record as failed, move to the next item.
   Blocking all progress: record an escalation note.
3. Adapt — if the same tool has failed 3+ times, stop using it and find an alternative.
   Update the plan in notes. Never silently drop the failed item.
@@ -0,0 +1,27 @@
---
name: hive.note-taking
description: Maintain structured working notes throughout execution to prevent information loss during context pruning.
metadata:
  author: hive
  type: default-skill
---

## Operational Protocol: Structured Note-Taking

Maintain structured working notes in the shared memory key `_working_notes`.
Update at these checkpoints:

- After completing each discrete subtask or batch item
- After receiving new information that changes your plan
- Before any tool call that will produce substantial output

Structure:

### Objective — restate the goal
### Current Plan — numbered steps, mark completed with ✓
### Key Decisions — decisions made and WHY
### Working Data — intermediate results, extracted values
### Open Questions — uncertainties to verify
### Blockers — anything preventing progress

Update incrementally — do not rewrite from scratch each time.
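An illustrative `_working_notes` value following the structure above; the task content is invented for the example:

```python
# Hypothetical shared-memory value showing the six sections in use.
_working_notes = """\
### Objective — summarize Q3 incident reports
### Current Plan
1. ✓ List report files
2. Extract severity counts
3. Draft summary
### Key Decisions — skipping archived reports (out of scope per task)
### Working Data — 14 reports found, 3 at sev-1
### Open Questions — does "Q3" mean fiscal or calendar quarter?
### Blockers — none
"""
```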
@@ -0,0 +1,20 @@
---
name: hive.quality-monitor
description: Periodically self-assess output quality to catch degradation before the judge does.
metadata:
  author: hive
  type: default-skill
---

## Operational Protocol: Quality Self-Assessment

Every 5 iterations, self-assess:

1. On-task? Still working toward the stated objective?
2. Thorough? Cutting corners compared to earlier?
3. Non-repetitive? Producing new value or rehashing?
4. Consistent? Does the latest output contradict earlier decisions?
5. Complete? Tracking all items, or have some been silently dropped?

If degrading: write an assessment to `_quality_log`, re-read `_working_notes`,
and change approach explicitly. If acceptable: leave a brief note in `_quality_log`.
@@ -0,0 +1,17 @@
---
name: hive.task-decomposition
description: Decompose complex tasks into explicit subtasks before diving in.
metadata:
  author: hive
  type: default-skill
---

## Operational Protocol: Task Decomposition

Before starting a complex task:

1. Decompose — break it into numbered subtasks in the `_working_notes` Current Plan
2. Estimate — relative effort per subtask (small/medium/large)
3. Execute — work through them in order, mark ✓ when complete
4. Budget — if running low on iterations, prioritize by impact
5. Verify — before declaring done, every subtask must be ✓, skipped (with reason), or blocked
@@ -0,0 +1,116 @@
"""Skill catalog — in-memory index with system prompt generation.

Builds the XML catalog injected into the system prompt for model-driven
skill activation per the Agent Skills standard.
"""

from __future__ import annotations

import logging
from xml.sax.saxutils import escape

from framework.skills.parser import ParsedSkill
from framework.skills.skill_errors import SkillErrorCode, log_skill_error

logger = logging.getLogger(__name__)

_BEHAVIORAL_INSTRUCTION = (
    "The following skills provide specialized instructions for specific tasks.\n"
    "When a task matches a skill's description, read the SKILL.md at the listed\n"
    "location to load the full instructions before proceeding.\n"
    "When a skill references relative paths, resolve them against the skill's\n"
    "directory (the parent of SKILL.md) and use absolute paths in tool calls."
)


class SkillCatalog:
    """In-memory catalog of discovered skills."""

    def __init__(self, skills: list[ParsedSkill] | None = None):
        self._skills: dict[str, ParsedSkill] = {}
        self._activated: set[str] = set()
        if skills:
            for skill in skills:
                self.add(skill)

    def add(self, skill: ParsedSkill) -> None:
        """Add a skill to the catalog."""
        self._skills[skill.name] = skill

    def get(self, name: str) -> ParsedSkill | None:
        """Look up a skill by name."""
        return self._skills.get(name)

    def mark_activated(self, name: str) -> None:
        """Mark a skill as activated in the current session."""
        self._activated.add(name)

    def is_activated(self, name: str) -> bool:
        """Check if a skill has been activated."""
        return name in self._activated

    @property
    def skill_count(self) -> int:
        return len(self._skills)

    @property
    def allowlisted_dirs(self) -> list[str]:
        """All skill base directories for file access allowlisting."""
        return [skill.base_dir for skill in self._skills.values()]

    def to_prompt(self) -> str:
        """Generate the catalog prompt for system prompt injection.

        Returns empty string if no community/user skills are discovered
        (default skills are handled separately by DefaultSkillManager).
        """
        # Filter out framework-scope skills (default skills) — they're
        # injected via the protocols prompt, not the catalog
        community_skills = [s for s in self._skills.values() if s.source_scope != "framework"]

        if not community_skills:
            return ""

        lines = ["<available_skills>"]
        for skill in sorted(community_skills, key=lambda s: s.name):
            lines.append("  <skill>")
            lines.append(f"    <name>{escape(skill.name)}</name>")
            lines.append(f"    <description>{escape(skill.description)}</description>")
            lines.append(f"    <location>{escape(skill.location)}</location>")
            lines.append(f"    <base_dir>{escape(skill.base_dir)}</base_dir>")
            lines.append("  </skill>")
        lines.append("</available_skills>")

        xml_block = "\n".join(lines)
        return f"{_BEHAVIORAL_INSTRUCTION}\n\n{xml_block}"

    def build_pre_activated_prompt(self, skill_names: list[str]) -> str:
        """Build prompt content for pre-activated skills.

        Pre-activated skills get their full SKILL.md body loaded into
        the system prompt at startup (tier 2), bypassing model-driven
        activation.

        Returns empty string if no skills match.
        """
        parts: list[str] = []

        for name in skill_names:
            skill = self.get(name)
            if skill is None:
                log_skill_error(
                    logger,
                    "warning",
                    SkillErrorCode.SKILL_NOT_FOUND,
                    what=f"Pre-activated skill '{name}' not found in catalog",
                    why="The skill was listed for pre-activation but was not discovered.",
                    fix=f"Check that a SKILL.md for '{name}' exists in a scanned directory.",
                )
                continue
            if self.is_activated(name):
                continue  # Already activated, skip duplicate

            self.mark_activated(name)
            parts.append(f"--- Pre-Activated Skill: {skill.name} ---\n{skill.body}")

        return "\n\n".join(parts)
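A hedged sketch of how the catalog gets wired up, assuming `ParsedSkill` is a dataclass constructible from the fields the class above reads (name, description, location, base_dir, body, source_scope); the paths are placeholders:

```python
# Sketch: register one user-scope skill and render the prompt pieces.
from framework.skills.catalog import SkillCatalog
from framework.skills.parser import ParsedSkill

skill = ParsedSkill(  # field names taken from the attribute accesses above
    name="deep-research",
    description="Multi-step research with source tracking.",
    location="/home/user/.hive/skills/deep-research/SKILL.md",
    base_dir="/home/user/.hive/skills/deep-research",
    body="## Deep Research\n...",
    source_scope="user",
)

catalog = SkillCatalog([skill])
print(catalog.to_prompt())  # behavioral instruction + <available_skills> XML
print(catalog.build_pre_activated_prompt(["deep-research"]))  # full SKILL.md body
```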
@@ -0,0 +1,120 @@
"""CLI commands for the Hive skill system.

Phase 1 commands (AS-13):
    hive skill list — list discovered skills across all scopes
    hive skill trust <path> — permanently trust a project repo's skills

Full CLI suite (CLI-1 through CLI-13) is Phase 2.
"""

from __future__ import annotations

import subprocess
import sys
from pathlib import Path


def register_skill_commands(subparsers) -> None:
    """Register the ``hive skill`` subcommand group."""
    skill_parser = subparsers.add_parser("skill", help="Manage skills")
    skill_sub = skill_parser.add_subparsers(dest="skill_command", required=True)

    # hive skill list
    list_parser = skill_sub.add_parser("list", help="List discovered skills across all scopes")
    list_parser.add_argument(
        "--project-dir",
        default=None,
        metavar="PATH",
        help="Project directory to scan (default: current directory)",
    )
    list_parser.set_defaults(func=cmd_skill_list)

    # hive skill trust
    trust_parser = skill_sub.add_parser(
        "trust",
        help="Permanently trust a project repository so its skills load without prompting",
    )
    trust_parser.add_argument(
        "project_path",
        help="Path to the project directory (must contain a .git with a remote origin)",
    )
    trust_parser.set_defaults(func=cmd_skill_trust)


def cmd_skill_list(args) -> int:
    """List all discovered skills grouped by scope."""
    from framework.skills.discovery import DiscoveryConfig, SkillDiscovery

    project_dir = Path(args.project_dir).resolve() if args.project_dir else Path.cwd()
    skills = SkillDiscovery(DiscoveryConfig(project_root=project_dir)).discover()

    if not skills:
        print("No skills discovered.")
        return 0

    scope_headers = {
        "project": "PROJECT SKILLS",
        "user": "USER SKILLS",
        "framework": "FRAMEWORK SKILLS",
    }

    for scope in ("project", "user", "framework"):
        scope_skills = [s for s in skills if s.source_scope == scope]
        if not scope_skills:
            continue
        print(f"\n{scope_headers[scope]}")
        print("─" * 40)
        for skill in scope_skills:
            print(f"  • {skill.name}")
            print(f"    {skill.description}")
            print(f"    {skill.location}")

    return 0


def cmd_skill_trust(args) -> int:
    """Permanently trust a project repository's skills."""
    from framework.skills.trust import TrustedRepoStore, _normalize_remote_url

    project_path = Path(args.project_path).resolve()

    if not project_path.exists():
        print(f"Error: path does not exist: {project_path}", file=sys.stderr)
        return 1

    if not (project_path / ".git").exists():
        print(
            f"Error: {project_path} is not a git repository (no .git directory).",
            file=sys.stderr,
        )
        return 1

    try:
        result = subprocess.run(
            ["git", "-C", str(project_path), "remote", "get-url", "origin"],
            capture_output=True,
            text=True,
            timeout=3,
        )
        if result.returncode != 0:
            print(
                "Error: no remote 'origin' configured in this repository.",
                file=sys.stderr,
            )
            return 1
        remote_url = result.stdout.strip()
    except subprocess.TimeoutExpired:
        print("Error: git remote lookup timed out.", file=sys.stderr)
        return 1
    except (FileNotFoundError, OSError) as e:
        print(f"Error reading git remote: {e}", file=sys.stderr)
        return 1

    repo_key = _normalize_remote_url(remote_url)
    store = TrustedRepoStore()
    store.trust(repo_key, project_path=str(project_path))

    print(f"✓ Trusted: {repo_key}")
    print("  Stored in ~/.hive/trusted_repos.json")
    print("  Skills from this repository will load without prompting in future runs.")
    return 0
@@ -0,0 +1,100 @@
"""Skill configuration dataclasses.

Handles agent-level skill configuration from module-level variables
(``default_skills`` and ``skills``).
"""

from __future__ import annotations

from dataclasses import dataclass, field
from typing import Any


@dataclass
class DefaultSkillConfig:
    """Configuration for a single default skill."""

    enabled: bool = True
    overrides: dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> DefaultSkillConfig:
        enabled = data.get("enabled", True)
        overrides = {k: v for k, v in data.items() if k != "enabled"}
        return cls(enabled=enabled, overrides=overrides)


@dataclass
class SkillsConfig:
    """Agent-level skill configuration.

    Built from module-level variables in agent.py::

        # Pre-activated community skills
        skills = ["deep-research", "code-review"]

        # Default skill configuration
        default_skills = {
            "hive.note-taking": {"enabled": True},
            "hive.batch-ledger": {"enabled": True, "checkpoint_every_n": 10},
            "hive.quality-monitor": {"enabled": False},
        }
    """

    # Per-default-skill config, keyed by skill name (e.g. "hive.note-taking")
    default_skills: dict[str, DefaultSkillConfig] = field(default_factory=dict)

    # Pre-activated community skills (by name)
    skills: list[str] = field(default_factory=list)

    # Master switch: disable all default skills at once
    all_defaults_disabled: bool = False

    def is_default_enabled(self, skill_name: str) -> bool:
        """Check if a specific default skill is enabled."""
        if self.all_defaults_disabled:
            return False
        config = self.default_skills.get(skill_name)
        if config is None:
            return True  # enabled by default
        return config.enabled

    def get_default_overrides(self, skill_name: str) -> dict[str, Any]:
        """Get skill-specific configuration overrides."""
        config = self.default_skills.get(skill_name)
        if config is None:
            return {}
        return config.overrides

    @classmethod
    def from_agent_vars(
        cls,
        default_skills: dict[str, Any] | None = None,
        skills: list[str] | None = None,
    ) -> SkillsConfig:
        """Build config from agent module-level variables.

        Args:
            default_skills: Dict from agent module, e.g.
                ``{"hive.note-taking": {"enabled": True}}``
            skills: List of pre-activated skill names from agent module
        """
        all_disabled = False
        parsed_defaults: dict[str, DefaultSkillConfig] = {}

        if default_skills:
            for name, config_dict in default_skills.items():
                if name == "_all":
                    if isinstance(config_dict, dict) and not config_dict.get("enabled", True):
                        all_disabled = True
                    continue
                if isinstance(config_dict, dict):
                    parsed_defaults[name] = DefaultSkillConfig.from_dict(config_dict)
                elif isinstance(config_dict, bool):
                    parsed_defaults[name] = DefaultSkillConfig(enabled=config_dict)

        return cls(
            default_skills=parsed_defaults,
            skills=list(skills or []),
            all_defaults_disabled=all_disabled,
        )
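A short sketch of how the module-level agent variables map onto these dataclasses, exercising only behavior visible in the code above:

```python
# Sketch: the bool shorthand, per-skill overrides, and the "_all" switch.
from framework.skills.config import SkillsConfig

cfg = SkillsConfig.from_agent_vars(
    default_skills={
        "hive.quality-monitor": False,  # bool shorthand -> disabled
        "hive.batch-ledger": {"enabled": True, "checkpoint_every_n": 10},
        "_all": {"enabled": True},  # master switch left on
    },
    skills=["deep-research"],
)

assert not cfg.is_default_enabled("hive.quality-monitor")
assert cfg.is_default_enabled("hive.note-taking")  # unconfigured names default to enabled
assert cfg.get_default_overrides("hive.batch-ledger") == {"checkpoint_every_n": 10}
```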
@@ -0,0 +1,200 @@
"""DefaultSkillManager — load, configure, and inject built-in default skills.

Default skills are SKILL.md packages shipped with the framework that provide
runtime operational protocols (note-taking, batch tracking, error recovery, etc.).
"""

from __future__ import annotations

import logging
from pathlib import Path

from framework.skills.config import SkillsConfig
from framework.skills.parser import ParsedSkill, parse_skill_md
from framework.skills.skill_errors import SkillErrorCode, log_skill_error

logger = logging.getLogger(__name__)

# Default skills directory relative to this module
_DEFAULT_SKILLS_DIR = Path(__file__).parent / "_default_skills"

# Ordered list of default skills (name → directory)
SKILL_REGISTRY: dict[str, str] = {
    "hive.note-taking": "note-taking",
    "hive.batch-ledger": "batch-ledger",
    "hive.context-preservation": "context-preservation",
    "hive.quality-monitor": "quality-monitor",
    "hive.error-recovery": "error-recovery",
    "hive.task-decomposition": "task-decomposition",
}

# All shared memory keys used by default skills (for permission auto-inclusion)
SHARED_MEMORY_KEYS: list[str] = [
    # note-taking
    "_working_notes",
    "_notes_updated_at",
    # batch-ledger
    "_batch_ledger",
    "_batch_total",
    "_batch_completed",
    "_batch_failed",
    # context-preservation
    "_handoff_context",
    "_preserved_data",
    # quality-monitor
    "_quality_log",
    "_quality_degradation_count",
    # error-recovery
    "_error_log",
    "_failed_tools",
    "_escalation_needed",
    # task-decomposition
    "_subtasks",
    "_iteration_budget_remaining",
]

class DefaultSkillManager:
|
||||
"""Manages loading, configuration, and prompt generation for default skills."""
|
||||
|
||||
def __init__(self, config: SkillsConfig | None = None):
|
||||
self._config = config or SkillsConfig()
|
||||
self._skills: dict[str, ParsedSkill] = {}
|
||||
self._loaded = False
|
||||
self._error_count = 0
|
||||
|
||||
def load(self) -> None:
|
||||
"""Load all enabled default skill SKILL.md files."""
|
||||
if self._loaded:
|
||||
return
|
||||
|
||||
error_count = 0
|
||||
for skill_name, dir_name in SKILL_REGISTRY.items():
|
||||
if not self._config.is_default_enabled(skill_name):
|
||||
logger.info("Default skill '%s' disabled by config", skill_name)
|
||||
continue
|
||||
|
||||
skill_path = _DEFAULT_SKILLS_DIR / dir_name / "SKILL.md"
|
||||
if not skill_path.is_file():
|
||||
log_skill_error(
|
||||
logger,
|
||||
"error",
|
||||
SkillErrorCode.SKILL_NOT_FOUND,
|
||||
what=f"Default skill SKILL.md not found: '{skill_path}'",
|
||||
why=f"The framework skill '{skill_name}' is missing its SKILL.md file.",
|
||||
fix="Reinstall the hive framework — this file is part of the package.",
|
||||
)
|
||||
error_count += 1
|
||||
continue
|
||||
|
||||
parsed = parse_skill_md(skill_path, source_scope="framework")
|
||||
if parsed is None:
|
||||
log_skill_error(
|
||||
logger,
|
||||
"error",
|
||||
SkillErrorCode.SKILL_PARSE_ERROR,
|
||||
what=f"Failed to parse default skill '{skill_name}'",
|
||||
why=f"parse_skill_md returned None for '{skill_path}'.",
|
||||
fix="Reinstall the hive framework — this file may be corrupted.",
|
||||
)
|
||||
error_count += 1
|
||||
continue
|
||||
|
||||
self._skills[skill_name] = parsed
|
||||
|
||||
self._loaded = True
|
||||
self._error_count = error_count
|
||||
|
||||
def build_protocols_prompt(self) -> str:
|
||||
"""Build the combined operational protocols section.
|
||||
|
||||
Extracts protocol sections from all enabled default skills and
|
||||
combines them into a single ``## Operational Protocols`` block
|
||||
for system prompt injection.
|
||||
|
||||
Returns empty string if all defaults are disabled.
|
||||
"""
|
||||
if not self._skills:
|
||||
return ""
|
||||
|
||||
parts: list[str] = ["## Operational Protocols\n"]
|
||||
|
||||
for skill_name in SKILL_REGISTRY:
|
||||
skill = self._skills.get(skill_name)
|
||||
if skill is None:
|
||||
continue
|
||||
# Use the full body — each SKILL.md contains exactly one protocol section
|
||||
parts.append(skill.body)
|
||||
|
||||
if len(parts) <= 1:
|
||||
return ""
|
||||
|
||||
combined = "\n\n".join(parts)
|
||||
|
||||
# Token budget warning (approximate: 1 token ≈ 4 chars)
|
||||
approx_tokens = len(combined) // 4
|
||||
if approx_tokens > 2000:
|
||||
logger.warning(
|
||||
"Default skill protocols exceed 2000 token budget "
|
||||
"(~%d tokens, %d chars). Consider trimming.",
|
||||
approx_tokens,
|
||||
len(combined),
|
||||
)
|
||||
|
||||
return combined
|
||||
|
||||
def log_active_skills(self) -> None:
|
||||
"""Log which default skills are active and their configuration."""
|
||||
if not self._skills:
|
||||
logger.info("Default skills: all disabled")
|
||||
|
||||
# DX-3: Per-skill structured startup log
|
||||
for skill_name in SKILL_REGISTRY:
|
||||
if skill_name in self._skills:
|
||||
overrides = self._config.get_default_overrides(skill_name)
|
||||
status = f"loaded overrides={overrides}" if overrides else "loaded"
|
||||
elif not self._config.is_default_enabled(skill_name):
|
||||
status = "disabled"
|
||||
else:
|
||||
status = "error"
|
||||
logger.info(
|
||||
"skill_startup name=%s scope=framework status=%s",
|
||||
skill_name,
|
||||
status,
|
||||
)
|
||||
|
||||
# Original active skills log line (preserved for backward compatibility)
|
||||
active = []
|
||||
for skill_name in SKILL_REGISTRY:
|
||||
if skill_name in self._skills:
|
||||
overrides = self._config.get_default_overrides(skill_name)
|
||||
if overrides:
|
||||
active.append(f"{skill_name} ({overrides})")
|
||||
else:
|
||||
active.append(skill_name)
|
||||
|
||||
if active:
|
||||
logger.info("Default skills active: %s", ", ".join(active))
|
||||
|
||||
# DX-3: Summary line with error count
|
||||
total = len(SKILL_REGISTRY)
|
||||
active_count = len(self._skills)
|
||||
error_count = getattr(self, "_error_count", 0)
|
||||
disabled_count = total - active_count - error_count
|
||||
logger.info(
|
||||
"Skills: %d default (%d active, %d disabled, %d error)",
|
||||
total,
|
||||
active_count,
|
||||
disabled_count,
|
||||
error_count,
|
||||
)
|
||||
|
||||
@property
|
||||
def active_skill_names(self) -> list[str]:
|
||||
"""Names of all currently active default skills."""
|
||||
return list(self._skills.keys())
|
||||
|
||||
@property
|
||||
def active_skills(self) -> dict[str, ParsedSkill]:
|
||||
"""All active default skills keyed by name."""
|
||||
return dict(self._skills)
|
||||
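
A short usage sketch (illustrative, not part of the diff; builds on the SkillsConfig section above)::

    cfg = SkillsConfig.from_agent_vars(default_skills={"hive.quality-monitor": False})
    mgr = DefaultSkillManager(config=cfg)
    mgr.load()               # parses each enabled SKILL.md; repeated calls are no-ops
    mgr.log_active_skills()  # per-skill skill_startup lines plus a summary line
    prompt = mgr.build_protocols_prompt()
    # "" when every default is disabled; otherwise one "## Operational Protocols"
    # block, with a warning logged if it exceeds the ~2000-token budget.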
@@ -0,0 +1,186 @@
"""Skill discovery — scan standard directories for SKILL.md files.

Implements the Agent Skills standard discovery paths plus Hive-specific
locations. Resolves name collisions deterministically.
"""

from __future__ import annotations

import logging
from dataclasses import dataclass
from pathlib import Path

from framework.skills.parser import ParsedSkill, parse_skill_md
from framework.skills.skill_errors import SkillErrorCode, log_skill_error

logger = logging.getLogger(__name__)

# Directories to skip during scanning
_SKIP_DIRS = frozenset(
    {
        ".git",
        "node_modules",
        "__pycache__",
        ".venv",
        "venv",
        ".mypy_cache",
        ".pytest_cache",
        ".ruff_cache",
    }
)

# Scope priority (higher = takes precedence)
_SCOPE_PRIORITY = {
    "framework": 0,
    "user": 1,
    "project": 2,
}

# Within the same scope, Hive-specific paths override cross-client paths.
# We encode this by scanning cross-client first, then Hive-specific (later wins).


@dataclass
class DiscoveryConfig:
    """Configuration for skill discovery."""

    project_root: Path | None = None
    skip_user_scope: bool = False
    skip_framework_scope: bool = False
    max_depth: int = 4
    max_dirs: int = 2000


class SkillDiscovery:
    """Scans standard directories for SKILL.md files and resolves collisions."""

    def __init__(self, config: DiscoveryConfig | None = None):
        self._config = config or DiscoveryConfig()

    def discover(self) -> list[ParsedSkill]:
        """Scan all scopes and return deduplicated skill list.

        Scanning order (lowest to highest precedence):
        1. Framework defaults
        2. User cross-client (~/.agents/skills/)
        3. User Hive-specific (~/.hive/skills/)
        4. Project cross-client (<project>/.agents/skills/)
        5. Project Hive-specific (<project>/.hive/skills/)

        Later entries override earlier ones on name collision.
        """
        all_skills: list[ParsedSkill] = []

        # Framework scope (lowest precedence)
        if not self._config.skip_framework_scope:
            framework_dir = Path(__file__).parent / "_default_skills"
            if framework_dir.is_dir():
                all_skills.extend(self._scan_scope(framework_dir, "framework"))

        # User scope
        if not self._config.skip_user_scope:
            home = Path.home()

            # Cross-client (lower precedence within user scope)
            user_agents = home / ".agents" / "skills"
            if user_agents.is_dir():
                all_skills.extend(self._scan_scope(user_agents, "user"))

            # Hive-specific (higher precedence within user scope)
            user_hive = home / ".hive" / "skills"
            if user_hive.is_dir():
                all_skills.extend(self._scan_scope(user_hive, "user"))

        # Project scope (highest precedence)
        if self._config.project_root:
            root = self._config.project_root

            # Cross-client
            project_agents = root / ".agents" / "skills"
            if project_agents.is_dir():
                all_skills.extend(self._scan_scope(project_agents, "project"))

            # Hive-specific
            project_hive = root / ".hive" / "skills"
            if project_hive.is_dir():
                all_skills.extend(self._scan_scope(project_hive, "project"))

        resolved = self._resolve_collisions(all_skills)

        logger.info(
            "Skill discovery: found %d skills (%d after dedup) across all scopes",
            len(all_skills),
            len(resolved),
        )
        return resolved

    def _scan_scope(self, root: Path, scope: str) -> list[ParsedSkill]:
        """Scan a single directory for skill directories containing SKILL.md."""
        skills: list[ParsedSkill] = []
        dirs_scanned = 0

        for skill_md in self._find_skill_files(root, depth=0):
            if dirs_scanned >= self._config.max_dirs:
                logger.warning(
                    "Hit max directory limit (%d) scanning %s",
                    self._config.max_dirs,
                    root,
                )
                break

            parsed = parse_skill_md(skill_md, source_scope=scope)
            if parsed is not None:
                skills.append(parsed)
            dirs_scanned += 1

        return skills

    def _find_skill_files(self, directory: Path, depth: int) -> list[Path]:
        """Recursively find SKILL.md files up to max_depth."""
        if depth > self._config.max_depth:
            return []

        results: list[Path] = []

        try:
            entries = sorted(directory.iterdir())
        except OSError:
            return []

        for entry in entries:
            if not entry.is_dir():
                continue
            if entry.name in _SKIP_DIRS:
                continue

            skill_md = entry / "SKILL.md"
            if skill_md.is_file():
                results.append(skill_md)
            else:
                # Recurse into subdirectories
                results.extend(self._find_skill_files(entry, depth + 1))

        return results

    def _resolve_collisions(self, skills: list[ParsedSkill]) -> list[ParsedSkill]:
        """Resolve name collisions deterministically.

        Later entries in the list override earlier ones (because we scan
        from lowest to highest precedence). On collision, log a warning.
        """
        seen: dict[str, ParsedSkill] = {}

        for skill in skills:
            if skill.name in seen:
                existing = seen[skill.name]
                log_skill_error(
                    logger,
                    "warning",
                    SkillErrorCode.SKILL_COLLISION,
                    what=f"Skill name collision: '{skill.name}'",
                    why=f"'{skill.location}' overrides '{existing.location}'.",
                    fix="Rename one of the conflicting skill directories to use a unique name.",
                )
            seen[skill.name] = skill

        return list(seen.values())
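
Illustrative usage (not part of the diff; the project path is hypothetical)::

    discovery = SkillDiscovery(DiscoveryConfig(project_root=Path("/work/my-agent")))
    skills = discovery.discover()
    # On a name collision, the copy under /work/my-agent/.hive/skills/ wins over
    # one under ~/.agents/skills/, because project Hive-specific directories are
    # scanned last and _resolve_collisions keeps the latest entry per name.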
@@ -0,0 +1,194 @@
"""Unified skill lifecycle manager.

``SkillsManager`` is the single facade that owns skill discovery, loading,
and prompt rendering. The runtime creates one at startup and downstream
layers read the cached prompt strings.

Typical usage — **config-driven** (runner passes configuration)::

    config = SkillsManagerConfig(
        skills_config=SkillsConfig.from_agent_vars(...),
        project_root=agent_path,
    )
    mgr = SkillsManager(config)
    mgr.load()
    print(mgr.protocols_prompt)  # default skill protocols
    print(mgr.skills_catalog_prompt)  # community skills XML

Typical usage — **bare** (exported agents, SDK users)::

    mgr = SkillsManager()  # default config
    mgr.load()  # loads all 6 default skills, no community discovery
"""

from __future__ import annotations

import logging
from dataclasses import dataclass, field
from pathlib import Path

from framework.skills.config import SkillsConfig

logger = logging.getLogger(__name__)


@dataclass
class SkillsManagerConfig:
    """Everything the runtime needs to configure skills.

    Attributes:
        skills_config: Per-skill enable/disable and overrides.
        project_root: Agent directory for community skill discovery.
            When ``None``, community discovery is skipped.
        skip_community_discovery: Explicitly skip community scanning
            even when ``project_root`` is set.
        interactive: Whether trust gating can prompt the user interactively.
            When ``False``, untrusted project skills are silently skipped.
    """

    skills_config: SkillsConfig = field(default_factory=SkillsConfig)
    project_root: Path | None = None
    skip_community_discovery: bool = False
    interactive: bool = True


class SkillsManager:
    """Unified skill lifecycle: discovery → loading → prompt rendering.

    The runtime creates one instance during init and owns it for the
    lifetime of the process. Downstream layers (``ExecutionStream``,
    ``GraphExecutor``, ``NodeContext``, ``EventLoopNode``) receive the
    cached prompt strings via property accessors.
    """

    def __init__(self, config: SkillsManagerConfig | None = None) -> None:
        self._config = config or SkillsManagerConfig()
        self._loaded = False
        self._catalog_prompt: str = ""
        self._protocols_prompt: str = ""
        self._allowlisted_dirs: list[str] = []

    # ------------------------------------------------------------------
    # Factory for backwards-compat bridge
    # ------------------------------------------------------------------

    @classmethod
    def from_precomputed(
        cls,
        skills_catalog_prompt: str = "",
        protocols_prompt: str = "",
    ) -> SkillsManager:
        """Wrap pre-rendered prompt strings (legacy callers).

        Returns a manager that skips discovery/loading and just returns
        the provided strings. Used by the deprecation bridge in
        ``AgentRuntime`` when callers pass raw prompt strings.
        """
        mgr = cls.__new__(cls)
        mgr._config = SkillsManagerConfig()
        mgr._loaded = True  # skip load()
        mgr._catalog_prompt = skills_catalog_prompt
        mgr._protocols_prompt = protocols_prompt
        mgr._allowlisted_dirs = []
        return mgr

    # ------------------------------------------------------------------
    # Lifecycle
    # ------------------------------------------------------------------

    def load(self) -> None:
        """Discover, load, and cache skill prompts. Idempotent."""
        if self._loaded:
            return
        self._loaded = True

        try:
            self._do_load()
        except Exception:
            logger.warning("Skill system init failed (non-fatal)", exc_info=True)

    def _do_load(self) -> None:
        """Internal load — may raise; caller catches."""
        from framework.skills.catalog import SkillCatalog
        from framework.skills.defaults import DefaultSkillManager
        from framework.skills.discovery import DiscoveryConfig, SkillDiscovery

        skills_config = self._config.skills_config

        # 1. Community skill discovery (when project_root is available)
        catalog_prompt = ""
        if self._config.project_root is not None and not self._config.skip_community_discovery:
            from framework.skills.trust import TrustGate

            discovery = SkillDiscovery(DiscoveryConfig(project_root=self._config.project_root))
            discovered = discovery.discover()

            # Trust-gate project-scope skills (AS-13)
            discovered = TrustGate(interactive=self._config.interactive).filter_and_gate(
                discovered, project_dir=self._config.project_root
            )

            catalog = SkillCatalog(discovered)
            self._allowlisted_dirs = catalog.allowlisted_dirs
            catalog_prompt = catalog.to_prompt()

            # Pre-activated community skills
            if skills_config.skills:
                pre_activated = catalog.build_pre_activated_prompt(skills_config.skills)
                if pre_activated:
                    if catalog_prompt:
                        catalog_prompt = f"{catalog_prompt}\n\n{pre_activated}"
                    else:
                        catalog_prompt = pre_activated

        # 2. Default skills (always loaded unless explicitly disabled)
        default_mgr = DefaultSkillManager(config=skills_config)
        default_mgr.load()
        default_mgr.log_active_skills()
        protocols_prompt = default_mgr.build_protocols_prompt()

        # DX-3: Community skill startup summary
        if self._config.project_root is not None and not self._config.skip_community_discovery:
            community_count = len(catalog._skills) if catalog_prompt else 0
            pre_activated_count = len(skills_config.skills) if skills_config.skills else 0
            logger.info(
                "Skills: %d community (%d catalog, %d pre-activated)",
                community_count,
                community_count,
                pre_activated_count,
            )

        # 3. Cache
        self._catalog_prompt = catalog_prompt
        self._protocols_prompt = protocols_prompt

        if protocols_prompt:
            logger.info(
                "Skill system ready: protocols=%d chars, catalog=%d chars",
                len(protocols_prompt),
                len(catalog_prompt),
            )
        else:
            logger.warning("Skill system produced empty protocols_prompt")

    # ------------------------------------------------------------------
    # Prompt accessors (consumed by downstream layers)
    # ------------------------------------------------------------------

    @property
    def skills_catalog_prompt(self) -> str:
        """Community skills XML catalog for system prompt injection."""
        return self._catalog_prompt

    @property
    def protocols_prompt(self) -> str:
        """Default skill operational protocols for system prompt injection."""
        return self._protocols_prompt

    @property
    def allowlisted_dirs(self) -> list[str]:
        """Skill base directories for Tier 3 resource access (AS-6)."""
        return self._allowlisted_dirs

    @property
    def is_loaded(self) -> bool:
        return self._loaded
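
A brief sketch of the legacy bridge path (illustrative, not part of the diff)::

    # Callers that already hold rendered prompt strings can wrap them without
    # triggering discovery; load() becomes a no-op because _loaded is True.
    mgr = SkillsManager.from_precomputed(
        skills_catalog_prompt="<skills>...</skills>",
        protocols_prompt="## Operational Protocols\n...",
    )
    assert mgr.is_loaded
    assert mgr.protocols_prompt.startswith("## Operational Protocols")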
@@ -0,0 +1,52 @@
"""Data models for the Hive skill system (Agent Skills standard)."""

from __future__ import annotations

from dataclasses import dataclass, field
from enum import StrEnum
from pathlib import Path


class SkillScope(StrEnum):
    """Where a skill was discovered."""

    PROJECT = "project"
    USER = "user"
    FRAMEWORK = "framework"


class TrustStatus(StrEnum):
    """Trust state of a skill entry."""

    TRUSTED = "trusted"
    PENDING_CONSENT = "pending_consent"
    DENIED = "denied"


@dataclass
class SkillEntry:
    """In-memory record for a discovered skill (PRD §4.2)."""

    name: str
    """Skill name from SKILL.md frontmatter."""

    description: str
    """Skill description from SKILL.md frontmatter."""

    location: Path
    """Absolute path to SKILL.md."""

    base_dir: Path
    """Parent directory of SKILL.md (skill root)."""

    source_scope: SkillScope
    """Which scope this skill was found in."""

    trust_status: TrustStatus = TrustStatus.TRUSTED
    """Trust state; project-scope skills start as PENDING_CONSENT before gating."""

    # Optional frontmatter fields
    license: str | None = None
    compatibility: list[str] = field(default_factory=list)
    allowed_tools: list[str] = field(default_factory=list)
    metadata: dict = field(default_factory=dict)
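
Constructing an entry by hand (illustrative, not part of the diff; the name and paths are hypothetical)::

    entry = SkillEntry(
        name="changelog-writer",
        description="Drafts changelog entries from commit messages.",
        location=Path("/work/my-agent/.hive/skills/changelog-writer/SKILL.md"),
        base_dir=Path("/work/my-agent/.hive/skills/changelog-writer"),
        source_scope=SkillScope.PROJECT,
        trust_status=TrustStatus.PENDING_CONSENT,  # project scope: gated before use
    )
    assert entry.source_scope == "project"  # StrEnum members compare equal to their values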