Merge branch 'main' into feat/image-capabilities

This commit is contained in:
Timothy
2026-03-20 18:42:55 -07:00
33 changed files with 3810 additions and 289 deletions
+2 -1
View File
@@ -41,7 +41,8 @@ Generate a swarm of worker agents with a coding agent(queen) that control them.
Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and guides.
https://github.com/user-attachments/assets/aad3a035-e7b3-4cac-b13d-4a83c7002c30
https://github.com/user-attachments/assets/bf10edc3-06ba-48b6-98ba-d069b15fb69d
## Who Is Hive For?
+583
View File
@@ -0,0 +1,583 @@
#!/usr/bin/env python3
"""Antigravity authentication CLI.
Implements OAuth2 flow for Google's Antigravity Code Assist gateway.
Credentials are stored in ~/.hive/antigravity-accounts.json.
Usage:
python -m antigravity_auth auth account add
python -m antigravity_auth auth account list
python -m antigravity_auth auth account remove <email>
"""
from __future__ import annotations
import argparse
import json
import logging
import os
import secrets
import socket
import sys
import time
import urllib.parse
import urllib.request
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from pathlib import Path
from typing import Any
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)
# OAuth endpoints (standard Google OAuth2 authorization-code flow)
_OAUTH_AUTH_URL = "https://accounts.google.com/o/oauth2/v2/auth"
_OAUTH_TOKEN_URL = "https://oauth2.googleapis.com/token"
# Scopes for Antigravity/Cloud Code Assist
_OAUTH_SCOPES = [
    "https://www.googleapis.com/auth/cloud-platform",
    "https://www.googleapis.com/auth/userinfo.email",
    "https://www.googleapis.com/auth/userinfo.profile",
]
# Credentials file path in ~/.hive/
_ACCOUNTS_FILE = Path.home() / ".hive" / "antigravity-accounts.json"
# Default project ID (used when the stored refresh entry carries none)
_DEFAULT_PROJECT_ID = "rising-fact-p41fc"
# Fixed local port for the OAuth redirect URI -- presumably the port
# registered with the OAuth client; TODO confirm before changing.
_DEFAULT_REDIRECT_PORT = 51121
# OAuth credentials fetched from the opencode-antigravity-auth project.
# This project reverse-engineered and published the public OAuth credentials
# for Google's Antigravity/Cloud Code Assist API.
# Source: https://github.com/NoeFabris/opencode-antigravity-auth
_CREDENTIALS_URL = (
    "https://raw.githubusercontent.com/NoeFabris/opencode-antigravity-auth/dev/src/constants.ts"
)
# Cached credentials fetched from public source (populated lazily by
# _fetch_credentials_from_public_source())
_cached_client_id: str | None = None
_cached_client_secret: str | None = None
def _fetch_credentials_from_public_source() -> tuple[str | None, str | None]:
    """Fetch OAuth client ID and secret from the public npm package source on GitHub."""
    global _cached_client_id, _cached_client_secret
    # Serve from the module-level cache when both values are already known.
    if _cached_client_id and _cached_client_secret:
        return _cached_client_id, _cached_client_secret
    import re

    try:
        request = urllib.request.Request(
            _CREDENTIALS_URL, headers={"User-Agent": "Hive-Antigravity-Auth/1.0"}
        )
        with urllib.request.urlopen(request, timeout=10) as response:
            source_text = response.read().decode("utf-8")
        # The TypeScript constants file declares the values as quoted strings.
        match_id = re.search(r'ANTIGRAVITY_CLIENT_ID\s*=\s*"([^"]+)"', source_text)
        match_secret = re.search(r'ANTIGRAVITY_CLIENT_SECRET\s*=\s*"([^"]+)"', source_text)
        if match_id:
            _cached_client_id = match_id.group(1)
        if match_secret:
            _cached_client_secret = match_secret.group(1)
        return _cached_client_id, _cached_client_secret
    except Exception as e:
        logger.debug(f"Failed to fetch credentials from public source: {e}")
    return None, None
def get_client_id() -> str:
    """Resolve the OAuth client ID.

    Resolution order: ANTIGRAVITY_CLIENT_ID env var, then
    ``llm.antigravity_client_id`` in ~/.hive/configuration.json, then the
    public-source fallback. Raises RuntimeError when none is available.
    """
    from_env = os.environ.get("ANTIGRAVITY_CLIENT_ID")
    if from_env:
        return from_env
    # Try hive config
    config_path = Path.home() / ".hive" / "configuration.json"
    if config_path.exists():
        configured = None
        try:
            cfg = json.loads(config_path.read_text())
            configured = cfg.get("llm", {}).get("antigravity_client_id")
        except Exception:
            configured = None
        if configured:
            return configured
    # Fetch from public source
    fetched, _ = _fetch_credentials_from_public_source()
    if fetched:
        return fetched
    raise RuntimeError("Could not obtain Antigravity OAuth client ID")
def get_client_secret() -> str | None:
"""Get OAuth client secret from env, config, or public source."""
secret = os.environ.get("ANTIGRAVITY_CLIENT_SECRET")
if secret:
return secret
# Try to read from hive config
hive_cfg = Path.home() / ".hive" / "configuration.json"
if hive_cfg.exists():
try:
with open(hive_cfg) as f:
cfg = json.load(f)
secret = cfg.get("llm", {}).get("antigravity_client_secret")
if secret:
return secret
except Exception:
pass
# Fetch from public source (npm package on GitHub)
_, secret = _fetch_credentials_from_public_source()
return secret
def find_free_port() -> int:
    """Return a TCP port number the OS reports as currently available."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Binding to port 0 asks the kernel to pick an unused port.
        probe.bind(("", 0))
        probe.listen(1)
        return probe.getsockname()[1]
    finally:
        probe.close()
class OAuthCallbackHandler(BaseHTTPRequestHandler):
"""Handle OAuth callback from browser."""
auth_code: str | None = None
state: str | None = None
error: str | None = None
def log_message(self, format: str, *args: Any) -> None:
pass # Suppress default logging
def do_GET(self) -> None:
parsed = urllib.parse.urlparse(self.path)
if parsed.path == "/oauth-callback":
query = urllib.parse.parse_qs(parsed.query)
if "error" in query:
self.error = query["error"][0]
self._send_response("Authentication failed. You can close this window.")
return
if "code" in query and "state" in query:
OAuthCallbackHandler.auth_code = query["code"][0]
OAuthCallbackHandler.state = query["state"][0]
self._send_response(
"Authentication successful! You can close this window "
"and return to the terminal."
)
return
self._send_response("Waiting for authentication...")
def _send_response(self, message: str) -> None:
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
html = f"""<!DOCTYPE html>
<html>
<head><title>Antigravity Auth</title></head>
<body style="font-family: system-ui; display: flex; align-items: center;
justify-content: center; height: 100vh; margin: 0; background: #1a1a2e;
color: #eee;">
<div style="text-align: center;">
<h2>{message}</h2>
</div>
</body>
</html>"""
self.wfile.write(html.encode())
def wait_for_callback(port: int, timeout: int = 300) -> tuple[str | None, str | None, str | None]:
    """Run a local HTTP server until the OAuth callback arrives.

    Args:
        port: Local port to listen on (must match the redirect URI).
        timeout: Maximum seconds to wait for the browser redirect.

    Returns:
        ``(auth_code, state, error)`` — on success error is None; on
        failure code/state are None and error is the OAuth error string
        or ``"timeout"``.
    """
    server = HTTPServer(("localhost", port), OAuthCallbackHandler)
    # Short per-request timeout so the loop re-checks the deadline roughly
    # once per second even when no request arrives.
    server.timeout = 1
    try:
        deadline = time.time() + timeout
        while time.time() < deadline:
            if OAuthCallbackHandler.auth_code:
                return (
                    OAuthCallbackHandler.auth_code,
                    OAuthCallbackHandler.state,
                    OAuthCallbackHandler.error,
                )
            if OAuthCallbackHandler.error:
                # Surface an explicit denial immediately instead of
                # silently waiting out the full timeout.
                return None, None, OAuthCallbackHandler.error
            server.handle_request()
        return None, None, "timeout"
    finally:
        # Always release the listening socket (the original leaked it).
        server.server_close()
def exchange_code_for_tokens(
    code: str, redirect_uri: str, client_id: str, client_secret: str | None
) -> dict[str, Any] | None:
    """POST the authorization code to Google's token endpoint.

    Returns the decoded token payload, or None when the exchange fails.
    """
    form = {
        "code": code,
        "client_id": client_id,
        "redirect_uri": redirect_uri,
        "grant_type": "authorization_code",
    }
    # The secret is optional for some installed-app client types.
    if client_secret:
        form["client_secret"] = client_secret
    request = urllib.request.Request(
        _OAUTH_TOKEN_URL,
        data=urllib.parse.urlencode(form).encode(),
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(request, timeout=30) as response:
            return json.loads(response.read())
    except Exception as e:
        logger.error(f"Token exchange failed: {e}")
        return None
def get_user_email(access_token: str) -> str | None:
    """Look up the authenticated user's email via Google's userinfo API.

    Returns None on any failure (network error, bad token, missing field).
    """
    request = urllib.request.Request(
        "https://www.googleapis.com/oauth2/v2/userinfo",
        headers={"Authorization": f"Bearer {access_token}"},
    )
    try:
        with urllib.request.urlopen(request, timeout=10) as response:
            payload = json.loads(response.read())
    except Exception:
        return None
    return payload.get("email")
def load_accounts() -> dict[str, Any]:
    """Load the accounts file, returning an empty V4 structure on any failure."""
    if not _ACCOUNTS_FILE.exists():
        return {"schemaVersion": 4, "accounts": []}
    try:
        return json.loads(_ACCOUNTS_FILE.read_text())
    except Exception:
        # Unreadable/corrupt file degrades to an empty account list.
        return {"schemaVersion": 4, "accounts": []}
def save_accounts(data: dict[str, Any]) -> None:
    """Write *data* to the accounts file, creating ~/.hive/ if needed."""
    _ACCOUNTS_FILE.parent.mkdir(parents=True, exist_ok=True)
    _ACCOUNTS_FILE.write_text(json.dumps(data, indent=2))
    logger.info(f"Saved credentials to {_ACCOUNTS_FILE}")
def validate_credentials(access_token: str, project_id: str = _DEFAULT_PROJECT_ID) -> bool:
    """Probe the Antigravity gateway with a tiny generateContent call.

    Returns True when the request succeeds (credentials valid), False on
    any error.
    """
    endpoint = "https://daily-cloudcode-pa.sandbox.googleapis.com"
    # Minimal one-token request: we only care whether auth is accepted.
    payload = {
        "project": project_id,
        "model": "gemini-3-flash",
        "request": {
            "contents": [{"role": "user", "parts": [{"text": "hi"}]}],
            "generationConfig": {"maxOutputTokens": 10},
        },
        "requestType": "agent",
        "userAgent": "antigravity",
        "requestId": "validation-test",
    }
    request_headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json",
        # Mimic the Antigravity desktop client so the gateway accepts us.
        "User-Agent": (
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
            "AppleWebKit/537.36 (KHTML, like Gecko) Antigravity/1.18.3"
        ),
        "X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
    }
    try:
        request = urllib.request.Request(
            f"{endpoint}/v1internal:generateContent",
            data=json.dumps(payload).encode("utf-8"),
            headers=request_headers,
            method="POST",
        )
        with urllib.request.urlopen(request, timeout=30) as response:
            json.loads(response.read())
        return True
    except Exception:
        return False
def refresh_access_token(
    refresh_token: str, client_id: str, client_secret: str | None
) -> dict | None:
    """Obtain a fresh access token from the refresh token.

    Returns the decoded token payload, or None when the refresh fails.
    """
    form = {
        "grant_type": "refresh_token",
        "refresh_token": refresh_token,
        "client_id": client_id,
    }
    if client_secret:
        form["client_secret"] = client_secret
    request = urllib.request.Request(
        _OAUTH_TOKEN_URL,
        data=urllib.parse.urlencode(form).encode(),
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(request, timeout=30) as response:
            return json.loads(response.read())
    except Exception as e:
        logger.debug(f"Token refresh failed: {e}")
        return None
def cmd_account_add(args: argparse.Namespace) -> int:
    """Add a new Antigravity account via OAuth2.

    First checks if valid credentials already exist. If so, validates them
    and skips OAuth if they work. Otherwise, proceeds with OAuth flow.

    Returns 0 on success (existing credentials validated, refreshed, or a
    new OAuth flow completed), 1 on any failure.
    """
    client_id = get_client_id()
    client_secret = get_client_secret()
    # Check if credentials already exist
    accounts_data = load_accounts()
    accounts = accounts_data.get("accounts", [])
    if accounts:
        # Prefer the first enabled account; fall back to the first entry.
        account = next((a for a in accounts if a.get("enabled", True) is not False), accounts[0])
        access_token = account.get("access")
        refresh_token_str = account.get("refresh", "")
        # V4 schema packs "refreshToken|projectId" into the refresh field.
        refresh_token = refresh_token_str.split("|")[0] if refresh_token_str else None
        project_id = (
            refresh_token_str.split("|")[1] if "|" in refresh_token_str else _DEFAULT_PROJECT_ID
        )
        email = account.get("email", "unknown")
        expires_ms = account.get("expires", 0)  # stored as Unix milliseconds
        expires_at = expires_ms / 1000.0 if expires_ms else 0.0
        # Check if token is expired or near expiry (60-second safety margin)
        if access_token and expires_at and time.time() < expires_at - 60:
            # Token still valid, test it
            logger.info(f"Found existing credentials for: {email}")
            logger.info("Validating existing credentials...")
            if validate_credentials(access_token, project_id):
                logger.info("✓ Credentials valid! Skipping OAuth.")
                return 0
            else:
                # NOTE(review): despite this message, no refresh happens on
                # this path — control falls through to the full OAuth flow.
                logger.info("Credentials failed validation, refreshing...")
        elif refresh_token:
            logger.info(f"Found expired credentials for: {email}")
            logger.info("Attempting token refresh...")
            tokens = refresh_access_token(refresh_token, client_id, client_secret)
            if tokens:
                new_access = tokens.get("access_token")
                expires_in = tokens.get("expires_in", 3600)
                if new_access:
                    # Update the account in place and persist before validating.
                    account["access"] = new_access
                    account["expires"] = int((time.time() + expires_in) * 1000)
                    accounts_data["last_refresh"] = time.strftime(
                        "%Y-%m-%dT%H:%M:%SZ", time.gmtime()
                    )
                    save_accounts(accounts_data)
                    # Validate the refreshed token
                    logger.info("Validating refreshed credentials...")
                    if validate_credentials(new_access, project_id):
                        logger.info("✓ Credentials refreshed and validated!")
                        return 0
                    else:
                        logger.info("Refreshed token failed validation, proceeding with OAuth...")
            else:
                logger.info("Token refresh failed, proceeding with OAuth...")
    # No valid credentials, proceed with OAuth
    if not client_secret:
        logger.warning(
            "No client secret configured. Token refresh may fail.\n"
            "Set ANTIGRAVITY_CLIENT_SECRET env var or add "
            "'antigravity_client_secret' to ~/.hive/configuration.json"
        )
    # Use fixed port and path matching Google's expected OAuth redirect URI
    port = _DEFAULT_REDIRECT_PORT
    redirect_uri = f"http://localhost:{port}/oauth-callback"
    # Generate state for CSRF protection
    state = secrets.token_urlsafe(16)
    # Build authorization URL ("offline" + "consent" to force a refresh token)
    params = {
        "client_id": client_id,
        "redirect_uri": redirect_uri,
        "response_type": "code",
        "scope": " ".join(_OAUTH_SCOPES),
        "state": state,
        "access_type": "offline",
        "prompt": "consent",
    }
    auth_url = f"{_OAUTH_AUTH_URL}?{urllib.parse.urlencode(params)}"
    logger.info("Opening browser for authentication...")
    logger.info(f"If the browser doesn't open, visit: {auth_url}\n")
    # Open browser
    webbrowser.open(auth_url)
    # Wait for callback (blocks until redirect or timeout)
    logger.info(f"Listening for callback on port {port}...")
    code, received_state, error = wait_for_callback(port)
    if error:
        logger.error(f"Authentication failed: {error}")
        return 1
    if not code:
        logger.error("No authorization code received")
        return 1
    # Reject the response if the returned state doesn't match what we sent.
    if received_state != state:
        logger.error("State mismatch - possible CSRF attack")
        return 1
    # Exchange code for tokens
    logger.info("Exchanging authorization code for tokens...")
    tokens = exchange_code_for_tokens(code, redirect_uri, client_id, client_secret)
    if not tokens:
        return 1
    access_token = tokens.get("access_token")
    refresh_token = tokens.get("refresh_token")
    expires_in = tokens.get("expires_in", 3600)
    if not access_token:
        logger.error("No access token in response")
        return 1
    # Get user email (best-effort; the flow continues if it fails)
    email = get_user_email(access_token)
    if email:
        logger.info(f"Authenticated as: {email}")
    # Load existing accounts and add/update
    accounts_data = load_accounts()
    accounts = accounts_data.get("accounts", [])
    # Build new account entry (V4 schema: refresh = "token|projectId")
    expires_ms = int((time.time() + expires_in) * 1000)
    refresh_entry = f"{refresh_token}|{_DEFAULT_PROJECT_ID}"
    new_account = {
        "access": access_token,
        "refresh": refresh_entry,
        "expires": expires_ms,
        "email": email,
        "enabled": True,
    }
    # Update existing account or add new one (matched by email)
    existing_idx = next((i for i, a in enumerate(accounts) if a.get("email") == email), None)
    if existing_idx is not None:
        accounts[existing_idx] = new_account
        logger.info(f"Updated existing account: {email}")
    else:
        accounts.append(new_account)
        logger.info(f"Added new account: {email}")
    accounts_data["accounts"] = accounts
    accounts_data["schemaVersion"] = 4
    accounts_data["last_refresh"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    save_accounts(accounts_data)
    logger.info("\n✓ Authentication complete!")
    return 0
def cmd_account_list(args: argparse.Namespace) -> int:
    """List all stored accounts."""
    stored = load_accounts().get("accounts", [])
    if not stored:
        logger.info("No accounts configured.")
        logger.info("Run 'antigravity auth account add' to add one.")
        return 0
    logger.info("Configured accounts:\n")
    # Print a 1-based numbered line per account with its enabled state.
    for idx, entry in enumerate(stored, 1):
        addr = entry.get("email", "unknown")
        status = "enabled" if entry.get("enabled", True) else "disabled"
        logger.info(f" {idx}. {addr} ({status})")
    return 0
def cmd_account_remove(args: argparse.Namespace) -> int:
    """Remove an account by email."""
    target = args.email
    data = load_accounts()
    existing = data.get("accounts", [])
    # Keep every account whose email differs from the target.
    remaining = [entry for entry in existing if entry.get("email") != target]
    if len(remaining) == len(existing):
        logger.error(f"No account found with email: {target}")
        return 1
    data["accounts"] = remaining
    save_accounts(data)
    logger.info(f"Removed account: {target}")
    return 0
def main() -> int:
    """CLI entry point: build the argument parser and dispatch to a handler."""
    parser = argparse.ArgumentParser(
        description="Antigravity authentication CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    subparsers = parser.add_subparsers(dest="command", help="Commands")
    # Command tree: auth -> account -> {add, list, remove}
    auth = subparsers.add_parser("auth", help="Authentication commands")
    auth_sub = auth.add_subparsers(dest="auth_command")
    account = auth_sub.add_parser("account", help="Account management")
    account_sub = account.add_subparsers(dest="account_command")
    # Each leaf command is wired to its handler via set_defaults(func=...).
    add_cmd = account_sub.add_parser("add", help="Add a new account via OAuth2")
    add_cmd.set_defaults(func=cmd_account_add)
    list_cmd = account_sub.add_parser("list", help="List configured accounts")
    list_cmd.set_defaults(func=cmd_account_list)
    remove_cmd = account_sub.add_parser("remove", help="Remove an account")
    remove_cmd.add_argument("email", help="Email of account to remove")
    remove_cmd.set_defaults(func=cmd_account_remove)
    args = parser.parse_args()
    if hasattr(args, "func"):
        return args.func(args)
    # No complete subcommand given: show usage instead of failing.
    parser.print_help()
    return 0
if __name__ == "__main__":
sys.exit(main())
+107
View File
@@ -116,6 +116,16 @@ def get_worker_api_key() -> str | None:
except ImportError:
pass
if worker_llm.get("use_antigravity_subscription"):
try:
from framework.runner.runner import get_antigravity_token
token = get_antigravity_token()
if token:
return token
except ImportError:
pass
api_key_env_var = worker_llm.get("api_key_env_var")
if api_key_env_var:
return os.environ.get(api_key_env_var)
@@ -134,6 +144,9 @@ def get_worker_api_base() -> str | None:
return "https://chatgpt.com/backend-api/codex"
if worker_llm.get("use_kimi_code_subscription"):
return "https://api.kimi.com/coding"
if worker_llm.get("use_antigravity_subscription"):
# Antigravity uses AntigravityProvider directly — no api_base needed.
return None
if worker_llm.get("api_base"):
return worker_llm["api_base"]
if str(worker_llm.get("provider", "")).lower() == "openrouter":
@@ -251,6 +264,17 @@ def get_api_key() -> str | None:
except ImportError:
pass
# Antigravity subscription: read OAuth token from accounts JSON
if llm.get("use_antigravity_subscription"):
try:
from framework.runner.runner import get_antigravity_token
token = get_antigravity_token()
if token:
return token
except ImportError:
pass
# Standard env-var path (covers ZAI Code and all API-key providers)
api_key_env_var = llm.get("api_key_env_var")
if api_key_env_var:
@@ -258,6 +282,86 @@ def get_api_key() -> str | None:
return None
# OAuth credentials for Antigravity are fetched from the opencode-antigravity-auth project.
# This project reverse-engineered and published the public OAuth credentials
# for Google's Antigravity/Cloud Code Assist API.
# Source: https://github.com/NoeFabris/opencode-antigravity-auth
_ANTIGRAVITY_CREDENTIALS_URL = (
"https://raw.githubusercontent.com/NoeFabris/opencode-antigravity-auth/dev/src/constants.ts"
)
_antigravity_credentials_cache: tuple[str | None, str | None] = (None, None)
def _fetch_antigravity_credentials() -> tuple[str | None, str | None]:
    """Fetch OAuth client ID and secret from the public npm package source on GitHub."""
    global _antigravity_credentials_cache
    cached_id, cached_secret = _antigravity_credentials_cache
    # Only a fully-populated cache (both values) is trusted.
    if cached_id and cached_secret:
        return _antigravity_credentials_cache
    import re
    import urllib.request

    try:
        request = urllib.request.Request(
            _ANTIGRAVITY_CREDENTIALS_URL, headers={"User-Agent": "Hive/1.0"}
        )
        with urllib.request.urlopen(request, timeout=10) as response:
            source_text = response.read().decode("utf-8")
        # The TypeScript constants file declares the values as quoted strings.
        found_id = re.search(r'ANTIGRAVITY_CLIENT_ID\s*=\s*"([^"]+)"', source_text)
        found_secret = re.search(r'ANTIGRAVITY_CLIENT_SECRET\s*=\s*"([^"]+)"', source_text)
        client_id = found_id.group(1) if found_id else None
        client_secret = found_secret.group(1) if found_secret else None
        if client_id and client_secret:
            _antigravity_credentials_cache = (client_id, client_secret)
        return client_id, client_secret
    except Exception as e:
        logger.debug("Failed to fetch Antigravity credentials from public source: %s", e)
    return None, None
def get_antigravity_client_id() -> str:
    """Return the Antigravity OAuth application client ID.

    Resolution order:
      1. ``ANTIGRAVITY_CLIENT_ID`` environment variable
      2. ``llm.antigravity_client_id`` in ~/.hive/configuration.json
      3. Fetch from public source (opencode-antigravity-auth project on GitHub)

    Raises:
        RuntimeError: when no source yields a client ID.
    """
    from_env = os.environ.get("ANTIGRAVITY_CLIENT_ID")
    if from_env:
        return from_env
    from_cfg = get_hive_config().get("llm", {}).get("antigravity_client_id")
    if from_cfg:
        return from_cfg
    # Fetch from public source
    from_public, _ = _fetch_antigravity_credentials()
    if from_public:
        return from_public
    raise RuntimeError("Could not obtain Antigravity OAuth client ID")
def get_antigravity_client_secret() -> str | None:
"""Return the Antigravity OAuth client secret.
Checked in order:
1. ``ANTIGRAVITY_CLIENT_SECRET`` environment variable
2. ``llm.antigravity_client_secret`` in ~/.hive/configuration.json
3. Fetch from public source (opencode-antigravity-auth project on GitHub)
Returns None when not found token refresh will be skipped and
the caller must use whatever access token is already available.
"""
env = os.environ.get("ANTIGRAVITY_CLIENT_SECRET")
if env:
return env
cfg_val = get_hive_config().get("llm", {}).get("antigravity_client_secret") or None
if cfg_val:
return cfg_val
# Fetch from public source
_, secret = _fetch_antigravity_credentials()
return secret
def get_gcu_enabled() -> bool:
    """Return whether GCU (browser automation) is enabled in user config."""
    # Defaults to True when the key is absent from the config.
    config = get_hive_config()
    return config.get("gcu_enabled", True)
@@ -280,6 +384,9 @@ def get_api_base() -> str | None:
if llm.get("use_kimi_code_subscription"):
# Kimi Code uses an Anthropic-compatible endpoint (no /v1 suffix).
return "https://api.kimi.com/coding"
if llm.get("use_antigravity_subscription"):
# Antigravity uses AntigravityProvider directly — no api_base needed.
return None
if llm.get("api_base"):
return llm["api_base"]
if str(llm.get("provider", "")).lower() == "openrouter":
+62 -13
View File
@@ -536,12 +536,28 @@ class EventLoopNode(NodeProtocol):
_restored_recent_responses = restored.recent_responses
_restored_tool_fingerprints = restored.recent_tool_fingerprints
# Refresh the system prompt with full 3-layer composition.
# The stored prompt may be stale after code changes or when
# runtime-injected context (e.g. worker identity) has changed.
# On resume, we rebuild identity + narrative + focus so the LLM
# understands the session history, not just the node directive.
from framework.graph.prompt_composer import compose_system_prompt
# Refresh the system prompt with full composition including
# execution preamble and node-type preamble. The stored
# prompt may be stale after code changes or when runtime-
# injected context (e.g. worker identity) has changed.
from framework.graph.prompt_composer import (
EXECUTION_SCOPE_PREAMBLE,
compose_system_prompt,
)
_exec_preamble = None
if (
not ctx.is_subagent_mode
and ctx.node_spec.node_type in ("event_loop", "gcu")
and ctx.node_spec.output_keys
):
_exec_preamble = EXECUTION_SCOPE_PREAMBLE
_node_type_preamble = None
if ctx.node_spec.node_type == "gcu":
from framework.graph.gcu import GCU_BROWSER_SYSTEM_PROMPT
_node_type_preamble = GCU_BROWSER_SYSTEM_PROMPT
_current_prompt = compose_system_prompt(
identity_prompt=ctx.identity_prompt or None,
@@ -550,6 +566,8 @@ class EventLoopNode(NodeProtocol):
accounts_prompt=ctx.accounts_prompt or None,
skills_catalog_prompt=ctx.skills_catalog_prompt or None,
protocols_prompt=ctx.protocols_prompt or None,
execution_preamble=_exec_preamble,
node_type_preamble=_node_type_preamble,
)
if conversation.system_prompt != _current_prompt:
conversation.update_system_prompt(_current_prompt)
@@ -2497,6 +2515,27 @@ class EventLoopNode(NodeProtocol):
results_by_id[tc.tool_use_id] = result
elif tc.tool_name == "delegate_to_sub_agent":
# Guard: in continuous mode the LLM may see delegate
# calls from a previous node's conversation history and
# attempt to re-use the tool on a node that doesn't own
# it. Only accept if the tool was actually offered.
if not any(t.name == "delegate_to_sub_agent" for t in tools):
logger.warning(
"[%s] LLM called delegate_to_sub_agent but tool "
"was not offered to this node — rejecting",
node_id,
)
result = ToolResult(
tool_use_id=tc.tool_use_id,
content=(
"ERROR: delegate_to_sub_agent is not available "
"on this node. This tool belongs to a different "
"node in the workflow."
),
is_error=True,
)
results_by_id[tc.tool_use_id] = result
continue
# --- Framework-level subagent delegation ---
# Queue for parallel execution in Phase 2
logger.info(
@@ -5194,7 +5233,20 @@ class EventLoopNode(NodeProtocol):
write_keys=[], # Read-only!
)
# 2b. Set up report callback (one-way channel to parent / event bus)
# 2b. Compute instance counter early so node_id is available for the
# report callback and the NodeContext. Each delegation to the same
# agent_id gets a unique suffix (instance 1 has no suffix for backward
# compat; instance 2+ appends ":N").
self._subagent_instance_counter.setdefault(agent_id, 0)
self._subagent_instance_counter[agent_id] += 1
_sa_instance = self._subagent_instance_counter[agent_id]
if _sa_instance > 1:
sa_node_id = f"{ctx.node_id}:subagent:{agent_id}:{_sa_instance}"
else:
sa_node_id = f"{ctx.node_id}:subagent:{agent_id}"
subagent_instance = str(_sa_instance)
# 2c. Set up report callback (one-way channel to parent / event bus)
subagent_reports: list[dict] = []
async def _report_callback(
@@ -5207,7 +5259,7 @@ class EventLoopNode(NodeProtocol):
if self._event_bus:
await self._event_bus.emit_subagent_report(
stream_id=ctx.node_id,
node_id=f"{ctx.node_id}:subagent:{agent_id}",
node_id=sa_node_id,
subagent_id=agent_id,
message=message,
data=data,
@@ -5297,7 +5349,7 @@ class EventLoopNode(NodeProtocol):
max_iter = min(self._config.max_iterations, 10)
subagent_ctx = NodeContext(
runtime=ctx.runtime,
node_id=f"{ctx.node_id}:subagent:{agent_id}",
node_id=sa_node_id,
node_spec=subagent_spec,
memory=scoped_memory,
input_data={"task": task, **parent_data},
@@ -5325,10 +5377,7 @@ class EventLoopNode(NodeProtocol):
# Derive a conversation store for the subagent from the parent's store.
# Each invocation gets a unique path so that repeated delegate calls
# (e.g. one per profile) don't restore a stale completed conversation.
self._subagent_instance_counter.setdefault(agent_id, 0)
self._subagent_instance_counter[agent_id] += 1
subagent_instance = str(self._subagent_instance_counter[agent_id])
# (Instance counter was computed earlier in step 2b.)
subagent_conv_store = None
if self._conversation_store is not None:
from framework.storage.conversation_store import FileConversationStore
-8
View File
@@ -167,14 +167,6 @@ class Goal(BaseModel):
return met_weight >= total_weight * 0.9 # 90% threshold
def check_constraint(self, constraint_id: str, value: Any) -> bool:
"""Check if a specific constraint is satisfied."""
for c in self.constraints:
if c.id == constraint_id:
# This would be expanded with actual evaluation logic
return True
return True
def to_prompt_context(self) -> str:
"""Generate context string for LLM prompts.
+15
View File
@@ -152,6 +152,8 @@ def compose_system_prompt(
accounts_prompt: str | None = None,
skills_catalog_prompt: str | None = None,
protocols_prompt: str | None = None,
execution_preamble: str | None = None,
node_type_preamble: str | None = None,
) -> str:
"""Compose the multi-layer system prompt.
@@ -162,6 +164,10 @@ def compose_system_prompt(
accounts_prompt: Connected accounts block (sits between identity and narrative).
skills_catalog_prompt: Available skills catalog XML (Agent Skills standard).
protocols_prompt: Default skill operational protocols section.
execution_preamble: EXECUTION_SCOPE_PREAMBLE for worker nodes
(prepended before focus so the LLM knows its pipeline scope).
node_type_preamble: Node-type-specific preamble, e.g. GCU browser
best-practices prompt (prepended before focus).
Returns:
Composed system prompt with all layers present, plus current datetime.
@@ -188,6 +194,15 @@ def compose_system_prompt(
if narrative:
parts.append(f"\n--- Context (what has happened so far) ---\n{narrative}")
# Execution scope preamble (worker nodes — tells the LLM it is one
# step in a multi-node pipeline and should not overreach)
if execution_preamble:
parts.append(f"\n{execution_preamble}")
# Node-type preamble (e.g. GCU browser best-practices)
if node_type_preamble:
parts.append(f"\n{node_type_preamble}")
# Layer 3: Focus (current phase directive)
if focus_prompt:
parts.append(f"\n--- Current Focus ---\n{focus_prompt}")
+706
View File
@@ -0,0 +1,706 @@
"""Antigravity (Google internal Cloud Code Assist) LLM provider.
Antigravity is Google's unified gateway API that routes requests to Gemini,
Claude, and GPT-OSS models through a single Gemini-style interface. It is
NOT the public ``generativelanguage.googleapis.com`` API.
Authentication uses Google OAuth2. Token refresh is done directly with the
OAuth client secret — no local proxy required.
Credential sources (checked in order):
1. ``~/.hive/antigravity-accounts.json`` (native OAuth implementation)
2. Antigravity IDE SQLite state DB (macOS / Linux)
"""
from __future__ import annotations
import json
import logging
import re
import time
import uuid
from collections.abc import AsyncIterator, Callable, Iterator
from pathlib import Path
from typing import Any
from framework.llm.provider import LLMProvider, LLMResponse, Tool
from framework.llm.stream_events import (
FinishEvent,
StreamErrorEvent,
StreamEvent,
TextDeltaEvent,
TextEndEvent,
ToolCallEvent,
)
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
# Google OAuth2 token endpoint used for refresh-token exchanges.
_TOKEN_URL = "https://oauth2.googleapis.com/token"
# Fallback order: daily sandbox → autopush sandbox → production
_ENDPOINTS = [
    "https://daily-cloudcode-pa.sandbox.googleapis.com",
    "https://autopush-cloudcode-pa.sandbox.googleapis.com",
    "https://cloudcode-pa.googleapis.com",
]
# Project ID used when the accounts file does not carry one.
_DEFAULT_PROJECT_ID = "rising-fact-p41fc"
# Treat tokens expiring within this many seconds as already expired.
_TOKEN_REFRESH_BUFFER_SECS = 60
# Credentials file in ~/.hive/ (native implementation)
_ACCOUNTS_FILE = Path.home() / ".hive" / "antigravity-accounts.json"
# Antigravity IDE global-state SQLite DB locations (macOS, then Linux).
_IDE_STATE_DB_MAC = (
    Path.home()
    / "Library"
    / "Application Support"
    / "Antigravity"
    / "User"
    / "globalStorage"
    / "state.vscdb"
)
_IDE_STATE_DB_LINUX = (
    Path.home() / ".config" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
)
# Key in the IDE's ItemTable whose value holds the (base64) OAuth token blob.
_IDE_STATE_DB_KEY = "antigravityUnifiedStateSync.oauthToken"
_BASE_HEADERS: dict[str, str] = {
    # Mimic the Antigravity Electron app so the API accepts the request.
    "User-Agent": (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Antigravity/1.18.3 Chrome/138.0.7204.235 "
        "Electron/37.3.1 Safari/537.36"
    ),
    "X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
    "Client-Metadata": '{"ideType":"ANTIGRAVITY","platform":"MACOS","pluginType":"GEMINI"}',
}
# ---------------------------------------------------------------------------
# Credential loading helpers
# ---------------------------------------------------------------------------
def _load_from_json_file() -> tuple[str | None, str | None, str, float]:
    """Read credentials from JSON accounts file.

    Reads from ~/.hive/antigravity-accounts.json.

    Returns ``(access_token | None, refresh_token | None, project_id, expires_at)``.
    ``expires_at`` is a Unix timestamp (seconds); 0.0 means unknown.
    """
    if not _ACCOUNTS_FILE.exists():
        return None, None, _DEFAULT_PROJECT_ID, 0.0
    try:
        with open(_ACCOUNTS_FILE, encoding="utf-8") as fh:
            data = json.load(fh)
    except (OSError, json.JSONDecodeError) as exc:
        # Unreadable/corrupt file degrades to "no credentials".
        logger.debug("Failed to read Antigravity accounts file: %s", exc)
        return None, None, _DEFAULT_PROJECT_ID, 0.0
    accounts = data.get("accounts", [])
    if not accounts:
        return None, None, _DEFAULT_PROJECT_ID, 0.0
    # Prefer the first enabled account; fall back to the first entry.
    account = next((a for a in accounts if a.get("enabled", True) is not False), accounts[0])
    schema_version = data.get("schemaVersion", 1)
    if schema_version >= 4:
        # V4 schema: refresh = "refreshToken|projectId[|managedProjectId]"
        refresh_str = account.get("refresh", "")
        parts = refresh_str.split("|") if refresh_str else []
        refresh_token: str | None = parts[0] if parts else None
        project_id = parts[1] if len(parts) >= 2 and parts[1] else _DEFAULT_PROJECT_ID
        access_token: str | None = account.get("access")
        expires_ms: int = account.get("expires", 0)  # Unix milliseconds
        expires_at = float(expires_ms) / 1000.0 if expires_ms else 0.0
        # Treat near-expiry tokens as absent so _ensure_token() triggers a refresh.
        if access_token and expires_at and time.time() >= expires_at - _TOKEN_REFRESH_BUFFER_SECS:
            access_token = None
            expires_at = 0.0
        return access_token, refresh_token, project_id, expires_at
    else:
        # V1-V3 schema: plain accessToken / refreshToken fields
        access_token = account.get("accessToken")
        refresh_token = account.get("refreshToken")
        # Estimate expiry from last_refresh + 1 h (older schemas store no
        # explicit expiry; assumes a standard 1-hour token lifetime).
        last_refresh_str: str | None = data.get("last_refresh")
        expires_at = 0.0
        if last_refresh_str:
            try:
                from datetime import datetime  # noqa: PLC0415

                ts = datetime.fromisoformat(last_refresh_str.replace("Z", "+00:00")).timestamp()
                expires_at = ts + 3600.0
                if time.time() >= expires_at - _TOKEN_REFRESH_BUFFER_SECS:
                    access_token = None
            except (ValueError, TypeError):
                # Unparseable timestamp: keep the token and report expiry unknown.
                pass
        return access_token, refresh_token, _DEFAULT_PROJECT_ID, expires_at
def _load_from_ide_db() -> tuple[str | None, str | None, float]:
    """Extract ``(access_token, refresh_token, expires_at)`` from the IDE SQLite DB."""
    import base64  # noqa: PLC0415
    import sqlite3  # noqa: PLC0415

    for db_path in (_IDE_STATE_DB_MAC, _IDE_STATE_DB_LINUX):
        if not db_path.exists():
            continue
        try:
            # Open read-only via URI so the IDE's live database is never locked.
            con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True)
            try:
                row = con.execute(
                    "SELECT value FROM ItemTable WHERE key = ?",
                    (_IDE_STATE_DB_KEY,),
                ).fetchone()
            finally:
                con.close()
            if not row:
                continue
            blob = base64.b64decode(row[0])
            access: str | None = None
            refresh: str | None = None
            # The blob embeds base64-encoded strings; scan every long
            # base64-looking run and regex out ya29.* / 1//* tokens.
            for candidate in re.findall(rb"[A-Za-z0-9+/=_\-]{40,}", blob):
                try:
                    decoded = base64.urlsafe_b64decode(candidate + b"=" * (-len(candidate) % 4))
                except Exception:
                    continue
                if not access:
                    hit = re.search(rb"ya29\.[A-Za-z0-9_\-\.]+", decoded)
                    if hit:
                        access = hit.group(0).decode("ascii")
                if not refresh:
                    hit = re.search(rb"1//[A-Za-z0-9_\-\.]+", decoded)
                    if hit:
                        refresh = hit.group(0).decode("ascii")
                if access and refresh:
                    break
            if access:
                # The IDE refreshes tokens while it runs, so the DB mtime is a
                # reasonable proxy for the last refresh; assume a 1 h lifetime.
                return access, refresh, db_path.stat().st_mtime + 3600.0
        except Exception as exc:
            logger.debug("Failed to read Antigravity IDE state DB: %s", exc)
            continue
    return None, None, 0.0
def _do_token_refresh(refresh_token: str) -> tuple[str, float] | None:
    """POST to Google OAuth endpoint and return ``(new_access_token, expires_at)``.

    The client secret is sourced via ``get_antigravity_client_secret()`` (env var,
    config file, or npm package fallback). When unavailable the refresh is attempted
    without it — Google will reject it for web-app clients, but the npm fallback in
    ``get_antigravity_client_secret()`` should ensure the secret is found at runtime.

    Returns:
        ``(access_token, expires_at)`` on success, where ``expires_at`` is a Unix
        timestamp in seconds. None when the request fails, the response is not
        valid JSON, or it lacks an ``access_token`` field.
    """
    from framework.config import get_antigravity_client_secret  # noqa: PLC0415

    client_secret = get_antigravity_client_secret()
    if not client_secret:
        logger.debug(
            "Antigravity client secret not configured — attempting refresh without it. "
            "Set ANTIGRAVITY_CLIENT_SECRET or run quickstart to configure."
        )
    import urllib.error  # noqa: PLC0415
    import urllib.parse  # noqa: PLC0415
    import urllib.request  # noqa: PLC0415

    from framework.config import get_antigravity_client_id  # noqa: PLC0415

    params: dict[str, str] = {
        "grant_type": "refresh_token",
        "refresh_token": refresh_token,
        "client_id": get_antigravity_client_id(),
    }
    if client_secret:
        params["client_secret"] = client_secret
    body = urllib.parse.urlencode(params).encode("utf-8")
    req = urllib.request.Request(
        _TOKEN_URL,
        data=body,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(req, timeout=15) as resp:  # noqa: S310
            payload = json.loads(resp.read())
        access_token: str = payload["access_token"]
        expires_in: int = payload.get("expires_in", 3600)
    # Catch only the expected failure modes (network error, bad JSON, missing
    # access_token) so genuine programming errors still surface, instead of a
    # blanket `except Exception` that hides them. URLError subsumes HTTPError.
    except (urllib.error.URLError, json.JSONDecodeError, KeyError, TimeoutError, OSError) as exc:
        logger.debug("Antigravity token refresh failed: %s", exc)
        return None
    logger.debug("Antigravity token refreshed successfully")
    return access_token, time.time() + expires_in
# ---------------------------------------------------------------------------
# Message conversion helpers
# ---------------------------------------------------------------------------
def _clean_tool_name(name: str) -> str:
"""Sanitize a tool name for the Antigravity function-calling schema."""
name = re.sub(r"[/\s]", "_", name)
if name and not (name[0].isalpha() or name[0] == "_"):
name = "_" + name
return name[:64]
def _to_gemini_contents(
messages: list[dict[str, Any]],
thought_sigs: dict[str, str] | None = None,
) -> list[dict[str, Any]]:
"""Convert OpenAI-format messages to Gemini-style ``contents`` array."""
# Pre-build a map tool_call_id → function_name from assistant messages.
# Tool result messages (role="tool") only carry tool_call_id, not the name,
# but Gemini requires functionResponse.name to match the functionCall.name.
tc_id_to_name: dict[str, str] = {}
for msg in messages:
if msg.get("role") == "assistant":
for tc in msg.get("tool_calls") or []:
tc_id = tc.get("id")
fn_name = tc.get("function", {}).get("name", "")
if tc_id and fn_name:
tc_id_to_name[tc_id] = fn_name
contents: list[dict[str, Any]] = []
# Consecutive tool-result messages must be batched into one user turn.
pending_tool_parts: list[dict[str, Any]] = []
def _flush_tool_results() -> None:
if pending_tool_parts:
contents.append({"role": "user", "parts": list(pending_tool_parts)})
pending_tool_parts.clear()
for msg in messages:
role = msg.get("role", "user")
content = msg.get("content")
if role == "system":
continue # Handled via systemInstruction, not in contents.
if role == "tool":
# OpenAI tool result → Gemini functionResponse part.
result_str = content if isinstance(content, str) else str(content or "")
tc_id = msg.get("tool_call_id", "")
# Look up function name from the pre-built map; fall back to msg.name.
fn_name = tc_id_to_name.get(tc_id) or msg.get("name", "")
pending_tool_parts.append(
{
"functionResponse": {
"name": fn_name,
"id": tc_id,
"response": {"content": result_str},
}
}
)
continue
_flush_tool_results()
gemini_role = "model" if role == "assistant" else "user"
parts: list[dict[str, Any]] = []
if isinstance(content, str) and content:
parts.append({"text": content})
elif isinstance(content, list):
for block in content:
if not isinstance(block, dict):
continue
if block.get("type") == "text":
text = block.get("text", "")
if text:
parts.append({"text": text})
# Other block types (image_url etc.) skipped.
# Assistant messages may carry OpenAI-style tool_calls.
for tc in msg.get("tool_calls") or []:
fn = tc.get("function", {})
try:
args = json.loads(fn.get("arguments", "{}") or "{}")
except (json.JSONDecodeError, TypeError):
args = {}
tc_id = tc.get("id", str(uuid.uuid4()))
fc_part: dict[str, Any] = {
"functionCall": {
"name": fn.get("name", ""),
"args": args,
"id": tc_id,
}
}
if thought_sigs:
sig = thought_sigs.get(tc_id, "")
if sig:
fc_part["thoughtSignature"] = sig # part-level, not inside functionCall
parts.append(fc_part)
if parts:
contents.append({"role": gemini_role, "parts": parts})
_flush_tool_results()
# Gemini requires the first turn to be a user turn. Drop any leading
# model messages so the API doesn't reject with a 400.
while contents and contents[0].get("role") == "model":
contents.pop(0)
return contents
# ---------------------------------------------------------------------------
# Response parsing helpers
# ---------------------------------------------------------------------------
def _map_finish_reason(reason: str) -> str:
return {"STOP": "stop", "MAX_TOKENS": "max_tokens", "OTHER": "tool_use"}.get(
(reason or "").upper(), "stop"
)
def _parse_complete_response(raw: dict[str, Any], model: str) -> LLMResponse:
    """Parse a non-streaming Antigravity response dict into an LLMResponse."""
    # Responses arrive wrapped in {"response": {...}}; tolerate the bare form too.
    payload: dict[str, Any] = raw.get("response", raw)
    candidates: list[dict[str, Any]] = payload.get("candidates", [])
    usage: dict[str, Any] = payload.get("usageMetadata", {})
    pieces: list[str] = []
    finish = ""
    if candidates:
        first = candidates[0]
        finish = first.get("finishReason", "")
        # Skip "thought" parts — only surface the visible answer text.
        pieces = [
            part["text"]
            for part in first.get("content", {}).get("parts", [])
            if "text" in part and not part.get("thought")
        ]
    return LLMResponse(
        content="".join(pieces),
        model=payload.get("modelVersion", model),
        input_tokens=usage.get("promptTokenCount", 0),
        output_tokens=usage.get("candidatesTokenCount", 0),
        stop_reason=_map_finish_reason(finish),
        raw_response=raw,
    )
def _parse_sse_stream(
    response: Any,
    model: str,
    on_thought_signature: Callable[[str, str], None] | None = None,
) -> Iterator[StreamEvent]:
    """Translate an Antigravity SSE byte stream into StreamEvents.

    Each SSE payload line has the shape::

        data: {"response": {"candidates": [...], "usageMetadata": {...}}, "traceId": "..."}
    """
    snapshot = ""          # full text accumulated so far
    prompt_tokens = 0
    completion_tokens = 0
    last_finish = ""
    for raw_line in response:
        text_line = raw_line.decode("utf-8", errors="replace").rstrip("\r\n")
        if not text_line.startswith("data:"):
            continue
        payload_str = text_line[5:].strip()
        if not payload_str or payload_str == "[DONE]":
            continue
        try:
            envelope: dict[str, Any] = json.loads(payload_str)
        except json.JSONDecodeError:
            continue
        # Unwrap the {"response": {...}, "traceId": "..."} envelope.
        payload: dict[str, Any] = envelope.get("response", envelope)
        usage = payload.get("usageMetadata", {})
        if usage:
            # Later chunks carry cumulative counts; keep the latest seen.
            prompt_tokens = usage.get("promptTokenCount", prompt_tokens)
            completion_tokens = usage.get("candidatesTokenCount", completion_tokens)
        for candidate in payload.get("candidates", []):
            if candidate.get("finishReason", ""):
                last_finish = candidate["finishReason"]
            for part in candidate.get("content", {}).get("parts", []):
                if "text" in part and not part.get("thought"):
                    piece: str = part["text"]
                    snapshot += piece
                    yield TextDeltaEvent(content=piece, snapshot=snapshot)
                elif "functionCall" in part:
                    call: dict[str, Any] = part["functionCall"]
                    call_id = call.get("id") or str(uuid.uuid4())
                    # thoughtSignature is a sibling of functionCall, not nested in it.
                    signature = part.get("thoughtSignature", "")
                    if signature and on_thought_signature:
                        on_thought_signature(call_id, signature)
                    arguments = call.get("args", {})
                    if isinstance(arguments, str):
                        try:
                            arguments = json.loads(arguments)
                        except json.JSONDecodeError:
                            arguments = {}
                    yield ToolCallEvent(
                        tool_use_id=call_id,
                        tool_name=call.get("name", ""),
                        tool_input=arguments,
                    )
    if snapshot:
        yield TextEndEvent(full_text=snapshot)
    yield FinishEvent(
        stop_reason=_map_finish_reason(last_finish),
        input_tokens=prompt_tokens,
        output_tokens=completion_tokens,
        model=model,
    )
# ---------------------------------------------------------------------------
# Provider
# ---------------------------------------------------------------------------
class AntigravityProvider(LLMProvider):
    """LLM provider for Google's internal Antigravity Code Assist gateway.

    No local proxy required. Handles OAuth token refresh, Gemini-format
    request/response conversion, and SSE streaming directly.
    """

    def __init__(self, model: str = "gemini-3-flash") -> None:
        # Strip any provider prefix ("openai/gemini-3-flash" → "gemini-3-flash").
        if "/" in model:
            model = model.split("/", 1)[1]
        self.model = model
        self._access_token: str | None = None
        self._refresh_token: str | None = None
        self._project_id: str = _DEFAULT_PROJECT_ID
        self._token_expires_at: float = 0.0
        self._thought_sigs: dict[str, str] = {}  # tool_use_id → thoughtSignature
        self._init_credentials()

    # --- Credential management -------------------------------------------- #
    def _init_credentials(self) -> None:
        """Load credentials from the best available source.

        Prefers the JSON accounts file (it carries a refresh token and project
        id); falls back to tokens scraped from the IDE's SQLite state DB.
        """
        access, refresh, project_id, expires_at = _load_from_json_file()
        if refresh:
            self._refresh_token = refresh
            self._project_id = project_id
            self._access_token = access
            self._token_expires_at = expires_at
            return
        # Fall back to IDE state DB.
        access, refresh, expires_at = _load_from_ide_db()
        if access:
            self._access_token = access
            self._refresh_token = refresh
            self._token_expires_at = expires_at

    def has_credentials(self) -> bool:
        """Return True if any credential (access or refresh token) is available."""
        return bool(self._access_token or self._refresh_token)

    def _ensure_token(self) -> str:
        """Return a valid access token, refreshing via OAuth if needed.

        Raises:
            RuntimeError: when no credentials are available at all.
        """
        if (
            self._access_token
            and self._token_expires_at
            and time.time() < self._token_expires_at - _TOKEN_REFRESH_BUFFER_SECS
        ):
            return self._access_token
        if self._refresh_token:
            result = _do_token_refresh(self._refresh_token)
            if result:
                self._access_token, self._token_expires_at = result
                return self._access_token
        if self._access_token:
            # Refresh unavailable/failed — a stale token may still be accepted.
            logger.warning("Using potentially stale Antigravity access token")
            return self._access_token
        raise RuntimeError(
            "No valid Antigravity credentials. "
            "Run: uv run python core/antigravity_auth.py auth account add"
        )

    # --- Request building -------------------------------------------------- #
    def _build_body(
        self,
        messages: list[dict[str, Any]],
        system: str,
        tools: list[Tool] | None,
        max_tokens: int,
    ) -> dict[str, Any]:
        """Build the Antigravity request envelope around a Gemini-format request."""
        contents = _to_gemini_contents(messages, self._thought_sigs)
        inner: dict[str, Any] = {
            "contents": contents,
            "generationConfig": {"maxOutputTokens": max_tokens},
        }
        if system:
            inner["systemInstruction"] = {"parts": [{"text": system}]}
        if tools:
            inner["tools"] = [
                {
                    "functionDeclarations": [
                        {
                            "name": _clean_tool_name(t.name),
                            "description": t.description,
                            # The schema requires a parameters object; supply an
                            # empty object schema for parameterless tools.
                            "parameters": t.parameters
                            or {
                                "type": "object",
                                "properties": {},
                            },
                        }
                        for t in tools
                    ]
                }
            ]
        return {
            "project": self._project_id,
            "model": self.model,
            "request": inner,
            "requestType": "agent",
            "userAgent": "antigravity",
            "requestId": f"agent-{uuid.uuid4()}",
        }

    # --- HTTP transport ---------------------------------------------------- #
    def _post(self, body: dict[str, Any], *, streaming: bool) -> Any:
        """POST to the Antigravity endpoint, falling back through the endpoint list.

        Returns the open HTTP response object; the caller is responsible for
        closing it (``complete()`` and ``stream()`` both do).

        Raises:
            RuntimeError: on a non-retriable HTTP error or when every endpoint fails.
        """
        import urllib.error  # noqa: PLC0415
        import urllib.request  # noqa: PLC0415

        token = self._ensure_token()
        body_bytes = json.dumps(body).encode("utf-8")
        path = (
            "/v1internal:streamGenerateContent?alt=sse"
            if streaming
            else "/v1internal:generateContent"
        )
        headers = {
            **_BASE_HEADERS,
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        }
        if streaming:
            headers["Accept"] = "text/event-stream"
        last_exc: Exception | None = None
        for base_url in _ENDPOINTS:
            url = f"{base_url}{path}"
            req = urllib.request.Request(url, data=body_bytes, headers=headers, method="POST")
            try:
                return urllib.request.urlopen(req, timeout=120)  # noqa: S310
            except urllib.error.HTTPError as exc:
                if exc.code in (401, 403) and self._refresh_token:
                    # Token rejected — refresh once and retry this endpoint.
                    result = _do_token_refresh(self._refresh_token)
                    if result:
                        self._access_token, self._token_expires_at = result
                        headers["Authorization"] = f"Bearer {self._access_token}"
                        req2 = urllib.request.Request(
                            url, data=body_bytes, headers=headers, method="POST"
                        )
                        try:
                            return urllib.request.urlopen(req2, timeout=120)  # noqa: S310
                        except urllib.error.HTTPError as exc2:
                            last_exc = exc2
                            continue
                    last_exc = exc
                    continue
                elif exc.code >= 500:
                    # Server-side failure — try the next endpoint.
                    last_exc = exc
                    continue
                # Include the API response body in the exception for easier debugging.
                try:
                    err_body = exc.read().decode("utf-8", errors="replace")
                except Exception:
                    err_body = "(unreadable)"
                raise RuntimeError(f"Antigravity HTTP {exc.code} from {url}: {err_body}") from exc
            except (urllib.error.URLError, OSError) as exc:
                last_exc = exc
                continue
        raise RuntimeError(
            f"All Antigravity endpoints failed. Last error: {last_exc}"
        ) from last_exc

    # --- LLMProvider interface --------------------------------------------- #
    def complete(
        self,
        messages: list[dict[str, Any]],
        system: str = "",
        tools: list[Tool] | None = None,
        max_tokens: int = 1024,
        response_format: dict[str, Any] | None = None,
        json_mode: bool = False,
        max_retries: int | None = None,
    ) -> LLMResponse:
        """Run a blocking (non-streaming) completion and return the parsed response."""
        if json_mode:
            suffix = "\n\nPlease respond with a valid JSON object."
            system = (system + suffix) if system else suffix.strip()
        body = self._build_body(messages, system, tools, max_tokens)
        # Close the HTTP response promptly instead of leaking the socket
        # until garbage collection.
        resp = self._post(body, streaming=False)
        try:
            raw = json.loads(resp.read())
        finally:
            resp.close()
        return _parse_complete_response(raw, self.model)

    async def stream(
        self,
        messages: list[dict[str, Any]],
        system: str = "",
        tools: list[Tool] | None = None,
        max_tokens: int = 4096,
    ) -> AsyncIterator[StreamEvent]:
        """Stream completion events, bridging the blocking SSE read into asyncio.

        The blocking HTTP/SSE work runs in a single-thread executor; events are
        handed to the event loop through a queue, with ``None`` as the
        end-of-stream sentinel.
        """
        import asyncio  # noqa: PLC0415
        import concurrent.futures  # noqa: PLC0415

        loop = asyncio.get_running_loop()
        queue: asyncio.Queue[StreamEvent | None] = asyncio.Queue()

        def _blocking_work() -> None:
            try:
                body = self._build_body(messages, system, tools, max_tokens)
                http_resp = self._post(body, streaming=True)
                try:
                    for event in _parse_sse_stream(
                        http_resp, self.model, self._thought_sigs.__setitem__
                    ):
                        loop.call_soon_threadsafe(queue.put_nowait, event)
                finally:
                    # Release the socket even when parsing raises mid-stream.
                    http_resp.close()
            except Exception as exc:
                logger.error("Antigravity stream error: %s", exc)
                loop.call_soon_threadsafe(queue.put_nowait, StreamErrorEvent(error=str(exc)))
            finally:
                loop.call_soon_threadsafe(queue.put_nowait, None)  # sentinel

        executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
        fut = loop.run_in_executor(executor, _blocking_work)
        try:
            while True:
                event = await queue.get()
                if event is None:
                    break
                yield event
        finally:
            await fut
            executor.shutdown(wait=False)
+2
View File
@@ -525,6 +525,8 @@ class LiteLLMProvider(LLMProvider):
self._codex_backend = bool(
self.api_base and "chatgpt.com/backend-api/codex" in self.api_base
)
# Antigravity routes through a local OpenAI-compatible proxy — no patches needed.
self._antigravity = bool(self.api_base and "localhost:8069" in self.api_base)
if litellm is None:
raise ImportError(
+329
View File
@@ -552,6 +552,319 @@ def get_kimi_code_token() -> str | None:
return None
# ---------------------------------------------------------------------------
# Antigravity subscription token helpers
# ---------------------------------------------------------------------------
# Antigravity IDE (native macOS/Linux app) stores OAuth tokens in its
# VSCode-style SQLite state database under the key
# "antigravityUnifiedStateSync.oauthToken" as a base64-encoded protobuf blob.
ANTIGRAVITY_IDE_STATE_DB = (
    Path.home()
    / "Library"
    / "Application Support"
    / "Antigravity"
    / "User"
    / "globalStorage"
    / "state.vscdb"
)
# Linux fallback for the IDE state DB
ANTIGRAVITY_IDE_STATE_DB_LINUX = (
    Path.home() / ".config" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
)
# Antigravity credentials stored by native OAuth implementation
# (written by the antigravity-auth CLI; read by _read_antigravity_credentials()).
ANTIGRAVITY_AUTH_FILE = Path.home() / ".hive" / "antigravity-accounts.json"
# Google OAuth2 token endpoint used for refresh-token exchanges.
ANTIGRAVITY_OAUTH_TOKEN_URL = "https://oauth2.googleapis.com/token"
_ANTIGRAVITY_TOKEN_LIFETIME_SECS = 3600  # Google access tokens expire in 1 hour
# SQLite ItemTable key under which the IDE stores its OAuth token blob.
_ANTIGRAVITY_IDE_STATE_DB_KEY = "antigravityUnifiedStateSync.oauthToken"
def _read_antigravity_ide_credentials() -> dict | None:
    """Read credentials from the Antigravity IDE's SQLite state database.

    The Antigravity desktop IDE (VSCode-based) stores its OAuth token as a
    base64-encoded protobuf blob in a SQLite database. The access token is
    a standard Google OAuth ``ya29.*`` bearer token.

    Returns:
        Dict with ``accessToken`` and optionally ``refreshToken`` keys,
        plus ``_source: "ide"`` to skip file-based save on refresh.
        Returns None if the database is absent or the key is not found.
    """
    import re
    import sqlite3

    for db_path in (ANTIGRAVITY_IDE_STATE_DB, ANTIGRAVITY_IDE_STATE_DB_LINUX):
        if not db_path.exists():
            continue
        try:
            # Read-only URI connect: never lock or mutate the IDE's live DB.
            con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True)
            try:
                row = con.execute(
                    "SELECT value FROM ItemTable WHERE key = ?",
                    (_ANTIGRAVITY_IDE_STATE_DB_KEY,),
                ).fetchone()
            finally:
                con.close()
            if not row:
                continue
            import base64

            blob = base64.b64decode(row[0])
            # The protobuf blob contains the access token (ya29.*) and
            # refresh token (1//*) as length-prefixed UTF-8 strings.
            # Decode the inner base64 layer and extract with regex.
            inner_b64_candidates = re.findall(rb"[A-Za-z0-9+/=_\-]{40,}", blob)
            access_token: str | None = None
            refresh_token: str | None = None
            for candidate in inner_b64_candidates:
                try:
                    # urlsafe b64 requires padding to a multiple of 4.
                    padded = candidate + b"=" * (-len(candidate) % 4)
                    inner = base64.urlsafe_b64decode(padded)
                except Exception:
                    continue
                if not access_token:
                    m = re.search(rb"ya29\.[A-Za-z0-9_\-\.]+", inner)
                    if m:
                        access_token = m.group(0).decode("ascii")
                if not refresh_token:
                    m = re.search(rb"1//[A-Za-z0-9_\-\.]+", inner)
                    if m:
                        refresh_token = m.group(0).decode("ascii")
                if access_token and refresh_token:
                    break
            if access_token:
                # Normalize into the same shape as the JSON accounts file so
                # callers can treat both sources uniformly.
                return {
                    "accounts": [
                        {
                            "accessToken": access_token,
                            "refreshToken": refresh_token or "",
                        }
                    ],
                    "_source": "ide",
                    "_db_path": str(db_path),
                }
        except Exception as exc:
            logger.debug("Failed to read Antigravity IDE state DB: %s", exc)
            continue
    return None
def _read_antigravity_credentials() -> dict | None:
    """Read Antigravity auth data from all supported credential sources.

    Checks in order:

    1. Antigravity IDE SQLite state database (native macOS/Linux app)
    2. Native OAuth credentials file (~/.hive/antigravity-accounts.json)

    Returns:
        Auth data dict with an ``accounts`` list on success, None otherwise.
    """
    # 1. Native Antigravity IDE (primary on macOS)
    from_ide = _read_antigravity_ide_credentials()
    if from_ide:
        return from_ide
    # 2. Native OAuth credentials file
    if not ANTIGRAVITY_AUTH_FILE.exists():
        return None
    try:
        data = json.loads(ANTIGRAVITY_AUTH_FILE.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError):
        return None
    accounts = data.get("accounts", [])
    if accounts and isinstance(accounts[0], dict):
        return data
    return None
def _is_antigravity_token_expired(auth_data: dict) -> bool:
    """Return True when the Antigravity access token is expired or near expiry.

    IDE-sourced credentials: the state DB's mtime serves as the last-refresh
    time, since the IDE keeps the DB fresh while it's running.
    JSON-sourced credentials: the ``last_refresh`` field (ISO string or epoch
    number), falling back to the credentials file's mtime.
    """
    import time
    from datetime import datetime

    now = time.time()
    if auth_data.get("_source") == "ide":
        # The IDE refreshes tokens automatically while running; the DB file's
        # mtime approximates when the token was last updated.
        db_path = Path(auth_data.get("_db_path", str(ANTIGRAVITY_IDE_STATE_DB)))
        try:
            refreshed_at = db_path.stat().st_mtime
        except OSError:
            return True
    else:
        raw: float | str | None = auth_data.get("last_refresh")
        if raw is None:
            try:
                raw = ANTIGRAVITY_AUTH_FILE.stat().st_mtime
            except OSError:
                return True
        elif isinstance(raw, str):
            try:
                raw = datetime.fromisoformat(raw.replace("Z", "+00:00")).timestamp()
            except (ValueError, TypeError):
                return True
        refreshed_at = float(raw)
    expires_at = refreshed_at + _ANTIGRAVITY_TOKEN_LIFETIME_SECS
    return now >= (expires_at - _TOKEN_REFRESH_BUFFER_SECS)
def _refresh_antigravity_token(refresh_token: str) -> dict | None:
    """Refresh the Antigravity access token via Google OAuth.

    POSTs form-encoded ``grant_type=refresh_token`` to the Google token
    endpoint using Antigravity's public OAuth client ID (plus the client
    secret when one is configured).

    Returns:
        Parsed response dict (containing ``access_token``) on success,
        None on any error.
    """
    import urllib.error
    import urllib.parse
    import urllib.request

    from framework.config import get_antigravity_client_id, get_antigravity_client_secret

    form: dict = {
        "grant_type": "refresh_token",
        "refresh_token": refresh_token,
        "client_id": get_antigravity_client_id(),
    }
    secret = get_antigravity_client_secret()
    if secret:
        form["client_secret"] = secret
    request = urllib.request.Request(
        ANTIGRAVITY_OAUTH_TOKEN_URL,
        data=urllib.parse.urlencode(form).encode("utf-8"),
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(request, timeout=15) as resp:  # noqa: S310
            return json.loads(resp.read())
    except (urllib.error.URLError, json.JSONDecodeError, TimeoutError, OSError) as exc:
        logger.debug("Antigravity token refresh failed: %s", exc)
        return None
def _save_refreshed_antigravity_credentials(auth_data: dict, token_data: dict) -> None:
    """Write refreshed tokens back to the Antigravity JSON credentials file.

    Skipped for IDE-sourced credentials (the IDE manages its own DB).
    Updates ``accounts[0].accessToken`` (and ``refreshToken`` if present),
    then persists ``last_refresh`` as an ISO-8601 UTC string.
    """
    from datetime import datetime

    # IDE manages its own state — we do not write back to its SQLite DB
    if auth_data.get("_source") == "ide":
        return
    try:
        accounts = auth_data.get("accounts", [])
        if not accounts:
            return
        account = accounts[0]
        account["accessToken"] = token_data["access_token"]
        # Google only returns a new refresh_token on some grants; keep the old
        # one when absent.
        if "refresh_token" in token_data:
            account["refreshToken"] = token_data["refresh_token"]
        auth_data["accounts"] = accounts
        # NOTE(review): `UTC` is not imported in this function — it is assumed
        # to come from a module-level `from datetime import UTC`; confirm.
        auth_data["last_refresh"] = datetime.now(UTC).isoformat()
        ANTIGRAVITY_AUTH_FILE.parent.mkdir(parents=True, exist_ok=True)
        # 0o600: the file holds bearer tokens — keep it owner-readable only.
        fd = os.open(ANTIGRAVITY_AUTH_FILE, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        with os.fdopen(fd, "w", encoding="utf-8") as f:
            json.dump(auth_data, f, indent=2)
        logger.debug("Antigravity credentials refreshed and saved")
    except (OSError, KeyError) as exc:
        logger.debug("Failed to save refreshed Antigravity credentials: %s", exc)
def get_antigravity_token() -> str | None:
    """Get the OAuth access token from an Antigravity subscription.

    Credential sources checked in order:

    1. Antigravity IDE SQLite state DB (native app, macOS/Linux)
    2. antigravity-auth CLI JSON file

    For IDE credentials the token is read directly (the IDE refreshes it
    automatically while running). For JSON credentials an automatic OAuth
    refresh is attempted when the token is near expiry.

    Returns:
        The ``ya29.*`` Google OAuth access token, or None if unavailable.
    """
    auth_data = _read_antigravity_credentials()
    if not auth_data:
        return None
    accounts = auth_data.get("accounts", [])
    if not accounts:
        return None
    first_account = accounts[0]
    access_token = first_account.get("accessToken")
    if not access_token:
        return None
    if not _is_antigravity_token_expired(auth_data):
        return access_token
    # Token is expired or near expiry — attempt a refresh.
    refresh_token = first_account.get("refreshToken")
    if not refresh_token:
        logger.warning(
            "Antigravity token expired and no refresh token available. "
            "Re-open the Antigravity IDE to refresh, or run 'antigravity-auth accounts add'."
        )
        return access_token  # return stale token; proxy may still accept it briefly
    logger.info("Antigravity token expired or near expiry, refreshing...")
    refreshed = _refresh_antigravity_token(refresh_token)
    if refreshed and "access_token" in refreshed:
        _save_refreshed_antigravity_credentials(auth_data, refreshed)
        return refreshed["access_token"]
    logger.warning(
        "Antigravity token refresh failed. "
        "Re-open the Antigravity IDE or run 'antigravity-auth accounts add'."
    )
    return access_token
def _is_antigravity_proxy_available() -> bool:
    """Return True if antigravity-auth serve is running on localhost:8069."""
    import socket

    try:
        conn = socket.create_connection(("localhost", 8069), timeout=0.5)
    except (OSError, TimeoutError):
        return False
    # Connection succeeded — close it immediately; we only probed reachability.
    with conn:
        return True
@dataclass
class AgentInfo:
"""Information about an exported agent."""
@@ -1158,6 +1471,7 @@ class AgentRunner:
use_claude_code = llm_config.get("use_claude_code_subscription", False)
use_codex = llm_config.get("use_codex_subscription", False)
use_kimi_code = llm_config.get("use_kimi_code_subscription", False)
use_antigravity = llm_config.get("use_antigravity_subscription", False)
api_base = llm_config.get("api_base")
api_key = None
@@ -1179,6 +1493,8 @@ class AgentRunner:
if not api_key:
print("Warning: Kimi Code subscription configured but no key found.")
print("Run 'kimi /login' to authenticate, then try again.")
elif use_antigravity:
pass # AntigravityProvider handles credentials internally
if api_key and use_claude_code:
# Use litellm's built-in Anthropic OAuth support.
@@ -1217,6 +1533,19 @@ class AgentRunner:
api_key=api_key,
api_base=api_base,
)
elif use_antigravity:
# Direct OAuth to Google's internal Cloud Code Assist gateway.
# No local proxy required — AntigravityProvider handles token
# refresh and Gemini-format request/response conversion natively.
from framework.llm.antigravity import AntigravityProvider # noqa: PLC0415
provider = AntigravityProvider(model=self.model)
if not provider.has_credentials():
print(
"Warning: Antigravity credentials not found. "
"Run: uv run python core/antigravity_auth.py auth account add"
)
self._llm = provider
else:
# Local models (e.g. Ollama) don't need an API key
if self._is_local_model(self.model):
+42 -20
View File
@@ -96,8 +96,7 @@ class SessionManager:
Internal helper use create_session() or create_session_with_worker().
"""
from framework.config import RuntimeConfig
from framework.llm.litellm import LiteLLMProvider
from framework.config import RuntimeConfig, get_hive_config
from framework.runtime.event_bus import EventBus
ts = datetime.now().strftime("%Y%m%d_%H%M%S")
@@ -111,12 +110,20 @@ class SessionManager:
rc = RuntimeConfig(model=model or self._model or RuntimeConfig().model)
# Session owns these — shared with queen and worker
llm = LiteLLMProvider(
model=rc.model,
api_key=rc.api_key,
api_base=rc.api_base,
**rc.extra_kwargs,
)
llm_config = get_hive_config().get("llm", {})
if llm_config.get("use_antigravity_subscription"):
from framework.llm.antigravity import AntigravityProvider
llm = AntigravityProvider(model=rc.model)
else:
from framework.llm.litellm import LiteLLMProvider
llm = LiteLLMProvider(
model=rc.model,
api_key=rc.api_key,
api_base=rc.api_base,
**rc.extra_kwargs,
)
event_bus = EventBus()
session = Session(
@@ -313,17 +320,25 @@ class SessionManager:
# with the correct worker credentials so _setup() doesn't fall back
# to the queen's llm config (which may be a different provider).
if worker_model and not model:
from framework.llm.litellm import LiteLLMProvider
from framework.config import get_hive_config
worker_api_key = get_worker_api_key()
worker_api_base = get_worker_api_base()
worker_extra = get_worker_llm_extra_kwargs()
runner._llm = LiteLLMProvider(
model=resolved_model,
api_key=worker_api_key,
api_base=worker_api_base,
**worker_extra,
)
worker_llm_cfg = get_hive_config().get("worker_llm", {})
if worker_llm_cfg.get("use_antigravity_subscription"):
from framework.llm.antigravity import AntigravityProvider
runner._llm = AntigravityProvider(model=resolved_model)
else:
from framework.llm.litellm import LiteLLMProvider
worker_api_key = get_worker_api_key()
worker_api_base = get_worker_api_base()
worker_extra = get_worker_llm_extra_kwargs()
runner._llm = LiteLLMProvider(
model=resolved_model,
api_key=worker_api_key,
api_base=worker_api_base,
**worker_extra,
)
# Setup with session's event bus
if runner._agent_runtime is None:
@@ -1032,10 +1047,17 @@ class SessionManager:
_consolidation_session_dir = queen_dir
async def _on_compaction(_event) -> None:
# Only consolidate on queen compactions — worker and subagent
# compactions are frequent and don't warrant a memory update.
if getattr(_event, "stream_id", None) != "queen":
return
from framework.agents.queen.queen_memory import consolidate_queen_memory
await consolidate_queen_memory(
session.id, _consolidation_session_dir, _consolidation_llm
asyncio.create_task(
consolidate_queen_memory(
session.id, _consolidation_session_dir, _consolidation_llm
),
name=f"queen-memory-consolidation-{session.id}",
)
from framework.runtime.event_bus import EventType as _ET
+4
View File
@@ -12,6 +12,7 @@ from framework.skills.discovery import DiscoveryConfig, SkillDiscovery
from framework.skills.manager import SkillsManager, SkillsManagerConfig
from framework.skills.models import TrustStatus
from framework.skills.parser import ParsedSkill, parse_skill_md
from framework.skills.skill_errors import SkillError, SkillErrorCode, log_skill_error
from framework.skills.trust import TrustedRepoStore, TrustGate
__all__ = [
@@ -28,4 +29,7 @@ __all__ = [
"TrustedRepoStore",
"TrustStatus",
"parse_skill_md",
"SkillError",
"SkillErrorCode",
"log_skill_error",
]
+9 -1
View File
@@ -10,6 +10,7 @@ import logging
from xml.sax.saxutils import escape
from framework.skills.parser import ParsedSkill
from framework.skills.skill_errors import SkillErrorCode, log_skill_error
logger = logging.getLogger(__name__)
@@ -97,7 +98,14 @@ class SkillCatalog:
for name in skill_names:
skill = self.get(name)
if skill is None:
logger.warning("Pre-activated skill '%s' not found in catalog", name)
log_skill_error(
logger,
"warning",
SkillErrorCode.SKILL_NOT_FOUND,
what=f"Pre-activated skill '{name}' not found in catalog",
why="The skill was listed for pre-activation but was not discovered.",
fix=f"Check that a SKILL.md for '{name}' exists in a scanned directory.",
)
continue
if self.is_activated(name):
continue # Already activated, skip duplicate
+53 -4
View File
@@ -11,6 +11,7 @@ from pathlib import Path
from framework.skills.config import SkillsConfig
from framework.skills.parser import ParsedSkill, parse_skill_md
from framework.skills.skill_errors import SkillErrorCode, log_skill_error
logger = logging.getLogger(__name__)
@@ -60,12 +61,14 @@ class DefaultSkillManager:
self._config = config or SkillsConfig()
self._skills: dict[str, ParsedSkill] = {}
self._loaded = False
self._error_count = 0
def load(self) -> None:
"""Load all enabled default skill SKILL.md files."""
if self._loaded:
return
error_count = 0
for skill_name, dir_name in SKILL_REGISTRY.items():
if not self._config.is_default_enabled(skill_name):
logger.info("Default skill '%s' disabled by config", skill_name)
@@ -73,17 +76,34 @@ class DefaultSkillManager:
skill_path = _DEFAULT_SKILLS_DIR / dir_name / "SKILL.md"
if not skill_path.is_file():
logger.error("Default skill SKILL.md not found: %s", skill_path)
log_skill_error(
logger,
"error",
SkillErrorCode.SKILL_NOT_FOUND,
what=f"Default skill SKILL.md not found: '{skill_path}'",
why=f"The framework skill '{skill_name}' is missing its SKILL.md file.",
fix="Reinstall the hive framework — this file is part of the package.",
)
error_count += 1
continue
parsed = parse_skill_md(skill_path, source_scope="framework")
if parsed is None:
logger.error("Failed to parse default skill: %s", skill_path)
log_skill_error(
logger,
"error",
SkillErrorCode.SKILL_PARSE_ERROR,
what=f"Failed to parse default skill '{skill_name}'",
why=f"parse_skill_md returned None for '{skill_path}'.",
fix="Reinstall the hive framework — this file may be corrupted.",
)
error_count += 1
continue
self._skills[skill_name] = parsed
self._loaded = True
self._error_count = error_count
def build_protocols_prompt(self) -> str:
"""Build the combined operational protocols section.
@@ -127,8 +147,23 @@ class DefaultSkillManager:
"""Log which default skills are active and their configuration."""
if not self._skills:
logger.info("Default skills: all disabled")
return
# DX-3: Per-skill structured startup log
for skill_name in SKILL_REGISTRY:
if skill_name in self._skills:
overrides = self._config.get_default_overrides(skill_name)
status = f"loaded overrides={overrides}" if overrides else "loaded"
elif not self._config.is_default_enabled(skill_name):
status = "disabled"
else:
status = "error"
logger.info(
"skill_startup name=%s scope=framework status=%s",
skill_name,
status,
)
# Original active skills log line (preserved for backward compatibility)
active = []
for skill_name in SKILL_REGISTRY:
if skill_name in self._skills:
@@ -138,7 +173,21 @@ class DefaultSkillManager:
else:
active.append(skill_name)
logger.info("Default skills active: %s", ", ".join(active))
if active:
logger.info("Default skills active: %s", ", ".join(active))
# DX-3: Summary line with error count
total = len(SKILL_REGISTRY)
active_count = len(self._skills)
error_count = getattr(self, "_error_count", 0)
disabled_count = total - active_count - error_count
logger.info(
"Skills: %d default (%d active, %d disabled, %d error)",
total,
active_count,
disabled_count,
error_count,
)
@property
def active_skill_names(self) -> list[str]:
+8 -5
View File
@@ -11,6 +11,7 @@ from dataclasses import dataclass
from pathlib import Path
from framework.skills.parser import ParsedSkill, parse_skill_md
from framework.skills.skill_errors import SkillErrorCode, log_skill_error
logger = logging.getLogger(__name__)
@@ -172,11 +173,13 @@ class SkillDiscovery:
for skill in skills:
if skill.name in seen:
existing = seen[skill.name]
logger.warning(
"Skill name collision: '%s' from %s overrides %s",
skill.name,
skill.location,
existing.location,
log_skill_error(
logger,
"warning",
SkillErrorCode.SKILL_COLLISION,
what=f"Skill name collision: '{skill.name}'",
why=f"'{skill.location}' overrides '{existing.location}'.",
fix="Rename one of the conflicting skill directories to use a unique name.",
)
seen[skill.name] = skill
+10
View File
@@ -146,6 +146,16 @@ class SkillsManager:
default_mgr.load()
default_mgr.log_active_skills()
protocols_prompt = default_mgr.build_protocols_prompt()
# DX-3: Community skill startup summary
if self._config.project_root is not None and not self._config.skip_community_discovery:
community_count = len(catalog._skills) if catalog_prompt else 0
pre_activated_count = len(skills_config.skills) if skills_config.skills else 0
logger.info(
"Skills: %d community (%d catalog, %d pre-activated)",
community_count,
community_count,
pre_activated_count,
)
# 3. Cache
self._catalog_prompt = catalog_prompt
+81 -14
View File
@@ -13,6 +13,8 @@ from dataclasses import dataclass
from pathlib import Path
from typing import Any
from framework.skills.skill_errors import SkillErrorCode, log_skill_error
logger = logging.getLogger(__name__)
# Maximum name length before a warning is logged
@@ -74,17 +76,38 @@ def parse_skill_md(path: Path, source_scope: str = "project") -> ParsedSkill | N
try:
content = path.read_text(encoding="utf-8")
except OSError as exc:
logger.error("Failed to read %s: %s", path, exc)
log_skill_error(
logger,
"error",
SkillErrorCode.SKILL_ACTIVATION_FAILED,
what=f"Failed to read '{path}'",
why=str(exc),
fix="Check the file exists and has read permissions.",
)
return None
if not content.strip():
logger.error("Empty SKILL.md: %s", path)
log_skill_error(
logger,
"error",
SkillErrorCode.SKILL_PARSE_ERROR,
what=f"Invalid SKILL.md at '{path}'",
why="The file exists but contains no content.",
fix="Add valid YAML frontmatter and a markdown body to the SKILL.md.",
)
return None
# Split on --- delimiters (first two occurrences)
parts = content.split("---", 2)
if len(parts) < 3:
logger.error("SKILL.md missing YAML frontmatter delimiters (---): %s", path)
log_skill_error(
logger,
"error",
SkillErrorCode.SKILL_PARSE_ERROR,
what=f"Invalid SKILL.md at '{path}'",
why="Missing YAML frontmatter (---).",
fix="Wrap the frontmatter with --- on its own line at the top and bottom.",
)
return None
# parts[0] is content before first --- (should be empty or whitespace)
@@ -94,7 +117,14 @@ def parse_skill_md(path: Path, source_scope: str = "project") -> ParsedSkill | N
body = parts[2].strip()
if not raw_yaml:
logger.error("Empty YAML frontmatter in %s", path)
log_skill_error(
logger,
"error",
SkillErrorCode.SKILL_PARSE_ERROR,
what=f"Invalid SKILL.md at '{path}'",
why="The --- delimiters are present but the YAML block is empty.",
fix="Add at least 'name' and 'description' fields to the frontmatter.",
)
return None
# Parse YAML
@@ -108,19 +138,47 @@ def parse_skill_md(path: Path, source_scope: str = "project") -> ParsedSkill | N
try:
fixed = _try_fix_yaml(raw_yaml)
frontmatter = yaml.safe_load(fixed)
logger.warning("Fixed YAML parse issues in %s (unquoted colons)", path)
log_skill_error(
logger,
"warning",
SkillErrorCode.SKILL_YAML_FIXUP,
what=f"Auto-fixed YAML in '{path}'",
why="Unquoted colon values detected in frontmatter.",
fix='Wrap values containing colons in quotes e.g. description: "Use for: research"',
)
except yaml.YAMLError as exc:
logger.error("Unparseable YAML in %s: %s", path, exc)
log_skill_error(
logger,
"error",
SkillErrorCode.SKILL_PARSE_ERROR,
what=f"Invalid SKILL.md at '{path}'",
why=str(exc),
fix="Validate the YAML frontmatter at https://yaml-online-parser.appspot.com/",
)
return None
if not isinstance(frontmatter, dict):
logger.error("YAML frontmatter is not a mapping in %s", path)
log_skill_error(
logger,
"error",
SkillErrorCode.SKILL_PARSE_ERROR,
what=f"Invalid SKILL.md at '{path}'",
why="YAML frontmatter is not a key-value mapping.",
fix="Ensure the frontmatter is valid YAML with key: value pairs.",
)
return None
# Required: description
description = frontmatter.get("description")
if not description or not str(description).strip():
logger.error("Missing or empty 'description' in %s — skipping skill", path)
log_skill_error(
logger,
"error",
SkillErrorCode.SKILL_MISSING_DESCRIPTION,
what=f"Missing 'description' in '{path}'",
why="The 'description' field is required but is absent or empty.",
fix="Add a non-empty 'description' field to the YAML frontmatter.",
)
return None
# Required: name (fallback to parent directory name)
@@ -128,7 +186,14 @@ def parse_skill_md(path: Path, source_scope: str = "project") -> ParsedSkill | N
parent_dir_name = path.parent.name
if not name or not str(name).strip():
name = parent_dir_name
logger.warning("Missing 'name' in %s — using directory name '%s'", path, name)
log_skill_error(
logger,
"warning",
SkillErrorCode.SKILL_NAME_MISMATCH,
what=f"Missing 'name' in '{path}' — using directory name '{name}'",
why="The 'name' field is absent from the YAML frontmatter.",
fix=f"Add 'name: {name}' to the frontmatter to make this explicit.",
)
else:
name = str(name).strip()
@@ -137,11 +202,13 @@ def parse_skill_md(path: Path, source_scope: str = "project") -> ParsedSkill | N
logger.warning("Skill name exceeds %d chars in %s: '%s'", _MAX_NAME_LENGTH, path, name)
if name != parent_dir_name and not name.endswith(f".{parent_dir_name}"):
logger.warning(
"Skill name '%s' doesn't match parent directory '%s' in %s",
name,
parent_dir_name,
path,
log_skill_error(
logger,
"warning",
SkillErrorCode.SKILL_NAME_MISMATCH,
what=f"Name mismatch in '{path}'",
why=f"Skill name '{name}' doesn't match directory '{parent_dir_name}'.",
fix=f"Rename the directory to '{name}' or set name to '{parent_dir_name}'.",
)
return ParsedSkill(
+70
View File
@@ -0,0 +1,70 @@
"""Structured error codes and diagnostics for the Hive skill system.
Implements DX-1 (structured error codes) and DX-2 (what/why/fix format)
from the skill system PRD §7.5.
"""
from __future__ import annotations
import logging
from enum import Enum
class SkillErrorCode(Enum):
    """Standardized error codes for skill system operations (DX-1)."""

    # Each member's value mirrors its name so log lines can embed a
    # stable, grep-able string via `code.value`.
    SKILL_NOT_FOUND = "SKILL_NOT_FOUND"  # skill absent from catalog or disk
    SKILL_PARSE_ERROR = "SKILL_PARSE_ERROR"  # SKILL.md empty/malformed/unparseable YAML
    SKILL_ACTIVATION_FAILED = "SKILL_ACTIVATION_FAILED"  # SKILL.md could not be read
    SKILL_MISSING_DESCRIPTION = "SKILL_MISSING_DESCRIPTION"  # required 'description' field absent/empty
    SKILL_YAML_FIXUP = "SKILL_YAML_FIXUP"  # frontmatter auto-fixed (unquoted colons)
    SKILL_NAME_MISMATCH = "SKILL_NAME_MISMATCH"  # 'name' missing or differs from parent directory
    SKILL_COLLISION = "SKILL_COLLISION"  # two discovered skills share one name
class SkillError(Exception):
    """Structured exception for skill system errors (DX-2).

    Raised in strict validation paths. Also used as the base
    format contract for log_skill_error() log messages.
    """

    def __init__(self, code: SkillErrorCode, what: str, why: str, fix: str):
        # Keep the structured fields accessible so callers can inspect
        # the code/what/why/fix parts without parsing the message text.
        self.code = code
        self.what = what
        self.why = why
        self.fix = fix
        # Render the what/why/fix contract as a multi-line message,
        # one labelled segment per line.
        segments = (
            f"[{code.value}]",
            f"What failed: {what}",
            f"Why: {why}",
            f"Fix: {fix}",
        )
        self.message = "\n".join(segments)
        super().__init__(self.message)
def log_skill_error(
    logger: logging.Logger,
    level: str,
    code: SkillErrorCode,
    what: str,
    why: str,
    fix: str,
) -> None:
    """Emit a structured skill diagnostic log with consistent format (DX-2).

    Args:
        logger: The module logger to emit to.
        level: Log level string 'error', 'warning', or 'info'.
        code: Structured error code.
        what: What failed (specific skill name and path).
        why: Root cause.
        fix: Concrete next step for the developer.
    """
    # Resolve the logger method named by `level` (e.g. logger.warning).
    emit = getattr(logger, level)
    # Structured fields ride along on the LogRecord via `extra` so log
    # processors can filter on them without parsing the message.
    structured = {
        "skill_error_code": code.value,
        "what": what,
        "why": why,
        "fix": fix,
    }
    emit(
        f"[{code.value}] What failed: {what} | Why: {why} | Fix: {fix}",
        extra=structured,
    )
+375 -135
View File
@@ -1,5 +1,14 @@
import { memo, useState, useRef, useEffect } from "react";
import { Send, Square, Crown, Cpu, Check, Loader2, Paperclip, X } from "lucide-react";
import { memo, useState, useRef, useEffect, useMemo } from "react";
import {
Send,
Square,
Crown,
Cpu,
Check,
Loader2,
Paperclip,
X,
} from "lucide-react";
export interface ImageContent {
type: "image_url";
@@ -15,6 +24,9 @@ export interface ContextUsageEntry {
import MarkdownContent from "@/components/MarkdownContent";
import QuestionWidget from "@/components/QuestionWidget";
import MultiQuestionWidget from "@/components/MultiQuestionWidget";
import ParallelSubagentBubble, {
type SubagentGroup,
} from "@/components/ParallelSubagentBubble";
export interface ChatMessage {
id: string;
@@ -22,7 +34,13 @@ export interface ChatMessage {
agentColor: string;
content: string;
timestamp: string;
type?: "system" | "agent" | "user" | "tool_status" | "worker_input_request" | "run_divider";
type?:
| "system"
| "agent"
| "user"
| "tool_status"
| "worker_input_request"
| "run_divider";
role?: "queen" | "worker";
/** Which worker thread this message belongs to (worker agent name) */
thread?: string;
@@ -32,6 +50,10 @@ export interface ChatMessage {
phase?: "planning" | "building" | "staging" | "running";
/** Images attached to a user message */
images?: ImageContent[];
/** Backend node_id that produced this message — used for subagent grouping */
nodeId?: string;
/** Backend execution_id for this message */
executionId?: string;
}
interface ChatPanelProps {
@@ -52,7 +74,9 @@ interface ChatPanelProps {
/** Options for the pending question */
pendingOptions?: string[] | null;
/** Multiple questions from ask_user_multiple */
pendingQuestions?: { id: string; prompt: string; options?: string[] }[] | null;
pendingQuestions?:
| { id: string; prompt: string; options?: string[] }[]
| null;
/** Called when user submits an answer to the pending question */
onQuestionSubmit?: (answer: string, isOther: boolean) => void;
/** Called when user submits answers to multiple questions */
@@ -88,7 +112,8 @@ const TOOL_HEX = [
function toolHex(name: string): string {
let hash = 0;
for (let i = 0; i < name.length; i++) hash = (hash * 31 + name.charCodeAt(i)) | 0;
for (let i = 0; i < name.length; i++)
hash = (hash * 31 + name.charCodeAt(i)) | 0;
return TOOL_HEX[Math.abs(hash) % TOOL_HEX.length];
}
@@ -136,12 +161,18 @@ function ToolActivityRow({ content }: { content: string }) {
<span
key={`run-${p.name}`}
className="inline-flex items-center gap-1 text-[11px] px-2.5 py-0.5 rounded-full"
style={{ color: hex, backgroundColor: `${hex}18`, border: `1px solid ${hex}35` }}
style={{
color: hex,
backgroundColor: `${hex}18`,
border: `1px solid ${hex}35`,
}}
>
<Loader2 className="w-2.5 h-2.5 animate-spin" />
{p.name}
{p.count > 1 && (
<span className="text-[10px] font-medium opacity-70">×{p.count}</span>
<span className="text-[10px] font-medium opacity-70">
×{p.count}
</span>
)}
</span>
);
@@ -152,7 +183,11 @@ function ToolActivityRow({ content }: { content: string }) {
<span
key={`done-${p.name}`}
className="inline-flex items-center gap-1 text-[11px] px-2.5 py-0.5 rounded-full"
style={{ color: hex, backgroundColor: `${hex}18`, border: `1px solid ${hex}35` }}
style={{
color: hex,
backgroundColor: `${hex}18`,
border: `1px solid ${hex}35`,
}}
>
<Check className="w-2.5 h-2.5" />
{p.name}
@@ -167,109 +202,148 @@ function ToolActivityRow({ content }: { content: string }) {
);
}
const MessageBubble = memo(function MessageBubble({ msg, queenPhase }: { msg: ChatMessage; queenPhase?: "planning" | "building" | "staging" | "running" }) {
const isUser = msg.type === "user";
const isQueen = msg.role === "queen";
const color = getColor(msg.agent, msg.role);
const MessageBubble = memo(
function MessageBubble({
msg,
queenPhase,
}: {
msg: ChatMessage;
queenPhase?: "planning" | "building" | "staging" | "running";
}) {
const isUser = msg.type === "user";
const isQueen = msg.role === "queen";
const color = getColor(msg.agent, msg.role);
if (msg.type === "run_divider") {
return (
<div className="flex items-center gap-3 py-2 my-1">
<div className="flex-1 h-px bg-border/60" />
<span className="text-[10px] text-muted-foreground font-medium uppercase tracking-wider">
{msg.content}
</span>
<div className="flex-1 h-px bg-border/60" />
</div>
);
}
if (msg.type === "system") {
return (
<div className="flex justify-center py-1">
<span className="text-[11px] text-muted-foreground bg-muted/60 px-3 py-1.5 rounded-full">
{msg.content}
</span>
</div>
);
}
if (msg.type === "tool_status") {
return <ToolActivityRow content={msg.content} />;
}
if (isUser) {
return (
<div className="flex justify-end">
<div className="max-w-[75%] bg-primary text-primary-foreground text-sm leading-relaxed rounded-2xl rounded-br-md px-4 py-3">
{msg.images && msg.images.length > 0 && (
<div className="flex flex-wrap gap-2 mb-2">
{msg.images.map((img, i) => (
<img
key={i}
src={img.image_url.url}
alt={`attachment ${i + 1}`}
className="max-h-48 max-w-full rounded-lg object-contain"
/>
))}
</div>
)}
{msg.content && <p className="whitespace-pre-wrap break-words">{msg.content}</p>}
if (msg.type === "run_divider") {
return (
<div className="flex items-center gap-3 py-2 my-1">
<div className="flex-1 h-px bg-border/60" />
<span className="text-[10px] text-muted-foreground font-medium uppercase tracking-wider">
{msg.content}
</span>
<div className="flex-1 h-px bg-border/60" />
</div>
</div>
);
}
);
}
return (
<div className="flex gap-3">
<div
className={`flex-shrink-0 ${isQueen ? "w-9 h-9" : "w-7 h-7"} rounded-xl flex items-center justify-center`}
style={{
backgroundColor: `${color}18`,
border: `1.5px solid ${color}35`,
boxShadow: isQueen ? `0 0 12px ${color}20` : undefined,
}}
>
{isQueen ? (
<Crown className="w-4 h-4" style={{ color }} />
) : (
<Cpu className="w-3.5 h-3.5" style={{ color }} />
)}
</div>
<div className={`flex-1 min-w-0 ${isQueen ? "max-w-[85%]" : "max-w-[75%]"}`}>
<div className="flex items-center gap-2 mb-1">
<span className={`font-medium ${isQueen ? "text-sm" : "text-xs"}`} style={{ color }}>
{msg.agent}
</span>
<span
className={`text-[10px] font-medium px-1.5 py-0.5 rounded-md ${
isQueen ? "bg-primary/15 text-primary" : "bg-muted text-muted-foreground"
}`}
>
{isQueen
? ((msg.phase ?? queenPhase) === "running"
? "running"
: (msg.phase ?? queenPhase) === "staging"
? "staging"
: (msg.phase ?? queenPhase) === "planning"
? "planning"
: "building")
: "Worker"}
if (msg.type === "system") {
return (
<div className="flex justify-center py-1">
<span className="text-[11px] text-muted-foreground bg-muted/60 px-3 py-1.5 rounded-full">
{msg.content}
</span>
</div>
);
}
if (msg.type === "tool_status") {
return <ToolActivityRow content={msg.content} />;
}
if (isUser) {
return (
<div className="flex justify-end">
<div className="max-w-[75%] bg-primary text-primary-foreground text-sm leading-relaxed rounded-2xl rounded-br-md px-4 py-3">
{msg.images && msg.images.length > 0 && (
<div className="flex flex-wrap gap-2 mb-2">
{msg.images.map((img, i) => (
<img
key={i}
src={img.image_url.url}
alt={`attachment ${i + 1}`}
className="max-h-48 max-w-full rounded-lg object-contain"
/>
))}
</div>
)}
{msg.content && (
<p className="whitespace-pre-wrap break-words">{msg.content}</p>
)}
</div>
</div>
);
}
return (
<div className="flex gap-3">
<div
className={`flex-shrink-0 ${isQueen ? "w-9 h-9" : "w-7 h-7"} rounded-xl flex items-center justify-center`}
style={{
backgroundColor: `${color}18`,
border: `1.5px solid ${color}35`,
boxShadow: isQueen ? `0 0 12px ${color}20` : undefined,
}}
>
{isQueen ? (
<Crown className="w-4 h-4" style={{ color }} />
) : (
<Cpu className="w-3.5 h-3.5" style={{ color }} />
)}
</div>
<div
className={`text-sm leading-relaxed rounded-2xl rounded-tl-md px-4 py-3 ${
isQueen ? "border border-primary/20 bg-primary/5" : "bg-muted/60"
}`}
className={`flex-1 min-w-0 ${isQueen ? "max-w-[85%]" : "max-w-[75%]"}`}
>
<MarkdownContent content={msg.content} />
<div className="flex items-center gap-2 mb-1">
<span
className={`font-medium ${isQueen ? "text-sm" : "text-xs"}`}
style={{ color }}
>
{msg.agent}
</span>
<span
className={`text-[10px] font-medium px-1.5 py-0.5 rounded-md ${
isQueen
? "bg-primary/15 text-primary"
: "bg-muted text-muted-foreground"
}`}
>
{isQueen
? (msg.phase ?? queenPhase) === "running"
? "running"
: (msg.phase ?? queenPhase) === "staging"
? "staging"
: (msg.phase ?? queenPhase) === "planning"
? "planning"
: "building"
: "Worker"}
</span>
</div>
<div
className={`text-sm leading-relaxed rounded-2xl rounded-tl-md px-4 py-3 ${
isQueen ? "border border-primary/20 bg-primary/5" : "bg-muted/60"
}`}
>
<MarkdownContent content={msg.content} />
</div>
</div>
</div>
</div>
);
}, (prev, next) => prev.msg.id === next.msg.id && prev.msg.content === next.msg.content && prev.msg.phase === next.msg.phase && prev.queenPhase === next.queenPhase);
);
},
(prev, next) =>
prev.msg.id === next.msg.id &&
prev.msg.content === next.msg.content &&
prev.msg.phase === next.msg.phase &&
prev.queenPhase === next.queenPhase,
);
export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting, isBusy, activeThread, disabled, onCancel, pendingQuestion, pendingOptions, pendingQuestions, onQuestionSubmit, onMultiQuestionSubmit, onQuestionDismiss, queenPhase, contextUsage }: ChatPanelProps) {
export default function ChatPanel({
messages,
onSend,
isWaiting,
isWorkerWaiting,
isBusy,
activeThread,
disabled,
onCancel,
pendingQuestion,
pendingOptions,
pendingQuestions,
onQuestionSubmit,
onMultiQuestionSubmit,
onQuestionDismiss,
queenPhase,
contextUsage,
}: ChatPanelProps) {
const [input, setInput] = useState("");
const [pendingImages, setPendingImages] = useState<ImageContent[]>([]);
const [readMap, setReadMap] = useState<Record<string, number>>({});
@@ -286,10 +360,90 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
// tool-use-only turns that have no visible text. During live operation
// tool pills provide context, but on resume the pills are gone so
// the empty bubble is meaningless.
if (m.role === "queen" && !m.type && (!m.content || !m.content.trim())) return false;
if (m.role === "queen" && !m.type && (!m.content || !m.content.trim()))
return false;
return true;
});
// Group subagent messages into parallel bubbles.
// A subagent message has nodeId containing ":subagent:".
// The run only ends on hard boundaries (user messages, run_dividers)
// so interleaved queen/tool/system messages don't fragment the bubble.
type RenderItem =
| { kind: "message"; msg: ChatMessage }
| { kind: "parallel"; groupId: string; groups: SubagentGroup[] };
const renderItems = useMemo<RenderItem[]>(() => {
const items: RenderItem[] = [];
let i = 0;
while (i < threadMessages.length) {
const msg = threadMessages[i];
const isSubagent = msg.nodeId?.includes(":subagent:");
if (!isSubagent) {
items.push({ kind: "message", msg });
i++;
continue;
}
// Start a subagent run. Collect all subagent messages, allowing
// non-subagent messages in between (they render as normal items
// before the bubble). Only break on hard boundaries.
const subagentMsgs: ChatMessage[] = [];
const interleaved: { idx: number; msg: ChatMessage }[] = [];
const firstId = msg.id;
while (i < threadMessages.length) {
const m = threadMessages[i];
const isSa = m.nodeId?.includes(":subagent:");
if (isSa) {
subagentMsgs.push(m);
i++;
continue;
}
// Hard boundary — stop the run
if (m.type === "user" || m.type === "run_divider") break;
// Worker message from a non-subagent node means the graph has
// moved on to the next stage. Close the bubble even if some
// subagents are still streaming in the background.
if (m.role === "worker" && m.nodeId && !m.nodeId.includes(":subagent:"))
break;
// Soft interruption (queen output, system, tool_status without
// nodeId) — render it normally but keep the subagent run going
interleaved.push({ idx: items.length + interleaved.length, msg: m });
i++;
}
// Emit interleaved messages first (before the bubble)
for (const { msg: im } of interleaved) {
items.push({ kind: "message", msg: im });
}
// Build the single parallel bubble from all collected subagent msgs
if (subagentMsgs.length > 0) {
const byNode = new Map<string, ChatMessage[]>();
for (const m of subagentMsgs) {
const nid = m.nodeId!;
if (!byNode.has(nid)) byNode.set(nid, []);
byNode.get(nid)!.push(m);
}
const groups: SubagentGroup[] = [];
for (const [nodeId, msgs] of byNode) {
groups.push({
nodeId,
messages: msgs,
contextUsage: contextUsage?.[nodeId],
});
}
items.push({ kind: "parallel", groupId: `par-${firstId}`, groups });
}
}
return items;
}, [threadMessages, contextUsage]);
// Mark current thread as read
useEffect(() => {
const count = messages.filter((m) => m.thread === activeThread).length;
@@ -321,7 +475,11 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
const handleSubmit = (e: React.FormEvent) => {
e.preventDefault();
if (!input.trim() && pendingImages.length === 0) return;
onSend(input.trim(), activeThread, pendingImages.length > 0 ? pendingImages : undefined);
onSend(
input.trim(),
activeThread,
pendingImages.length > 0 ? pendingImages : undefined,
);
setInput("");
setPendingImages([]);
if (textareaRef.current) textareaRef.current.style.height = "auto";
@@ -334,7 +492,10 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
const reader = new FileReader();
reader.onload = (ev) => {
const url = ev.target?.result as string;
setPendingImages((prev) => [...prev, { type: "image_url", image_url: { url } }]);
setPendingImages((prev) => [
...prev,
{ type: "image_url", image_url: { url } },
]);
};
reader.readAsDataURL(file);
});
@@ -346,16 +507,31 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
<div className="flex flex-col h-full min-w-0">
{/* Compact sub-header */}
<div className="px-5 pt-4 pb-2 flex items-center gap-2">
<p className="text-[11px] text-muted-foreground font-medium uppercase tracking-wider">Conversation</p>
<p className="text-[11px] text-muted-foreground font-medium uppercase tracking-wider">
Conversation
</p>
</div>
{/* Messages */}
<div ref={scrollRef} onScroll={handleScroll} className="flex-1 overflow-auto px-5 py-4 space-y-3">
{threadMessages.map((msg) => (
<div key={msg.id}>
<MessageBubble msg={msg} queenPhase={queenPhase} />
</div>
))}
<div
ref={scrollRef}
onScroll={handleScroll}
className="flex-1 overflow-auto px-5 py-4 space-y-3"
>
{renderItems.map((item) =>
item.kind === "parallel" ? (
<div key={item.groupId}>
<ParallelSubagentBubble
groupId={item.groupId}
groups={item.groups}
/>
</div>
) : (
<div key={item.msg.id}>
<MessageBubble msg={item.msg} queenPhase={queenPhase} />
</div>
),
)}
{/* Show typing indicator while waiting for first queen response (disabled + empty chat) */}
{(isWaiting || (disabled && threadMessages.length === 0)) && (
@@ -372,9 +548,18 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
</div>
<div className="border border-primary/20 bg-primary/5 rounded-2xl rounded-tl-md px-4 py-3">
<div className="flex gap-1.5">
<span className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce" style={{ animationDelay: "0ms" }} />
<span className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce" style={{ animationDelay: "150ms" }} />
<span className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce" style={{ animationDelay: "300ms" }} />
<span
className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce"
style={{ animationDelay: "0ms" }}
/>
<span
className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce"
style={{ animationDelay: "150ms" }}
/>
<span
className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce"
style={{ animationDelay: "300ms" }}
/>
</div>
</div>
</div>
@@ -392,9 +577,18 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
</div>
<div className="bg-muted/60 rounded-2xl rounded-tl-md px-4 py-3">
<div className="flex gap-1.5">
<span className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce" style={{ animationDelay: "0ms" }} />
<span className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce" style={{ animationDelay: "150ms" }} />
<span className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce" style={{ animationDelay: "300ms" }} />
<span
className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce"
style={{ animationDelay: "0ms" }}
/>
<span
className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce"
style={{ animationDelay: "150ms" }}
/>
<span
className="w-1.5 h-1.5 rounded-full bg-muted-foreground animate-bounce"
style={{ animationDelay: "300ms" }}
/>
</div>
</div>
</div>
@@ -406,46 +600,84 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
{(() => {
if (!contextUsage) return null;
const queenUsage = contextUsage["__queen__"];
const workerEntries = Object.entries(contextUsage).filter(([k]) => k !== "__queen__");
const workerUsage = workerEntries.length > 0
? workerEntries.reduce((best, [, v]) => (v.usagePct > best.usagePct ? v : best), workerEntries[0][1])
: undefined;
const workerEntries = Object.entries(contextUsage).filter(
([k]) => k !== "__queen__",
);
const workerUsage =
workerEntries.length > 0
? workerEntries.reduce(
(best, [, v]) => (v.usagePct > best.usagePct ? v : best),
workerEntries[0][1],
)
: undefined;
if (!queenUsage && !workerUsage) return null;
return (
<div className="flex items-center gap-3 mx-4 px-3 py-1 rounded-lg bg-muted/30 border border-border/20 group/ctx flex-shrink-0">
{queenUsage && (
<div className="flex items-center gap-2 flex-1 min-w-0" title={`Queen: ${(queenUsage.estimatedTokens / 1000).toFixed(1)}k / ${(queenUsage.maxTokens / 1000).toFixed(0)}k tokens \u00b7 ${queenUsage.messageCount} messages`}>
<Crown className="w-3 h-3 flex-shrink-0" style={{ color: "hsl(45,95%,58%)" }} />
<div
className="flex items-center gap-2 flex-1 min-w-0"
title={`Queen: ${(queenUsage.estimatedTokens / 1000).toFixed(1)}k / ${(queenUsage.maxTokens / 1000).toFixed(0)}k tokens \u00b7 ${queenUsage.messageCount} messages`}
>
<Crown
className="w-3 h-3 flex-shrink-0"
style={{ color: "hsl(45,95%,58%)" }}
/>
<div className="flex-1 h-1.5 rounded-full bg-muted/50 overflow-hidden min-w-[60px]">
<div
className="h-full rounded-full transition-all duration-500 ease-out"
style={{
width: `${Math.min(queenUsage.usagePct, 100)}%`,
backgroundColor: queenUsage.usagePct >= 90 ? "hsl(0,65%,55%)" : queenUsage.usagePct >= 70 ? "hsl(35,90%,55%)" : "hsl(45,95%,58%)",
backgroundColor:
queenUsage.usagePct >= 90
? "hsl(0,65%,55%)"
: queenUsage.usagePct >= 70
? "hsl(35,90%,55%)"
: "hsl(45,95%,58%)",
}}
/>
</div>
<span className="text-[10px] text-muted-foreground/70 flex-shrink-0 tabular-nums">
<span className="group-hover/ctx:hidden">{queenUsage.usagePct}%</span>
<span className="hidden group-hover/ctx:inline">{(queenUsage.estimatedTokens / 1000).toFixed(1)}k / {(queenUsage.maxTokens / 1000).toFixed(0)}k</span>
<span className="group-hover/ctx:hidden">
{queenUsage.usagePct}%
</span>
<span className="hidden group-hover/ctx:inline">
{(queenUsage.estimatedTokens / 1000).toFixed(1)}k /{" "}
{(queenUsage.maxTokens / 1000).toFixed(0)}k
</span>
</span>
</div>
)}
{workerUsage && (
<div className="flex items-center gap-2 flex-1 min-w-0" title={`Worker: ${(workerUsage.estimatedTokens / 1000).toFixed(1)}k / ${(workerUsage.maxTokens / 1000).toFixed(0)}k tokens \u00b7 ${workerUsage.messageCount} messages`}>
<Cpu className="w-3 h-3 flex-shrink-0" style={{ color: "hsl(220,60%,55%)" }} />
<div
className="flex items-center gap-2 flex-1 min-w-0"
title={`Worker: ${(workerUsage.estimatedTokens / 1000).toFixed(1)}k / ${(workerUsage.maxTokens / 1000).toFixed(0)}k tokens \u00b7 ${workerUsage.messageCount} messages`}
>
<Cpu
className="w-3 h-3 flex-shrink-0"
style={{ color: "hsl(220,60%,55%)" }}
/>
<div className="flex-1 h-1.5 rounded-full bg-muted/50 overflow-hidden min-w-[60px]">
<div
className="h-full rounded-full transition-all duration-500 ease-out"
style={{
width: `${Math.min(workerUsage.usagePct, 100)}%`,
backgroundColor: workerUsage.usagePct >= 90 ? "hsl(0,65%,55%)" : workerUsage.usagePct >= 70 ? "hsl(35,90%,55%)" : "hsl(220,60%,55%)",
backgroundColor:
workerUsage.usagePct >= 90
? "hsl(0,65%,55%)"
: workerUsage.usagePct >= 70
? "hsl(35,90%,55%)"
: "hsl(220,60%,55%)",
}}
/>
</div>
<span className="text-[10px] text-muted-foreground/70 flex-shrink-0 tabular-nums">
<span className="group-hover/ctx:hidden">{workerUsage.usagePct}%</span>
<span className="hidden group-hover/ctx:inline">{(workerUsage.estimatedTokens / 1000).toFixed(1)}k / {(workerUsage.maxTokens / 1000).toFixed(0)}k</span>
<span className="group-hover/ctx:hidden">
{workerUsage.usagePct}%
</span>
<span className="hidden group-hover/ctx:inline">
{(workerUsage.estimatedTokens / 1000).toFixed(1)}k /{" "}
{(workerUsage.maxTokens / 1000).toFixed(0)}k
</span>
</span>
</div>
)}
@@ -454,7 +686,9 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
})()}
{/* Input area — question widget replaces textarea when a question is pending */}
{pendingQuestions && pendingQuestions.length >= 2 && onMultiQuestionSubmit ? (
{pendingQuestions &&
pendingQuestions.length >= 2 &&
onMultiQuestionSubmit ? (
<MultiQuestionWidget
questions={pendingQuestions}
onSubmit={onMultiQuestionSubmit}
@@ -481,7 +715,9 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
/>
<button
type="button"
onClick={() => setPendingImages((prev) => prev.filter((_, j) => j !== i))}
onClick={() =>
setPendingImages((prev) => prev.filter((_, j) => j !== i))
}
className="absolute -top-1.5 -right-1.5 w-4 h-4 rounded-full bg-destructive text-destructive-foreground flex items-center justify-center opacity-0 group-hover:opacity-100 transition-opacity"
>
<X className="w-2.5 h-2.5" />
@@ -524,7 +760,9 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
handleSubmit(e);
}
}}
placeholder={disabled ? "Connecting to agent..." : "Message Queen Bee..."}
placeholder={
disabled ? "Connecting to agent..." : "Message Queen Bee..."
}
disabled={disabled}
className="flex-1 bg-transparent text-sm text-foreground outline-none placeholder:text-muted-foreground disabled:opacity-50 disabled:cursor-not-allowed resize-none overflow-y-auto"
/>
@@ -539,7 +777,9 @@ export default function ChatPanel({ messages, onSend, isWaiting, isWorkerWaiting
) : (
<button
type="submit"
disabled={(!input.trim() && pendingImages.length === 0) || disabled}
disabled={
(!input.trim() && pendingImages.length === 0) || disabled
}
className="p-2 rounded-lg bg-primary text-primary-foreground disabled:opacity-30 hover:opacity-90 transition-opacity"
>
<Send className="w-4 h-4" />
@@ -0,0 +1,413 @@
import { memo, useState, useRef, useEffect } from "react";
import { ChevronDown, ChevronUp, Cpu } from "lucide-react";
import type { ChatMessage, ContextUsageEntry } from "@/components/ChatPanel";
import MarkdownContent from "@/components/MarkdownContent";
// ---------------------------------------------------------------------------
// Shared helpers
// ---------------------------------------------------------------------------
/** Accent color for generic "worker" chrome (icon, header, fallback pane color). */
const workerColor = "hsl(220,60%,55%)";
/** Palette cycled through when assigning a color to each sub-agent pane. */
const SUBAGENT_COLORS = [
  "hsl(220,60%,55%)",
  "hsl(260,50%,55%)",
  "hsl(180,50%,45%)",
  "hsl(30,70%,50%)",
  "hsl(340,55%,50%)",
  "hsl(150,45%,45%)",
  "hsl(45,80%,50%)",
  "hsl(290,45%,55%)",
];
/** Map a pane index onto the palette, wrapping around when the index exceeds it. */
function colorForIndex(i: number): string {
  const palette = SUBAGENT_COLORS;
  return palette[i % palette.length];
}
/**
 * Derive a human-readable pane label from a sub-agent node id,
 * e.g. "root:subagent:data_fetcher:3" -> "Data Fetcher".
 * Falls back to the whole id when no ":subagent:" marker is present.
 */
function subagentLabel(nodeId: string): string {
  const segments = nodeId.split(":subagent:");
  const base = segments.length >= 2 ? segments[1] : nodeId;
  const noInstance = base.replace(/:\d+$/, ""); // strip instance suffix like ":3"
  const spaced = noInstance.replace(/[_-]/g, " ");
  const titled = spaced.replace(/\b\w/g, (ch) => ch.toUpperCase());
  return titled.trim();
}
/** Return the final element of an array, or undefined for an empty array. */
function last<T>(arr: T[]): T | undefined {
  const n = arr.length;
  return n > 0 ? arr[n - 1] : undefined;
}
/** Messages and context data for a single sub-agent pane. */
export interface SubagentGroup {
  // Graph node id; parsed by subagentLabel (expects a ":subagent:" segment).
  nodeId: string;
  // All messages streamed by this sub-agent, including "tool_status" entries.
  messages: ChatMessage[];
  // Latest context-window usage snapshot for this sub-agent, when available.
  contextUsage?: ContextUsageEntry;
}
/** Props for ParallelSubagentBubble. */
interface ParallelSubagentBubbleProps {
  groups: SubagentGroup[];
  // Stable identifier compared by the memo() equality check to detect a new group set.
  groupId: string;
}
// ---------------------------------------------------------------------------
// NOTE(review): a vertical "thermometer" context gauge appears to have been
// replaced by the title-bar usage meter; only the thermoPulse keyframe below
// remains from it — confirm and remove if truly unused.
// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// Tool overlay — shown when a tool_status message is active (not all done)
// ---------------------------------------------------------------------------
/**
 * Dark overlay rendered over a pane body while a tool call is in flight.
 * Stays mounted and is faded via opacity so the CSS transition can animate;
 * pointer events are disabled while hidden.
 */
function ToolOverlay({
  toolName,
  color,
  visible,
}: {
  toolName: string;
  color: string;
  visible: boolean;
}) {
  const overlayStyle = {
    background: "rgba(8,8,14,0.82)",
    opacity: visible ? 1 : 0,
    pointerEvents: visible ? ("auto" as const) : ("none" as const),
  };
  return (
    <div
      className="absolute inset-0 top-[22px] flex items-center justify-center transition-opacity duration-200 z-10"
      style={overlayStyle}
    >
      <div
        className="text-center px-3 py-2 rounded-md border"
        style={{ borderColor: `${color}40` }}
      >
        <div className="text-[10px] font-medium" style={{ color }}>
          {toolName}
        </div>
        {/* Ellipsis while running; check mark is only reachable mid-fade-out. */}
        <div className="text-[11px] mt-0.5" style={{ color }}>
          {visible ? "..." : "\u2713"}
        </div>
      </div>
    </div>
  );
}
// ---------------------------------------------------------------------------
// Single tmux pane
// ---------------------------------------------------------------------------
/**
 * One tmux-style pane for a single sub-agent: a title bar (status dot / check
 * mark, label, message count, context-usage meter), an auto-scrolling body
 * rendering the latest streamed message as markdown, and a tool-call overlay.
 * Clicking the title bar invokes onClickTitle (used by the parent to zoom).
 */
function MuxPane({
  group,
  index,
  label,
  isFocused,
  isZoomed,
  onClickTitle,
}: {
  group: SubagentGroup;
  index: number;
  label: string;
  isFocused: boolean;
  isZoomed: boolean;
  onClickTitle: () => void;
}) {
  const bodyRef = useRef<HTMLDivElement>(null);
  // True while the view is "stuck" to the bottom; cleared when the user scrolls up.
  const stickRef = useRef(true);
  const color = colorForIndex(index);
  const pct = group.contextUsage?.usagePct ?? 0;
  // Non-status messages are the visible stream; only the newest one is rendered.
  const streamMsgs = group.messages.filter((m) => m.type !== "tool_status");
  const latestContent = last(streamMsgs)?.content ?? "";
  const msgCount = streamMsgs.length;
  // Detect active tool and finished state from latest tool_status
  const latestTool = last(
    group.messages.filter((m) => m.type === "tool_status")
  );
  let activeToolName = "";
  let toolRunning = false;
  let isFinished = false;
  if (latestTool) {
    // tool_status content is JSON: { tools: [{name, done}], allDone } — malformed
    // payloads are ignored and the pane simply shows no overlay.
    try {
      const parsed = JSON.parse(latestTool.content);
      const tools: { name: string; done: boolean }[] = parsed.tools || [];
      const allDone = parsed.allDone as boolean | undefined;
      const running = tools.find((t) => !t.done);
      if (running) {
        activeToolName = running.name;
        toolRunning = true;
      }
      // Finished when all tools are done and one of them is set_output
      // or report_to_parent (terminal tool calls)
      if (allDone && tools.length > 0) {
        const hasTerminal = tools.some(
          (t) =>
            t.done &&
            (t.name === "set_output" || t.name === "report_to_parent")
        );
        if (hasTerminal) isFinished = true;
      }
    } catch {
      /* ignore */
    }
  }
  // Auto-scroll
  useEffect(() => {
    if (stickRef.current && bodyRef.current) {
      bodyRef.current.scrollTop = bodyRef.current.scrollHeight;
    }
  }, [latestContent]);
  // Re-enable sticking only when the user is within 30px of the bottom.
  const handleScroll = () => {
    const el = bodyRef.current;
    if (!el) return;
    stickRef.current = el.scrollHeight - el.scrollTop - el.clientHeight < 30;
  };
  return (
    <div
      className="flex flex-col min-h-0 overflow-hidden relative transition-all duration-200"
      style={{
        borderWidth: 1,
        borderStyle: "solid",
        borderColor: isFocused && !isFinished ? `${color}60` : "transparent",
        opacity: isFinished ? 0.4 : isFocused || isZoomed ? 1 : 0.55,
        // Zoomed pane takes over the whole grid and sits above siblings.
        ...(isZoomed
          ? { gridColumn: "1 / -1", gridRow: "1 / -1", zIndex: 10 }
          : {}),
      }}
    >
      {/* Title bar */}
      <div
        className="flex items-center gap-1.5 px-2 py-[3px] flex-shrink-0 cursor-pointer select-none"
        style={{ background: "#0e0e16", borderBottom: "1px solid #1a1a2a" }}
        onClick={onClickTitle}
      >
        {isFinished ? (
          <span className="text-[8px] flex-shrink-0 leading-none" style={{ color: "#4a4" }}>&#10003;</span>
        ) : (
          <div
            className="w-[6px] h-[6px] rounded-full flex-shrink-0"
            style={{ background: color }}
          />
        )}
        <span className="text-[9px] flex-shrink-0" style={{ color: isFinished ? "#555" : color }}>
          {label}
        </span>
        <span className="flex-1" />
        <span className="text-[8px] tabular-nums flex-shrink-0" style={{ color: "#555" }}>
          {msgCount}
        </span>
        {/* Context-usage meter: color escalates at 50% / 80% */}
        <div
          className="w-[36px] h-[3px] rounded-full overflow-hidden flex-shrink-0"
          style={{ background: "#1a1a2a" }}
        >
          <div
            className="h-full rounded-full transition-all duration-500"
            style={{
              width: `${Math.min(pct, 100)}%`,
              backgroundColor:
                pct >= 80 ? "hsl(0,65%,55%)" : pct >= 50 ? "hsl(35,90%,55%)" : color,
            }}
          />
        </div>
        <span className="text-[8px] tabular-nums flex-shrink-0" style={{ color: "#555" }}>
          {pct}%
        </span>
      </div>
      {/* Body */}
      <div
        ref={bodyRef}
        onScroll={handleScroll}
        className="flex-1 min-h-0 overflow-y-auto px-2 py-1 text-[10px] leading-[1.7]"
        style={{ background: "#08080e", color: "#555", fontFamily: "monospace" }}
      >
        {latestContent ? (
          <div style={{ color: "#ccc" }}>
            <MarkdownContent content={latestContent} />
          </div>
        ) : (
          <span style={{ color: "#333" }}>waiting...</span>
        )}
        {/* Blinking cursor — hidden when finished */}
        {!isFinished && (
          <span
            className="inline-block w-[6px] h-[11px] align-middle ml-0.5"
            style={{
              background: color,
              animation: "cursorBlink 1s step-end infinite",
            }}
          />
        )}
      </div>
      {/* Tool overlay */}
      <ToolOverlay
        toolName={activeToolName}
        color={color}
        visible={toolRunning}
      />
    </div>
  );
}
// ---------------------------------------------------------------------------
// Main component
// ---------------------------------------------------------------------------
/**
 * Chat bubble showing all parallel sub-agents as a tmux-like grid of panes.
 * Memoized with a custom comparator so streaming updates elsewhere in the
 * chat don't re-render the whole grid.
 */
const ParallelSubagentBubble = memo(
  function ParallelSubagentBubble({ groups }: ParallelSubagentBubbleProps) {
    const [expanded, setExpanded] = useState(false);
    // Index of the pane currently zoomed to fill the grid, or null.
    const [zoomedIdx, setZoomedIdx] = useState<number | null>(null);
    // Labels with instance numbers for duplicates
    const labels: string[] = (() => {
      const countByBase = new Map<string, number>();
      const bases = groups.map((g) => subagentLabel(g.nodeId));
      for (const b of bases)
        countByBase.set(b, (countByBase.get(b) ?? 0) + 1);
      const idxByBase = new Map<string, number>();
      return bases.map((b) => {
        if ((countByBase.get(b) ?? 1) <= 1) return b;
        const idx = (idxByBase.get(b) ?? 0) + 1;
        idxByBase.set(b, idx);
        return `${b} #${idx}`;
      });
    })();
    // Latest-active pane: the group whose newest non-status message has the
    // greatest createdAt (ties go to the later index); -1 when none have messages.
    const latestIdx = groups.reduce<number>((best, g, i) => {
      const filtered = g.messages.filter((m) => m.type !== "tool_status");
      const lm = last(filtered);
      if (!lm) return best;
      if (best < 0) return i;
      const bm = last(
        groups[best].messages.filter((m) => m.type !== "tool_status")
      );
      if (!bm) return i;
      return (lm.createdAt ?? 0) >= (bm.createdAt ?? 0) ? i : best;
    }, -1);
    // Per-group finished detection (same logic as MuxPane)
    const finishedFlags = groups.map((g) => {
      const lt = last(g.messages.filter((m) => m.type === "tool_status"));
      if (!lt) return false;
      try {
        const p = JSON.parse(lt.content);
        const tools: { name: string; done: boolean }[] = p.tools || [];
        if (!p.allDone || tools.length === 0) return false;
        return tools.some(
          (t) => t.done && (t.name === "set_output" || t.name === "report_to_parent")
        );
      } catch { return false; }
    });
    const activeCount = finishedFlags.filter((f) => !f).length;
    // Early return must stay below the hooks above (rules of hooks).
    if (groups.length === 0) return null;
    // Grid sizing: 2 columns, auto rows capped at a fixed height
    const rows = Math.ceil(groups.length / 2);
    const gridHeight = expanded
      ? Math.min(rows * 200, 480)
      : Math.min(rows * 100, 240);
    return (
      <div className="flex gap-3">
        {/* Left icon */}
        <div
          className="flex-shrink-0 w-7 h-7 rounded-xl flex items-center justify-center mt-1"
          style={{
            backgroundColor: `${workerColor}18`,
            border: `1.5px solid ${workerColor}35`,
          }}
        >
          <Cpu className="w-3.5 h-3.5" style={{ color: workerColor }} />
        </div>
        <div className="flex-1 min-w-0 max-w-[90%]">
          {/* Header */}
          <div className="flex items-center gap-2 mb-1">
            <span className="font-medium text-xs" style={{ color: workerColor }}>
              {groups.length === 1 ? "Sub-agent" : "Parallel Agents"}
            </span>
            <span className="text-[10px] font-medium px-1.5 py-0.5 rounded-md bg-muted text-muted-foreground">
              {activeCount > 0 ? `${activeCount} running` : `${groups.length} done`}
            </span>
            <button
              onClick={() => {
                setExpanded((v) => !v);
                setZoomedIdx(null);
              }}
              className="ml-auto text-muted-foreground/60 hover:text-muted-foreground transition-colors p-0.5 rounded"
              title={expanded ? "Collapse" : "Expand"}
            >
              {expanded ? (
                <ChevronUp className="w-3.5 h-3.5" />
              ) : (
                <ChevronDown className="w-3.5 h-3.5" />
              )}
            </button>
          </div>
          {/* Mux frame */}
          <div
            className="rounded-lg overflow-hidden"
            style={{
              border: "2px solid #1a1a2a",
              background: "#08080e",
            }}
          >
            {/* Grid */}
            <div
              className="grid gap-px"
              style={{
                gridTemplateColumns:
                  groups.length === 1 ? "1fr" : "1fr 1fr",
                gridTemplateRows: `repeat(${rows}, 1fr)`,
                height: gridHeight,
                background: "#111",
              }}
            >
              {groups.map((group, i) => (
                <MuxPane
                  key={group.nodeId}
                  group={group}
                  index={i}
                  label={labels[i]}
                  isFocused={latestIdx === i}
                  isZoomed={zoomedIdx === i}
                  onClickTitle={() =>
                    setZoomedIdx(zoomedIdx === i ? null : i)
                  }
                />
              ))}
            </div>
          </div>
        </div>
      </div>
    );
  },
  // Skip re-render unless the group set, any group's message count, the
  // newest message content, or any usage percentage changed.
  (prev, next) =>
    prev.groupId === next.groupId &&
    prev.groups.length === next.groups.length &&
    prev.groups.every(
      (g, i) =>
        g.nodeId === next.groups[i].nodeId &&
        g.messages.length === next.groups[i].messages.length &&
        last(g.messages)?.content === last(next.groups[i].messages)?.content &&
        g.contextUsage?.usagePct === next.groups[i].contextUsage?.usagePct
    )
);
export default ParallelSubagentBubble;
// Keyframes can't be expressed as inline styles, so inject a global <style>
// tag exactly once (guarded by element id) when running in a browser.
(() => {
  if (typeof document === "undefined") return;
  const styleId = "parallel-subagent-keyframes";
  if (document.getElementById(styleId)) return;
  const styleEl = document.createElement("style");
  styleEl.id = styleId;
  styleEl.textContent = `
      @keyframes cursorBlink { 0%, 100% { opacity: 1; } 50% { opacity: 0; } }
      @keyframes thermoPulse { 0%, 100% { opacity: 1; } 50% { opacity: 0.4; } }
    `;
  document.head.appendChild(styleEl);
})();
+4
View File
@@ -72,6 +72,8 @@ export function sseEventToChatMessage(
role: "worker",
thread,
createdAt,
nodeId: event.node_id || undefined,
executionId: event.execution_id || undefined,
};
}
@@ -110,6 +112,8 @@ export function sseEventToChatMessage(
role: "worker",
thread,
createdAt,
nodeId: event.node_id || undefined,
executionId: event.execution_id || undefined,
};
}
+4
View File
@@ -2011,6 +2011,8 @@ export default function Workspace() {
role,
thread: agentType,
createdAt: eventCreatedAt,
nodeId: event.node_id || undefined,
executionId: event.execution_id || undefined,
});
return {
...prev,
@@ -2082,6 +2084,8 @@ export default function Workspace() {
role,
thread: agentType,
createdAt: eventCreatedAt,
nodeId: event.node_id || undefined,
executionId: event.execution_id || undefined,
});
return {
...prev,
+172
View File
@@ -0,0 +1,172 @@
"""Integration test: Run a real EventLoopNode against the Antigravity backend.
Run: .venv/bin/python core/tests/test_antigravity_eventloop.py
Requires:
- ~/.hive/antigravity-accounts.json with valid credentials
(run 'uv run python core/antigravity_auth.py auth account add' to authenticate)
"""
import asyncio
import logging
import sys
from unittest.mock import MagicMock
sys.path.insert(0, "core")
logging.basicConfig(level=logging.WARNING, format="%(levelname)s %(name)s: %(message)s")
# Show our provider's retry/stream logs
logging.getLogger("framework.llm.litellm").setLevel(logging.DEBUG)
from framework.config import RuntimeConfig # noqa: E402
from framework.graph.event_loop_node import EventLoopNode, LoopConfig # noqa: E402
from framework.graph.node import NodeContext, NodeResult, NodeSpec, SharedMemory # noqa: E402
from framework.llm.litellm import LiteLLMProvider # noqa: E402
def make_provider() -> LiteLLMProvider:
    """Build a LiteLLMProvider from the local Hive runtime configuration.

    Exits the process with setup instructions when no Antigravity token
    (api_key) is present in the RuntimeConfig.
    """
    cfg = RuntimeConfig()
    if not cfg.api_key:
        # Print the setup steps and abort — the integration test cannot run.
        for line in (
            "ERROR: No Antigravity token found.",
            " 1. Run 'antigravity-auth accounts add' to authenticate.",
            " 2. Run 'antigravity-auth serve' to start the local proxy.",
            " 3. Configure Hive: run quickstart.sh and select option 7 (Antigravity).",
        ):
            print(line)
        sys.exit(1)
    print(f"Model : {cfg.model}")
    print(f"Base : {cfg.api_base}")
    # Heuristic: the local Antigravity proxy serves on localhost:8069.
    print(f"Antigravity : {'localhost:8069' in (cfg.api_base or '')}")
    return LiteLLMProvider(
        model=cfg.model,
        api_key=cfg.api_key,
        api_base=cfg.api_base,
        **cfg.extra_kwargs,
    )
def make_context(
    llm: LiteLLMProvider,
    *,
    node_id: str = "test",
    system_prompt: str = "You are a helpful assistant.",
    output_keys: list[str] | None = None,
) -> NodeContext:
    """Assemble a NodeContext backed by a mocked decision runtime.

    Args:
        llm: Provider the node under test will stream against.
        node_id: Identifier used for both the context and its NodeSpec.
        system_prompt: System prompt placed on the NodeSpec.
        output_keys: Keys the node must fill; defaults to ["answer"].
    """
    keys = ["answer"] if output_keys is None else output_keys
    spec = NodeSpec(
        id=node_id,
        name="Test Node",
        description="Integration test node",
        node_type="event_loop",
        output_keys=keys,
        system_prompt=system_prompt,
    )
    # Mocked runtime: canned ids for run/decision tracking, no-op recording.
    runtime = MagicMock()
    runtime.start_run.return_value = "run-1"
    runtime.decide.return_value = "dec-1"
    runtime.record_outcome = MagicMock()
    runtime.end_run = MagicMock()
    return NodeContext(
        runtime=runtime,
        node_id=node_id,
        node_spec=spec,
        memory=SharedMemory(),
        input_data={},
        llm=llm,
        available_tools=[],
        max_tokens=4096,
    )
async def run_test(
    name: str, llm: LiteLLMProvider, system: str, output_keys: list[str]
) -> NodeResult:
    """Execute a single EventLoopNode run and print a summary banner.

    Never raises: any exception is printed (with traceback) and wrapped
    into a failed NodeResult so subsequent tests can still run.
    """
    banner = "=" * 60
    print(f"\n{banner}")
    print(f"TEST: {name}")
    print(f"{banner}")
    context = make_context(llm, system_prompt=system, output_keys=output_keys)
    node = EventLoopNode(config=LoopConfig(max_iterations=3))
    try:
        outcome = await node.execute(context)
    except Exception as exc:  # integration harness: report and keep going
        print(f" EXCEPTION: {type(exc).__name__}: {exc}")
        import traceback

        traceback.print_exc()
        return NodeResult(success=False, error=str(exc))
    print(f" Success : {outcome.success}")
    print(f" Output : {outcome.output}")
    if outcome.error:
        print(f" Error : {outcome.error}")
    return outcome
async def main():
    """Drive the integration tests: a full node run, then a raw-stream fallback."""
    llm = make_provider()
    print()
    # Test 1: Simple text output — the node should call set_output to fill "answer"
    r1 = await run_test(
        name="Simple text generation",
        llm=llm,
        system=(
            "You are a helpful assistant. When asked a question, use the "
            "set_output tool to store your answer in the 'answer' key. "
            "Keep answers short (1-2 sentences)."
        ),
        output_keys=["answer"],
    )
    # Test 2: If test 1 failed, try bare stream() to isolate the issue
    if not r1.success:
        print(f"\n{'=' * 60}")
        print("FALLBACK: Testing bare provider.stream() directly")
        print(f"{'=' * 60}")
        try:
            # Imported lazily so test 1 can run even if stream_events changes.
            from framework.llm.stream_events import (
                FinishEvent,
                StreamErrorEvent,
                TextDeltaEvent,
                ToolCallEvent,
            )
            text = ""
            events = []  # event class names, in arrival order, for diagnostics
            async for event in llm.stream(
                messages=[{"role": "user", "content": "Say hello in 3 words."}],
            ):
                events.append(type(event).__name__)
                if isinstance(event, TextDeltaEvent):
                    # snapshot holds the full text so far; keep only the latest.
                    text = event.snapshot
                elif isinstance(event, FinishEvent):
                    print(
                        f" Finish: stop={event.stop_reason}"
                        f" in={event.input_tokens}"
                        f" out={event.output_tokens}"
                    )
                elif isinstance(event, StreamErrorEvent):
                    print(f" StreamError: {event.error} (recoverable={event.recoverable})")
                elif isinstance(event, ToolCallEvent):
                    print(f" ToolCall: {event.tool_name}")
            print(f" Text : {text!r}")
            print(f" Events : {events}")
            # EMPTY means the stream completed without any text delta.
            print(f" RESULT : {'OK' if text else 'EMPTY'}")
        except Exception as e:
            print(f" EXCEPTION: {type(e).__name__}: {e}")
            import traceback
            traceback.print_exc()
    print(f"\n{'=' * 60}")
    print("DONE")
    print(f"{'=' * 60}")
# Script entry point: run the async test driver under a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
+151
View File
@@ -0,0 +1,151 @@
"""Tests for skill system structured error codes and diagnostics."""
from __future__ import annotations
import logging
from framework.skills.skill_errors import (
SkillError,
SkillErrorCode,
log_skill_error,
)
class TestSkillErrorCode:
    """The SkillErrorCode enum declares every documented error code."""

    def test_all_codes_defined(self):
        declared = {member.value for member in SkillErrorCode}
        expected = {
            "SKILL_NOT_FOUND",
            "SKILL_PARSE_ERROR",
            "SKILL_ACTIVATION_FAILED",
            "SKILL_MISSING_DESCRIPTION",
            "SKILL_YAML_FIXUP",
            "SKILL_NAME_MISMATCH",
            "SKILL_COLLISION",
        }
        # Subset check: extra codes are allowed, missing ones are not.
        assert expected <= declared
class TestSkillError:
    """SkillError stores code/what/why/fix and formats them via str()."""

    def test_code_stored(self):
        error = SkillError(
            code=SkillErrorCode.SKILL_NOT_FOUND,
            what="Skill 'my-skill' not found",
            why="Not in catalog",
            fix="Check discovery paths",
        )
        assert error.code == SkillErrorCode.SKILL_NOT_FOUND

    def test_message_format(self):
        error = SkillError(
            code=SkillErrorCode.SKILL_MISSING_DESCRIPTION,
            what="Missing description in '/path/SKILL.md'",
            why="The description field is absent",
            fix="Add a description field to the frontmatter",
        )
        # str() renders the code header followed by the three labeled lines.
        expected = "\n".join(
            [
                "[SKILL_MISSING_DESCRIPTION]",
                "What failed: Missing description in '/path/SKILL.md'",
                "Why: The description field is absent",
                "Fix: Add a description field to the frontmatter",
            ]
        )
        assert str(error) == expected

    def test_is_exception(self):
        error = SkillError(
            code=SkillErrorCode.SKILL_PARSE_ERROR,
            what="Parse failed",
            why="Invalid YAML",
            fix="Fix the YAML",
        )
        # SkillError must be raisable/catchable like any exception.
        assert isinstance(error, Exception)

    def test_what_why_fix_attributes(self):
        error = SkillError(
            code=SkillErrorCode.SKILL_COLLISION,
            what="Name collision",
            why="Two skills share the same name",
            fix="Rename one skill directory",
        )
        assert (error.what, error.why, error.fix) == (
            "Name collision",
            "Two skills share the same name",
            "Rename one skill directory",
        )
class TestLogSkillError:
    """log_skill_error emits structured records at the requested level."""

    def test_emits_log(self, caplog):
        lgr = logging.getLogger("test_skill")
        with caplog.at_level(logging.ERROR, logger="test_skill"):
            log_skill_error(
                lgr,
                "error",
                SkillErrorCode.SKILL_PARSE_ERROR,
                what="Invalid SKILL.md at '/path'",
                why="Empty file",
                fix="Add content",
            )
        assert "SKILL_PARSE_ERROR" in caplog.text

    def test_warning_level(self, caplog):
        lgr = logging.getLogger("test_skill_warn")
        with caplog.at_level(logging.WARNING, logger="test_skill_warn"):
            log_skill_error(
                lgr,
                "warning",
                SkillErrorCode.SKILL_YAML_FIXUP,
                what="Auto-fixed YAML",
                why="Unquoted colons",
                fix="Quote values",
            )
        assert "SKILL_YAML_FIXUP" in caplog.text

    def test_message_contains_all_parts(self, caplog):
        lgr = logging.getLogger("test_skill_parts")
        with caplog.at_level(logging.ERROR, logger="test_skill_parts"):
            log_skill_error(
                lgr,
                "error",
                SkillErrorCode.SKILL_NOT_FOUND,
                what="Skill not found",
                why="Not discovered",
                fix="Check paths",
            )
        # All three structured fields must surface in the rendered record.
        for fragment in ("Skill not found", "Not discovered", "Check paths"):
            assert fragment in caplog.text
class TestSkillErrorInParser:
    """parse_skill_md degrades to None on bad input and survives YAML fixups."""

    def test_missing_description_returns_none(self, tmp_path):
        from framework.skills.parser import parse_skill_md

        folder = tmp_path / "no-desc"
        folder.mkdir()
        # Frontmatter with a name but no description field.
        (folder / "SKILL.md").write_text("---\nname: no-desc\n---\nBody.\n", encoding="utf-8")
        assert parse_skill_md(folder / "SKILL.md") is None

    def test_empty_file_returns_none(self, tmp_path):
        from framework.skills.parser import parse_skill_md

        folder = tmp_path / "empty"
        folder.mkdir()
        (folder / "SKILL.md").write_text("", encoding="utf-8")
        assert parse_skill_md(folder / "SKILL.md") is None

    def test_nonexistent_returns_none(self, tmp_path):
        from framework.skills.parser import parse_skill_md

        # Path that was never created on disk.
        assert parse_skill_md(tmp_path / "ghost" / "SKILL.md") is None

    def test_yaml_fixup_still_parses(self, tmp_path):
        from framework.skills.parser import parse_skill_md

        folder = tmp_path / "colon-test"
        folder.mkdir()
        # Unquoted colon inside the description would break strict YAML;
        # the parser's fixup path must recover it.
        (folder / "SKILL.md").write_text(
            "---\nname: colon-test\ndescription: Use for: research\n---\nBody.\n",
            encoding="utf-8",
        )
        parsed = parse_skill_md(folder / "SKILL.md")
        assert parsed is not None
        assert "research" in parsed.description
+119 -18
View File
@@ -774,6 +774,7 @@ Write-Host ""
$ProviderMap = [ordered]@{
ANTHROPIC_API_KEY = @{ Name = "Anthropic (Claude)"; Id = "anthropic" }
OPENAI_API_KEY = @{ Name = "OpenAI (GPT)"; Id = "openai" }
MINIMAX_API_KEY = @{ Name = "MiniMax"; Id = "minimax" }
GEMINI_API_KEY = @{ Name = "Google Gemini"; Id = "gemini" }
GOOGLE_API_KEY = @{ Name = "Google AI"; Id = "google" }
GROQ_API_KEY = @{ Name = "Groq"; Id = "groq" }
@@ -787,6 +788,7 @@ $ProviderMap = [ordered]@{
$DefaultModels = @{
anthropic = "claude-haiku-4-5-20251001"
openai = "gpt-5-mini"
minimax = "MiniMax-M2.5"
gemini = "gemini-3-flash-preview"
groq = "moonshotai/kimi-k2-instruct-0905"
cerebras = "zai-glm-4.7"
@@ -968,6 +970,11 @@ $CodexCredDetected = $false
$codexAuthPath = Join-Path $env:USERPROFILE ".codex\auth.json"
if (Test-Path $codexAuthPath) { $CodexCredDetected = $true }
$MinimaxCredDetected = $false
$minimaxKey = [System.Environment]::GetEnvironmentVariable("MINIMAX_API_KEY", "User")
if (-not $minimaxKey) { $minimaxKey = $env:MINIMAX_API_KEY }
if ($minimaxKey) { $MinimaxCredDetected = $true }
$ZaiCredDetected = $false
$zaiKey = [System.Environment]::GetEnvironmentVariable("ZAI_API_KEY", "User")
if (-not $zaiKey) { $zaiKey = $env:ZAI_API_KEY }
@@ -1015,6 +1022,7 @@ if (Test-Path $HiveConfigFile) {
elseif ($prevLlm.use_codex_subscription) { $PrevSubMode = "codex" }
elseif ($prevLlm.use_kimi_code_subscription) { $PrevSubMode = "kimi_code" }
elseif ($prevLlm.api_base -and $prevLlm.api_base -like "*api.z.ai*") { $PrevSubMode = "zai_code" }
elseif ($prevLlm.provider -eq "minimax" -or ($prevLlm.api_base -and $prevLlm.api_base -like "*api.minimax.io*")) { $PrevSubMode = "minimax_code" }
elseif ($prevLlm.api_base -and $prevLlm.api_base -like "*api.kimi.com*") { $PrevSubMode = "kimi_code" }
elseif ($prevLlm.provider -eq "hive" -or ($prevLlm.api_base -and $prevLlm.api_base -like "*adenhq.com*")) { $PrevSubMode = "hive_llm" }
}
@@ -1029,6 +1037,7 @@ if ($PrevSubMode -or $PrevProvider) {
"claude_code" { if ($ClaudeCredDetected) { $prevCredValid = $true } }
"zai_code" { if ($ZaiCredDetected) { $prevCredValid = $true } }
"codex" { if ($CodexCredDetected) { $prevCredValid = $true } }
"minimax_code" { if ($MinimaxCredDetected) { $prevCredValid = $true } }
"kimi_code" { if ($KimiCredDetected) { $prevCredValid = $true } }
"hive_llm" { if ($HiveCredDetected) { $prevCredValid = $true } }
default {
@@ -1044,18 +1053,20 @@ if ($PrevSubMode -or $PrevProvider) {
"claude_code" { $DefaultChoice = "1" }
"zai_code" { $DefaultChoice = "2" }
"codex" { $DefaultChoice = "3" }
"kimi_code" { $DefaultChoice = "4" }
"hive_llm" { $DefaultChoice = "5" }
"minimax_code" { $DefaultChoice = "4" }
"kimi_code" { $DefaultChoice = "5" }
"hive_llm" { $DefaultChoice = "6" }
}
if (-not $DefaultChoice) {
switch ($PrevProvider) {
"anthropic" { $DefaultChoice = "6" }
"openai" { $DefaultChoice = "7" }
"gemini" { $DefaultChoice = "8" }
"groq" { $DefaultChoice = "9" }
"cerebras" { $DefaultChoice = "10" }
"openrouter" { $DefaultChoice = "11" }
"kimi" { $DefaultChoice = "4" }
"anthropic" { $DefaultChoice = "7" }
"openai" { $DefaultChoice = "8" }
"gemini" { $DefaultChoice = "9" }
"groq" { $DefaultChoice = "10" }
"cerebras" { $DefaultChoice = "11" }
"openrouter" { $DefaultChoice = "12" }
"minimax" { $DefaultChoice = "4" }
"kimi" { $DefaultChoice = "5" }
}
}
}
@@ -1087,16 +1098,23 @@ Write-Host ") OpenAI Codex Subscription " -NoNewline
Write-Color -Text "(use your Codex/ChatGPT Plus plan)" -Color DarkGray -NoNewline
if ($CodexCredDetected) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }
# 4) Kimi Code
# 4) MiniMax Coding Key
Write-Host " " -NoNewline
Write-Color -Text "4" -Color Cyan -NoNewline
Write-Host ") MiniMax Coding Key " -NoNewline
Write-Color -Text "(use your MiniMax coding key)" -Color DarkGray -NoNewline
if ($MinimaxCredDetected) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }
# 5) Kimi Code
Write-Host " " -NoNewline
Write-Color -Text "5" -Color Cyan -NoNewline
Write-Host ") Kimi Code Subscription " -NoNewline
Write-Color -Text "(use your Kimi Code plan)" -Color DarkGray -NoNewline
if ($KimiCredDetected) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }
# 5) Hive LLM
# 6) Hive LLM
Write-Host " " -NoNewline
Write-Color -Text "5" -Color Cyan -NoNewline
Write-Color -Text "6" -Color Cyan -NoNewline
Write-Host ") Hive LLM " -NoNewline
Write-Color -Text "(use your Hive API key)" -Color DarkGray -NoNewline
if ($HiveCredDetected) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }
@@ -1104,9 +1122,9 @@ if ($HiveCredDetected) { Write-Color -Text " (credential detected)" -Color Gree
Write-Host ""
Write-Color -Text " API key providers:" -Color Cyan
# 6-11) API key providers
# 7-12) API key providers
for ($idx = 0; $idx -lt $ProviderMenuEnvVars.Count; $idx++) {
$num = $idx + 6
$num = $idx + 7
$envVal = [System.Environment]::GetEnvironmentVariable($ProviderMenuEnvVars[$idx], "Process")
if (-not $envVal) { $envVal = [System.Environment]::GetEnvironmentVariable($ProviderMenuEnvVars[$idx], "User") }
Write-Host " " -NoNewline
@@ -1115,7 +1133,7 @@ for ($idx = 0; $idx -lt $ProviderMenuEnvVars.Count; $idx++) {
if ($envVal) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }
}
$SkipChoice = 6 + $ProviderMenuEnvVars.Count
$SkipChoice = 7 + $ProviderMenuEnvVars.Count
Write-Host " " -NoNewline
Write-Color -Text "$SkipChoice" -Color Cyan -NoNewline
Write-Host ") Skip for now"
@@ -1205,6 +1223,19 @@ switch ($num) {
}
}
4 {
# MiniMax Coding Key
$SubscriptionMode = "minimax_code"
$SelectedProviderId = "minimax"
$SelectedEnvVar = "MINIMAX_API_KEY"
$SelectedModel = "MiniMax-M2.5"
$SelectedMaxTokens = 32768
$SelectedMaxContextTokens = 900000
$SelectedApiBase = "https://api.minimax.io/v1"
Write-Host ""
Write-Ok "Using MiniMax coding key"
Write-Color -Text " Model: MiniMax-M2.5 | API: api.minimax.io" -Color DarkGray
}
5 {
# Kimi Code Subscription
$SubscriptionMode = "kimi_code"
$SelectedProviderId = "kimi"
@@ -1216,7 +1247,7 @@ switch ($num) {
Write-Ok "Using Kimi Code subscription"
Write-Color -Text " Model: kimi-k2.5 | API: api.kimi.com/coding" -Color DarkGray
}
5 {
6 {
# Hive LLM
$SubscriptionMode = "hive_llm"
$SelectedProviderId = "hive"
@@ -1240,9 +1271,9 @@ switch ($num) {
}
Write-Color -Text " Model: $SelectedModel | API: $HiveLlmEndpoint" -Color DarkGray
}
{ $_ -ge 6 -and $_ -le 11 } {
{ $_ -ge 7 -and $_ -le 12 } {
# API key providers
$provIdx = $num - 6
$provIdx = $num - 7
$SelectedEnvVar = $ProviderMenuEnvVars[$provIdx]
$SelectedProviderId = $ProviderMenuIds[$provIdx]
$providerName = $ProviderMenuNames[$provIdx] -replace ' - .*', '' # strip description
@@ -1334,6 +1365,70 @@ switch ($num) {
}
}
# For MiniMax coding key: prompt for API key with verification + retry
if ($SubscriptionMode -eq "minimax_code") {
while ($true) {
$existingMinimax = [System.Environment]::GetEnvironmentVariable("MINIMAX_API_KEY", "User")
if (-not $existingMinimax) { $existingMinimax = $env:MINIMAX_API_KEY }
if ($existingMinimax) {
$masked = $existingMinimax.Substring(0, [Math]::Min(4, $existingMinimax.Length)) + "..." + $existingMinimax.Substring([Math]::Max(0, $existingMinimax.Length - 4))
Write-Host ""
Write-Color -Text " $([char]0x2B22) Current MiniMax key: $masked" -Color Green
$apiKey = Read-Host " Press Enter to keep, or paste a new key to replace"
} else {
Write-Host ""
Write-Host "Get your API key from: " -NoNewline
Write-Color -Text "https://platform.minimax.io/user-center/basic-information/interface-key" -Color Cyan
Write-Host ""
$apiKey = Read-Host "Paste your MiniMax API key (or press Enter to skip)"
}
if ($apiKey) {
[System.Environment]::SetEnvironmentVariable("MINIMAX_API_KEY", $apiKey, "User")
$env:MINIMAX_API_KEY = $apiKey
Write-Host ""
Write-Ok "MiniMax API key saved as User environment variable"
# Health check the new key
Write-Host " Verifying MiniMax API key... " -NoNewline
try {
$hcResult = & $UvCmd run python (Join-Path $ScriptDir "scripts/check_llm_key.py") "minimax" $apiKey "https://api.minimax.io/v1" 2>$null
$hcJson = $hcResult | ConvertFrom-Json
if ($hcJson.valid -eq $true) {
Write-Color -Text "ok" -Color Green
break
} elseif ($hcJson.valid -eq $false) {
Write-Color -Text "failed" -Color Red
Write-Warn $hcJson.message
[System.Environment]::SetEnvironmentVariable("MINIMAX_API_KEY", $null, "User")
Remove-Item -Path "Env:\MINIMAX_API_KEY" -ErrorAction SilentlyContinue
Write-Host ""
Read-Host " Press Enter to try again"
} else {
Write-Color -Text "--" -Color Yellow
Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
break
}
} catch {
Write-Color -Text "--" -Color Yellow
Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
break
}
} elseif (-not $existingMinimax) {
Write-Host ""
Write-Warn "Skipped. Add your MiniMax API key later:"
Write-Color -Text " [System.Environment]::SetEnvironmentVariable('MINIMAX_API_KEY', 'your-key', 'User')" -Color Cyan
$SelectedEnvVar = ""
$SelectedProviderId = ""
$SubscriptionMode = ""
break
} else {
break
}
}
}
# For ZAI subscription: prompt for API key (allow replacement if already set) with verification + retry
if ($SubscriptionMode -eq "zai_code") {
while ($true) {
@@ -1564,6 +1659,9 @@ if ($SelectedProviderId) {
} elseif ($SubscriptionMode -eq "zai_code") {
$config.llm["api_base"] = "https://api.z.ai/api/coding/paas/v4"
$config.llm["api_key_env_var"] = $SelectedEnvVar
} elseif ($SubscriptionMode -eq "minimax_code") {
$config.llm["api_base"] = $SelectedApiBase
$config.llm["api_key_env_var"] = $SelectedEnvVar
} elseif ($SubscriptionMode -eq "kimi_code") {
$config.llm["api_base"] = "https://api.kimi.com/coding"
$config.llm["api_key_env_var"] = $SelectedEnvVar
@@ -1870,6 +1968,9 @@ if ($SelectedProviderId) {
} elseif ($SubscriptionMode -eq "zai_code") {
Write-Ok "ZAI Code Subscription -> $SelectedModel"
Write-Color -Text " API: api.z.ai (OpenAI-compatible)" -Color DarkGray
} elseif ($SubscriptionMode -eq "minimax_code") {
Write-Ok "MiniMax Coding Key -> $SelectedModel"
Write-Color -Text " API: api.minimax.io/v1 (OpenAI-compatible)" -Color DarkGray
} elseif ($SubscriptionMode -eq "codex") {
Write-Ok "OpenAI Codex Subscription -> $SelectedModel"
} elseif ($SelectedProviderId -eq "openrouter") {
+99 -16
View File
@@ -847,7 +847,7 @@ prompt_model_selection() {
}
# Function to save configuration
# Args: provider_id env_var model max_tokens max_context_tokens [use_claude_code_sub] [api_base] [use_codex_sub]
# Args: provider_id env_var model max_tokens max_context_tokens [use_claude_code_sub] [api_base] [use_codex_sub] [use_antigravity_sub]
save_configuration() {
local provider_id="$1"
local env_var="$2"
@@ -857,6 +857,7 @@ save_configuration() {
local use_claude_code_sub="${6:-}"
local api_base="${7:-}"
local use_codex_sub="${8:-}"
local use_antigravity_sub="${9:-}"
# Fallbacks if not provided
if [ -z "$model" ]; then
@@ -878,6 +879,7 @@ save_configuration() {
"$use_claude_code_sub" \
"$api_base" \
"$use_codex_sub" \
"$use_antigravity_sub" \
"$(date -u +"%Y-%m-%dT%H:%M:%S+00:00")" 2>/dev/null <<'PY'
import json
import sys
@@ -892,8 +894,9 @@ from pathlib import Path
use_claude_code_sub,
api_base,
use_codex_sub,
use_antigravity_sub,
created_at,
) = sys.argv[1:10]
) = sys.argv[1:11]
cfg_path = Path.home() / ".hive" / "configuration.json"
cfg_path.parent.mkdir(parents=True, exist_ok=True)
@@ -925,6 +928,23 @@ if use_codex_sub == "true":
else:
config["llm"].pop("use_codex_subscription", None)
if use_antigravity_sub == "true":
config["llm"]["use_antigravity_subscription"] = True
config["llm"].pop("api_key_env_var", None)
# Store the Antigravity OAuth client secret so token refresh works
# without hardcoding it in source code (read at runtime via config.py).
import os as _os
_secret = _os.environ.get("ANTIGRAVITY_CLIENT_SECRET") or ""
if _secret:
config["llm"]["antigravity_client_secret"] = _secret
_client_id = _os.environ.get("ANTIGRAVITY_CLIENT_ID") or ""
if _client_id:
config["llm"]["antigravity_client_id"] = _client_id
else:
config["llm"].pop("use_antigravity_subscription", None)
config["llm"].pop("antigravity_client_secret", None)
config["llm"].pop("antigravity_client_id", None)
if api_base:
config["llm"]["api_base"] = api_base
else:
@@ -993,6 +1013,17 @@ if [ -n "${HIVE_API_KEY:-}" ]; then
HIVE_CRED_DETECTED=true
fi
ANTIGRAVITY_CRED_DETECTED=false
# Check native Antigravity IDE (macOS/Linux) SQLite state DB first
if [ -f "$HOME/Library/Application Support/Antigravity/User/globalStorage/state.vscdb" ]; then
ANTIGRAVITY_CRED_DETECTED=true
elif [ -f "$HOME/.config/Antigravity/User/globalStorage/state.vscdb" ]; then
ANTIGRAVITY_CRED_DETECTED=true
# Native OAuth credentials
elif [ -f "$HOME/.hive/antigravity-accounts.json" ]; then
ANTIGRAVITY_CRED_DETECTED=true
fi
# Detect API key providers
if [ "$USE_ASSOC_ARRAYS" = true ]; then
for env_var in "${!PROVIDER_NAMES[@]}"; do
@@ -1035,6 +1066,8 @@ try:
sub = "codex"
elif llm.get("use_kimi_code_subscription"):
sub = "kimi_code"
elif llm.get("use_antigravity_subscription"):
sub = "antigravity"
elif llm.get("provider", "") == "minimax" or "api.minimax.io" in llm.get("api_base", ""):
sub = "minimax_code"
elif llm.get("provider", "") == "hive" or "adenhq.com" in llm.get("api_base", ""):
@@ -1058,6 +1091,7 @@ if [ -n "$PREV_SUB_MODE" ] || [ -n "$PREV_PROVIDER" ]; then
codex) [ "$CODEX_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
kimi_code) [ "$KIMI_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
hive_llm) [ "$HIVE_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
antigravity) [ "$ANTIGRAVITY_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
*)
# API key provider — check if the env var is set
if [ -n "$PREV_ENV_VAR" ] && [ -n "${!PREV_ENV_VAR}" ]; then
@@ -1074,15 +1108,16 @@ if [ -n "$PREV_SUB_MODE" ] || [ -n "$PREV_PROVIDER" ]; then
minimax_code) DEFAULT_CHOICE=4 ;;
kimi_code) DEFAULT_CHOICE=5 ;;
hive_llm) DEFAULT_CHOICE=6 ;;
antigravity) DEFAULT_CHOICE=7 ;;
esac
if [ -z "$DEFAULT_CHOICE" ]; then
case "$PREV_PROVIDER" in
anthropic) DEFAULT_CHOICE=7 ;;
openai) DEFAULT_CHOICE=8 ;;
gemini) DEFAULT_CHOICE=9 ;;
groq) DEFAULT_CHOICE=10 ;;
cerebras) DEFAULT_CHOICE=11 ;;
openrouter) DEFAULT_CHOICE=12 ;;
anthropic) DEFAULT_CHOICE=8 ;;
openai) DEFAULT_CHOICE=9 ;;
gemini) DEFAULT_CHOICE=10 ;;
groq) DEFAULT_CHOICE=11 ;;
cerebras) DEFAULT_CHOICE=12 ;;
openrouter) DEFAULT_CHOICE=13 ;;
minimax) DEFAULT_CHOICE=4 ;;
kimi) DEFAULT_CHOICE=5 ;;
hive) DEFAULT_CHOICE=6 ;;
@@ -1138,14 +1173,21 @@ else
echo -e " ${CYAN}6)${NC} Hive LLM ${DIM}(use your Hive API key)${NC}"
fi
# 7) Antigravity
if [ "$ANTIGRAVITY_CRED_DETECTED" = true ]; then
echo -e " ${CYAN}7)${NC} Antigravity Subscription ${DIM}(use your Google/Gemini plan)${NC} ${GREEN}(credential detected)${NC}"
else
echo -e " ${CYAN}7)${NC} Antigravity Subscription ${DIM}(use your Google/Gemini plan)${NC}"
fi
echo ""
echo -e " ${CYAN}${BOLD}API key providers:${NC}"
# 7-12) API key providers — show (credential detected) if key already set
# 8-13) API key providers — show (credential detected) if key already set
PROVIDER_MENU_ENVS=(ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY GROQ_API_KEY CEREBRAS_API_KEY OPENROUTER_API_KEY)
PROVIDER_MENU_NAMES=("Anthropic (Claude) - Recommended" "OpenAI (GPT)" "Google Gemini - Free tier available" "Groq - Fast, free tier" "Cerebras - Fast, free tier" "OpenRouter - Bring any OpenRouter model")
for idx in "${!PROVIDER_MENU_ENVS[@]}"; do
num=$((idx + 7))
num=$((idx + 8))
env_var="${PROVIDER_MENU_ENVS[$idx]}"
if [ -n "${!env_var}" ]; then
echo -e " ${CYAN}$num)${NC} ${PROVIDER_MENU_NAMES[$idx]} ${GREEN}(credential detected)${NC}"
@@ -1154,7 +1196,7 @@ for idx in "${!PROVIDER_MENU_ENVS[@]}"; do
fi
done
SKIP_CHOICE=$((7 + ${#PROVIDER_MENU_ENVS[@]}))
SKIP_CHOICE=$((8 + ${#PROVIDER_MENU_ENVS[@]}))
echo -e " ${CYAN}$SKIP_CHOICE)${NC} Skip for now"
echo ""
@@ -1297,36 +1339,75 @@ case $choice in
echo -e " ${DIM}Model: $SELECTED_MODEL | API: ${HIVE_LLM_ENDPOINT}${NC}"
;;
7)
# Antigravity Subscription
if [ "$ANTIGRAVITY_CRED_DETECTED" = false ]; then
echo ""
echo -e "${CYAN} Setting up Antigravity authentication...${NC}"
echo ""
echo -e " ${YELLOW}A browser window will open for Google OAuth.${NC}"
echo -e " Sign in with your Google account that has Antigravity access."
echo ""
# Run native OAuth flow
if uv run python "$SCRIPT_DIR/core/antigravity_auth.py" auth account add; then
# Re-detect credentials
if [ -f "$HOME/.hive/antigravity-accounts.json" ]; then
ANTIGRAVITY_CRED_DETECTED=true
fi
fi
if [ "$ANTIGRAVITY_CRED_DETECTED" = false ]; then
echo ""
echo -e "${RED} Authentication failed or was cancelled.${NC}"
echo ""
SELECTED_PROVIDER_ID=""
fi
fi
if [ "$ANTIGRAVITY_CRED_DETECTED" = true ]; then
SUBSCRIPTION_MODE="antigravity"
SELECTED_PROVIDER_ID="openai"
SELECTED_MODEL="gemini-3-flash"
SELECTED_MAX_TOKENS=32768
SELECTED_MAX_CONTEXT_TOKENS=1000000 # Gemini 3 Flash — 1M context window
echo ""
echo -e "${YELLOW} ⚠ Using Antigravity can technically cause your account suspension. Please use at your own risk.${NC}"
echo ""
echo -e "${GREEN}${NC} Using Antigravity subscription"
echo -e " ${DIM}Model: gemini-3-flash | Direct OAuth (no proxy required)${NC}"
fi
;;
8)
SELECTED_ENV_VAR="ANTHROPIC_API_KEY"
SELECTED_PROVIDER_ID="anthropic"
PROVIDER_NAME="Anthropic"
SIGNUP_URL="https://console.anthropic.com/settings/keys"
;;
8)
9)
SELECTED_ENV_VAR="OPENAI_API_KEY"
SELECTED_PROVIDER_ID="openai"
PROVIDER_NAME="OpenAI"
SIGNUP_URL="https://platform.openai.com/api-keys"
;;
9)
10)
SELECTED_ENV_VAR="GEMINI_API_KEY"
SELECTED_PROVIDER_ID="gemini"
PROVIDER_NAME="Google Gemini"
SIGNUP_URL="https://aistudio.google.com/apikey"
;;
10)
11)
SELECTED_ENV_VAR="GROQ_API_KEY"
SELECTED_PROVIDER_ID="groq"
PROVIDER_NAME="Groq"
SIGNUP_URL="https://console.groq.com/keys"
;;
11)
12)
SELECTED_ENV_VAR="CEREBRAS_API_KEY"
SELECTED_PROVIDER_ID="cerebras"
PROVIDER_NAME="Cerebras"
SIGNUP_URL="https://cloud.cerebras.ai/"
;;
12)
13)
SELECTED_ENV_VAR="OPENROUTER_API_KEY"
SELECTED_PROVIDER_ID="openrouter"
SELECTED_API_BASE="https://openrouter.ai/api/v1"
@@ -1491,6 +1572,8 @@ if [ -n "$SELECTED_PROVIDER_ID" ]; then
save_configuration "$SELECTED_PROVIDER_ID" "" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "true" "" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "codex" ]; then
save_configuration "$SELECTED_PROVIDER_ID" "" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "" "true" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "antigravity" ]; then
save_configuration "$SELECTED_PROVIDER_ID" "" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "" "" "true" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "zai_code" ]; then
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "https://api.z.ai/api/coding/paas/v4" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "minimax_code" ]; then
+7 -6
View File
@@ -33,8 +33,8 @@ OPENROUTER_SEPARATOR_TRANSLATION = str.maketrans(
"\u2212": "-",
"\u2044": "/",
"\u2215": "/",
"\u29F8": "/",
"\uFF0F": "/",
"\u29f8": "/",
"\uff0f": "/",
}
)
@@ -66,9 +66,7 @@ def _sanitize_openrouter_model_id(value: str) -> str:
"""Sanitize pasted OpenRouter model IDs into a comparable slug."""
normalized = unicodedata.normalize("NFKC", value or "")
normalized = "".join(
ch
for ch in normalized
if unicodedata.category(ch) not in {"Cc", "Cf"}
ch for ch in normalized if unicodedata.category(ch) not in {"Cc", "Cf"}
)
normalized = normalized.translate(OPENROUTER_SEPARATOR_TRANSLATION)
normalized = re.sub(r"\s+", "", normalized)
@@ -183,7 +181,10 @@ def check_openrouter(
return {"valid": False, "message": "Invalid OpenRouter API key"}
if r.status_code == 403:
return {"valid": False, "message": "OpenRouter API key lacks permissions"}
return {"valid": False, "message": f"OpenRouter API returned status {r.status_code}"}
return {
"valid": False,
"message": f"OpenRouter API returned status {r.status_code}",
}
def check_openrouter_model(
+2 -4
View File
@@ -876,7 +876,7 @@ def _run_server(
if self.path == "/":
self._respond(200, "text/html; charset=utf-8", html_bytes)
elif self.path.startswith("/api/session/"):
sid = urllib.parse.unquote(self.path[len("/api/session/"):])
sid = urllib.parse.unquote(self.path[len("/api/session/") :])
records = sessions.get(sid)
if records is None:
self._respond(404, "application/json", b"[]")
@@ -917,9 +917,7 @@ def _run_server(
def main() -> int:
args = _parse_args()
records = _discover_records(args.logs_dir.expanduser(), args.limit_files)
summaries, sessions = _group_sessions(
records, include_tests=args.include_tests
)
summaries, sessions = _group_sessions(records, include_tests=args.include_tests)
initial_session_id = args.session or (
summaries[0].execution_id if summaries else ""
+98 -17
View File
@@ -424,7 +424,7 @@ prompt_model_selection() {
}
# ── Save worker_llm section to configuration.json ────────────────────
# Args: provider_id env_var model max_tokens max_context_tokens [use_claude_code_sub] [api_base] [use_codex_sub]
# Args: provider_id env_var model max_tokens max_context_tokens [use_claude_code_sub] [api_base] [use_codex_sub] [use_antigravity_sub]
save_worker_configuration() {
local provider_id="$1"
@@ -435,6 +435,7 @@ save_worker_configuration() {
local use_claude_code_sub="${6:-}"
local api_base="${7:-}"
local use_codex_sub="${8:-}"
local use_antigravity_sub="${9:-}"
if [ -z "$model" ]; then
model="$(get_default_model "$provider_id")"
@@ -451,7 +452,8 @@ save_worker_configuration() {
"$max_context_tokens" \
"$use_claude_code_sub" \
"$api_base" \
"$use_codex_sub" 2>/dev/null <<'PY'
"$use_codex_sub" \
"$use_antigravity_sub" 2>/dev/null <<'PY'
import json
import sys
from pathlib import Path
@@ -465,7 +467,8 @@ from pathlib import Path
use_claude_code_sub,
api_base,
use_codex_sub,
) = sys.argv[1:9]
use_antigravity_sub,
) = sys.argv[1:10]
cfg_path = Path.home() / ".hive" / "configuration.json"
cfg_path.parent.mkdir(parents=True, exist_ok=True)
@@ -496,6 +499,21 @@ if use_codex_sub == "true":
else:
config["worker_llm"].pop("use_codex_subscription", None)
if use_antigravity_sub == "true":
config["worker_llm"]["use_antigravity_subscription"] = True
config["worker_llm"].pop("api_key_env_var", None)
import os as _os
_secret = _os.environ.get("ANTIGRAVITY_CLIENT_SECRET") or ""
if _secret:
config["worker_llm"]["antigravity_client_secret"] = _secret
_client_id = _os.environ.get("ANTIGRAVITY_CLIENT_ID") or ""
if _client_id:
config["worker_llm"]["antigravity_client_id"] = _client_id
else:
config["worker_llm"].pop("use_antigravity_subscription", None)
config["worker_llm"].pop("antigravity_client_secret", None)
config["worker_llm"].pop("antigravity_client_id", None)
if api_base:
config["worker_llm"]["api_base"] = api_base
else:
@@ -591,6 +609,17 @@ if [ -n "${HIVE_API_KEY:-}" ]; then
HIVE_CRED_DETECTED=true
fi
ANTIGRAVITY_CRED_DETECTED=false
# Check native Antigravity IDE (macOS/Linux) SQLite state DB first
if [ -f "$HOME/Library/Application Support/Antigravity/User/globalStorage/state.vscdb" ]; then
ANTIGRAVITY_CRED_DETECTED=true
elif [ -f "$HOME/.config/Antigravity/User/globalStorage/state.vscdb" ]; then
ANTIGRAVITY_CRED_DETECTED=true
# Native OAuth credentials
elif [ -f "$HOME/.hive/antigravity-accounts.json" ]; then
ANTIGRAVITY_CRED_DETECTED=true
fi
# Detect API key providers
if [ "$USE_ASSOC_ARRAYS" = true ]; then
for env_var in "${!PROVIDER_NAMES[@]}"; do
@@ -633,6 +662,8 @@ try:
sub = "codex"
elif llm.get("use_kimi_code_subscription"):
sub = "kimi_code"
elif llm.get("use_antigravity_subscription"):
sub = "antigravity"
elif llm.get("provider", "") == "minimax" or "api.minimax.io" in llm.get("api_base", ""):
sub = "minimax_code"
elif llm.get("provider", "") == "hive" or "adenhq.com" in llm.get("api_base", ""):
@@ -656,6 +687,7 @@ if [ -n "$PREV_SUB_MODE" ] || [ -n "$PREV_PROVIDER" ]; then
codex) [ "$CODEX_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
kimi_code) [ "$KIMI_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
hive_llm) [ "$HIVE_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
antigravity) [ "$ANTIGRAVITY_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
*)
# API key provider — check if the env var is set
if [ -n "$PREV_ENV_VAR" ] && [ -n "${!PREV_ENV_VAR}" ]; then
@@ -672,15 +704,16 @@ if [ -n "$PREV_SUB_MODE" ] || [ -n "$PREV_PROVIDER" ]; then
minimax_code) DEFAULT_CHOICE=4 ;;
kimi_code) DEFAULT_CHOICE=5 ;;
hive_llm) DEFAULT_CHOICE=6 ;;
antigravity) DEFAULT_CHOICE=7 ;;
esac
if [ -z "$DEFAULT_CHOICE" ]; then
case "$PREV_PROVIDER" in
anthropic) DEFAULT_CHOICE=7 ;;
openai) DEFAULT_CHOICE=8 ;;
gemini) DEFAULT_CHOICE=9 ;;
groq) DEFAULT_CHOICE=10 ;;
cerebras) DEFAULT_CHOICE=11 ;;
openrouter) DEFAULT_CHOICE=12 ;;
anthropic) DEFAULT_CHOICE=8 ;;
openai) DEFAULT_CHOICE=9 ;;
gemini) DEFAULT_CHOICE=10 ;;
groq) DEFAULT_CHOICE=11 ;;
cerebras) DEFAULT_CHOICE=12 ;;
openrouter) DEFAULT_CHOICE=13 ;;
minimax) DEFAULT_CHOICE=4 ;;
kimi) DEFAULT_CHOICE=5 ;;
hive) DEFAULT_CHOICE=6 ;;
@@ -736,14 +769,21 @@ else
echo -e " ${CYAN}6)${NC} Hive LLM ${DIM}(use your Hive API key)${NC}"
fi
# 7) Antigravity
if [ "$ANTIGRAVITY_CRED_DETECTED" = true ]; then
echo -e " ${CYAN}7)${NC} Antigravity Subscription ${DIM}(use your Google/Gemini plan)${NC} ${GREEN}(credential detected)${NC}"
else
echo -e " ${CYAN}7)${NC} Antigravity Subscription ${DIM}(use your Google/Gemini plan)${NC}"
fi
echo ""
echo -e " ${CYAN}${BOLD}API key providers:${NC}"
# 7-12) API key providers — show (credential detected) if key already set
# 8-13) API key providers — show (credential detected) if key already set
PROVIDER_MENU_ENVS=(ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY GROQ_API_KEY CEREBRAS_API_KEY OPENROUTER_API_KEY)
PROVIDER_MENU_NAMES=("Anthropic (Claude) - Recommended" "OpenAI (GPT)" "Google Gemini - Free tier available" "Groq - Fast, free tier" "Cerebras - Fast, free tier" "OpenRouter - Bring any OpenRouter model")
for idx in "${!PROVIDER_MENU_ENVS[@]}"; do
num=$((idx + 7))
num=$((idx + 8))
env_var="${PROVIDER_MENU_ENVS[$idx]}"
if [ -n "${!env_var}" ]; then
echo -e " ${CYAN}$num)${NC} ${PROVIDER_MENU_NAMES[$idx]} ${GREEN}(credential detected)${NC}"
@@ -752,7 +792,7 @@ for idx in "${!PROVIDER_MENU_ENVS[@]}"; do
fi
done
SKIP_CHOICE=$((7 + ${#PROVIDER_MENU_ENVS[@]}))
SKIP_CHOICE=$((8 + ${#PROVIDER_MENU_ENVS[@]}))
echo -e " ${CYAN}$SKIP_CHOICE)${NC} Skip for now"
echo ""
@@ -895,36 +935,75 @@ case $choice in
echo -e " ${DIM}Model: $SELECTED_MODEL | API: ${HIVE_LLM_ENDPOINT}${NC}"
;;
7)
# Antigravity Subscription
if [ "$ANTIGRAVITY_CRED_DETECTED" = false ]; then
echo ""
echo -e "${CYAN} Setting up Antigravity authentication...${NC}"
echo ""
echo -e " ${YELLOW}A browser window will open for Google OAuth.${NC}"
echo -e " Sign in with your Google account that has Antigravity access."
echo ""
# Run native OAuth flow
if uv run python "$PROJECT_DIR/core/antigravity_auth.py" auth account add; then
# Re-detect credentials
if [ -f "$HOME/.hive/antigravity-accounts.json" ]; then
ANTIGRAVITY_CRED_DETECTED=true
fi
fi
if [ "$ANTIGRAVITY_CRED_DETECTED" = false ]; then
echo ""
echo -e "${RED} Authentication failed or was cancelled.${NC}"
echo ""
exit 1
fi
fi
if [ "$ANTIGRAVITY_CRED_DETECTED" = true ]; then
SUBSCRIPTION_MODE="antigravity"
SELECTED_PROVIDER_ID="openai"
SELECTED_MODEL="gemini-3-flash"
SELECTED_MAX_TOKENS=32768
SELECTED_MAX_CONTEXT_TOKENS=1000000 # Gemini 3 Flash — 1M context window
echo ""
echo -e "${YELLOW} ⚠ Using Antigravity can technically cause your account suspension. Please use at your own risk.${NC}"
echo ""
echo -e "${GREEN}${NC} Using Antigravity subscription"
echo -e " ${DIM}Model: gemini-3-flash | Direct OAuth (no proxy required)${NC}"
fi
;;
8)
SELECTED_ENV_VAR="ANTHROPIC_API_KEY"
SELECTED_PROVIDER_ID="anthropic"
PROVIDER_NAME="Anthropic"
SIGNUP_URL="https://console.anthropic.com/settings/keys"
;;
8)
9)
SELECTED_ENV_VAR="OPENAI_API_KEY"
SELECTED_PROVIDER_ID="openai"
PROVIDER_NAME="OpenAI"
SIGNUP_URL="https://platform.openai.com/api-keys"
;;
9)
10)
SELECTED_ENV_VAR="GEMINI_API_KEY"
SELECTED_PROVIDER_ID="gemini"
PROVIDER_NAME="Google Gemini"
SIGNUP_URL="https://aistudio.google.com/apikey"
;;
10)
11)
SELECTED_ENV_VAR="GROQ_API_KEY"
SELECTED_PROVIDER_ID="groq"
PROVIDER_NAME="Groq"
SIGNUP_URL="https://console.groq.com/keys"
;;
11)
12)
SELECTED_ENV_VAR="CEREBRAS_API_KEY"
SELECTED_PROVIDER_ID="cerebras"
PROVIDER_NAME="Cerebras"
SIGNUP_URL="https://cloud.cerebras.ai/"
;;
12)
13)
SELECTED_ENV_VAR="OPENROUTER_API_KEY"
SELECTED_PROVIDER_ID="openrouter"
SELECTED_API_BASE="https://openrouter.ai/api/v1"
@@ -1086,6 +1165,8 @@ if [ -n "$SELECTED_PROVIDER_ID" ]; then
save_worker_configuration "$SELECTED_PROVIDER_ID" "" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "true" "" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "codex" ]; then
save_worker_configuration "$SELECTED_PROVIDER_ID" "" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "" "true" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "antigravity" ]; then
save_worker_configuration "$SELECTED_PROVIDER_ID" "" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "" "" "true" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "zai_code" ]; then
save_worker_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "https://api.z.ai/api/coding/paas/v4" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "minimax_code" ]; then
@@ -10,7 +10,7 @@ def get_secure_path(path: str, workspace_id: str, agent_id: str, session_id: str
raise ValueError("workspace_id, agent_id, and session_id are all required")
# Ensure session directory exists
session_dir = os.path.abspath(os.path.join(WORKSPACES_DIR, workspace_id, agent_id, session_id))
session_dir = os.path.realpath(os.path.join(WORKSPACES_DIR, workspace_id, agent_id, session_id))
os.makedirs(session_dir, exist_ok=True)
# Normalize whitespace to prevent bypass via leading spaces/tabs
@@ -21,9 +21,9 @@ def get_secure_path(path: str, workspace_id: str, agent_id: str, session_id: str
# Strip exactly one leading separator to make path relative to session_dir,
# preserving any subsequent separators (e.g. UNC paths like //server/share)
rel_path = path[1:] if path and path[0] in ("/", "\\") else path
final_path = os.path.abspath(os.path.join(session_dir, rel_path))
final_path = os.path.realpath(os.path.join(session_dir, rel_path))
else:
final_path = os.path.abspath(os.path.join(session_dir, path))
final_path = os.path.realpath(os.path.join(session_dir, path))
# Verify path is within session_dir
try:
@@ -391,7 +391,7 @@ def register_tools(
def google_sheets_update_values(
spreadsheet_id: str,
range_name: str,
values: list[list[Any]],
values: list[list[Any]] | str,
value_input_option: str = "USER_ENTERED",
# Tracking parameters (injected by framework, ignored by tool)
workspace_id: str | None = None,
@@ -405,16 +405,29 @@ def register_tools(
Args:
spreadsheet_id: The spreadsheet ID (from the URL)
range_name: The A1 notation range (e.g., "Sheet1!A1:B10")
values: 2D array of values to write
values: 2D array of values to write. Accepts a list or a JSON string.
value_input_option: How to interpret input
(USER_ENTERED parses, RAW stores as-is)
Returns:
Dict with update result or error
"""
# Credentials check first so missing-creds errors aren't masked
client = _get_client()
if isinstance(client, dict):
return client
# Accept stringified JSON and deserialize
import json
if isinstance(values, str):
try:
values = json.loads(values)
except (json.JSONDecodeError, ValueError):
return {"error": "values is not valid JSON"}
if not isinstance(values, list):
return {
"error": f"values must be a 2D list or JSON string, got {type(values).__name__}"
}
try:
return client.update_values(spreadsheet_id, range_name, values, value_input_option)
except httpx.TimeoutException:
@@ -426,7 +439,7 @@ def register_tools(
def google_sheets_append_values(
spreadsheet_id: str,
range_name: str,
values: list[list[Any]],
values: list[list[Any]] | str,
value_input_option: str = "USER_ENTERED",
# Tracking parameters (injected by framework, ignored by tool)
workspace_id: str | None = None,
@@ -440,16 +453,29 @@ def register_tools(
Args:
spreadsheet_id: The spreadsheet ID (from the URL)
range_name: The A1 notation range (e.g., "Sheet1!A1")
values: 2D array of values to append
values: 2D array of values to append. Accepts a list or a JSON string.
value_input_option: How to interpret input
(USER_ENTERED parses, RAW stores as-is)
Returns:
Dict with append result or error
"""
# Credentials check first so missing-creds errors aren't masked
client = _get_client()
if isinstance(client, dict):
return client
# Accept stringified JSON and deserialize
import json
if isinstance(values, str):
try:
values = json.loads(values)
except (json.JSONDecodeError, ValueError):
return {"error": "values is not valid JSON"}
if not isinstance(values, list):
return {
"error": f"values must be a 2D list or JSON string, got {type(values).__name__}"
}
try:
return client.append_values(spreadsheet_id, range_name, values, value_input_option)
except httpx.TimeoutException:
@@ -2,14 +2,16 @@
PDF Read Tool - Manage Accounting and Financial Operations.
Uses pypdf to read PDF documents and extract text content
along with metadata.
along with metadata. Supports both local file paths and URLs.
"""
from __future__ import annotations
import tempfile
from pathlib import Path
from typing import Any
import httpx
from fastmcp import FastMCP
from pypdf import PdfReader
@@ -98,9 +100,10 @@ def register_tools(mcp: FastMCP) -> None:
Returns text content with page markers and optional metadata.
Use for reading PDFs, reports, documents, or any PDF file.
Supports both local file paths and URLs.
Args:
file_path: Path to the PDF file to read (absolute or relative)
file_path: Path or URL to the PDF file (local path, or http/https URL)
pages: Page range - 'all'/None for all, '5' for single,
'1-10' for range, '1,3,5' for specific
max_pages: Maximum number of pages to process (1-1000, memory safety)
@@ -109,8 +112,48 @@ def register_tools(mcp: FastMCP) -> None:
Returns:
Dict with extracted text and metadata, or error dict
"""
temp_file = None
try:
path = Path(file_path).resolve()
# Check if input is a URL
is_url = file_path.startswith(("http://", "https://"))
if is_url:
# Download PDF from URL to temporary file
try:
response = httpx.get(
file_path,
headers={"User-Agent": "AdenBot/1.0 (PDF Reader)"},
follow_redirects=True,
timeout=60.0,
)
if response.status_code != 200:
return {"error": f"Failed to download PDF: HTTP {response.status_code}"}
# Validate content-type
content_type = response.headers.get("content-type", "").lower()
if "application/pdf" not in content_type:
return {
"error": (
f"URL does not point to a PDF file. Content-Type: {content_type}"
),
"content_type": content_type,
"url": file_path,
}
# Save to temporary file
temp_file = tempfile.NamedTemporaryFile(mode="wb", suffix=".pdf", delete=False)
temp_file.write(response.content)
temp_file.close()
path = Path(temp_file.name)
except httpx.TimeoutException:
return {"error": "PDF download timed out"}
except httpx.RequestError as e:
return {"error": f"Failed to download PDF: {str(e)}"}
else:
# Local file path
path = Path(file_path).resolve()
# Validate file exists
if not path.exists():
@@ -192,3 +235,10 @@ def register_tools(mcp: FastMCP) -> None:
return {"error": f"Permission denied: {file_path}"}
except Exception as e:
return {"error": f"Failed to read PDF: {str(e)}"}
finally:
# Clean up temporary file if it was created
if temp_file is not None:
try:
Path(temp_file.name).unlink(missing_ok=True)
except Exception:
pass # Ignore cleanup errors
+174
View File
@@ -1,7 +1,9 @@
"""Tests for pdf_read tool (FastMCP)."""
from pathlib import Path
from unittest.mock import MagicMock, Mock, patch
import httpx
import pytest
from fastmcp import FastMCP
@@ -111,3 +113,175 @@ class TestPdfReadTool:
# New behavior: explicit truncation metadata instead of silent truncation
assert result.get("truncated") is True
assert "truncation_warning" in result
class TestPdfReadUrlSupport:
    """Tests for URL download support in pdf_read tool.

    These tests exercise the http/https branch of the tool: the URL is
    fetched with ``httpx.get``, validated by status code and Content-Type,
    written to a temporary file, and then parsed with ``PdfReader``.
    The ``pdf_read_fn`` fixture is presumably provided by conftest and
    returns the registered tool callable — TODO confirm against the
    fixture definition, which is not visible here.
    """

    @patch("httpx.get")
    @patch("aden_tools.tools.pdf_read_tool.pdf_read_tool.PdfReader")
    def test_url_download_succeeds(self, mock_pdf_reader, mock_get, pdf_read_fn):
        """Valid PDF URL downloads and parses successfully."""
        # Mock HTTP response: 200 with a PDF Content-Type so the download
        # branch accepts it and hands the bytes to PdfReader.
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.headers = {"content-type": "application/pdf"}
        mock_response.content = b"%PDF-1.4\nfake pdf content"
        mock_get.return_value = mock_response

        # Mock PdfReader: one unencrypted page with known extractable text.
        mock_reader_instance = MagicMock()
        mock_reader_instance.is_encrypted = False
        mock_reader_instance.pages = [MagicMock()]
        mock_reader_instance.pages[0].extract_text.return_value = "PDF text content"
        mock_reader_instance.metadata = None
        mock_pdf_reader.return_value = mock_reader_instance

        result = pdf_read_fn(file_path="https://example.com/document.pdf")

        assert "error" not in result
        assert "content" in result
        assert "PDF text content" in result["content"]
        # The tool must have fetched the URL exactly once.
        mock_get.assert_called_once()

    @patch("httpx.get")
    def test_url_non_pdf_content_type(self, mock_get, pdf_read_fn):
        """URL returning non-PDF content-type returns error."""
        # A 200 response whose Content-Type is HTML must be rejected
        # before any PDF parsing is attempted.
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.headers = {"content-type": "text/html"}
        mock_response.content = b"<html>Not a PDF</html>"
        mock_get.return_value = mock_response

        result = pdf_read_fn(file_path="https://example.com/page.html")

        assert "error" in result
        assert "does not point to a pdf" in result["error"].lower()
        # The offending Content-Type is surfaced for debugging.
        assert "content_type" in result
        assert "text/html" in result["content_type"]

    @patch("httpx.get")
    def test_url_http_404_error(self, mock_get, pdf_read_fn):
        """URL returning 404 returns appropriate error."""
        mock_response = Mock()
        mock_response.status_code = 404
        mock_get.return_value = mock_response

        result = pdf_read_fn(file_path="https://example.com/missing.pdf")

        assert "error" in result
        # The HTTP status code is included in the error message.
        assert "404" in result["error"]

    @patch("httpx.get")
    def test_url_http_500_error(self, mock_get, pdf_read_fn):
        """URL returning 500 returns appropriate error."""
        mock_response = Mock()
        mock_response.status_code = 500
        mock_get.return_value = mock_response

        result = pdf_read_fn(file_path="https://example.com/error.pdf")

        assert "error" in result
        assert "500" in result["error"]

    @patch("httpx.get")
    def test_url_timeout_error(self, mock_get, pdf_read_fn):
        """URL request timeout returns appropriate error."""
        # httpx.TimeoutException from the GET maps to a "timed out" error.
        mock_get.side_effect = httpx.TimeoutException("Timeout")

        result = pdf_read_fn(file_path="https://example.com/slow.pdf")

        assert "error" in result
        assert "timed out" in result["error"].lower()

    @patch("httpx.get")
    def test_url_network_error(self, mock_get, pdf_read_fn):
        """Network error returns appropriate error."""
        # Any other httpx.RequestError maps to a "failed to download" error.
        mock_get.side_effect = httpx.RequestError("Connection failed")

        result = pdf_read_fn(file_path="https://example.com/doc.pdf")

        assert "error" in result
        assert "failed to download" in result["error"].lower()

    @patch("httpx.get")
    @patch("aden_tools.tools.pdf_read_tool.pdf_read_tool.PdfReader")
    def test_url_with_http_scheme(self, mock_pdf_reader, mock_get, pdf_read_fn):
        """HTTP URLs (not HTTPS) are handled correctly."""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.headers = {"content-type": "application/pdf"}
        mock_response.content = b"%PDF-1.4\ncontent"
        mock_get.return_value = mock_response

        mock_reader_instance = MagicMock()
        mock_reader_instance.is_encrypted = False
        mock_reader_instance.pages = [MagicMock()]
        mock_reader_instance.pages[0].extract_text.return_value = "Text"
        mock_reader_instance.metadata = None
        mock_pdf_reader.return_value = mock_reader_instance

        # Plain http:// must also take the download branch.
        result = pdf_read_fn(file_path="http://example.com/doc.pdf")

        assert "error" not in result
        mock_get.assert_called_once()

    def test_local_file_path_still_works(self, pdf_read_fn, tmp_path: Path):
        """Local file paths still work (backward compatibility)."""
        pdf_file = tmp_path / "local.pdf"
        # Minimal header only — parsing will fail, but that is expected;
        # the point is that a local path must NOT be treated as a URL.
        pdf_file.write_bytes(b"%PDF-1.4")

        result = pdf_read_fn(file_path=str(pdf_file))

        # Will error due to invalid PDF, but should not treat as URL
        assert isinstance(result, dict)
        # Should not have URL-specific errors
        if "error" in result:
            assert "download" not in result["error"].lower()

    @patch("httpx.get")
    @patch("aden_tools.tools.pdf_read_tool.pdf_read_tool.PdfReader")
    @patch("aden_tools.tools.pdf_read_tool.pdf_read_tool.tempfile.NamedTemporaryFile")
    def test_temporary_file_cleanup(self, mock_tempfile, mock_pdf_reader, mock_get, pdf_read_fn):
        """Temporary file is cleaned up after processing."""
        # Mock HTTP response
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.headers = {"content-type": "application/pdf"}
        mock_response.content = b"%PDF-1.4\ncontent"
        mock_get.return_value = mock_response

        # Mock temporary file so we can observe the write/close lifecycle.
        mock_temp = MagicMock()
        mock_temp.name = "/tmp/test.pdf"
        mock_tempfile.return_value = mock_temp

        # Mock PdfReader
        mock_reader_instance = MagicMock()
        mock_reader_instance.is_encrypted = False
        mock_reader_instance.pages = [MagicMock()]
        mock_reader_instance.pages[0].extract_text.return_value = "Text"
        mock_reader_instance.metadata = None
        mock_pdf_reader.return_value = mock_reader_instance

        pdf_read_fn(file_path="https://example.com/doc.pdf")

        # Verify temp file operations: downloaded bytes written once, then
        # the handle closed before parsing.
        mock_temp.write.assert_called_once()
        mock_temp.close.assert_called_once()

    @patch("httpx.get")
    def test_url_json_content_type(self, mock_get, pdf_read_fn):
        """URL returning JSON returns appropriate error."""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.headers = {"content-type": "application/json"}
        mock_response.content = b'{"error": "not a pdf"}'
        mock_get.return_value = mock_response

        result = pdf_read_fn(file_path="https://api.example.com/data")

        assert "error" in result
        assert "does not point to a pdf" in result["error"].lower()
        assert "content_type" in result
        assert "application/json" in result["content_type"]
+23 -17
View File
@@ -1,6 +1,5 @@
"""Tests for security.py - get_secure_path() function."""
import os
from unittest.mock import patch
import pytest
@@ -242,19 +241,14 @@ class TestGetSecurePath:
symlink_path = session_dir / "link_to_target"
symlink_path.symlink_to(target_file)
# Path through symlink should resolve
# Path through symlink should resolve to the real target path
result = get_secure_path("link_to_target", **ids)
assert result == str(symlink_path)
# realpath resolves the symlink, so result points to the real file
assert result == str(target_file.resolve())
def test_symlink_escape_detected_with_realpath(self, ids):
"""Symlinks pointing outside sandbox can be detected using realpath.
Note: get_secure_path uses abspath (not realpath), so it validates the
lexical path. To fully protect against symlink attacks, callers should
verify realpath(result) is still within the sandbox before file I/O.
This test documents that pattern.
"""
def test_symlink_escape_blocked(self, ids):
"""Symlinks pointing outside sandbox are blocked by get_secure_path."""
from aden_tools.tools.file_system_toolkits.security import get_secure_path
# Create session directory
@@ -267,10 +261,22 @@ class TestGetSecurePath:
symlink_path = session_dir / "escape_link"
symlink_path.symlink_to(outside_target)
# get_secure_path accepts the lexical path (symlink is inside session)
result = get_secure_path("escape_link", **ids)
assert result == str(symlink_path)
# get_secure_path now resolves symlinks and blocks the escape
with pytest.raises(ValueError, match="outside the session sandbox"):
get_secure_path("escape_link", **ids)
# However, realpath reveals the escape - callers should check this
real_path = os.path.realpath(result)
assert os.path.commonpath([real_path, str(session_dir)]) != str(session_dir)
def test_symlink_to_root_escape_blocked(self, ids):
    """Traversal through an in-sandbox symlink to / must be rejected."""
    from aden_tools.tools.file_system_toolkits.security import get_secure_path

    # Build the session sandbox directory for this test's ids.
    session_dir = self.workspaces_dir / "test-workspace" / "test-agent" / "test-session"
    session_dir.mkdir(parents=True, exist_ok=True)

    # Plant a symlink named "root" inside the sandbox pointing at the
    # filesystem root — the classic escape vector.
    (session_dir / "root").symlink_to("/")

    # Resolving a path through that link must be refused as an escape.
    with pytest.raises(ValueError, match="outside the session sandbox"):
        get_secure_path("root/etc/passwd", **ids)