Merge remote-tracking branch 'origin/main' into feature/windows-filesysten

Timothy
2026-03-04 15:59:49 -08:00
62 changed files with 1559 additions and 740 deletions
+1
@@ -79,3 +79,4 @@ core/tests/*dumps/*
screenshots/*
.gemini/*
+4
@@ -2,6 +2,10 @@
Shared agent instructions for this workspace.
## Deprecations
- **TUI is deprecated.** The terminal UI (`hive tui`) is no longer maintained. Use the browser-based interface (`hive open`) instead.
## Coding Agent Notes
-
+13 -1
@@ -20,8 +20,20 @@ check: ## Run all checks without modifying files (CI-safe)
cd core && ruff format --check .
cd tools && ruff format --check .
test: ## Run all tests
test: ## Run all tests (core + tools, excludes live)
cd core && uv run python -m pytest tests/ -v
cd tools && uv run python -m pytest -v
test-tools: ## Run tool tests only (mocked, no credentials needed)
cd tools && uv run python -m pytest -v
test-live: ## Run live integration tests (requires real API credentials)
cd tools && uv run python -m pytest -m live -s -o "addopts=" --log-cli-level=INFO
test-all: ## Run everything including live tests
cd core && uv run python -m pytest tests/ -v
cd tools && uv run python -m pytest -v
cd tools && uv run python -m pytest -m live -s -o "addopts=" --log-cli-level=INFO
install-hooks: ## Install pre-commit hooks
uv pip install pre-commit
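For reference, a typical local flow with the new targets might look like this (the live suite assumes real provider credentials are exported in the shell, per the target comment above):

```bash
# Mocked tool tests (no credentials needed)
make test-tools

# Live integration tests (assumes real API credentials are exported)
make test-live

# Everything: core tests, tool tests, then live tests
make test-all
```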
+2
@@ -113,6 +113,8 @@ This sets up:
- Finally, it will open the Hive dashboard in your browser
> **Tip:** To reopen the dashboard later, run `hive open` from the project directory.
<img width="2500" height="1214" alt="home-screen" src="https://github.com/user-attachments/assets/134d897f-5e75-4874-b00b-e0505f6b45c4" />
### Build Your First Agent
+1 -1
@@ -10,7 +10,7 @@ def _load_preferred_model() -> str:
config_path = Path.home() / ".hive" / "configuration.json"
if config_path.exists():
try:
with open(config_path) as f:
with open(config_path, encoding="utf-8") as f:
config = json.load(f)
llm = config.get("llm", {})
if llm.get("provider") and llm.get("model"):
@@ -7,11 +7,11 @@ from framework.graph import NodeSpec
# Load reference docs at import time so they're always in the system prompt.
# No voluntary read_file() calls needed — the LLM gets everything upfront.
_ref_dir = Path(__file__).parent.parent / "reference"
_framework_guide = (_ref_dir / "framework_guide.md").read_text()
_file_templates = (_ref_dir / "file_templates.md").read_text()
_anti_patterns = (_ref_dir / "anti_patterns.md").read_text()
_framework_guide = (_ref_dir / "framework_guide.md").read_text(encoding="utf-8")
_file_templates = (_ref_dir / "file_templates.md").read_text(encoding="utf-8")
_anti_patterns = (_ref_dir / "anti_patterns.md").read_text(encoding="utf-8")
_gcu_guide_path = _ref_dir / "gcu_guide.md"
_gcu_guide = _gcu_guide_path.read_text() if _gcu_guide_path.exists() else ""
_gcu_guide = _gcu_guide_path.read_text(encoding="utf-8") if _gcu_guide_path.exists() else ""
def _is_gcu_enabled() -> bool:
+2 -2
@@ -660,7 +660,7 @@ class GraphBuilder:
# Generate Python code
code = self._generate_code(graph)
Path(path).write_text(code)
Path(path).write_text(code, encoding="utf-8")
self.session.phase = BuildPhase.EXPORTED
self._save_session()
@@ -754,7 +754,7 @@ class GraphBuilder:
"""Save session to disk."""
self.session.updated_at = datetime.now()
path = self.storage_path / f"{self.session.id}.json"
path.write_text(self.session.model_dump_json(indent=2))
path.write_text(self.session.model_dump_json(indent=2), encoding="utf-8")
def _load_session(self, session_id: str) -> BuildSession:
"""Load session from disk."""
+1 -1
@@ -92,7 +92,7 @@ def get_api_key() -> str | None:
def get_gcu_enabled() -> bool:
"""Return whether GCU (browser automation) is enabled in user config."""
return get_hive_config().get("gcu_enabled", False)
return get_hive_config().get("gcu_enabled", True)
def get_api_base() -> str | None:
+1 -1
@@ -69,7 +69,7 @@ def save_credential_key(key: str) -> Path:
# Restrict the secrets directory itself
path.parent.chmod(stat.S_IRWXU) # 0o700
path.write_text(key)
path.write_text(key, encoding="utf-8")
path.chmod(stat.S_IRUSR | stat.S_IWUSR) # 0o600
os.environ[CREDENTIAL_KEY_ENV_VAR] = key
+1 -1
@@ -568,7 +568,7 @@ def _load_nodes_from_python_agent(agent_path: Path) -> list:
def _load_nodes_from_json_agent(agent_json: Path) -> list:
"""Load nodes from a JSON-based agent."""
try:
with open(agent_json) as f:
with open(agent_json, encoding="utf-8") as f:
data = json.load(f)
from framework.graph import NodeSpec
+3 -3
@@ -227,7 +227,7 @@ class EncryptedFileStorage(CredentialStorage):
index_path = self.base_path / "metadata" / "index.json"
if not index_path.exists():
return []
with open(index_path) as f:
with open(index_path, encoding="utf-8") as f:
index = json.load(f)
return list(index.get("credentials", {}).keys())
@@ -268,7 +268,7 @@ class EncryptedFileStorage(CredentialStorage):
index_path = self.base_path / "metadata" / "index.json"
if index_path.exists():
with open(index_path) as f:
with open(index_path, encoding="utf-8") as f:
index = json.load(f)
else:
index = {"credentials": {}, "version": "1.0"}
@@ -283,7 +283,7 @@ class EncryptedFileStorage(CredentialStorage):
index["last_modified"] = datetime.now(UTC).isoformat()
with open(index_path, "w") as f:
with open(index_path, "w", encoding="utf-8") as f:
json.dump(index, f, indent=2)
+1 -2
@@ -431,8 +431,7 @@ class GraphSpec(BaseModel):
max_tokens: int = Field(default=None) # resolved by _resolve_max_tokens validator
# Cleanup LLM for JSON extraction fallback (fast/cheap model preferred)
# If not set, uses CEREBRAS_API_KEY -> cerebras/llama-3.3-70b or
# ANTHROPIC_API_KEY -> claude-haiku-4-5 as fallback
# If not set, uses CEREBRAS_API_KEY -> cerebras/llama-3.3-70b
cleanup_llm_model: str | None = None
# Execution limits
+3 -2
@@ -183,11 +183,12 @@ class GraphExecutor:
self.tool_provider_map = tool_provider_map
self.dynamic_tools_provider = dynamic_tools_provider
# Initialize output cleaner
# Initialize output cleaner — uses its own dedicated fast model (CEREBRAS_API_KEY),
# never the main agent LLM. Passing the main LLM here would cause expensive
# Anthropic calls for output cleaning whenever ANTHROPIC_API_KEY is set.
self.cleansing_config = cleansing_config or CleansingConfig()
self.output_cleaner = OutputCleaner(
config=self.cleansing_config,
llm_provider=llm,
)
# Parallel execution settings
+4 -56
@@ -154,69 +154,17 @@ class HITLProtocol:
"""
Parse human's raw input into structured response.
Uses Haiku to intelligently extract answers for each question.
Maps the raw input to the first question. For multi-question HITL,
the caller should present one question at a time.
"""
import os
response = HITLResponse(request_id=request.request_id, raw_input=raw_input)
# If no questions, just return raw input
if not request.questions:
return response
# Try to use Haiku for intelligent parsing
api_key = os.environ.get("ANTHROPIC_API_KEY")
if not use_haiku or not api_key:
# Simple fallback: treat as answer to first question
if request.questions:
response.answers[request.questions[0].id] = raw_input
return response
# Use Haiku to extract answers
try:
import json
import anthropic
questions_str = "\n".join(
[f"{i + 1}. {q.question} (id: {q.id})" for i, q in enumerate(request.questions)]
)
prompt = f"""Parse the user's response and extract answers for each question.
Questions asked:
{questions_str}
User's response:
{raw_input}
Extract the answer for each question. Output JSON with question IDs as keys.
Example format:
{{"question-1": "answer here", "question-2": "answer here"}}"""
client = anthropic.Anthropic(api_key=api_key)
message = client.messages.create(
model="claude-haiku-4-5-20251001",
max_tokens=500,
messages=[{"role": "user", "content": prompt}],
)
# Parse Haiku's response
import re
response_text = message.content[0].text.strip()
json_match = re.search(r"\{[^{}]*\}", response_text, re.DOTALL)
if json_match:
parsed = json.loads(json_match.group())
response.answers = parsed
except Exception:
# Fallback: use raw input for first question
if request.questions:
response.answers[request.questions[0].id] = raw_input
# Map raw input to first question
response.answers[request.questions[0].id] = raw_input
return response
@staticmethod
+7 -54
@@ -585,7 +585,6 @@ class NodeResult:
Generate a human-readable summary of this node's execution and output.
This is like toString() - it describes what the node produced in its current state.
Uses Haiku to intelligently summarize complex outputs.
"""
if not self.success:
return f"❌ Failed: {self.error}"
@@ -593,59 +592,13 @@ class NodeResult:
if not self.output:
return "✓ Completed (no output)"
# Use Haiku to generate intelligent summary
import os
api_key = os.environ.get("ANTHROPIC_API_KEY")
if not api_key:
# Fallback: simple key-value listing
parts = [f"✓ Completed with {len(self.output)} outputs:"]
for key, value in list(self.output.items())[:5]: # Limit to 5 keys
value_str = str(value)[:100]
if len(str(value)) > 100:
value_str += "..."
parts.append(f"{key}: {value_str}")
return "\n".join(parts)
# Use Haiku to generate intelligent summary
try:
import json
import anthropic
node_context = ""
if node_spec:
node_context = f"\nNode: {node_spec.name}\nPurpose: {node_spec.description}"
output_json = json.dumps(self.output, indent=2, default=str)[:2000]
prompt = (
f"Generate a 1-2 sentence human-readable summary of "
f"what this node produced.{node_context}\n\n"
f"Node output:\n{output_json}\n\n"
"Provide a concise, clear summary that a human can quickly "
"understand. Focus on the key information produced."
)
client = anthropic.Anthropic(api_key=api_key)
message = client.messages.create(
model="claude-haiku-4-5-20251001",
max_tokens=200,
messages=[{"role": "user", "content": prompt}],
)
summary = message.content[0].text.strip()
return f"{summary}"
except Exception:
# Fallback on error
parts = [f"✓ Completed with {len(self.output)} outputs:"]
for key, value in list(self.output.items())[:3]:
value_str = str(value)[:80]
if len(str(value)) > 80:
value_str += "..."
parts.append(f"{key}: {value_str}")
return "\n".join(parts)
parts = [f"✓ Completed with {len(self.output)} outputs:"]
for key, value in list(self.output.items())[:5]: # Limit to 5 keys
value_str = str(value)[:100]
if len(str(value)) > 100:
value_str += "..."
parts.append(f"{key}: {value_str}")
return "\n".join(parts)
class NodeProtocol(ABC):
+1 -1
@@ -170,7 +170,7 @@ def _dump_failed_request(
"temperature": kwargs.get("temperature"),
}
with open(filepath, "w") as f:
with open(filepath, "w", encoding="utf-8") as f:
json.dump(dump_data, f, indent=2, default=str)
return str(filepath)
+7 -5
@@ -162,7 +162,7 @@ def _load_session(session_id: str) -> BuildSession:
if not session_file.exists():
raise ValueError(f"Session '{session_id}' not found")
with open(session_file) as f:
with open(session_file, encoding="utf-8") as f:
data = json.load(f)
return BuildSession.from_dict(data)
@@ -174,7 +174,7 @@ def _load_active_session() -> BuildSession | None:
return None
try:
with open(ACTIVE_SESSION_FILE) as f:
with open(ACTIVE_SESSION_FILE, encoding="utf-8") as f:
session_id = f.read().strip()
if session_id:
@@ -228,7 +228,7 @@ def list_sessions() -> str:
if SESSIONS_DIR.exists():
for session_file in SESSIONS_DIR.glob("*.json"):
try:
with open(session_file) as f:
with open(session_file, encoding="utf-8") as f:
data = json.load(f)
sessions.append(
{
@@ -248,7 +248,7 @@ def list_sessions() -> str:
active_id = None
if ACTIVE_SESSION_FILE.exists():
try:
with open(ACTIVE_SESSION_FILE) as f:
with open(ACTIVE_SESSION_FILE, encoding="utf-8") as f:
active_id = f.read().strip()
except Exception:
pass
@@ -310,7 +310,7 @@ def delete_session(session_id: Annotated[str, "ID of the session to delete"]) ->
_session = None
if ACTIVE_SESSION_FILE.exists():
with open(ACTIVE_SESSION_FILE) as f:
with open(ACTIVE_SESSION_FILE, encoding="utf-8") as f:
active_id = f.read().strip()
if active_id == session_id:
ACTIVE_SESSION_FILE.unlink()
@@ -2894,6 +2894,7 @@ def run_tests(
try:
result = subprocess.run(
cmd,
encoding="utf-8",
capture_output=True,
text=True,
timeout=600, # 10 minute timeout
@@ -3086,6 +3087,7 @@ def debug_test(
try:
result = subprocess.run(
cmd,
encoding="utf-8",
capture_output=True,
text=True,
timeout=120, # 2 minute timeout for single test
+72 -57
@@ -401,6 +401,43 @@ def register_commands(subparsers: argparse._SubParsersAction) -> None:
)
serve_parser.set_defaults(func=cmd_serve)
# open command (serve + auto-open browser)
open_parser = subparsers.add_parser(
"open",
help="Start HTTP server and open dashboard in browser",
description="Shortcut for 'hive serve --open'. "
"Starts the HTTP server and opens the dashboard.",
)
open_parser.add_argument(
"--host",
type=str,
default="127.0.0.1",
help="Host to bind (default: 127.0.0.1)",
)
open_parser.add_argument(
"--port",
"-p",
type=int,
default=8787,
help="Port to listen on (default: 8787)",
)
open_parser.add_argument(
"--agent",
"-a",
type=str,
action="append",
default=[],
help="Agent path to preload (repeatable)",
)
open_parser.add_argument(
"--model",
"-m",
type=str,
default=None,
help="LLM model for preloaded agents",
)
open_parser.set_defaults(func=cmd_open)
def _load_resume_state(
agent_path: str, session_id: str, checkpoint_id: str | None = None
@@ -517,7 +554,7 @@ def cmd_run(args: argparse.Namespace) -> int:
return 1
elif args.input_file:
try:
with open(args.input_file) as f:
with open(args.input_file, encoding="utf-8") as f:
context = json.load(f)
except (FileNotFoundError, json.JSONDecodeError) as e:
print(f"Error reading input file: {e}", file=sys.stderr)
@@ -659,7 +696,7 @@ def cmd_run(args: argparse.Namespace) -> int:
# Output results
if args.output:
with open(args.output, "w") as f:
with open(args.output, "w", encoding="utf-8") as f:
json.dump(output, f, indent=2, default=str)
if not args.quiet:
print(f"Results written to {args.output}")
@@ -1053,62 +1090,19 @@ def _interactive_approval(request):
def _format_natural_language_to_json(
user_input: str, input_keys: list[str], agent_description: str, session_context: dict = None
) -> dict:
"""Use Haiku to convert natural language input to JSON based on agent's input schema."""
import os
"""Convert natural language input to JSON based on agent's input schema.
import anthropic
Maps user input to the primary input field. For follow-up inputs,
appends to the existing value.
"""
main_field = input_keys[0] if input_keys else "objective"
client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
# Build prompt for Haiku
session_info = ""
if session_context:
# Extract the main field (usually 'objective') that we'll append to
main_field = input_keys[0] if input_keys else "objective"
existing_value = session_context.get(main_field, "")
if existing_value:
return {main_field: f"{existing_value}\n\n{user_input}"}
session_info = (
f'\n\nExisting {main_field}: "{existing_value}"\n\n'
f"The user is providing ADDITIONAL information. Append this new "
f"information to the existing {main_field} to create an enriched, "
"more detailed version."
)
prompt = f"""You are formatting user input for an agent that requires specific input fields.
Agent: {agent_description}
Required input fields: {", ".join(input_keys)}{session_info}
User input: {user_input}
{"If this is a follow-up, APPEND new info to the existing field value." if session_context else ""}
Output ONLY valid JSON, no explanation:"""
try:
message = client.messages.create(
model="claude-haiku-4-5-20251001", # Fast and cheap
max_tokens=500,
messages=[{"role": "user", "content": prompt}],
)
json_str = message.content[0].text.strip()
# Remove markdown code blocks if present
if json_str.startswith("```"):
json_str = json_str.split("```")[1]
if json_str.startswith("json"):
json_str = json_str[4:]
json_str = json_str.strip()
return json.loads(json_str)
except Exception:
# Fallback: try to infer the main field
if len(input_keys) == 1:
return {input_keys[0]: user_input}
else:
# Put it in the first field as fallback
return {input_keys[0]: user_input}
return {main_field: user_input}
def cmd_shell(args: argparse.Namespace) -> int:
@@ -1517,7 +1511,7 @@ def _extract_python_agent_metadata(agent_path: Path) -> tuple[str, str]:
return fallback_name, fallback_desc
try:
with open(config_path) as f:
with open(config_path, encoding="utf-8") as f:
tree = ast.parse(f.read())
# Find AgentMetadata class definition
@@ -1928,14 +1922,27 @@ def cmd_setup_credentials(args: argparse.Namespace) -> int:
def _open_browser(url: str) -> None:
"""Open URL in the default browser (best-effort, non-blocking)."""
import subprocess
import sys
try:
if sys.platform == "darwin":
subprocess.Popen(["open", url], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
subprocess.Popen(
["open", url],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
encoding="utf-8",
)
elif sys.platform == "win32":
subprocess.Popen(
["cmd", "/c", "start", "", url],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
elif sys.platform == "linux":
subprocess.Popen(
["xdg-open", url], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
["xdg-open", url],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
encoding="utf-8",
)
except Exception:
pass # Best-effort — don't crash if browser can't open
@@ -1980,12 +1987,14 @@ def _build_frontend() -> bool:
# Ensure deps are installed
subprocess.run(
["npm", "install", "--no-fund", "--no-audit"],
encoding="utf-8",
cwd=frontend_dir,
check=True,
capture_output=True,
)
subprocess.run(
["npm", "run", "build"],
encoding="utf-8",
cwd=frontend_dir,
check=True,
capture_output=True,
@@ -2074,3 +2083,9 @@ def cmd_serve(args: argparse.Namespace) -> int:
print("\nServer stopped.")
return 0
def cmd_open(args: argparse.Namespace) -> int:
"""Start the HTTP API server and open the dashboard in the browser."""
args.open = True
return cmd_serve(args)
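Based on the parser definitions above, a typical invocation of the new command might look like this (the agent path is illustrative):

```bash
# Start the server on the defaults (127.0.0.1:8787) and open the dashboard
hive open

# Bind a different port and preload an agent; --agent is repeatable
hive open --port 8788 --agent ./exports/my_agent --model claude-haiku-4-5-20251001
```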
+111 -21
@@ -39,6 +39,7 @@ logger = logging.getLogger(__name__)
CLAUDE_CREDENTIALS_FILE = Path.home() / ".claude" / ".credentials.json"
CLAUDE_OAUTH_TOKEN_URL = "https://console.anthropic.com/v1/oauth/token"
CLAUDE_OAUTH_CLIENT_ID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e"
CLAUDE_KEYCHAIN_SERVICE = "Claude Code-credentials"
# Buffer in seconds before token expiry to trigger a proactive refresh
_TOKEN_REFRESH_BUFFER_SECS = 300 # 5 minutes
@@ -51,6 +52,96 @@ CODEX_KEYCHAIN_SERVICE = "Codex Auth"
_CODEX_TOKEN_LIFETIME_SECS = 3600 # 1 hour (no explicit expiry field)
def _read_claude_keychain() -> dict | None:
"""Read Claude Code credentials from macOS Keychain.
Returns the parsed JSON dict, or None if not on macOS or entry missing.
"""
import getpass
import platform
import subprocess
if platform.system() != "Darwin":
return None
try:
account = getpass.getuser()
result = subprocess.run(
[
"security",
"find-generic-password",
"-s",
CLAUDE_KEYCHAIN_SERVICE,
"-a",
account,
"-w",
],
capture_output=True,
encoding="utf-8",
timeout=5,
)
if result.returncode != 0:
return None
raw = result.stdout.strip()
if not raw:
return None
return json.loads(raw)
except (subprocess.TimeoutExpired, json.JSONDecodeError, OSError) as exc:
logger.debug("Claude keychain read failed: %s", exc)
return None
def _save_claude_keychain(creds: dict) -> bool:
"""Write Claude Code credentials to macOS Keychain. Returns True on success."""
import getpass
import platform
import subprocess
if platform.system() != "Darwin":
return False
try:
account = getpass.getuser()
data = json.dumps(creds)
result = subprocess.run(
[
"security",
"add-generic-password",
"-U",
"-s",
CLAUDE_KEYCHAIN_SERVICE,
"-a",
account,
"-w",
data,
],
capture_output=True,
timeout=5,
)
return result.returncode == 0
except (subprocess.TimeoutExpired, OSError) as exc:
logger.debug("Claude keychain write failed: %s", exc)
return False
def _read_claude_credentials() -> dict | None:
"""Read Claude Code credentials from Keychain (macOS) or file (Linux/Windows)."""
# Try macOS Keychain first
creds = _read_claude_keychain()
if creds:
return creds
# Fall back to file
if not CLAUDE_CREDENTIALS_FILE.exists():
return None
try:
with open(CLAUDE_CREDENTIALS_FILE, encoding="utf-8") as f:
return json.load(f)
except (json.JSONDecodeError, OSError):
return None
def _refresh_claude_code_token(refresh_token: str) -> dict | None:
"""Refresh the Claude Code OAuth token using the refresh token.
@@ -89,16 +180,14 @@ def _refresh_claude_code_token(refresh_token: str) -> dict | None:
def _save_refreshed_credentials(token_data: dict) -> None:
"""Write refreshed token data back to ~/.claude/.credentials.json."""
"""Write refreshed token data back to Keychain (macOS) or credentials file."""
import time
if not CLAUDE_CREDENTIALS_FILE.exists():
creds = _read_claude_credentials()
if not creds:
return
try:
with open(CLAUDE_CREDENTIALS_FILE) as f:
creds = json.load(f)
oauth = creds.get("claudeAiOauth", {})
oauth["accessToken"] = token_data["access_token"]
if "refresh_token" in token_data:
@@ -107,9 +196,15 @@ def _save_refreshed_credentials(token_data: dict) -> None:
oauth["expiresAt"] = int((time.time() + token_data["expires_in"]) * 1000)
creds["claudeAiOauth"] = oauth
with open(CLAUDE_CREDENTIALS_FILE, "w") as f:
json.dump(creds, f, indent=2)
logger.debug("Claude Code credentials refreshed successfully")
# Try Keychain first (macOS), fall back to file
if _save_claude_keychain(creds):
logger.debug("Claude Code credentials refreshed in Keychain")
return
if CLAUDE_CREDENTIALS_FILE.exists():
with open(CLAUDE_CREDENTIALS_FILE, "w", encoding="utf-8") as f:
json.dump(creds, f, indent=2)
logger.debug("Claude Code credentials refreshed in file")
except (json.JSONDecodeError, OSError, KeyError) as exc:
logger.debug("Failed to save refreshed credentials: %s", exc)
@@ -117,8 +212,8 @@ def _save_refreshed_credentials(token_data: dict) -> None:
def get_claude_code_token() -> str | None:
"""Get the OAuth token from Claude Code subscription with auto-refresh.
Reads from ~/.claude/.credentials.json which is created by the
Claude Code CLI when users authenticate with their subscription.
Reads from macOS Keychain (on Darwin) or ~/.claude/.credentials.json
(on Linux/Windows), as created by the Claude Code CLI.
If the token is expired or close to expiry, attempts an automatic
refresh using the stored refresh token.
@@ -128,13 +223,8 @@ def get_claude_code_token() -> str | None:
"""
import time
if not CLAUDE_CREDENTIALS_FILE.exists():
return None
try:
with open(CLAUDE_CREDENTIALS_FILE) as f:
creds = json.load(f)
except (json.JSONDecodeError, OSError):
creds = _read_claude_credentials()
if not creds:
return None
oauth = creds.get("claudeAiOauth", {})
@@ -212,7 +302,7 @@ def _read_codex_keychain() -> dict | None:
"-w",
],
capture_output=True,
text=True,
encoding="utf-8",
timeout=5,
)
if result.returncode != 0:
@@ -231,7 +321,7 @@ def _read_codex_auth_file() -> dict | None:
if not CODEX_AUTH_FILE.exists():
return None
try:
with open(CODEX_AUTH_FILE) as f:
with open(CODEX_AUTH_FILE, encoding="utf-8") as f:
return json.load(f)
except (json.JSONDecodeError, OSError):
return None
@@ -324,7 +414,7 @@ def _save_refreshed_codex_credentials(auth_data: dict, token_data: dict) -> None
CODEX_AUTH_FILE.parent.mkdir(parents=True, exist_ok=True, mode=0o700)
fd = os.open(CODEX_AUTH_FILE, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
with os.fdopen(fd, "w") as f:
with os.fdopen(fd, "w", encoding="utf-8") as f:
json.dump(auth_data, f, indent=2)
logger.debug("Codex credentials refreshed successfully")
except (OSError, KeyError) as exc:
@@ -869,7 +959,7 @@ class AgentRunner:
if not agent_json_path.exists():
raise FileNotFoundError(f"No agent.py or agent.json found in {agent_path}")
with open(agent_json_path) as f:
with open(agent_json_path, encoding="utf-8") as f:
graph, goal = load_agent_export(f.read())
return cls(
+1 -1
@@ -437,7 +437,7 @@ class ToolRegistry:
self._mcp_config_path = Path(config_path)
try:
with open(config_path) as f:
with open(config_path, encoding="utf-8") as f:
config = json.load(f)
except Exception as e:
logger.warning(f"Failed to load MCP config from {config_path}: {e}")
+55 -1
@@ -288,6 +288,60 @@ async def handle_resume(request: web.Request) -> web.Response:
)
async def handle_pause(request: web.Request) -> web.Response:
"""POST /api/sessions/{session_id}/pause — pause the worker (queen stays alive).
Mirrors the queen's stop_worker() tool: cancels all active worker
executions, pauses timers so nothing auto-restarts, but does NOT
touch the queen so she can observe and react to the pause.
"""
session, err = resolve_session(request)
if err:
return err
if not session.worker_runtime:
return web.json_response({"error": "No worker loaded in this session"}, status=503)
runtime = session.worker_runtime
cancelled = []
for graph_id in runtime.list_graphs():
reg = runtime.get_graph_registration(graph_id)
if reg is None:
continue
for _ep_id, stream in reg.streams.items():
# Signal shutdown on active nodes to abort in-flight LLM streams
for executor in stream._active_executors.values():
for node in executor.node_registry.values():
if hasattr(node, "signal_shutdown"):
node.signal_shutdown()
if hasattr(node, "cancel_current_turn"):
node.cancel_current_turn()
for exec_id in list(stream.active_execution_ids):
try:
ok = await stream.cancel_execution(exec_id)
if ok:
cancelled.append(exec_id)
except Exception:
pass
# Pause timers so the next tick doesn't restart execution
runtime.pause_timers()
# Switch to staging (agent still loaded, ready to re-run)
if session.mode_state is not None:
await session.mode_state.switch_to_staging(source="frontend")
return web.json_response(
{
"stopped": bool(cancelled),
"cancelled": cancelled,
"timers_paused": True,
}
)
async def handle_stop(request: web.Request) -> web.Response:
"""POST /api/sessions/{session_id}/stop — cancel a running execution.
@@ -416,7 +470,7 @@ def register_routes(app: web.Application) -> None:
app.router.add_post("/api/sessions/{session_id}/chat", handle_chat)
app.router.add_post("/api/sessions/{session_id}/queen-context", handle_queen_context)
app.router.add_post("/api/sessions/{session_id}/worker-input", handle_worker_input)
app.router.add_post("/api/sessions/{session_id}/pause", handle_stop)
app.router.add_post("/api/sessions/{session_id}/pause", handle_pause)
app.router.add_post("/api/sessions/{session_id}/resume", handle_resume)
app.router.add_post("/api/sessions/{session_id}/stop", handle_stop)
app.router.add_post("/api/sessions/{session_id}/cancel-queen", handle_cancel_queen)
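A quick way to exercise the new pause route against a local server (port 8787 is the CLI default; the session id is illustrative):

```bash
# Pause the worker for a session; the queen stays alive
curl -X POST http://localhost:8787/api/sessions/test_agent/pause -d '{}'

# With no active executions the handler responds roughly like:
# {"stopped": false, "cancelled": [], "timers_paused": true}
```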
+26 -13
@@ -74,6 +74,7 @@ class MockStream:
is_awaiting_input: bool = False
_execution_tasks: dict = field(default_factory=dict)
_active_executors: dict = field(default_factory=dict)
active_execution_ids: set = field(default_factory=set)
async def cancel_execution(self, execution_id: str) -> bool:
return execution_id in self._execution_tasks
@@ -117,6 +118,9 @@ class MockRuntime:
async def inject_input(self, node_id, content, graph_id=None, *, is_client_input=False):
return True
def pause_timers(self):
pass
async def get_goal_progress(self):
return {"progress": 0.5, "criteria": []}
@@ -537,18 +541,8 @@ class TestExecution:
assert resp.status == 400
@pytest.mark.asyncio
async def test_pause_not_found(self):
session = _make_session()
app = _make_app_with_session(session)
async with TestClient(TestServer(app)) as client:
resp = await client.post(
"/api/sessions/test_agent/pause",
json={"execution_id": "nonexistent"},
)
assert resp.status == 404
@pytest.mark.asyncio
async def test_pause_missing_execution_id(self):
async def test_pause_no_active_executions(self):
"""Pause with no active executions returns stopped=False."""
session = _make_session()
app = _make_app_with_session(session)
async with TestClient(TestServer(app)) as client:
@@ -556,7 +550,26 @@ class TestExecution:
"/api/sessions/test_agent/pause",
json={},
)
assert resp.status == 400
assert resp.status == 200
data = await resp.json()
assert data["stopped"] is False
assert data["cancelled"] == []
assert data["timers_paused"] is True
@pytest.mark.asyncio
async def test_pause_does_not_cancel_queen(self):
"""Pause should stop the worker but leave the queen running."""
session = _make_session()
app = _make_app_with_session(session)
async with TestClient(TestServer(app)) as client:
resp = await client.post(
"/api/sessions/test_agent/pause",
json={},
)
assert resp.status == 200
# Queen's cancel_current_turn should NOT have been called
queen_node = session.queen_executor.node_registry["queen"]
queen_node.cancel_current_turn.assert_not_called()
@pytest.mark.asyncio
async def test_goal_progress(self):
+2 -2
@@ -270,10 +270,10 @@ def _edit_test_code(code: str) -> str:
try:
# Open editor
subprocess.run([editor, temp_path], check=True)
subprocess.run([editor, temp_path], check=True, encoding="utf-8")
# Read edited code
with open(temp_path) as f:
with open(temp_path, encoding="utf-8") as f:
return f.read()
except subprocess.CalledProcessError:
print("Editor failed, keeping original code")
+2
@@ -190,6 +190,7 @@ def cmd_test_run(args: argparse.Namespace) -> int:
try:
result = subprocess.run(
cmd,
encoding="utf-8",
env=env,
timeout=600, # 10 minute timeout
)
@@ -248,6 +249,7 @@ def cmd_test_debug(args: argparse.Namespace) -> int:
try:
result = subprocess.run(
cmd,
encoding="utf-8",
env=env,
timeout=120, # 2 minute timeout for single test
)
+1 -1
@@ -256,7 +256,7 @@ class AdenTUI(App):
"""Override to use native `open` for file:// URLs on macOS."""
if url.startswith("file://") and platform.system() == "Darwin":
path = url.removeprefix("file://")
subprocess.Popen(["open", path])
subprocess.Popen(["open", path], encoding="utf-8")
else:
super().open_url(url, new_tab=new_tab)
+6 -6
@@ -488,7 +488,7 @@ class ChatRepl(Vertical):
if not state_file.exists():
continue
with open(state_file) as f:
with open(state_file, encoding="utf-8") as f:
state = json.load(f)
status = state.get("status", "").lower()
@@ -547,7 +547,7 @@ class ChatRepl(Vertical):
# Read session state
try:
with open(state_file) as f:
with open(state_file, encoding="utf-8") as f:
state = json.load(f)
# Track this session for /resume <number> lookup
@@ -599,7 +599,7 @@ class ChatRepl(Vertical):
try:
import json
with open(state_file) as f:
with open(state_file, encoding="utf-8") as f:
state = json.load(f)
# Basic info
@@ -640,7 +640,7 @@ class ChatRepl(Vertical):
# Load and show checkpoints
for i, cp_file in enumerate(checkpoint_files[-5:], 1): # Last 5
try:
with open(cp_file) as f:
with open(cp_file, encoding="utf-8") as f:
cp_data = json.load(f)
cp_id = cp_data.get("checkpoint_id", cp_file.stem)
@@ -687,7 +687,7 @@ class ChatRepl(Vertical):
import json
with open(state_file) as f:
with open(state_file, encoding="utf-8") as f:
state = json.load(f)
# Resume from session state (not checkpoint)
@@ -1102,7 +1102,7 @@ class ChatRepl(Vertical):
continue
try:
with open(state_file) as f:
with open(state_file, encoding="utf-8") as f:
state = json.load(f)
status = state.get("status", "").lower()
@@ -38,6 +38,7 @@ def _linux_file_dialog() -> subprocess.CompletedProcess | None:
"--title=Select a PDF file",
"--file-filter=PDF files (*.pdf)|*.pdf",
],
encoding="utf-8",
capture_output=True,
text=True,
timeout=300,
@@ -54,6 +55,7 @@ def _linux_file_dialog() -> subprocess.CompletedProcess | None:
".",
"PDF files (*.pdf)",
],
encoding="utf-8",
capture_output=True,
text=True,
timeout=300,
@@ -79,6 +81,7 @@ def _pick_pdf_subprocess() -> Path | None:
'POSIX path of (choose file of type {"com.adobe.pdf"} '
'with prompt "Select a PDF file")',
],
encoding="utf-8",
capture_output=True,
text=True,
timeout=300,
@@ -93,6 +96,7 @@ def _pick_pdf_subprocess() -> Path | None:
)
result = subprocess.run(
["powershell", "-NoProfile", "-Command", ps_script],
encoding="utf-8",
capture_output=True,
text=True,
timeout=300,
@@ -199,10 +199,11 @@ def _copy_to_clipboard(text: str) -> None:
"""Copy text to system clipboard using platform-native tools."""
try:
if sys.platform == "darwin":
subprocess.run(["pbcopy"], input=text.encode(), check=True, timeout=5)
subprocess.run(["pbcopy"], encoding="utf-8", input=text.encode(), check=True, timeout=5)
elif sys.platform == "win32":
subprocess.run(
["clip.exe"],
encoding="utf-8",
input=text.encode("utf-16le"),
check=True,
timeout=5,
@@ -211,6 +212,7 @@ def _copy_to_clipboard(text: str) -> None:
try:
subprocess.run(
["xclip", "-selection", "clipboard"],
encoding="utf-8",
input=text,
check=True,
timeout=5,
@@ -218,6 +220,7 @@ def _copy_to_clipboard(text: str) -> None:
except (subprocess.SubprocessError, FileNotFoundError):
subprocess.run(
["xsel", "--clipboard", "--input"],
encoding="utf-8",
input=text,
check=True,
timeout=5,
+10 -3
@@ -53,7 +53,13 @@ def log_error(message: str):
def run_command(cmd: list, error_msg: str) -> bool:
"""Run a command and return success status."""
try:
subprocess.run(cmd, check=True, capture_output=True, text=True)
subprocess.run(
cmd,
check=True,
capture_output=True,
text=True,
encoding="utf-8",
)
return True
except subprocess.CalledProcessError as e:
log_error(error_msg)
@@ -97,7 +103,7 @@ def main():
if mcp_config_path.exists():
log_success("MCP configuration found at .mcp.json")
logger.info("Configuration:")
with open(mcp_config_path) as f:
with open(mcp_config_path, encoding="utf-8") as f:
config = json.load(f)
logger.info(json.dumps(config, indent=2))
else:
@@ -114,7 +120,7 @@ def main():
}
}
with open(mcp_config_path, "w") as f:
with open(mcp_config_path, "w", encoding="utf-8") as f:
json.dump(config, f, indent=2)
log_success("Created .mcp.json")
@@ -129,6 +135,7 @@ def main():
check=True,
capture_output=True,
text=True,
encoding="utf-8",
)
log_success("MCP server module verified")
except subprocess.CalledProcessError as e:
+5
@@ -68,6 +68,7 @@ class TestFrameworkModule:
[sys.executable, "-m", "framework", "--help"],
capture_output=True,
text=True,
encoding="utf-8",
cwd=str(project_root / "core"),
)
assert result.returncode == 0
@@ -79,6 +80,7 @@ class TestFrameworkModule:
[sys.executable, "-m", "framework", "list", "--help"],
capture_output=True,
text=True,
encoding="utf-8",
cwd=str(project_root / "core"),
)
assert result.returncode == 0
@@ -104,6 +106,7 @@ class TestHiveEntryPoint:
["hive", "--help"],
capture_output=True,
text=True,
encoding="utf-8",
)
assert result.returncode == 0
assert "run" in result.stdout.lower()
@@ -115,6 +118,7 @@ class TestHiveEntryPoint:
["hive", "list", "--help"],
capture_output=True,
text=True,
encoding="utf-8",
)
assert result.returncode == 0
@@ -124,5 +128,6 @@ class TestHiveEntryPoint:
["hive", "run", "nonexistent_agent_xyz"],
capture_output=True,
text=True,
encoding="utf-8",
)
assert result.returncode != 0
+2 -2
@@ -232,7 +232,7 @@ async def test_shared_session_reuses_directory_and_memory(tmp_path):
# Verify primary session's state.json exists and has the primary entry_point
primary_state_path = tmp_path / "sessions" / primary_exec_id / "state.json"
assert primary_state_path.exists()
primary_state = json.loads(primary_state_path.read_text())
primary_state = json.loads(primary_state_path.read_text(encoding="utf-8"))
assert primary_state["entry_point"] == "primary"
# Async stream — simulates a webhook entry point sharing the session
@@ -275,7 +275,7 @@ async def test_shared_session_reuses_directory_and_memory(tmp_path):
# State.json should NOT have been overwritten by the async execution
# (it should still show the primary entry point)
final_state = json.loads(primary_state_path.read_text())
final_state = json.loads(primary_state_path.read_text(encoding="utf-8"))
assert final_state["entry_point"] == "primary"
# Verify only ONE session directory exists (not two)
+2 -2
@@ -184,7 +184,7 @@ class TestPathTraversalWithActualFiles:
# Create a secret file outside storage
secret_file = tmpdir_path / "secret.txt"
secret_file.write_text("SENSITIVE_DATA")
secret_file.write_text("SENSITIVE_DATA", encoding="utf-8")
storage = FileStorage(storage_dir)
@@ -193,7 +193,7 @@ class TestPathTraversalWithActualFiles:
storage.get_runs_by_goal("../secret")
# Verify the secret file was not accessed (still contains original data)
assert secret_file.read_text() == "SENSITIVE_DATA"
assert secret_file.read_text(encoding="utf-8") == "SENSITIVE_DATA"
def test_cannot_write_outside_storage(self):
"""Verify that we can't write files outside storage directory."""
+5 -2
@@ -353,7 +353,9 @@ class TestRuntimeLogger:
# Verify the file exists and has one line
jsonl_path = tmp_path / "logs" / "sessions" / run_id / "logs" / "tool_logs.jsonl"
assert jsonl_path.exists()
lines = [line for line in jsonl_path.read_text().strip().split("\n") if line]
lines = [
line for line in jsonl_path.read_text(encoding="utf-8").strip().split("\n") if line
]
assert len(lines) == 1
data = json.loads(lines[0])
@@ -376,7 +378,8 @@ class TestRuntimeLogger:
jsonl_path = tmp_path / "logs" / "sessions" / run_id / "logs" / "details.jsonl"
assert jsonl_path.exists()
lines = [line for line in jsonl_path.read_text().strip().split("\n") if line]
content = jsonl_path.read_text(encoding="utf-8").strip()
lines = [line for line in content.split("\n") if line]
assert len(lines) == 1
data = json.loads(lines[0])
+1 -1
@@ -98,7 +98,7 @@ class TestFileStorageRunOperations:
assert run_file.exists()
# Verify it's valid JSON
with open(run_file) as f:
with open(run_file, encoding="utf-8") as f:
data = json.load(f)
assert data["id"] == "my_run"
+14 -3
View File
@@ -71,6 +71,7 @@ def main():
capture_output=True,
text=True,
check=True,
encoding="utf-8",
)
framework_path = result.stdout.strip()
success(f"installed at {framework_path}")
@@ -84,7 +85,12 @@ def main():
missing_deps = []
for dep in ["mcp", "fastmcp"]:
try:
subprocess.run([sys.executable, "-c", f"import {dep}"], capture_output=True, check=True)
subprocess.run(
[sys.executable, "-c", f"import {dep}"],
capture_output=True,
check=True,
encoding="utf-8",
)
except subprocess.CalledProcessError:
missing_deps.append(dep)
@@ -103,6 +109,7 @@ def main():
capture_output=True,
text=True,
check=True,
encoding="utf-8",
)
success("loads successfully")
except subprocess.CalledProcessError as e:
@@ -115,7 +122,7 @@ def main():
mcp_config = script_dir / ".mcp.json"
if mcp_config.exists():
try:
with open(mcp_config) as f:
with open(mcp_config, encoding="utf-8") as f:
config = json.load(f)
if "mcpServers" in config and "agent-builder" in config["mcpServers"]:
@@ -149,7 +156,10 @@ def main():
for module in modules_to_check:
try:
subprocess.run(
[sys.executable, "-c", f"import {module}"], capture_output=True, check=True
[sys.executable, "-c", f"import {module}"],
capture_output=True,
check=True,
encoding="utf-8",
)
except subprocess.CalledProcessError:
failed_modules.append(module)
@@ -174,6 +184,7 @@ def main():
text=True,
check=True,
timeout=5,
encoding="utf-8",
)
if "OK" in result.stdout:
success("server can start")
+5 -2
View File
@@ -134,7 +134,10 @@ hive/
## Running an Agent
```bash
# Browse and run agents interactively (Recommended)
# Launch the web dashboard in your browser
hive open
# Browse and run agents in terminal
hive tui
# Run a specific agent
@@ -178,7 +181,7 @@ PYTHONPATH=exports uv run python -m my_agent test --type success
## Next Steps
1. **TUI Dashboard**: Run `hive tui` to explore agents interactively
1. **Dashboard**: Run `hive open` to launch the web dashboard, or `hive tui` for the terminal UI
2. **Detailed Setup**: See [environment-setup.md](./environment-setup.md)
3. **Developer Guide**: See [developer-guide.md](./developer-guide.md)
4. **Build Agents**: Use `/hive` skill in Claude Code
+246 -56
@@ -417,6 +417,58 @@ Write-Ok "uv detected: $uvVersion"
Write-Host ""
# Check for Node.js (needed for frontend dashboard)
function Install-NodeViaFnm {
<#
.SYNOPSIS
Install Node.js 20 via fnm (Fast Node Manager) - mirrors nvm approach in quickstart.sh
#>
$fnmCmd = Get-Command fnm -ErrorAction SilentlyContinue
if (-not $fnmCmd) {
$fnmDir = Join-Path $env:LOCALAPPDATA "fnm"
$fnmExe = Join-Path $fnmDir "fnm.exe"
if (-not (Test-Path $fnmExe)) {
try {
Write-Host " Downloading fnm (Fast Node Manager)..." -ForegroundColor DarkGray
$zipUrl = "https://github.com/Schniz/fnm/releases/latest/download/fnm-windows.zip"
$zipPath = Join-Path $env:TEMP "fnm-install.zip"
Invoke-WebRequest -Uri $zipUrl -OutFile $zipPath -UseBasicParsing -ErrorAction Stop
if (-not (Test-Path $fnmDir)) { New-Item -ItemType Directory -Path $fnmDir -Force | Out-Null }
Expand-Archive -Path $zipPath -DestinationPath $fnmDir -Force
Remove-Item $zipPath -Force -ErrorAction SilentlyContinue
} catch {
Write-Fail "fnm download failed"
Write-Host " Install Node.js 20+ manually from https://nodejs.org" -ForegroundColor DarkGray
return $false
}
}
if (Test-Path (Join-Path $fnmDir "fnm.exe")) {
$env:PATH = "$fnmDir;$env:PATH"
} else {
Write-Fail "fnm binary not found after download"
Write-Host " Install Node.js 20+ manually from https://nodejs.org" -ForegroundColor DarkGray
return $false
}
}
try {
$null = & fnm install 20 2>&1
if ($LASTEXITCODE -ne 0) { throw "fnm install 20 exited with code $LASTEXITCODE" }
& fnm env --use-on-cd --shell powershell | Out-String | Invoke-Expression
$null = & fnm use 20 2>&1
$testNode = Get-Command node -ErrorAction SilentlyContinue
if ($testNode) {
$ver = & node --version 2>$null
Write-Ok "Node.js $ver installed via fnm"
return $true
}
throw "node not found after fnm install"
} catch {
Write-Fail "Node.js installation failed"
Write-Host " Install manually from https://nodejs.org" -ForegroundColor DarkGray
return $false
}
}
$NodeAvailable = $false
$nodeCmd = Get-Command node -ErrorAction SilentlyContinue
if ($nodeCmd) {
@@ -428,12 +480,13 @@ if ($nodeCmd) {
$NodeAvailable = $true
} else {
Write-Warn "Node.js $nodeVersion found (20+ required for frontend dashboard)"
Write-Host " Install from https://nodejs.org" -ForegroundColor DarkGray
Write-Host " Installing Node.js 20 via fnm..." -ForegroundColor Yellow
$NodeAvailable = Install-NodeViaFnm
}
}
} else {
Write-Warn "Node.js not found (optional, needed for web dashboard)"
Write-Host " Install from https://nodejs.org" -ForegroundColor DarkGray
Write-Warn "Node.js not found. Installing via fnm..."
$NodeAvailable = Install-NodeViaFnm
}
Write-Host ""
@@ -745,8 +798,8 @@ $ProviderMap = [ordered]@{
}
$DefaultModels = @{
anthropic = "claude-opus-4-6"
openai = "gpt-5.2"
anthropic = "claude-haiku-4-5-20251001"
openai = "gpt-5-mini"
gemini = "gemini-3-flash-preview"
groq = "moonshotai/kimi-k2-instruct-0905"
cerebras = "zai-glm-4.7"
@@ -758,14 +811,14 @@ $DefaultModels = @{
# Model choices: array of hashtables per provider
$ModelChoices = @{
anthropic = @(
@{ Id = "claude-opus-4-6"; Label = "Opus 4.6 - Most capable (recommended)"; MaxTokens = 32768 },
@{ Id = "claude-sonnet-4-5-20250929"; Label = "Sonnet 4.5 - Best balance"; MaxTokens = 16384 },
@{ Id = "claude-sonnet-4-20250514"; Label = "Sonnet 4 - Fast + capable"; MaxTokens = 8192 },
@{ Id = "claude-haiku-4-5-20251001"; Label = "Haiku 4.5 - Fast + cheap"; MaxTokens = 8192 }
@{ Id = "claude-haiku-4-5-20251001"; Label = "Haiku 4.5 - Fast + cheap (recommended)"; MaxTokens = 8192 },
@{ Id = "claude-sonnet-4-20250514"; Label = "Sonnet 4 - Fast + capable"; MaxTokens = 8192 },
@{ Id = "claude-sonnet-4-5-20250929"; Label = "Sonnet 4.5 - Best balance"; MaxTokens = 16384 },
@{ Id = "claude-opus-4-6"; Label = "Opus 4.6 - Most capable"; MaxTokens = 32768 }
)
openai = @(
@{ Id = "gpt-5.2"; Label = "GPT-5.2 - Most capable (recommended)"; MaxTokens = 16384 },
@{ Id = "gpt-5-mini"; Label = "GPT-5 Mini - Fast + cheap"; MaxTokens = 16384 }
@{ Id = "gpt-5-mini"; Label = "GPT-5 Mini - Fast + cheap (recommended)"; MaxTokens = 16384 },
@{ Id = "gpt-5.2"; Label = "GPT-5.2 - Most capable"; MaxTokens = 16384 }
)
gemini = @(
@{ Id = "gemini-3-flash-preview"; Label = "Gemini 3 Flash - Fast (recommended)"; MaxTokens = 8192 },
@@ -792,6 +845,17 @@ function Get-ModelSelection {
return @{ Model = $choices[0].Id; MaxTokens = $choices[0].MaxTokens }
}
# Find default index from previous model (if same provider)
$defaultIdx = "1"
if ($PrevModel -and $PrevProvider -eq $ProviderId) {
for ($j = 0; $j -lt $choices.Count; $j++) {
if ($choices[$j].Id -eq $PrevModel) {
$defaultIdx = [string]($j + 1)
break
}
}
}
Write-Host ""
Write-Color -Text "Select a model:" -Color White
Write-Host ""
@@ -803,8 +867,8 @@ function Get-ModelSelection {
Write-Host ""
while ($true) {
$raw = Read-Host "Enter choice [1]"
if ([string]::IsNullOrWhiteSpace($raw)) { $raw = "1" }
$raw = Read-Host "Enter choice [$defaultIdx]"
if ([string]::IsNullOrWhiteSpace($raw)) { $raw = $defaultIdx }
if ($raw -match '^\d+$') {
$num = [int]$raw
if ($num -ge 1 -and $num -le $choices.Count) {
@@ -860,6 +924,60 @@ $ProviderMenuUrls = @(
"https://cloud.cerebras.ai/"
)
# ── Read previous configuration (if any) ──────────────────────
$PrevProvider = ""
$PrevModel = ""
$PrevEnvVar = ""
$PrevSubMode = ""
if (Test-Path $HiveConfigFile) {
try {
$prevConfig = Get-Content -Path $HiveConfigFile -Raw | ConvertFrom-Json
$prevLlm = $prevConfig.llm
if ($prevLlm) {
$PrevProvider = if ($prevLlm.provider) { $prevLlm.provider } else { "" }
$PrevModel = if ($prevLlm.model) { $prevLlm.model } else { "" }
$PrevEnvVar = if ($prevLlm.api_key_env_var) { $prevLlm.api_key_env_var } else { "" }
if ($prevLlm.use_claude_code_subscription) { $PrevSubMode = "claude_code" }
elseif ($prevLlm.use_codex_subscription) { $PrevSubMode = "codex" }
elseif ($prevLlm.api_base -and $prevLlm.api_base -like "*api.z.ai*") { $PrevSubMode = "zai_code" }
}
} catch { }
}
# Compute default menu number (only if credential is still valid)
$DefaultChoice = ""
if ($PrevSubMode -or $PrevProvider) {
$prevCredValid = $false
switch ($PrevSubMode) {
"claude_code" { if ($ClaudeCredDetected) { $prevCredValid = $true } }
"zai_code" { if ($ZaiCredDetected) { $prevCredValid = $true } }
"codex" { if ($CodexCredDetected) { $prevCredValid = $true } }
default {
if ($PrevEnvVar) {
$envVal = [System.Environment]::GetEnvironmentVariable($PrevEnvVar, "Process")
if (-not $envVal) { $envVal = [System.Environment]::GetEnvironmentVariable($PrevEnvVar, "User") }
if ($envVal) { $prevCredValid = $true }
}
}
}
if ($prevCredValid) {
switch ($PrevSubMode) {
"claude_code" { $DefaultChoice = "1" }
"zai_code" { $DefaultChoice = "2" }
"codex" { $DefaultChoice = "3" }
}
if (-not $DefaultChoice) {
switch ($PrevProvider) {
"anthropic" { $DefaultChoice = "4" }
"openai" { $DefaultChoice = "5" }
"gemini" { $DefaultChoice = "6" }
"groq" { $DefaultChoice = "7" }
"cerebras" { $DefaultChoice = "8" }
}
}
}
}
# ── Show unified provider selection menu ─────────────────────
Write-Color -Text "Select your default LLM provider:" -Color White
Write-Host ""
@@ -905,8 +1023,18 @@ Write-Color -Text "9" -Color Cyan -NoNewline
Write-Host ") Skip for now"
Write-Host ""
if ($DefaultChoice) {
Write-Color -Text " Previously configured: $PrevProvider/$PrevModel. Press Enter to keep." -Color DarkGray
Write-Host ""
}
while ($true) {
$raw = Read-Host "Enter choice (1-9)"
if ($DefaultChoice) {
$raw = Read-Host "Enter choice (1-9) [$DefaultChoice]"
if ([string]::IsNullOrWhiteSpace($raw)) { $raw = $DefaultChoice }
} else {
$raw = Read-Host "Enter choice (1-9)"
}
if ($raw -match '^\d+$') {
$num = [int]$raw
if ($num -ge 1 -and $num -le 9) { break }
@@ -983,28 +1111,68 @@ switch ($num) {
$providerName = $ProviderMenuNames[$provIdx] -replace ' - .*', '' # strip description
$signupUrl = $ProviderMenuUrls[$provIdx]
# Check if key is already set
$existingKey = [System.Environment]::GetEnvironmentVariable($SelectedEnvVar, "User")
if (-not $existingKey) { $existingKey = [System.Environment]::GetEnvironmentVariable($SelectedEnvVar, "Process") }
if (-not $existingKey) {
Write-Host ""
Write-Host "Get your API key from: " -NoNewline
Write-Color -Text $signupUrl -Color Cyan
Write-Host ""
$apiKey = Read-Host "Paste your $providerName API key (or press Enter to skip)"
# Prompt for key (allow replacement if already set) with verification + retry
while ($true) {
$existingKey = [System.Environment]::GetEnvironmentVariable($SelectedEnvVar, "User")
if (-not $existingKey) { $existingKey = [System.Environment]::GetEnvironmentVariable($SelectedEnvVar, "Process") }
if ($existingKey) {
$masked = $existingKey.Substring(0, [Math]::Min(4, $existingKey.Length)) + "..." + $existingKey.Substring([Math]::Max(0, $existingKey.Length - 4))
Write-Host ""
Write-Color -Text " $([char]0x2B22) Current key: $masked" -Color Green
$apiKey = Read-Host " Press Enter to keep, or paste a new key to replace"
} else {
Write-Host ""
Write-Host "Get your API key from: " -NoNewline
Write-Color -Text $signupUrl -Color Cyan
Write-Host ""
$apiKey = Read-Host "Paste your $providerName API key (or press Enter to skip)"
}
if ($apiKey) {
[System.Environment]::SetEnvironmentVariable($SelectedEnvVar, $apiKey, "User")
Set-Item -Path "Env:\$SelectedEnvVar" -Value $apiKey
Write-Host ""
Write-Ok "API key saved as User environment variable: $SelectedEnvVar"
Write-Color -Text " (Persisted for all future sessions)" -Color DarkGray
} else {
# Health check the new key
Write-Host " Verifying API key... " -NoNewline
try {
$hcResult = & uv run python (Join-Path $ScriptDir "scripts/check_llm_key.py") $SelectedProviderId $apiKey 2>$null
$hcJson = $hcResult | ConvertFrom-Json
if ($hcJson.valid -eq $true) {
Write-Color -Text "ok" -Color Green
break
} elseif ($hcJson.valid -eq $false) {
Write-Color -Text "failed" -Color Red
Write-Warn $hcJson.message
# Undo the save so user can retry cleanly
[System.Environment]::SetEnvironmentVariable($SelectedEnvVar, $null, "User")
Remove-Item -Path "Env:\$SelectedEnvVar" -ErrorAction SilentlyContinue
Write-Host ""
Read-Host " Press Enter to try again"
# loop back to key prompt
} else {
Write-Color -Text "--" -Color Yellow
Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
break
}
} catch {
Write-Color -Text "--" -Color Yellow
Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
break
}
} elseif (-not $existingKey) {
# No existing key and user skipped
Write-Host ""
Write-Warn "Skipped. Set the environment variable manually when ready:"
Write-Host " [System.Environment]::SetEnvironmentVariable('$SelectedEnvVar', 'your-key', 'User')"
$SelectedEnvVar = ""
$SelectedProviderId = ""
break
} else {
# User pressed Enter with existing key — keep it
break
}
}
}
@@ -1020,26 +1188,67 @@ switch ($num) {
}
}
# For ZAI subscription: prompt for API key if not already set
# For ZAI subscription: prompt for API key (allow replacement if already set) with verification + retry
if ($SubscriptionMode -eq "zai_code") {
$existingZai = [System.Environment]::GetEnvironmentVariable("ZAI_API_KEY", "User")
if (-not $existingZai) { $existingZai = $env:ZAI_API_KEY }
if (-not $existingZai) {
Write-Host ""
$apiKey = Read-Host "Paste your ZAI API key (or press Enter to skip)"
while ($true) {
$existingZai = [System.Environment]::GetEnvironmentVariable("ZAI_API_KEY", "User")
if (-not $existingZai) { $existingZai = $env:ZAI_API_KEY }
if ($existingZai) {
$masked = $existingZai.Substring(0, [Math]::Min(4, $existingZai.Length)) + "..." + $existingZai.Substring([Math]::Max(0, $existingZai.Length - 4))
Write-Host ""
Write-Color -Text " $([char]0x2B22) Current ZAI key: $masked" -Color Green
$apiKey = Read-Host " Press Enter to keep, or paste a new key to replace"
} else {
Write-Host ""
$apiKey = Read-Host "Paste your ZAI API key (or press Enter to skip)"
}
if ($apiKey) {
[System.Environment]::SetEnvironmentVariable("ZAI_API_KEY", $apiKey, "User")
$env:ZAI_API_KEY = $apiKey
Write-Host ""
Write-Ok "ZAI API key saved as User environment variable"
} else {
# Health check the new key
Write-Host " Verifying ZAI API key... " -NoNewline
try {
$hcResult = & uv run python (Join-Path $ScriptDir "scripts/check_llm_key.py") "zai" $apiKey "https://api.z.ai/api/coding/paas/v4" 2>$null
$hcJson = $hcResult | ConvertFrom-Json
if ($hcJson.valid -eq $true) {
Write-Color -Text "ok" -Color Green
break
} elseif ($hcJson.valid -eq $false) {
Write-Color -Text "failed" -Color Red
Write-Warn $hcJson.message
# Undo the save so user can retry cleanly
[System.Environment]::SetEnvironmentVariable("ZAI_API_KEY", $null, "User")
Remove-Item -Path "Env:\ZAI_API_KEY" -ErrorAction SilentlyContinue
Write-Host ""
Read-Host " Press Enter to try again"
# loop back to key prompt
} else {
Write-Color -Text "--" -Color Yellow
Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
break
}
} catch {
Write-Color -Text "--" -Color Yellow
Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
break
}
} elseif (-not $existingZai) {
# No existing key and user skipped
Write-Host ""
Write-Warn "Skipped. Add your ZAI API key later:"
Write-Color -Text " [System.Environment]::SetEnvironmentVariable('ZAI_API_KEY', 'your-key', 'User')" -Color Cyan
$SelectedEnvVar = ""
$SelectedProviderId = ""
$SubscriptionMode = ""
break
} else {
# User pressed Enter with existing key — keep it
break
}
}
}
@@ -1090,37 +1299,18 @@ if ($SelectedProviderId) {
Write-Host ""
# ============================================================
# Step 5b: Browser Automation (GCU)
# Step 5b: Browser Automation (GCU) — always enabled
# ============================================================
Write-Host ""
Write-Color -Text "Enable browser automation?" -Color White
Write-Color -Text "This lets your agents control a real browser - navigate websites, fill forms," -Color DarkGray
Write-Color -Text "scrape dynamic pages, and interact with web UIs." -Color DarkGray
Write-Host ""
Write-Host " " -NoNewline; Write-Color -Text "1)" -Color Cyan -NoNewline; Write-Host " Yes"
Write-Host " " -NoNewline; Write-Color -Text "2)" -Color Cyan -NoNewline; Write-Host " No"
Write-Host ""
do {
$gcuChoice = Read-Host "Enter choice (1-2)"
} while ($gcuChoice -ne "1" -and $gcuChoice -ne "2")
$GcuEnabled = $false
if ($gcuChoice -eq "1") {
$GcuEnabled = $true
Write-Ok "Browser automation enabled"
} else {
Write-Color -Text " Browser automation skipped" -Color DarkGray
}
Write-Ok "Browser automation enabled"
# Patch gcu_enabled into configuration.json
if (Test-Path $HiveConfigFile) {
$existingConfig = Get-Content -Path $HiveConfigFile -Raw | ConvertFrom-Json
$existingConfig | Add-Member -NotePropertyName "gcu_enabled" -NotePropertyValue $GcuEnabled -Force
$existingConfig | Add-Member -NotePropertyName "gcu_enabled" -NotePropertyValue $true -Force
$existingConfig | ConvertTo-Json -Depth 4 | Set-Content -Path $HiveConfigFile -Encoding UTF8
} elseif ($GcuEnabled) {
# No config file yet (user skipped LLM provider) - create minimal one
} else {
if (-not (Test-Path $HiveConfigDir)) {
New-Item -ItemType Directory -Path $HiveConfigDir -Force | Out-Null
}
@@ -1434,7 +1624,7 @@ if ($FrontendBuilt) {
Write-Color -Text " Starting server on http://localhost:8787" -Color DarkGray
Write-Color -Text " Press Ctrl+C to stop" -Color DarkGray
Write-Host ""
& (Join-Path $ScriptDir "hive.ps1") serve --open
& (Join-Path $ScriptDir "hive.ps1") open
} else {
Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow
Write-Host ""
+242 -98
@@ -407,7 +407,7 @@ if [ "$USE_ASSOC_ARRAYS" = true ]; then
)
declare -A DEFAULT_MODELS=(
["anthropic"]="claude-haiku-4-5"
["anthropic"]="claude-haiku-4-5-20251001"
["openai"]="gpt-5-mini"
["gemini"]="gemini-3-flash-preview"
["groq"]="moonshotai/kimi-k2-instruct-0905"
@@ -420,12 +420,12 @@ if [ "$USE_ASSOC_ARRAYS" = true ]; then
# Model choices per provider: composite-key associative arrays
# Keys: "provider:index" -> value
declare -A MODEL_CHOICES_ID=(
["anthropic:0"]="claude-opus-4-6"
["anthropic:1"]="claude-sonnet-4-5-20250929"
["anthropic:2"]="claude-sonnet-4-20250514"
["anthropic:3"]="claude-haiku-4-5-20251001"
["openai:0"]="gpt-5.2"
["openai:1"]="gpt-5-mini"
["anthropic:0"]="claude-haiku-4-5-20251001"
["anthropic:1"]="claude-sonnet-4-20250514"
["anthropic:2"]="claude-sonnet-4-5-20250929"
["anthropic:3"]="claude-opus-4-6"
["openai:0"]="gpt-5-mini"
["openai:1"]="gpt-5.2"
["gemini:0"]="gemini-3-flash-preview"
["gemini:1"]="gemini-3.1-pro-preview"
["groq:0"]="moonshotai/kimi-k2-instruct-0905"
@@ -435,12 +435,12 @@ if [ "$USE_ASSOC_ARRAYS" = true ]; then
)
declare -A MODEL_CHOICES_LABEL=(
["anthropic:0"]="Opus 4.6 - Most capable (recommended)"
["anthropic:1"]="Sonnet 4.5 - Best balance"
["anthropic:2"]="Sonnet 4 - Fast + capable"
["anthropic:3"]="Haiku 4.5 - Fast + cheap"
["openai:0"]="GPT-5.2 - Most capable (recommended)"
["openai:1"]="GPT-5 Mini - Fast + cheap"
["anthropic:0"]="Haiku 4.5 - Fast + cheap (recommended)"
["anthropic:1"]="Sonnet 4 - Fast + capable"
["anthropic:2"]="Sonnet 4.5 - Best balance"
["anthropic:3"]="Opus 4.6 - Most capable"
["openai:0"]="GPT-5 Mini - Fast + cheap (recommended)"
["openai:1"]="GPT-5.2 - Most capable"
["gemini:0"]="Gemini 3 Flash - Fast (recommended)"
["gemini:1"]="Gemini 3.1 Pro - Best quality"
["groq:0"]="Kimi K2 - Best quality (recommended)"
@@ -450,10 +450,10 @@ if [ "$USE_ASSOC_ARRAYS" = true ]; then
)
declare -A MODEL_CHOICES_MAXTOKENS=(
["anthropic:0"]=32768
["anthropic:1"]=16384
["anthropic:2"]=8192
["anthropic:3"]=8192
["anthropic:0"]=8192
["anthropic:1"]=8192
["anthropic:2"]=16384
["anthropic:3"]=32768
["openai:0"]=16384
["openai:1"]=16384
["gemini:0"]=8192
@@ -508,7 +508,7 @@ else
# Default models by provider id (parallel arrays)
MODEL_PROVIDER_IDS=(anthropic openai gemini groq cerebras mistral together_ai deepseek)
MODEL_DEFAULTS=("claude-opus-4-6" "gpt-5.2" "gemini-3-flash-preview" "moonshotai/kimi-k2-instruct-0905" "zai-glm-4.7" "mistral-large-latest" "meta-llama/Llama-3.3-70B-Instruct-Turbo" "deepseek-chat")
MODEL_DEFAULTS=("claude-haiku-4-5-20251001" "gpt-5-mini" "gemini-3-flash-preview" "moonshotai/kimi-k2-instruct-0905" "zai-glm-4.7" "mistral-large-latest" "meta-llama/Llama-3.3-70B-Instruct-Turbo" "deepseek-chat")
# Helper: get provider display name for an env var
get_provider_name() {
@@ -552,9 +552,9 @@ else
# Model choices per provider - flat parallel arrays with provider offsets
# Provider order: anthropic(4), openai(2), gemini(2), groq(2), cerebras(2)
MC_PROVIDERS=(anthropic anthropic anthropic anthropic openai openai gemini gemini groq groq cerebras cerebras)
MC_IDS=("claude-opus-4-6" "claude-sonnet-4-5-20250929" "claude-sonnet-4-20250514" "claude-haiku-4-5-20251001" "gpt-5.2" "gpt-5-mini" "gemini-3-flash-preview" "gemini-3.1-pro-preview" "moonshotai/kimi-k2-instruct-0905" "openai/gpt-oss-120b" "zai-glm-4.7" "qwen3-235b-a22b-instruct-2507")
MC_LABELS=("Opus 4.6 - Most capable (recommended)" "Sonnet 4.5 - Best balance" "Sonnet 4 - Fast + capable" "Haiku 4.5 - Fast + cheap" "GPT-5.2 - Most capable (recommended)" "GPT-5 Mini - Fast + cheap" "Gemini 3 Flash - Fast (recommended)" "Gemini 3.1 Pro - Best quality" "Kimi K2 - Best quality (recommended)" "GPT-OSS 120B - Fast reasoning" "ZAI-GLM 4.7 - Best quality (recommended)" "Qwen3 235B - Frontier reasoning")
MC_MAXTOKENS=(32768 16384 8192 8192 16384 16384 8192 8192 8192 8192 8192 8192)
MC_IDS=("claude-haiku-4-5-20251001" "claude-sonnet-4-20250514" "claude-sonnet-4-5-20250929" "claude-opus-4-6" "gpt-5-mini" "gpt-5.2" "gemini-3-flash-preview" "gemini-3.1-pro-preview" "moonshotai/kimi-k2-instruct-0905" "openai/gpt-oss-120b" "zai-glm-4.7" "qwen3-235b-a22b-instruct-2507")
MC_LABELS=("Haiku 4.5 - Fast + cheap (recommended)" "Sonnet 4 - Fast + capable" "Sonnet 4.5 - Best balance" "Opus 4.6 - Most capable" "GPT-5 Mini - Fast + cheap (recommended)" "GPT-5.2 - Most capable" "Gemini 3 Flash - Fast (recommended)" "Gemini 3.1 Pro - Best quality" "Kimi K2 - Best quality (recommended)" "GPT-OSS 120B - Fast reasoning" "ZAI-GLM 4.7 - Best quality (recommended)" "Qwen3 235B - Frontier reasoning")
MC_MAXTOKENS=(8192 8192 16384 32768 16384 16384 8192 8192 8192 8192 8192 8192)
# Helper: get number of model choices for a provider
get_model_choice_count() {
@@ -687,6 +687,19 @@ prompt_model_selection() {
echo -e "${BOLD}Select a model:${NC}"
echo ""
# Find default index from previous model (if same provider)
local default_idx=""
if [ -n "$PREV_MODEL" ] && [ "$provider_id" = "$PREV_PROVIDER" ]; then
local j=0
while [ $j -lt "$count" ]; do
if [ "$(get_model_choice_id "$provider_id" "$j")" = "$PREV_MODEL" ]; then
default_idx=$((j + 1))
break
fi
j=$((j + 1))
done
fi
local i=0
while [ $i -lt "$count" ]; do
local label
@@ -701,7 +714,12 @@ prompt_model_selection() {
local choice
while true; do
read -r -p "Enter choice (1-$count): " choice || true
if [ -n "$default_idx" ]; then
read -r -p "Enter choice (1-$count) [$default_idx]: " choice || true
choice="${choice:-$default_idx}"
else
read -r -p "Enter choice (1-$count): " choice || true
fi
if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le "$count" ]; then
local idx=$((choice - 1))
SELECTED_MODEL="$(get_model_choice_id "$provider_id" "$idx")"
@@ -781,7 +799,9 @@ SUBSCRIPTION_MODE="" # "claude_code" | "codex" | "zai_code" | ""
# ── Credential detection (silent — just set flags) ───────────
CLAUDE_CRED_DETECTED=false
if [ -f "$HOME/.claude/.credentials.json" ]; then
if command -v security &>/dev/null && security find-generic-password -s "Claude Code-credentials" &>/dev/null 2>&1; then
CLAUDE_CRED_DETECTED=true
elif [ -f "$HOME/.claude/.credentials.json" ]; then
CLAUDE_CRED_DETECTED=true
fi
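For readers following the bash, a minimal Python sketch of the same detection order (the keychain entry name and file path are taken from the lines above; the helper name is illustrative, not part of the repo):

import shutil
import subprocess
from pathlib import Path

def claude_code_credentials_detected() -> bool:
    # 1. macOS keychain entry written by Claude Code (only checked if `security` exists)
    if shutil.which("security"):
        probe = subprocess.run(
            ["security", "find-generic-password", "-s", "Claude Code-credentials"],
            capture_output=True,
        )
        if probe.returncode == 0:
            return True
    # 2. Fallback: plain credentials file on disk
    return (Path.home() / ".claude" / ".credentials.json").is_file()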
@@ -814,6 +834,65 @@ else
done
fi
# ── Read previous configuration (if any) ──────────────────────
PREV_PROVIDER=""
PREV_MODEL=""
PREV_ENV_VAR=""
PREV_SUB_MODE=""
if [ -f "$HIVE_CONFIG_FILE" ]; then
eval "$($PYTHON_CMD -c "
import json, sys
try:
with open('$HIVE_CONFIG_FILE') as f:
c = json.load(f)
llm = c.get('llm', {})
print(f'PREV_PROVIDER={llm.get(\"provider\", \"\")}')
print(f'PREV_MODEL={llm.get(\"model\", \"\")}')
print(f'PREV_ENV_VAR={llm.get(\"api_key_env_var\", \"\")}')
sub = ''
if llm.get('use_claude_code_subscription'): sub = 'claude_code'
elif llm.get('use_codex_subscription'): sub = 'codex'
elif 'api.z.ai' in llm.get('api_base', ''): sub = 'zai_code'
print(f'PREV_SUB_MODE={sub}')
except Exception:
pass
" 2>/dev/null)" || true
fi
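The embedded snippet prints plain shell assignments that eval turns into variables; with a previously configured Anthropic setup the output would look like (values illustrative):

PREV_PROVIDER=anthropic
PREV_MODEL=claude-haiku-4-5-20251001
PREV_ENV_VAR=ANTHROPIC_API_KEY
PREV_SUB_MODE=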
# Compute default menu number from previous config (only if credential is still valid)
DEFAULT_CHOICE=""
if [ -n "$PREV_SUB_MODE" ] || [ -n "$PREV_PROVIDER" ]; then
PREV_CRED_VALID=false
case "$PREV_SUB_MODE" in
claude_code) [ "$CLAUDE_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
zai_code) [ "$ZAI_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
codex) [ "$CODEX_CRED_DETECTED" = true ] && PREV_CRED_VALID=true ;;
*)
# API key provider — check if the env var is set
if [ -n "$PREV_ENV_VAR" ] && [ -n "${!PREV_ENV_VAR}" ]; then
PREV_CRED_VALID=true
fi
;;
esac
if [ "$PREV_CRED_VALID" = true ]; then
case "$PREV_SUB_MODE" in
claude_code) DEFAULT_CHOICE=1 ;;
zai_code) DEFAULT_CHOICE=2 ;;
codex) DEFAULT_CHOICE=3 ;;
esac
if [ -z "$DEFAULT_CHOICE" ]; then
case "$PREV_PROVIDER" in
anthropic) DEFAULT_CHOICE=4 ;;
openai) DEFAULT_CHOICE=5 ;;
gemini) DEFAULT_CHOICE=6 ;;
groq) DEFAULT_CHOICE=7 ;;
cerebras) DEFAULT_CHOICE=8 ;;
esac
fi
fi
fi
# ── Show unified provider selection menu ─────────────────────
echo -e "${BOLD}Select your default LLM provider:${NC}"
echo ""
@@ -858,8 +937,18 @@ done
echo -e " ${CYAN}9)${NC} Skip for now"
echo ""
if [ -n "$DEFAULT_CHOICE" ]; then
echo -e " ${DIM}Previously configured: ${PREV_PROVIDER}/${PREV_MODEL}. Press Enter to keep.${NC}"
echo ""
fi
while true; do
read -r -p "Enter choice (1-9): " choice || true
if [ -n "$DEFAULT_CHOICE" ]; then
read -r -p "Enter choice (1-9) [$DEFAULT_CHOICE]: " choice || true
choice="${choice:-$DEFAULT_CHOICE}"
else
read -r -p "Enter choice (1-9): " choice || true
fi
if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le 9 ]; then
break
fi
@@ -968,48 +1057,132 @@ case $choice in
;;
esac
# For API-key providers: prompt for key if not already set
if [ -z "$SUBSCRIPTION_MODE" ] && [ -n "$SELECTED_ENV_VAR" ] && [ -z "${!SELECTED_ENV_VAR}" ]; then
echo ""
echo -e "Get your API key from: ${CYAN}$SIGNUP_URL${NC}"
echo ""
read -r -p "Paste your $PROVIDER_NAME API key (or press Enter to skip): " API_KEY
# For API-key providers: prompt for key (allow replacement if already set)
if [ -z "$SUBSCRIPTION_MODE" ] && [ -n "$SELECTED_ENV_VAR" ]; then
while true; do
CURRENT_KEY="${!SELECTED_ENV_VAR}"
if [ -n "$CURRENT_KEY" ]; then
# Key exists — offer to keep or replace
MASKED_KEY="${CURRENT_KEY:0:4}...${CURRENT_KEY: -4}"
echo ""
echo -e " ${GREEN}${NC} Current key: ${DIM}$MASKED_KEY${NC}"
read -r -p " Press Enter to keep, or paste a new key to replace: " API_KEY
else
# No key — prompt for one
echo ""
echo -e "Get your API key from: ${CYAN}$SIGNUP_URL${NC}"
echo ""
read -r -p "Paste your $PROVIDER_NAME API key (or press Enter to skip): " API_KEY
fi
if [ -n "$API_KEY" ]; then
echo "" >> "$SHELL_RC_FILE"
echo "# Hive Agent Framework - $PROVIDER_NAME API key" >> "$SHELL_RC_FILE"
echo "export $SELECTED_ENV_VAR=\"$API_KEY\"" >> "$SHELL_RC_FILE"
export "$SELECTED_ENV_VAR=$API_KEY"
echo ""
echo -e "${GREEN}${NC} API key saved to $SHELL_RC_FILE"
else
echo ""
echo -e "${YELLOW}Skipped.${NC} Add your API key to $SHELL_RC_FILE when ready."
SELECTED_ENV_VAR=""
SELECTED_PROVIDER_ID=""
fi
if [ -n "$API_KEY" ]; then
# Remove old export line(s) for this env var from shell rc, then append new
sed -i.bak "/^export ${SELECTED_ENV_VAR}=/d" "$SHELL_RC_FILE" && rm -f "${SHELL_RC_FILE}.bak"
echo "" >> "$SHELL_RC_FILE"
echo "# Hive Agent Framework - $PROVIDER_NAME API key" >> "$SHELL_RC_FILE"
echo "export $SELECTED_ENV_VAR=\"$API_KEY\"" >> "$SHELL_RC_FILE"
export "$SELECTED_ENV_VAR=$API_KEY"
echo ""
echo -e "${GREEN}${NC} API key saved to $SHELL_RC_FILE"
# Health check the new key
echo -n " Verifying API key... "
HC_RESULT=$(uv run python "$SCRIPT_DIR/scripts/check_llm_key.py" "$SELECTED_PROVIDER_ID" "$API_KEY" 2>/dev/null) || true
HC_VALID=$(echo "$HC_RESULT" | $PYTHON_CMD -c "import json,sys; print(json.loads(sys.stdin.read()).get('valid',''))" 2>/dev/null) || true
HC_MSG=$(echo "$HC_RESULT" | $PYTHON_CMD -c "import json,sys; print(json.loads(sys.stdin.read()).get('message',''))" 2>/dev/null) || true
if [ "$HC_VALID" = "True" ]; then
echo -e "${GREEN}ok${NC}"
break
elif [ "$HC_VALID" = "False" ]; then
echo -e "${RED}failed${NC}"
echo -e " ${YELLOW}$HC_MSG${NC}"
# Undo the save so the user can retry cleanly
sed -i.bak "/^export ${SELECTED_ENV_VAR}=/d" "$SHELL_RC_FILE" && rm -f "${SHELL_RC_FILE}.bak"
# Remove the comment line we just added
sed -i.bak "/^# Hive Agent Framework - $PROVIDER_NAME API key$/d" "$SHELL_RC_FILE" && rm -f "${SHELL_RC_FILE}.bak"
unset "$SELECTED_ENV_VAR"
echo ""
read -r -p " Press Enter to try again: " _
# Loop back to key prompt
else
echo -e "${YELLOW}--${NC}"
echo -e " ${DIM}Could not verify key (network issue). The key has been saved.${NC}"
break
fi
elif [ -z "$CURRENT_KEY" ]; then
# No existing key and user skipped — abort provider
echo ""
echo -e "${YELLOW}Skipped.${NC} Add your API key to $SHELL_RC_FILE when ready."
SELECTED_ENV_VAR=""
SELECTED_PROVIDER_ID=""
break
else
# User pressed Enter with existing key — keep it, proceed normally
break
fi
done
fi
# For ZAI subscription: always prompt for API key
# For ZAI subscription: prompt for API key (allow replacement if already set)
if [ "$SUBSCRIPTION_MODE" = "zai_code" ]; then
echo ""
read -r -p "Paste your ZAI API key (or press Enter to skip): " API_KEY
while true; do
if [ "$ZAI_CRED_DETECTED" = true ] && [ -n "$ZAI_API_KEY" ]; then
# Key exists — offer to keep or replace
MASKED_KEY="${ZAI_API_KEY:0:4}...${ZAI_API_KEY: -4}"
echo ""
echo -e " ${GREEN}${NC} Current ZAI key: ${DIM}$MASKED_KEY${NC}"
read -r -p " Press Enter to keep, or paste a new key to replace: " API_KEY
else
# No key — prompt for one
echo ""
read -r -p "Paste your ZAI API key (or press Enter to skip): " API_KEY
fi
if [ -n "$API_KEY" ]; then
echo "" >> "$SHELL_RC_FILE"
echo "# Hive Agent Framework - ZAI Code subscription API key" >> "$SHELL_RC_FILE"
echo "export ZAI_API_KEY=\"$API_KEY\"" >> "$SHELL_RC_FILE"
export ZAI_API_KEY="$API_KEY"
echo ""
echo -e "${GREEN}${NC} ZAI API key saved to $SHELL_RC_FILE"
else
echo ""
echo -e "${YELLOW}Skipped.${NC} Add your ZAI API key to $SHELL_RC_FILE when ready:"
echo -e " ${CYAN}echo 'export ZAI_API_KEY=\"your-key\"' >> $SHELL_RC_FILE${NC}"
SELECTED_ENV_VAR=""
SELECTED_PROVIDER_ID=""
SUBSCRIPTION_MODE=""
fi
if [ -n "$API_KEY" ]; then
sed -i.bak "/^export ZAI_API_KEY=/d" "$SHELL_RC_FILE" && rm -f "${SHELL_RC_FILE}.bak"
echo "" >> "$SHELL_RC_FILE"
echo "# Hive Agent Framework - ZAI Code subscription API key" >> "$SHELL_RC_FILE"
echo "export ZAI_API_KEY=\"$API_KEY\"" >> "$SHELL_RC_FILE"
export ZAI_API_KEY="$API_KEY"
echo ""
echo -e "${GREEN}${NC} ZAI API key saved to $SHELL_RC_FILE"
# Health check the new key
echo -n " Verifying ZAI API key... "
HC_RESULT=$(uv run python "$SCRIPT_DIR/scripts/check_llm_key.py" "zai" "$API_KEY" "https://api.z.ai/api/coding/paas/v4" 2>/dev/null) || true
HC_VALID=$(echo "$HC_RESULT" | $PYTHON_CMD -c "import json,sys; print(json.loads(sys.stdin.read()).get('valid',''))" 2>/dev/null) || true
HC_MSG=$(echo "$HC_RESULT" | $PYTHON_CMD -c "import json,sys; print(json.loads(sys.stdin.read()).get('message',''))" 2>/dev/null) || true
if [ "$HC_VALID" = "True" ]; then
echo -e "${GREEN}ok${NC}"
break
elif [ "$HC_VALID" = "False" ]; then
echo -e "${RED}failed${NC}"
echo -e " ${YELLOW}$HC_MSG${NC}"
# Undo the save so the user can retry cleanly
sed -i.bak "/^export ZAI_API_KEY=/d" "$SHELL_RC_FILE" && rm -f "${SHELL_RC_FILE}.bak"
sed -i.bak "/^# Hive Agent Framework - ZAI Code subscription API key$/d" "$SHELL_RC_FILE" && rm -f "${SHELL_RC_FILE}.bak"
unset ZAI_API_KEY
ZAI_CRED_DETECTED=false
echo ""
read -r -p " Press Enter to try again: " _
# Loop back to key prompt
else
echo -e "${YELLOW}--${NC}"
echo -e " ${DIM}Could not verify key (network issue). The key has been saved.${NC}"
break
fi
elif [ "$ZAI_CRED_DETECTED" = false ] || [ -z "$ZAI_API_KEY" ]; then
# No existing key and user skipped — abort provider
echo ""
echo -e "${YELLOW}Skipped.${NC} Add your ZAI API key to $SHELL_RC_FILE when ready:"
echo -e " ${CYAN}echo 'export ZAI_API_KEY=\"your-key\"' >> $SHELL_RC_FILE${NC}"
SELECTED_ENV_VAR=""
SELECTED_PROVIDER_ID=""
SUBSCRIPTION_MODE=""
break
else
# User pressed Enter with existing key — keep it, proceed normally
break
fi
done
fi
# Prompt for model if not already selected (manual provider path)
@@ -1037,52 +1210,22 @@ fi
echo ""
# ============================================================
# Step 4b: Browser Automation (GCU)
# Step 4b: Browser Automation (GCU) — always enabled
# ============================================================
echo -e "${BOLD}Enable browser automation?${NC}"
echo -e "${DIM}This lets your agents control a real browser — navigate websites, fill forms,${NC}"
echo -e "${DIM}scrape dynamic pages, and interact with web UIs.${NC}"
echo ""
echo -e " ${CYAN}${BOLD}1)${NC} ${BOLD}Yes${NC}"
echo -e " ${CYAN}2)${NC} No"
echo ""
while true; do
read -r -p "Enter choice (1-2, default 1): " gcu_choice || true
gcu_choice="${gcu_choice:-1}"
if [ "$gcu_choice" = "1" ] || [ "$gcu_choice" = "2" ]; then
break
fi
echo -e "${RED}Invalid choice. Please enter 1 or 2${NC}"
done
if [ "$gcu_choice" = "1" ]; then
GCU_ENABLED=true
echo -e "${GREEN}${NC} Browser automation enabled"
else
GCU_ENABLED=false
echo -e "${DIM}⬡ Browser automation skipped${NC}"
fi
echo -e "${GREEN}${NC} Browser automation enabled"
# Patch gcu_enabled into configuration.json
if [ "$GCU_ENABLED" = "true" ]; then
GCU_PY_VAL="True"
else
GCU_PY_VAL="False"
fi
if [ -f "$HIVE_CONFIG_FILE" ]; then
uv run python -c "
import json
with open('$HIVE_CONFIG_FILE') as f:
config = json.load(f)
config['gcu_enabled'] = $GCU_PY_VAL
config['gcu_enabled'] = True
with open('$HIVE_CONFIG_FILE', 'w') as f:
json.dump(config, f, indent=2)
"
elif [ "$GCU_ENABLED" = "true" ]; then
# No config file yet (user skipped LLM provider) — create minimal one
else
mkdir -p "$HIVE_CONFIG_DIR"
uv run python -c "
import json
@@ -1352,9 +1495,10 @@ if [ "$FRONTEND_BUILT" = true ]; then
echo -e " ${DIM}Starting server on http://localhost:8787${NC}"
echo -e " ${DIM}Press Ctrl+C to stop${NC}"
echo ""
# exec replaces the quickstart process with hive serve
# --open tells it to auto-open the browser once the server is ready
exec "$SCRIPT_DIR/hive" serve --open
echo -e " ${DIM}Tip: You can restart the dashboard anytime with:${NC} ${CYAN}hive open${NC}"
echo ""
# exec replaces the quickstart process with hive open
exec "$SCRIPT_DIR/hive" open
else
# No frontend — show manual instructions
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
+125
View File
@@ -0,0 +1,125 @@
"""Validate an LLM API key without consuming tokens.
Usage:
python scripts/check_llm_key.py <provider_id> <api_key> [api_base]
Exit codes:
0 = valid key
1 = invalid key
2 = inconclusive (timeout, network error)
Output: single JSON line {"valid": true|false|null, "message": str}
"""
import json
import sys
import httpx
TIMEOUT = 10.0
def check_anthropic(api_key: str, **_: str) -> dict:
"""Send empty messages to trigger 400 without consuming tokens."""
with httpx.Client(timeout=TIMEOUT) as client:
r = client.post(
"https://api.anthropic.com/v1/messages",
headers={
"x-api-key": api_key,
"anthropic-version": "2023-06-01",
"Content-Type": "application/json",
},
json={"model": "claude-sonnet-4-20250514", "max_tokens": 1, "messages": []},
)
if r.status_code in (200, 400, 429):
return {"valid": True, "message": "API key valid"}
if r.status_code == 401:
return {"valid": False, "message": "Invalid API key"}
if r.status_code == 403:
return {"valid": False, "message": "API key lacks permissions"}
return {"valid": False, "message": f"Unexpected status {r.status_code}"}
def check_openai_compatible(api_key: str, endpoint: str, name: str) -> dict:
"""GET /models on any OpenAI-compatible API."""
with httpx.Client(timeout=TIMEOUT) as client:
r = client.get(
endpoint,
headers={"Authorization": f"Bearer {api_key}"},
)
if r.status_code in (200, 429):
return {"valid": True, "message": f"{name} API key valid"}
if r.status_code == 401:
return {"valid": False, "message": f"Invalid {name} API key"}
if r.status_code == 403:
return {"valid": False, "message": f"{name} API key lacks permissions"}
return {"valid": False, "message": f"{name} API returned status {r.status_code}"}
def check_gemini(api_key: str, **_: str) -> dict:
"""List models with query param auth."""
with httpx.Client(timeout=TIMEOUT) as client:
r = client.get(
"https://generativelanguage.googleapis.com/v1beta/models",
params={"key": api_key},
)
if r.status_code in (200, 429):
return {"valid": True, "message": "Gemini API key valid"}
if r.status_code in (400, 401, 403):
return {"valid": False, "message": "Invalid Gemini API key"}
return {"valid": False, "message": f"Gemini API returned status {r.status_code}"}
PROVIDERS = {
"anthropic": lambda key, **kw: check_anthropic(key),
"openai": lambda key, **kw: check_openai_compatible(
key, "https://api.openai.com/v1/models", "OpenAI"
),
"gemini": lambda key, **kw: check_gemini(key),
"groq": lambda key, **kw: check_openai_compatible(
key, "https://api.groq.com/openai/v1/models", "Groq"
),
"cerebras": lambda key, **kw: check_openai_compatible(
key, "https://api.cerebras.ai/v1/models", "Cerebras"
),
}
def main() -> None:
if len(sys.argv) < 3:
print(json.dumps({"valid": False, "message": "Usage: check_llm_key.py <provider> <key> [api_base]"}))
sys.exit(2)
provider_id = sys.argv[1]
api_key = sys.argv[2]
api_base = sys.argv[3] if len(sys.argv) > 3 else ""
try:
if api_base:
# Custom API base (ZAI or other OpenAI-compatible)
endpoint = api_base.rstrip("/") + "/models"
result = check_openai_compatible(api_key, endpoint, "ZAI")
elif provider_id in PROVIDERS:
result = PROVIDERS[provider_id](api_key)
else:
result = {"valid": True, "message": f"No health check for {provider_id}"}
print(json.dumps(result))
sys.exit(0)
print(json.dumps(result))
sys.exit(0 if result["valid"] else 1)
except httpx.TimeoutException:
print(json.dumps({"valid": None, "message": "Request timed out"}))
sys.exit(2)
except httpx.RequestError as e:
msg = str(e)
# Redact key from error messages
if api_key in msg:
msg = msg.replace(api_key, "***")
print(json.dumps({"valid": None, "message": f"Connection failed: {msg}"}))
sys.exit(2)
if __name__ == "__main__":
main()
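A minimal sketch of calling this checker from Python rather than via the inline python -c parsing in the quickstart (the key value is a placeholder; running from the repo root is assumed):

import json
import subprocess
import sys

proc = subprocess.run(
    [sys.executable, "scripts/check_llm_key.py", "anthropic", "sk-ant-placeholder"],
    capture_output=True,
    text=True,
    encoding="utf-8",
)
result = json.loads(proc.stdout)
# returncode: 0 = valid, 1 = invalid, 2 = inconclusive (matches the docstring above)
print(proc.returncode, result["valid"], result["message"])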
+2
View File
@@ -20,6 +20,7 @@ def test_check_requirements():
[sys.executable, "scripts/check_requirements.py", "json", "sys", "os"],
capture_output=True,
text=True,
encoding="utf-8",
)
print(f"Exit code: {result.returncode}")
print(f"Output:\n{result.stdout}")
@@ -39,6 +40,7 @@ def test_check_requirements():
[sys.executable, "scripts/check_requirements.py", "json", "nonexistent_module"],
capture_output=True,
text=True,
encoding="utf-8",
)
print(f"Exit code: {result.returncode}")
print(f"Output:\n{result.stdout}")
+10 -1
View File
@@ -132,7 +132,12 @@ def _snapshot_git(*args: str) -> str:
"""Run a git command with the snapshot GIT_DIR and PROJECT_ROOT worktree."""
cmd = ["git", "--git-dir", SNAPSHOT_DIR, "--work-tree", PROJECT_ROOT, *args]
result = subprocess.run(
cmd, capture_output=True, text=True, timeout=30, stdin=subprocess.DEVNULL
cmd,
capture_output=True,
text=True,
timeout=30,
encoding="utf-8",
stdin=subprocess.DEVNULL
)
return result.stdout.strip()
@@ -148,6 +153,7 @@ def _ensure_snapshot_repo():
capture_output=True,
timeout=10,
stdin=subprocess.DEVNULL,
encoding="utf-8",
)
_snapshot_git("config", "core.autocrlf", "false")
@@ -229,6 +235,7 @@ def run_command(command: str, cwd: str = "", timeout: int = 120) -> str:
text=True,
timeout=timeout,
stdin=subprocess.DEVNULL,
encoding="utf-8",
env={
**os.environ,
"PYTHONPATH": os.pathsep.join(
@@ -309,6 +316,7 @@ def undo_changes(path: str = "") -> str:
text=True,
timeout=10,
stdin=subprocess.DEVNULL,
encoding="utf-8",
)
return f"Restored: {path}"
else:
@@ -1114,6 +1122,7 @@ def run_agent_tests(
timeout=120,
env=env,
stdin=subprocess.DEVNULL,
encoding="utf-8",
)
except subprocess.TimeoutExpired:
return json.dumps(
+4
View File
@@ -114,6 +114,10 @@ lint.isort.section-order = [
[tool.pytest.ini_options]
testpaths = ["tests"]
asyncio_mode = "auto"
addopts = "-m 'not live'"
markers = [
"live: Tests that call real external APIs (require credentials, never run in CI)",
]
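With addopts = "-m 'not live'" the live suite is opt-in; a minimal sketch of a test gated by the marker (the credential name and env var here are illustrative):

import os
import pytest

@pytest.mark.live
def test_example_api_key_is_valid():
    api_key = os.environ.get("EXAMPLE_API_KEY")  # illustrative env var
    if not api_key:
        pytest.skip("No EXAMPLE_API_KEY available")
    # Real API call would go here; run explicitly with: pytest -m live -s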
[dependency-groups]
dev = [
@@ -33,7 +33,6 @@ Usage:
})
Credential categories:
- llm.py: LLM provider credentials (anthropic, openai, etc.)
- search.py: Search tool credentials (brave_search, google_search, etc.)
- email.py: Email provider credentials (resend, google/gmail)
- apollo.py: Apollo.io API credentials
@@ -97,7 +96,6 @@ from .jira import JIRA_CREDENTIALS
from .kafka import KAFKA_CREDENTIALS
from .langfuse import LANGFUSE_CREDENTIALS
from .linear import LINEAR_CREDENTIALS
from .llm import LLM_CREDENTIALS
from .lusha import LUSHA_CREDENTIALS
from .microsoft_graph import MICROSOFT_GRAPH_CREDENTIALS
from .mongodb import MONGODB_CREDENTIALS
@@ -148,7 +146,6 @@ from .zoom import ZOOM_CREDENTIALS
# Merged registry of all credentials
CREDENTIAL_SPECS = {
**AIRTABLE_CREDENTIALS,
**LLM_CREDENTIALS,
**NEWS_CREDENTIALS,
**SEARCH_CREDENTIALS,
**EMAIL_CREDENTIALS,
@@ -248,7 +245,6 @@ __all__ = [
"CREDENTIAL_SPECS",
# Category registries (for direct access if needed)
"AIRTABLE_CREDENTIALS",
"LLM_CREDENTIALS",
"NEWS_CREDENTIALS",
"SEARCH_CREDENTIALS",
"EMAIL_CREDENTIALS",
@@ -40,6 +40,7 @@ def open_browser(url: str) -> tuple[bool, str]:
["open", url],
check=True,
capture_output=True,
encoding="utf-8",
)
return True, "Opened in browser"
@@ -50,6 +51,7 @@ def open_browser(url: str) -> tuple[bool, str]:
["xdg-open", url],
check=True,
capture_output=True,
encoding="utf-8",
)
return True, "Opened in browser"
except FileNotFoundError:
+153 -100
View File
@@ -690,83 +690,6 @@ class CalendlyHealthChecker:
)
class AnthropicHealthChecker:
"""Health checker for Anthropic API credentials."""
ENDPOINT = "https://api.anthropic.com/v1/messages"
TIMEOUT = 10.0
def check(self, api_key: str) -> HealthCheckResult:
"""
Validate Anthropic API key without consuming tokens.
Sends a deliberately invalid request (empty messages) to the messages endpoint.
A 401 means invalid key; 400 (bad request) means the key authenticated
but the payload was rejected, confirming the key is valid without
generating any tokens. A 429 (rate limited) also indicates a valid key.
"""
try:
with httpx.Client(timeout=self.TIMEOUT) as client:
response = client.post(
self.ENDPOINT,
headers={
"x-api-key": api_key,
"anthropic-version": "2023-06-01",
"Content-Type": "application/json",
},
# Empty messages triggers 400 (not 200), so no tokens are consumed.
json={
"model": "claude-sonnet-4-20250514",
"max_tokens": 1,
"messages": [],
},
)
if response.status_code == 200:
return HealthCheckResult(
valid=True,
message="Anthropic API key valid",
)
elif response.status_code == 401:
return HealthCheckResult(
valid=False,
message="Anthropic API key is invalid",
details={"status_code": 401},
)
elif response.status_code == 429:
# Rate limited but key is valid
return HealthCheckResult(
valid=True,
message="Anthropic API key valid (rate limited)",
details={"status_code": 429, "rate_limited": True},
)
elif response.status_code == 400:
# Bad request but key authenticated - key is valid
return HealthCheckResult(
valid=True,
message="Anthropic API key valid",
details={"status_code": 400},
)
else:
return HealthCheckResult(
valid=False,
message=f"Anthropic API returned status {response.status_code}",
details={"status_code": response.status_code},
)
except httpx.TimeoutException:
return HealthCheckResult(
valid=False,
message="Anthropic API request timed out",
details={"error": "timeout"},
)
except httpx.RequestError as e:
return HealthCheckResult(
valid=False,
message=f"Failed to connect to Anthropic API: {e}",
details={"error": str(e)},
)
class GitHubHealthChecker:
"""Health checker for GitHub Personal Access Token."""
@@ -1260,33 +1183,163 @@ class IntercomHealthChecker(OAuthBearerHealthChecker):
)
# --- Simple Bearer-auth checkers ---
class ApifyHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://api.apify.com/v2/users/me"
SERVICE_NAME = "Apify"
class AsanaHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://app.asana.com/api/1.0/users/me"
SERVICE_NAME = "Asana"
class AttioHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://api.attio.com/v2/workspace_members"
SERVICE_NAME = "Attio"
class DockerHubHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://hub.docker.com/v2/user/login"
SERVICE_NAME = "Docker Hub"
class GoogleSearchConsoleHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://www.googleapis.com/webmasters/v3/sites"
SERVICE_NAME = "Google Search Console"
class HuggingFaceHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://huggingface.co/api/whoami-v2"
SERVICE_NAME = "Hugging Face"
class LinearHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://api.linear.app/graphql"
SERVICE_NAME = "Linear"
class MicrosoftGraphHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://graph.microsoft.com/v1.0/me"
SERVICE_NAME = "Microsoft Graph"
class PineconeHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://api.pinecone.io/indexes"
SERVICE_NAME = "Pinecone"
class VercelHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://api.vercel.com/v2/user"
SERVICE_NAME = "Vercel"
# --- Custom-header auth checkers ---
class GitLabHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://gitlab.com/api/v4/user"
SERVICE_NAME = "GitLab"
AUTH_TYPE = BaseHttpHealthChecker.AUTH_HEADER
AUTH_HEADER_NAME = "PRIVATE-TOKEN"
AUTH_HEADER_TEMPLATE = "{token}"
class NotionHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://api.notion.com/v1/users/me"
SERVICE_NAME = "Notion"
def _build_headers(self, credential_value: str) -> dict[str, str]:
headers = super()._build_headers(credential_value)
headers["Notion-Version"] = "2022-06-28"
return headers
# --- Basic-auth checkers ---
class GreenhouseHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://harvest.greenhouse.io/v1/jobs?per_page=1"
SERVICE_NAME = "Greenhouse"
AUTH_TYPE = BaseHttpHealthChecker.AUTH_BASIC
# --- Query-param auth checkers ---
class PipedriveHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://api.pipedrive.com/v1/users/me"
SERVICE_NAME = "Pipedrive"
AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
AUTH_QUERY_PARAM_NAME = "api_token"
class TrelloKeyHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://api.trello.com/1/members/me"
SERVICE_NAME = "Trello"
AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
AUTH_QUERY_PARAM_NAME = "key"
class TrelloTokenHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://api.trello.com/1/members/me"
SERVICE_NAME = "Trello"
AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
AUTH_QUERY_PARAM_NAME = "token"
class YouTubeHealthChecker(BaseHttpHealthChecker):
ENDPOINT = "https://www.googleapis.com/youtube/v3/videoCategories?part=snippet&regionCode=US"
SERVICE_NAME = "YouTube"
AUTH_TYPE = BaseHttpHealthChecker.AUTH_QUERY
AUTH_QUERY_PARAM_NAME = "key"
# Registry of health checkers
HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
"discord": DiscordHealthChecker(),
"hubspot": HubSpotHealthChecker(),
"zoho_crm": ZohoCRMHealthChecker(),
"brave_search": BraveSearchHealthChecker(),
"google_calendar_oauth": GoogleCalendarHealthChecker(),
"google": GoogleGmailHealthChecker(),
"slack": SlackHealthChecker(),
"calendly_pat": CalendlyHealthChecker(),
"google_search": GoogleSearchHealthChecker(),
"google_maps": GoogleMapsHealthChecker(),
"anthropic": AnthropicHealthChecker(),
"github": GitHubHealthChecker(),
"intercom": IntercomHealthChecker(),
"resend": ResendHealthChecker(),
"lusha_api_key": LushaHealthChecker(),
"stripe": StripeHealthChecker(),
"exa_search": ExaSearchHealthChecker(),
"google_docs": GoogleDocsHealthChecker(),
"calcom": CalcomHealthChecker(),
"serpapi": SerpApiHealthChecker(),
"apify": ApifyHealthChecker(),
"apollo": ApolloHealthChecker(),
"telegram": TelegramHealthChecker(),
"newsdata": NewsdataHealthChecker(),
"finlight": FinlightHealthChecker(),
"asana": AsanaHealthChecker(),
"attio": AttioHealthChecker(),
"brave_search": BraveSearchHealthChecker(),
"brevo": BrevoHealthChecker(),
"calcom": CalcomHealthChecker(),
"calendly_pat": CalendlyHealthChecker(),
"discord": DiscordHealthChecker(),
"docker_hub": DockerHubHealthChecker(),
"exa_search": ExaSearchHealthChecker(),
"finlight": FinlightHealthChecker(),
"github": GitHubHealthChecker(),
"gitlab_token": GitLabHealthChecker(),
"google": GoogleGmailHealthChecker(),
"google_calendar_oauth": GoogleCalendarHealthChecker(),
"google_docs": GoogleDocsHealthChecker(),
"google_maps": GoogleMapsHealthChecker(),
"google_search": GoogleSearchHealthChecker(),
"google_search_console": GoogleSearchConsoleHealthChecker(),
"greenhouse_token": GreenhouseHealthChecker(),
"hubspot": HubSpotHealthChecker(),
"huggingface": HuggingFaceHealthChecker(),
"intercom": IntercomHealthChecker(),
"linear": LinearHealthChecker(),
"lusha_api_key": LushaHealthChecker(),
"microsoft_graph": MicrosoftGraphHealthChecker(),
"newsdata": NewsdataHealthChecker(),
"notion_token": NotionHealthChecker(),
"pinecone": PineconeHealthChecker(),
"pipedrive": PipedriveHealthChecker(),
"resend": ResendHealthChecker(),
"serpapi": SerpApiHealthChecker(),
"slack": SlackHealthChecker(),
"stripe": StripeHealthChecker(),
"telegram": TelegramHealthChecker(),
"trello_key": TrelloKeyHealthChecker(),
"trello_token": TrelloTokenHealthChecker(),
"vercel": VercelHealthChecker(),
"youtube": YouTubeHealthChecker(),
"zoho_crm": ZohoCRMHealthChecker(),
}
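The rewrite replaces most bespoke checkers with small declarative subclasses; a sketch of what adding one more would look like, assuming BaseHttpHealthChecker defaults to Bearer auth as the checkers above suggest (the service name, endpoint, and registry key are hypothetical):

class ExampleHealthChecker(BaseHttpHealthChecker):
    # GET a "who am I" style endpoint with Authorization: Bearer <token>
    ENDPOINT = "https://api.example.com/v1/me"
    SERVICE_NAME = "Example"

HEALTH_CHECKERS["example"] = ExampleHealthChecker()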
-44
View File
@@ -1,44 +0,0 @@
"""
LLM provider credentials.
Contains credentials for language model providers like Anthropic, OpenAI, etc.
"""
from .base import CredentialSpec
LLM_CREDENTIALS = {
"anthropic": CredentialSpec(
env_var="ANTHROPIC_API_KEY",
tools=[],
node_types=["event_loop"],
required=False, # Not required - agents can use other providers via LiteLLM
startup_required=False, # MCP server doesn't need LLM credentials
help_url="https://console.anthropic.com/settings/keys",
description="API key for Anthropic Claude models",
# Auth method support
direct_api_key_supported=True,
api_key_instructions="""To get an Anthropic API key:
1. Go to https://console.anthropic.com/settings/keys
2. Sign in or create an Anthropic account
3. Click "Create Key"
4. Give your key a descriptive name (e.g., "Hive Agent")
5. Copy the API key (starts with sk-ant-)
6. Store it securely - you won't be able to see the full key again!""",
# Health check configuration
health_check_endpoint="https://api.anthropic.com/v1/messages",
health_check_method="POST",
# Credential store mapping
credential_id="anthropic",
credential_key="api_key",
),
# Future LLM providers:
# "openai": CredentialSpec(
# env_var="OPENAI_API_KEY",
# tools=[],
# node_types=["openai_generate"],
# required=False,
# startup_required=False,
# help_url="https://platform.openai.com/api-keys",
# description="API key for OpenAI models",
# ),
}
@@ -84,7 +84,7 @@ def check_env_var_in_shell_config(
if not config_path.exists():
return False, None
content = config_path.read_text()
content = config_path.read_text(encoding="utf-8")
# Look for export ENV_VAR=value or export ENV_VAR="value"
pattern = rf"^export\s+{re.escape(env_var)}=(.+)$"
@@ -130,7 +130,7 @@ def add_env_var_to_shell_config(
try:
if config_path.exists():
content = config_path.read_text()
content = config_path.read_text(encoding="utf-8")
# Check if already exists
pattern = rf"^export\s+{re.escape(env_var)}=.*$"
@@ -142,11 +142,11 @@ def add_env_var_to_shell_config(
content,
flags=re.MULTILINE,
)
config_path.write_text(new_content)
config_path.write_text(new_content, encoding="utf-8")
return True, str(config_path)
# Append to file
with open(config_path, "a") as f:
with open(config_path, "a", encoding="utf-8") as f:
f.write(f"\n# {comment}\n")
f.write(f"{export_line}\n")
@@ -178,7 +178,7 @@ def remove_env_var_from_shell_config(
return True, "Config file does not exist"
try:
content = config_path.read_text()
content = config_path.read_text(encoding="utf-8")
lines = content.split("\n")
new_lines = []
@@ -206,7 +206,7 @@ def remove_env_var_from_shell_config(
new_lines.append(line)
config_path.write_text("\n".join(new_lines))
config_path.write_text("\n".join(new_lines), encoding="utf-8")
return True, str(config_path)
except PermissionError:
+6 -1
View File
@@ -514,7 +514,12 @@ def register_file_tools(
cmd.append(resolved)
rg_result = subprocess.run(
cmd, capture_output=True, text=True, timeout=30, stdin=subprocess.DEVNULL
cmd,
capture_output=True,
text=True,
timeout=30,
encoding="utf-8",
stdin=subprocess.DEVNULL
)
if rg_result.returncode <= 1:
output = rg_result.stdout.strip()
@@ -48,7 +48,13 @@ def register_tools(mcp: FastMCP) -> None:
secure_cwd = session_root
result = subprocess.run(
command, shell=True, cwd=secure_cwd, capture_output=True, text=True, timeout=60
command,
shell=True,
cwd=secure_cwd,
capture_output=True,
text=True,
timeout=60,
encoding="utf-8",
)
return {
+2 -2
View File
@@ -70,7 +70,7 @@ def allocate_port(profile: str, storage_path: Path | None = None) -> int:
# Check for stored port
if port_file and port_file.exists():
try:
stored_port = int(port_file.read_text().strip())
stored_port = int(port_file.read_text(encoding="utf-8").strip())
if CDP_PORT_MIN <= stored_port <= CDP_PORT_MAX:
if _is_port_available(stored_port):
_allocated_ports.add(stored_port)
@@ -87,7 +87,7 @@ def allocate_port(profile: str, storage_path: Path | None = None) -> int:
# Persist port assignment
if port_file:
try:
port_file.write_text(str(port))
port_file.write_text(str(port), encoding="utf-8")
except OSError as e:
logger.warning(f"Failed to save port to file: {e}")
return port
+60 -1
View File
@@ -1,11 +1,18 @@
"""Shared fixtures for tools tests."""
from __future__ import annotations
import logging
import os
from collections.abc import Callable
from pathlib import Path
import pytest
from fastmcp import FastMCP
from aden_tools.credentials import CredentialStoreAdapter
from aden_tools.credentials import CREDENTIAL_SPECS, CredentialStoreAdapter
logger = logging.getLogger(__name__)
@pytest.fixture
@@ -56,3 +63,55 @@ def large_text_file(tmp_path: Path) -> Path:
large_file = tmp_path / "large.txt"
large_file.write_text("x" * 20_000_000) # 20MB
return large_file
@pytest.fixture(scope="session")
def live_credential_resolver() -> Callable[[str], str | None]:
"""Resolve live credentials for integration tests.
Tries two sources in order:
1. Environment variable (spec.env_var)
2. CredentialStoreAdapter.default() (encrypted store + env fallback)
Returns a callable: resolver(credential_name) -> str | None.
Credential values are never logged or exposed in test output.
"""
_adapter: CredentialStoreAdapter | None = None
_adapter_init_failed = False
def _get_adapter() -> CredentialStoreAdapter | None:
nonlocal _adapter, _adapter_init_failed
if _adapter is not None:
return _adapter
if _adapter_init_failed:
return None
try:
_adapter = CredentialStoreAdapter.default()
except Exception as exc:
logger.debug("Could not initialize CredentialStoreAdapter: %s", exc)
_adapter_init_failed = True
return _adapter
def resolve(credential_name: str) -> str | None:
spec = CREDENTIAL_SPECS.get(credential_name)
if spec is None:
return None
# 1. Try env var directly
value = os.environ.get(spec.env_var)
if value:
return value
# 2. Try the adapter (encrypted store + fallback)
adapter = _get_adapter()
if adapter is not None:
try:
value = adapter.get(credential_name)
if value:
return value
except Exception:
pass
return None
return resolve
+2 -2
View File
@@ -53,7 +53,7 @@ def _discover_tool_modules() -> list[tuple[str, str]]:
continue
if item.is_dir() and (item / "__init__.py").exists():
init_text = (item / "__init__.py").read_text()
init_text = (item / "__init__.py").read_text(encoding="utf-8")
if "register_tools" in init_text:
# Direct tool package (e.g., web_search_tool, email_tool)
@@ -64,7 +64,7 @@ def _discover_tool_modules() -> list[tuple[str, str]]:
if sub.name.startswith("_") or sub.name == "__pycache__":
continue
if sub.is_dir() and (sub / "__init__.py").exists():
sub_init_text = (sub / "__init__.py").read_text()
sub_init_text = (sub / "__init__.py").read_text(encoding="utf-8")
if "register_tools" in sub_init_text:
modules.append(
(
+9 -1
View File
@@ -20,7 +20,15 @@ class TestRegistryCompleteness:
# - google_cse: shares google_search checker (same credential_group)
# - razorpay/razorpay_secret: requires HTTP Basic auth with TWO credentials,
# which the single-value health check dispatcher can't support
KNOWN_EXCEPTIONS = {"google_cse", "razorpay", "razorpay_secret"}
# - plaid_client_id/plaid_secret: requires POST with both client_id and
# secret in the JSON body, so it can't be validated with a single credential value
KNOWN_EXCEPTIONS = {
"google_cse",
"razorpay",
"razorpay_secret",
"plaid_client_id",
"plaid_secret",
}
def test_specs_with_endpoint_have_checkers(self):
"""Every CredentialSpec with health_check_endpoint has a HEALTH_CHECKERS entry."""
-12
View File
@@ -308,18 +308,6 @@ class TestCredentialSpecs:
assert spec.startup_required is False
assert "brave.com" in spec.help_url
def test_anthropic_spec_exists(self):
"""CREDENTIAL_SPECS includes anthropic with startup_required=True."""
assert "anthropic" in CREDENTIAL_SPECS
spec = CREDENTIAL_SPECS["anthropic"]
assert spec.env_var == "ANTHROPIC_API_KEY"
assert spec.tools == []
assert "event_loop" in spec.node_types
assert spec.required is False
assert spec.startup_required is False
assert "anthropic.com" in spec.help_url
class TestNodeTypeValidation:
"""Tests for node type credential validation."""
+22 -87
View File
@@ -6,7 +6,6 @@ import httpx
from aden_tools.credentials.health_check import (
HEALTH_CHECKERS,
AnthropicHealthChecker,
ApolloHealthChecker,
BrevoHealthChecker,
CalcomHealthChecker,
@@ -36,11 +35,6 @@ class TestHealthCheckerRegistry:
assert "google_search" in HEALTH_CHECKERS
assert isinstance(HEALTH_CHECKERS["google_search"], GoogleSearchHealthChecker)
def test_anthropic_registered(self):
"""AnthropicHealthChecker is registered in HEALTH_CHECKERS."""
assert "anthropic" in HEALTH_CHECKERS
assert isinstance(HEALTH_CHECKERS["anthropic"], AnthropicHealthChecker)
def test_github_registered(self):
"""GitHubHealthChecker is registered in HEALTH_CHECKERS."""
assert "github" in HEALTH_CHECKERS
@@ -78,106 +72,47 @@ class TestHealthCheckerRegistry:
"brave_search",
"google_search",
"google_maps",
"anthropic",
"github",
"intercom",
"resend",
"google_calendar_oauth",
"google",
"slack",
"lusha_api_key",
"discord",
"stripe",
"exa_search",
"google_docs",
"calcom",
"serpapi",
"apify",
"apollo",
"telegram",
"newsdata",
"finlight",
"asana",
"attio",
"brevo",
"calendly_pat",
"docker_hub",
"finlight",
"gitlab_token",
"google_search_console",
"greenhouse_token",
"huggingface",
"intercom",
"linear",
"lusha_api_key",
"microsoft_graph",
"newsdata",
"notion_token",
"pinecone",
"pipedrive",
"telegram",
"trello_key",
"trello_token",
"vercel",
"youtube",
"zoho_crm",
}
assert set(HEALTH_CHECKERS.keys()) == expected
class TestAnthropicHealthChecker:
"""Tests for AnthropicHealthChecker."""
def _mock_response(self, status_code, json_data=None):
response = MagicMock(spec=httpx.Response)
response.status_code = status_code
if json_data:
response.json.return_value = json_data
return response
@patch("aden_tools.credentials.health_check.httpx.Client")
def test_valid_key_200(self, mock_client_cls):
mock_client = MagicMock()
mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
mock_client.post.return_value = self._mock_response(200)
checker = AnthropicHealthChecker()
result = checker.check("sk-ant-test-key")
assert result.valid is True
assert "valid" in result.message.lower()
@patch("aden_tools.credentials.health_check.httpx.Client")
def test_invalid_key_401(self, mock_client_cls):
mock_client = MagicMock()
mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
mock_client.post.return_value = self._mock_response(401)
checker = AnthropicHealthChecker()
result = checker.check("invalid-key")
assert result.valid is False
assert result.details["status_code"] == 401
@patch("aden_tools.credentials.health_check.httpx.Client")
def test_rate_limited_429(self, mock_client_cls):
mock_client = MagicMock()
mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
mock_client.post.return_value = self._mock_response(429)
checker = AnthropicHealthChecker()
result = checker.check("sk-ant-test-key")
assert result.valid is True
assert result.details.get("rate_limited") is True
@patch("aden_tools.credentials.health_check.httpx.Client")
def test_bad_request_400_still_valid(self, mock_client_cls):
mock_client = MagicMock()
mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
mock_client.post.return_value = self._mock_response(400)
checker = AnthropicHealthChecker()
result = checker.check("sk-ant-test-key")
assert result.valid is True
@patch("aden_tools.credentials.health_check.httpx.Client")
def test_timeout(self, mock_client_cls):
mock_client = MagicMock()
mock_client_cls.return_value.__enter__ = MagicMock(return_value=mock_client)
mock_client_cls.return_value.__exit__ = MagicMock(return_value=False)
mock_client.post.side_effect = httpx.TimeoutException("timed out")
checker = AnthropicHealthChecker()
result = checker.check("sk-ant-test-key")
assert result.valid is False
assert result.details["error"] == "timeout"
class TestGitHubHealthChecker:
"""Tests for GitHubHealthChecker."""
+197
View File
@@ -0,0 +1,197 @@
"""Live integration tests for credential health checkers.
These tests make REAL API calls. They are gated behind the ``live`` marker
and never run in CI. Run them manually::
pytest -m live -s --log-cli-level=INFO # all live tests
pytest -m live -k anthropic -s # just anthropic
pytest -m live -k "not google" -s # skip google variants
pytest -m live --tb=short -q # quick summary
Prerequisites:
- Credentials available via env vars or ~/.hive/credentials/ encrypted store
- Tests skip gracefully when credentials are unavailable
- Rate-limited responses (429) are treated as PASS (credential is valid)
"""
from __future__ import annotations
import logging
import pytest
from aden_tools.credentials import CREDENTIAL_SPECS
from aden_tools.credentials.health_check import (
HEALTH_CHECKERS,
check_credential_health,
validate_integration_wiring,
)
logger = logging.getLogger(__name__)
# All credential names that have registered health checkers
CHECKER_NAMES = sorted(HEALTH_CHECKERS.keys())
def _redact(value: str) -> str:
"""Redact a credential for safe logging."""
if len(value) <= 8:
return "****"
return f"{value[:4]}...{value[-2:]}"
# ---------------------------------------------------------------------------
# 1. Direct checker tests
# ---------------------------------------------------------------------------
@pytest.mark.live
class TestLiveHealthCheckers:
"""Call each health checker against the real API."""
@pytest.mark.parametrize("credential_name", CHECKER_NAMES, ids=CHECKER_NAMES)
def test_checker_returns_valid(self, credential_name, live_credential_resolver):
"""Health checker returns valid=True with a real credential."""
credential_value = live_credential_resolver(credential_name)
if credential_value is None:
spec = CREDENTIAL_SPECS.get(credential_name)
env_var = spec.env_var if spec else "???"
pytest.skip(f"No credential available ({env_var})")
checker = HEALTH_CHECKERS[credential_name]
result = checker.check(credential_value)
logger.info(
"Live check %s: valid=%s message=%r",
credential_name,
result.valid,
result.message,
)
assert result.valid is True, (
f"Health check for '{credential_name}' returned valid=False: "
f"{result.message} (details: {result.details})"
)
assert result.message
@pytest.mark.parametrize("credential_name", CHECKER_NAMES, ids=CHECKER_NAMES)
def test_checker_extracts_identity(self, credential_name, live_credential_resolver):
"""Identity metadata (when present) contains non-empty strings."""
credential_value = live_credential_resolver(credential_name)
if credential_value is None:
pytest.skip(f"No credential available for '{credential_name}'")
checker = HEALTH_CHECKERS[credential_name]
result = checker.check(credential_value)
assert result.valid is True, (
f"Cannot verify identity -- health check failed: {result.message}"
)
identity = result.details.get("identity", {})
if identity:
logger.info("Identity for %s: %s", credential_name, identity)
for key, value in identity.items():
assert isinstance(value, str), (
f"Identity key '{key}' is not a string: {type(value)}"
)
assert value, f"Identity key '{key}' is empty"
else:
logger.info("No identity metadata for %s (OK for some APIs)", credential_name)
# ---------------------------------------------------------------------------
# 2. Dispatcher path (check_credential_health)
# ---------------------------------------------------------------------------
@pytest.mark.live
class TestLiveDispatcher:
"""Verify the full check_credential_health() dispatch path."""
@pytest.mark.parametrize("credential_name", CHECKER_NAMES, ids=CHECKER_NAMES)
def test_dispatcher_returns_valid(self, credential_name, live_credential_resolver):
"""check_credential_health() returns valid=True via dispatcher."""
credential_value = live_credential_resolver(credential_name)
if credential_value is None:
pytest.skip(f"No credential available for '{credential_name}'")
result = check_credential_health(credential_name, credential_value)
logger.info(
"Dispatcher check %s: valid=%s message=%r",
credential_name,
result.valid,
result.message,
)
assert result.valid is True, (
f"Dispatcher check for '{credential_name}' returned valid=False: "
f"{result.message} (details: {result.details})"
)
# ---------------------------------------------------------------------------
# 3. Integration wiring verification
# ---------------------------------------------------------------------------
@pytest.mark.live
class TestLiveIntegrationWiring:
"""validate_integration_wiring() passes for every registered checker."""
@pytest.mark.parametrize("credential_name", CHECKER_NAMES, ids=CHECKER_NAMES)
def test_wiring_valid(self, credential_name):
"""No wiring issues for credentials with health checkers."""
issues = validate_integration_wiring(credential_name)
assert not issues, f"Wiring issues for '{credential_name}':\n" + "\n".join(
f" - {i}" for i in issues
)
# ---------------------------------------------------------------------------
# 4. Summary reporter
# ---------------------------------------------------------------------------
@pytest.mark.live
class TestLiveCredentialSummary:
"""Print a human-readable summary of tested vs skipped credentials."""
def test_credential_availability_summary(self, live_credential_resolver):
"""Report which credentials were available for live testing."""
available = []
skipped = []
for name in CHECKER_NAMES:
value = live_credential_resolver(name)
spec = CREDENTIAL_SPECS.get(name)
env_var = spec.env_var if spec else "???"
if value:
available.append((name, env_var))
else:
skipped.append((name, env_var))
lines = [
"",
"=" * 60,
"LIVE CREDENTIAL TEST SUMMARY",
"=" * 60,
f" Available: {len(available)} / {len(CHECKER_NAMES)}",
f" Skipped: {len(skipped)} / {len(CHECKER_NAMES)}",
"",
]
if available:
lines.append(" TESTED:")
for name, env_var in available:
lines.append(f" [PASS] {name} ({env_var})")
if skipped:
lines.append("")
lines.append(" SKIPPED (no credential):")
for name, env_var in skipped:
lines.append(f" [SKIP] {name} ({env_var})")
lines.append("=" * 60)
summary = "\n".join(lines)
logger.info(summary)
print(summary) # noqa: T201 -- visible with pytest -s
+19 -14
View File
@@ -49,7 +49,10 @@ def session_dir(tmp_path: Path) -> Path:
def basic_csv(session_dir: Path) -> Path:
"""Create a basic CSV file for testing."""
csv_file = session_dir / "basic.csv"
csv_file.write_text("name,age,city\nAlice,30,NYC\nBob,25,LA\nCharlie,35,Chicago\n")
csv_file.write_text(
"name,age,city\nAlice,30,NYC\nBob,25,LA\nCharlie,35,Chicago\n",
encoding="utf-8",
)
return csv_file
@@ -60,7 +63,7 @@ def large_csv(session_dir: Path) -> Path:
lines = ["id,value"]
for i in range(100):
lines.append(f"{i},{i * 10}")
csv_file.write_text("\n".join(lines) + "\n")
csv_file.write_text("\n".join(lines) + "\n", encoding="utf-8")
return csv_file
@@ -68,7 +71,7 @@ def large_csv(session_dir: Path) -> Path:
def empty_csv(session_dir: Path) -> Path:
"""Create an empty CSV file (no content)."""
csv_file = session_dir / "empty.csv"
csv_file.write_text("")
csv_file.write_text("", encoding="utf-8")
return csv_file
@@ -76,7 +79,7 @@ def empty_csv(session_dir: Path) -> Path:
def headers_only_csv(session_dir: Path) -> Path:
"""Create a CSV file with only headers."""
csv_file = session_dir / "headers_only.csv"
csv_file.write_text("name,age,city\n")
csv_file.write_text("name,age,city\n", encoding="utf-8")
return csv_file
@@ -217,7 +220,7 @@ class TestCsvRead:
"""Return error for non-CSV file extension."""
# Create a text file
txt_file = session_dir / "data.txt"
txt_file.write_text("name,age\nAlice,30\n")
txt_file.write_text("name,age\nAlice,30\n", encoding="utf-8")
with patch("aden_tools.tools.file_system_toolkits.security.WORKSPACES_DIR", str(tmp_path)):
result = csv_tool_fn(
@@ -317,7 +320,8 @@ class TestCsvRead:
"""Read CSV with quoted fields containing commas."""
csv_file = session_dir / "quoted.csv"
csv_file.write_text(
'name,address,note\n"Smith, John","123 Main St, Apt 4","Hello, world"\n'
'name,address,note\n"Smith, John","123 Main St, Apt 4","Hello, world"\n',
encoding="utf-8",
)
with patch("aden_tools.tools.file_system_toolkits.security.WORKSPACES_DIR", str(tmp_path)):
@@ -385,7 +389,7 @@ class TestCsvWrite:
assert result["rows_written"] == 2
# Verify file content
content = (session_dir / "output.csv").read_text()
content = (session_dir / "output.csv").read_text(encoding="utf-8")
assert "name,age,city" in content
assert "Alice,30,NYC" in content
assert "Bob,25,LA" in content
@@ -449,7 +453,7 @@ class TestCsvWrite:
assert result["success"] is True
content = (session_dir / "output.csv").read_text()
content = (session_dir / "output.csv").read_text(encoding="utf-8")
assert "extra" not in content
assert "ignored" not in content
@@ -468,7 +472,7 @@ class TestCsvWrite:
assert result["success"] is True
assert result["rows_written"] == 0
content = (session_dir / "output.csv").read_text()
content = (session_dir / "output.csv").read_text(encoding="utf-8")
assert "name,age" in content
def test_write_unicode_content(self, csv_tools, session_dir, tmp_path):
@@ -511,7 +515,7 @@ class TestCsvWrite:
csv_file = session_dir / "data.csv"
assert csv_file.exists()
content = csv_file.read_text()
content = csv_file.read_text(encoding="utf-8")
assert "id,value" in content
assert "1,test1" in content
assert "2,test2" in content
@@ -579,7 +583,7 @@ class TestCsvAppend:
assert result["success"] is True
content = (session_dir / "basic.csv").read_text()
content = (session_dir / "basic.csv").read_text(encoding="utf-8")
assert "extra" not in content
assert "ignored" not in content
assert "David" in content
@@ -587,7 +591,7 @@ class TestCsvAppend:
def test_append_non_csv_extension_error(self, csv_tools, session_dir, tmp_path):
"""Return error for non-CSV file extension."""
txt_file = session_dir / "data.txt"
txt_file.write_text("name\nAlice\n")
txt_file.write_text("name\nAlice\n", encoding="utf-8")
with patch("aden_tools.tools.file_system_toolkits.security.WORKSPACES_DIR", str(tmp_path)):
result = csv_tools["csv_append"](
@@ -679,7 +683,7 @@ class TestCsvInfo:
def test_get_info_non_csv_extension_error(self, csv_tools, session_dir, tmp_path):
"""Return error for non-CSV file extension."""
txt_file = session_dir / "data.txt"
txt_file.write_text("name\nAlice\n")
txt_file.write_text("name\nAlice\n", encoding="utf-8")
with patch("aden_tools.tools.file_system_toolkits.security.WORKSPACES_DIR", str(tmp_path)):
result = csv_tools["csv_info"](
@@ -707,7 +711,8 @@ class TestCsvSql:
"2,MacBook,Electronics,1999,30\n"
"3,Coffee Mug,Kitchen,15,200\n"
"4,Headphones,Electronics,299,75\n"
"5,Water Bottle,Kitchen,25,150\n"
"5,Water Bottle,Kitchen,25,150\n",
encoding="utf-8",
)
return csv_file
+4 -4
View File
@@ -280,7 +280,7 @@ class TestExcelRead:
"""Return error for non-Excel file extension."""
# Create a text file
txt_file = session_dir / "data.txt"
txt_file.write_text("name,age\nAlice,30\n")
txt_file.write_text("name,age\nAlice,30\n", encoding="utf-8")
with patch("aden_tools.tools.file_system_toolkits.security.WORKSPACES_DIR", str(tmp_path)):
result = excel_read_fn(
@@ -602,7 +602,7 @@ class TestExcelAppend:
def test_append_non_xlsx_extension_error(self, excel_tools, session_dir, tmp_path):
"""Return error for non-Excel file extension."""
txt_file = session_dir / "data.txt"
txt_file.write_text("name\nAlice\n")
txt_file.write_text("name\nAlice\n", encoding="utf-8")
with patch("aden_tools.tools.file_system_toolkits.security.WORKSPACES_DIR", str(tmp_path)):
result = excel_tools["excel_append"](
@@ -672,7 +672,7 @@ class TestExcelInfo:
def test_get_info_non_xlsx_extension_error(self, excel_tools, session_dir, tmp_path):
"""Return error for non-Excel file extension."""
txt_file = session_dir / "data.txt"
txt_file.write_text("name\nAlice\n")
txt_file.write_text("name\nAlice\n", encoding="utf-8")
with patch("aden_tools.tools.file_system_toolkits.security.WORKSPACES_DIR", str(tmp_path)):
result = excel_tools["excel_info"](
@@ -735,7 +735,7 @@ class TestExcelSheetList:
def test_list_sheets_non_xlsx_extension_error(self, excel_tools, session_dir, tmp_path):
"""Return error for non-Excel file extension."""
txt_file = session_dir / "data.txt"
txt_file.write_text("name\nAlice\n")
txt_file.write_text("name\nAlice\n", encoding="utf-8")
with patch("aden_tools.tools.file_system_toolkits.security.WORKSPACES_DIR", str(tmp_path)):
result = excel_tools["excel_sheet_list"](
+50 -49
View File
@@ -86,7 +86,7 @@ class TestViewFileTool:
def test_view_existing_file(self, view_file_fn, mock_workspace, mock_secure_path, tmp_path):
"""Viewing an existing file returns content and metadata."""
test_file = tmp_path / "test.txt"
test_file.write_text("Hello, World!")
test_file.write_text("Hello, World!", encoding="utf-8")
result = view_file_fn(path="test.txt", **mock_workspace)
@@ -106,7 +106,7 @@ class TestViewFileTool:
"""Viewing a multiline file returns correct line count."""
test_file = tmp_path / "multiline.txt"
content = "Line 1\nLine 2\nLine 3\nLine 4\n"
test_file.write_text(content)
test_file.write_text(content, encoding="utf-8")
result = view_file_fn(path="multiline.txt", **mock_workspace)
@@ -117,7 +117,7 @@ class TestViewFileTool:
def test_view_empty_file(self, view_file_fn, mock_workspace, mock_secure_path, tmp_path):
"""Viewing an empty file returns empty content."""
test_file = tmp_path / "empty.txt"
test_file.write_text("")
test_file.write_text("", encoding="utf-8")
result = view_file_fn(path="empty.txt", **mock_workspace)
@@ -143,7 +143,7 @@ class TestViewFileTool:
nested = tmp_path / "nested" / "dir"
nested.mkdir(parents=True)
test_file = nested / "file.txt"
test_file.write_text("nested content")
test_file.write_text("nested content", encoding="utf-8")
result = view_file_fn(path="nested/dir/file.txt", **mock_workspace)
@@ -156,7 +156,7 @@ class TestViewFileTool:
"""Viewing a file with max_size truncates content when exceeding limit."""
test_file = tmp_path / "large.txt"
content = "x" * 1000
test_file.write_text(content)
test_file.write_text(content, encoding="utf-8")
result = view_file_fn(path="large.txt", max_size=100, **mock_workspace)
@@ -171,7 +171,7 @@ class TestViewFileTool:
):
"""Viewing a file with negative max_size returns error."""
test_file = tmp_path / "test.txt"
test_file.write_text("content")
test_file.write_text("content", encoding="utf-8")
result = view_file_fn(path="test.txt", max_size=-1, **mock_workspace)
@@ -196,7 +196,7 @@ class TestViewFileTool:
):
"""Viewing a file with invalid encoding returns error."""
test_file = tmp_path / "test.txt"
test_file.write_text("content")
test_file.write_text("content", encoding="utf-8")
result = view_file_fn(path="test.txt", encoding="invalid-encoding", **mock_workspace)
@@ -225,12 +225,12 @@ class TestWriteToFileTool:
# Verify file was created
created_file = tmp_path / "new_file.txt"
assert created_file.exists()
assert created_file.read_text() == "Test content"
assert created_file.read_text(encoding="utf-8") == "Test content"
def test_write_append_mode(self, write_to_file_fn, mock_workspace, mock_secure_path, tmp_path):
"""Writing with append=True appends to existing file."""
test_file = tmp_path / "append_test.txt"
test_file.write_text("Line 1\n")
test_file.write_text("Line 1\n", encoding="utf-8")
result = write_to_file_fn(
path="append_test.txt", content="Line 2\n", append=True, **mock_workspace
@@ -238,20 +238,20 @@ class TestWriteToFileTool:
assert result["success"] is True
assert result["mode"] == "appended"
assert test_file.read_text() == "Line 1\nLine 2\n"
assert test_file.read_text(encoding="utf-8") == "Line 1\nLine 2\n"
def test_write_overwrite_existing(
self, write_to_file_fn, mock_workspace, mock_secure_path, tmp_path
):
"""Writing to existing file overwrites it by default."""
test_file = tmp_path / "overwrite.txt"
test_file.write_text("Original content")
test_file.write_text("Original content", encoding="utf-8")
result = write_to_file_fn(path="overwrite.txt", content="New content", **mock_workspace)
assert result["success"] is True
assert result["mode"] == "written"
assert test_file.read_text() == "New content"
assert test_file.read_text(encoding="utf-8") == "New content"
def test_write_creates_parent_directories(
self, write_to_file_fn, mock_workspace, mock_secure_path, tmp_path
@@ -262,7 +262,7 @@ class TestWriteToFileTool:
assert result["success"] is True
created_file = tmp_path / "nested" / "dir" / "file.txt"
assert created_file.exists()
assert created_file.read_text() == "Test"
assert created_file.read_text(encoding="utf-8") == "Test"
def test_write_empty_content(
self, write_to_file_fn, mock_workspace, mock_secure_path, tmp_path
@@ -274,7 +274,7 @@ class TestWriteToFileTool:
assert result["bytes_written"] == 0
created_file = tmp_path / "empty.txt"
assert created_file.exists()
assert created_file.read_text() == ""
assert created_file.read_text(encoding="utf-8") == ""
class TestListDirTool:
@@ -290,8 +290,8 @@ class TestListDirTool:
def test_list_directory(self, list_dir_fn, mock_workspace, mock_secure_path, tmp_path):
"""Listing a directory returns all entries."""
# Create test files and directories
(tmp_path / "file1.txt").write_text("content")
(tmp_path / "file2.txt").write_text("content")
(tmp_path / "file1.txt").write_text("content", encoding="utf-8")
(tmp_path / "file2.txt").write_text("content", encoding="utf-8")
(tmp_path / "subdir").mkdir()
result = list_dir_fn(path=".", **mock_workspace)
@@ -328,8 +328,8 @@ class TestListDirTool:
self, list_dir_fn, mock_workspace, mock_secure_path, tmp_path
):
"""Listing a directory returns file sizes for files."""
(tmp_path / "small.txt").write_text("hi")
(tmp_path / "larger.txt").write_text("hello world")
(tmp_path / "small.txt").write_text("hi", encoding="utf-8")
(tmp_path / "larger.txt").write_text("hello world", encoding="utf-8")
(tmp_path / "subdir").mkdir()
result = list_dir_fn(path=".", **mock_workspace)
@@ -366,7 +366,7 @@ class TestReplaceFileContentTool:
):
"""Replacing content in a file works correctly."""
test_file = tmp_path / "replace_test.txt"
test_file.write_text("Hello World! Hello again!")
test_file.write_text("Hello World! Hello again!", encoding="utf-8")
result = replace_file_content_fn(
path="replace_test.txt", target="Hello", replacement="Hi", **mock_workspace
@@ -374,14 +374,14 @@ class TestReplaceFileContentTool:
assert result["success"] is True
assert result["occurrences_replaced"] == 2
assert test_file.read_text() == "Hi World! Hi again!"
assert test_file.read_text(encoding="utf-8") == "Hi World! Hi again!"
def test_replace_target_not_found(
self, replace_file_content_fn, mock_workspace, mock_secure_path, tmp_path
):
"""Replacing non-existent target returns error."""
test_file = tmp_path / "test.txt"
test_file.write_text("Hello World")
test_file.write_text("Hello World", encoding="utf-8")
result = replace_file_content_fn(
path="test.txt", target="nonexistent", replacement="new", **mock_workspace
@@ -406,7 +406,7 @@ class TestReplaceFileContentTool:
):
"""Replacing content with single occurrence works correctly."""
test_file = tmp_path / "single.txt"
test_file.write_text("Hello World")
test_file.write_text("Hello World", encoding="utf-8")
result = replace_file_content_fn(
path="single.txt", target="Hello", replacement="Hi", **mock_workspace
@@ -414,14 +414,14 @@ class TestReplaceFileContentTool:
assert result["success"] is True
assert result["occurrences_replaced"] == 1
assert test_file.read_text() == "Hi World"
assert test_file.read_text(encoding="utf-8") == "Hi World"
def test_replace_multiline_content(
self, replace_file_content_fn, mock_workspace, mock_secure_path, tmp_path
):
"""Replacing content across multiple lines works correctly."""
test_file = tmp_path / "multiline.txt"
test_file.write_text("Line 1\nTODO: fix this\nLine 3\nTODO: add tests\n")
test_file.write_text("Line 1\nTODO: fix this\nLine 3\nTODO: add tests\n", encoding="utf-8")
result = replace_file_content_fn(
path="multiline.txt", target="TODO:", replacement="DONE:", **mock_workspace
@@ -429,7 +429,8 @@ class TestReplaceFileContentTool:
assert result["success"] is True
assert result["occurrences_replaced"] == 2
assert test_file.read_text() == "Line 1\nDONE: fix this\nLine 3\nDONE: add tests\n"
expected = "Line 1\nDONE: fix this\nLine 3\nDONE: add tests\n"
assert test_file.read_text(encoding="utf-8") == expected
class TestGrepSearchTool:
@@ -447,7 +448,7 @@ class TestGrepSearchTool:
):
"""Searching a single file returns matches."""
test_file = tmp_path / "search_test.txt"
test_file.write_text("Line 1\nLine 2 with pattern\nLine 3")
test_file.write_text("Line 1\nLine 2 with pattern\nLine 3", encoding="utf-8")
result = grep_search_fn(path="search_test.txt", pattern="pattern", **mock_workspace)
@@ -462,7 +463,7 @@ class TestGrepSearchTool:
):
"""Searching with no matches returns empty list."""
test_file = tmp_path / "test.txt"
test_file.write_text("Hello World")
test_file.write_text("Hello World", encoding="utf-8")
result = grep_search_fn(path="test.txt", pattern="nonexistent", **mock_workspace)
@@ -475,13 +476,13 @@ class TestGrepSearchTool:
):
"""Searching directory non-recursively only searches immediate files."""
# Create files in root
(tmp_path / "file1.txt").write_text("pattern here")
(tmp_path / "file2.txt").write_text("no match here")
(tmp_path / "file1.txt").write_text("pattern here", encoding="utf-8")
(tmp_path / "file2.txt").write_text("no match here", encoding="utf-8")
# Create nested directory with file
nested = tmp_path / "nested"
nested.mkdir()
(nested / "nested_file.txt").write_text("pattern in nested")
(nested / "nested_file.txt").write_text("pattern in nested", encoding="utf-8")
result = grep_search_fn(path=".", pattern="pattern", recursive=False, **mock_workspace)
@@ -494,12 +495,12 @@ class TestGrepSearchTool:
):
"""Searching directory recursively finds matches in subdirectories."""
# Create files in root
(tmp_path / "file1.txt").write_text("pattern here")
(tmp_path / "file1.txt").write_text("pattern here", encoding="utf-8")
# Create nested directory with file
nested = tmp_path / "nested"
nested.mkdir()
(nested / "nested_file.txt").write_text("pattern in nested")
(nested / "nested_file.txt").write_text("pattern in nested", encoding="utf-8")
result = grep_search_fn(path=".", pattern="pattern", recursive=True, **mock_workspace)
@@ -512,7 +513,7 @@ class TestGrepSearchTool:
):
"""Searching with regex pattern finds complex matches."""
test_file = tmp_path / "regex_test.txt"
test_file.write_text("foo123bar\nfoo456bar\nbaz789baz\n")
test_file.write_text("foo123bar\nfoo456bar\nbaz789baz\n", encoding="utf-8")
result = grep_search_fn(path="regex_test.txt", pattern=r"foo\d+bar", **mock_workspace)
@@ -526,7 +527,7 @@ class TestGrepSearchTool:
):
"""Searching returns one match per line even with multiple occurrences."""
test_file = tmp_path / "multi_match.txt"
test_file.write_text("hello hello hello\nworld\nhello again")
test_file.write_text("hello hello hello\nworld\nhello again", encoding="utf-8")
result = grep_search_fn(path="multi_match.txt", pattern="hello", **mock_workspace)
@@ -573,7 +574,7 @@ class TestExecuteCommandTool:
):
"""Executing ls command lists files."""
# Create a test file
(tmp_path / "testfile.txt").write_text("content")
(tmp_path / "testfile.txt").write_text("content", encoding="utf-8")
result = execute_command_fn(command=f"ls {tmp_path}", **mock_workspace)
@@ -610,7 +611,7 @@ class TestApplyDiffTool:
def test_apply_diff_successful(self, apply_diff_fn, mock_workspace, mock_secure_path, tmp_path):
"""Applying a valid diff successfully modifies the file."""
test_file = tmp_path / "diff_test.txt"
test_file.write_text("Hello World")
test_file.write_text("Hello World", encoding="utf-8")
# Create a simple diff using diff_match_patch format
import diff_match_patch as dmp_module
@@ -624,13 +625,13 @@ class TestApplyDiffTool:
assert result["success"] is True
assert result["all_successful"] is True
assert result["patches_applied"] > 0
assert test_file.read_text() == "Hello Universe"
assert test_file.read_text(encoding="utf-8") == "Hello Universe"
def test_apply_diff_multiline(self, apply_diff_fn, mock_workspace, mock_secure_path, tmp_path):
"""Applying diff to multiline content works correctly."""
test_file = tmp_path / "multiline.txt"
original = "Line 1\nLine 2\nLine 3\n"
test_file.write_text(original)
test_file.write_text(original, encoding="utf-8")
import diff_match_patch as dmp_module
@@ -643,7 +644,7 @@ class TestApplyDiffTool:
assert result["success"] is True
assert result["all_successful"] is True
assert test_file.read_text() == modified
assert test_file.read_text(encoding="utf-8") == modified
def test_apply_diff_invalid_patch(
self, apply_diff_fn, mock_workspace, mock_secure_path, tmp_path
@@ -651,7 +652,7 @@ class TestApplyDiffTool:
"""Applying an invalid diff handles gracefully."""
test_file = tmp_path / "test.txt"
original_content = "Original content"
test_file.write_text(original_content)
test_file.write_text(original_content, encoding="utf-8")
# Invalid diff text
result = apply_diff_fn(path="test.txt", diff_text="invalid diff format", **mock_workspace)
@@ -660,7 +661,7 @@ class TestApplyDiffTool:
if "error" not in result:
assert result.get("patches_applied", 0) == 0
# File should remain unchanged
assert test_file.read_text() == original_content
assert test_file.read_text(encoding="utf-8") == original_content
class TestApplyPatchTool:
@@ -685,7 +686,7 @@ class TestApplyPatchTool:
):
"""Applying a valid patch successfully modifies the file."""
test_file = tmp_path / "patch_test.txt"
test_file.write_text("Hello World")
test_file.write_text("Hello World", encoding="utf-8")
# Create a simple patch using diff_match_patch format
import diff_match_patch as dmp_module
@@ -699,7 +700,7 @@ class TestApplyPatchTool:
assert result["success"] is True
assert result["all_successful"] is True
assert result["patches_applied"] > 0
assert test_file.read_text() == "Hello Python"
assert test_file.read_text(encoding="utf-8") == "Hello Python"
def test_apply_patch_multiline(
self, apply_patch_fn, mock_workspace, mock_secure_path, tmp_path
@@ -707,7 +708,7 @@ class TestApplyPatchTool:
"""Applying patch to multiline content works correctly."""
test_file = tmp_path / "multiline.txt"
original = "Line 1\nLine 2\nLine 3\n"
test_file.write_text(original)
test_file.write_text(original, encoding="utf-8")
import diff_match_patch as dmp_module
@@ -720,7 +721,7 @@ class TestApplyPatchTool:
assert result["success"] is True
assert result["all_successful"] is True
assert test_file.read_text() == modified
assert test_file.read_text(encoding="utf-8") == modified
def test_apply_patch_invalid_patch(
self, apply_patch_fn, mock_workspace, mock_secure_path, tmp_path
@@ -728,7 +729,7 @@ class TestApplyPatchTool:
"""Applying an invalid patch handles gracefully."""
test_file = tmp_path / "test.txt"
original_content = "Original content"
test_file.write_text(original_content)
test_file.write_text(original_content, encoding="utf-8")
# Invalid patch text
result = apply_patch_fn(
@@ -739,7 +740,7 @@ class TestApplyPatchTool:
if "error" not in result:
assert result.get("patches_applied", 0) == 0
# File should remain unchanged
assert test_file.read_text() == original_content
assert test_file.read_text(encoding="utf-8") == original_content
def test_apply_patch_multiple_changes(
self, apply_patch_fn, mock_workspace, mock_secure_path, tmp_path
@@ -747,7 +748,7 @@ class TestApplyPatchTool:
"""Applying patch with multiple changes works correctly."""
test_file = tmp_path / "complex.txt"
original = "Function foo() {\n return 42;\n}\n"
test_file.write_text(original)
test_file.write_text(original, encoding="utf-8")
import diff_match_patch as dmp_module
@@ -760,4 +761,4 @@ class TestApplyPatchTool:
assert result["success"] is True
assert result["all_successful"] is True
assert test_file.read_text() == modified
assert test_file.read_text(encoding="utf-8") == modified
+1 -1
View File
@@ -28,7 +28,7 @@ class TestPdfReadTool:
def test_read_pdf_invalid_extension(self, pdf_read_fn, tmp_path: Path):
"""Reading non-PDF file returns error."""
txt_file = tmp_path / "test.txt"
txt_file.write_text("not a pdf")
txt_file.write_text("not a pdf", encoding="utf-8")
result = pdf_read_fn(file_path=str(txt_file))
+4 -2
View File
@@ -47,7 +47,8 @@ def runtime_logs_dir(tmp_path: Path) -> Path:
"duration_ms": 3000,
"execution_quality": "clean",
}
)
),
encoding="utf-8",
)
_write_jsonl(
run1_dir / "details.jsonl",
@@ -143,7 +144,8 @@ def runtime_logs_dir(tmp_path: Path) -> Path:
"duration_ms": 60000,
"execution_quality": "failed",
}
)
),
encoding="utf-8",
)
_write_jsonl(
run2_dir / "details.jsonl",
+2 -2
View File
@@ -238,7 +238,7 @@ class TestGetSecurePath:
# Create a target file and a symlink to it
target_file = session_dir / "target.txt"
target_file.write_text("content")
target_file.write_text("content", encoding="utf-8")
symlink_path = session_dir / "link_to_target"
symlink_path.symlink_to(target_file)
@@ -263,7 +263,7 @@ class TestGetSecurePath:
# Create a symlink inside session pointing outside
outside_target = self.workspaces_dir / "outside_file.txt"
outside_target.write_text("sensitive data")
outside_target.write_text("sensitive data", encoding="utf-8")
symlink_path = session_dir / "escape_link"
symlink_path.symlink_to(outside_target)