Compare commits
3 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 8dd1d6e3aa | |
| | 23c66d1059 | |
| | b9d529d94e | |
@@ -61,6 +61,137 @@ def get_preferred_model() -> str:
    return "anthropic/claude-sonnet-4-20250514"


def get_preferred_worker_model() -> str | None:
    """Return the user's preferred worker LLM model, or None if not configured.

    Reads from the ``worker_llm`` section of ~/.hive/configuration.json.
    Returns None when no worker-specific model is set, so callers can
    fall back to the default (queen) model via ``get_preferred_model()``.
    """
    worker_llm = get_hive_config().get("worker_llm", {})
    if worker_llm.get("provider") and worker_llm.get("model"):
        provider = str(worker_llm["provider"])
        model = str(worker_llm["model"]).strip()
        if provider.lower() == "openrouter" and model.lower().startswith("openrouter/"):
            model = model[len("openrouter/") :]
        if model:
            return f"{provider}/{model}"
    return None


def get_worker_api_key() -> str | None:
    """Return the API key for the worker LLM, falling back to the default key."""
    worker_llm = get_hive_config().get("worker_llm", {})
    if not worker_llm:
        return get_api_key()

    # Worker-specific subscription / env var
    if worker_llm.get("use_claude_code_subscription"):
        try:
            from framework.runner.runner import get_claude_code_token

            token = get_claude_code_token()
            if token:
                return token
        except ImportError:
            pass

    if worker_llm.get("use_codex_subscription"):
        try:
            from framework.runner.runner import get_codex_token

            token = get_codex_token()
            if token:
                return token
        except ImportError:
            pass

    if worker_llm.get("use_kimi_code_subscription"):
        try:
            from framework.runner.runner import get_kimi_code_token

            token = get_kimi_code_token()
            if token:
                return token
        except ImportError:
            pass

    api_key_env_var = worker_llm.get("api_key_env_var")
    if api_key_env_var:
        return os.environ.get(api_key_env_var)

    # Fall back to default key
    return get_api_key()


def get_worker_api_base() -> str | None:
    """Return the api_base for the worker LLM, falling back to the default."""
    worker_llm = get_hive_config().get("worker_llm", {})
    if not worker_llm:
        return get_api_base()

    if worker_llm.get("use_codex_subscription"):
        return "https://chatgpt.com/backend-api/codex"
    if worker_llm.get("use_kimi_code_subscription"):
        return "https://api.kimi.com/coding"
    if worker_llm.get("api_base"):
        return worker_llm["api_base"]
    if str(worker_llm.get("provider", "")).lower() == "openrouter":
        return OPENROUTER_API_BASE
    return None


def get_worker_llm_extra_kwargs() -> dict[str, Any]:
    """Return extra kwargs for the worker LLM provider."""
    worker_llm = get_hive_config().get("worker_llm", {})
    if not worker_llm:
        return get_llm_extra_kwargs()

    if worker_llm.get("use_claude_code_subscription"):
        api_key = get_worker_api_key()
        if api_key:
            return {
                "extra_headers": {"authorization": f"Bearer {api_key}"},
            }
    if worker_llm.get("use_codex_subscription"):
        api_key = get_worker_api_key()
        if api_key:
            headers: dict[str, str] = {
                "Authorization": f"Bearer {api_key}",
                "User-Agent": "CodexBar",
            }
            try:
                from framework.runner.runner import get_codex_account_id

                account_id = get_codex_account_id()
                if account_id:
                    headers["ChatGPT-Account-Id"] = account_id
            except ImportError:
                pass
            return {
                "extra_headers": headers,
                "store": False,
                "allowed_openai_params": ["store"],
            }
    return {}


def get_worker_max_tokens() -> int:
    """Return max_tokens for the worker LLM, falling back to default."""
    worker_llm = get_hive_config().get("worker_llm", {})
    if worker_llm and "max_tokens" in worker_llm:
        return worker_llm["max_tokens"]
    return get_max_tokens()


def get_worker_max_context_tokens() -> int:
    """Return max_context_tokens for the worker LLM, falling back to default."""
    worker_llm = get_hive_config().get("worker_llm", {})
    if worker_llm and "max_context_tokens" in worker_llm:
        return worker_llm["max_context_tokens"]
    return get_max_context_tokens()


def get_max_tokens() -> int:
    """Return the configured max_tokens, falling back to DEFAULT_MAX_TOKENS."""
    return get_hive_config().get("llm", {}).get("max_tokens", DEFAULT_MAX_TOKENS)
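All of these helpers read the same `worker_llm` block of `~/.hive/configuration.json`. As an illustrative sketch only (keys are taken from the code above and from the setup script later in this diff; the model id mirrors the script's own OpenRouter example and is not a default), a configured worker section might look like:

```json
{
  "worker_llm": {
    "provider": "openrouter",
    "model": "x-ai/grok-4.20-beta",
    "api_base": "https://openrouter.ai/api/v1",
    "api_key_env_var": "OPENROUTER_API_KEY",
    "max_tokens": 8192,
    "max_context_tokens": 120000
  }
}
```

Omitting the section entirely makes every `get_worker_*` helper fall back to the queen's `llm` configuration.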
@@ -3980,68 +3980,6 @@ class EventLoopNode(NodeProtocol):
        ratio_before = conversation.usage_ratio()
        phase_grad = getattr(ctx, "continuous_mode", False)

        # Debug snapshot helper
        def _snap(name: str, **extra: Any) -> dict[str, Any]:
            roles: dict[str, int] = {}
            for m in conversation.messages:
                roles[m.role] = roles.get(m.role, 0) + 1
            return {
                "name": name,
                "message_count": conversation.message_count,
                "estimated_tokens": conversation.estimate_tokens(),
                "usage_ratio": f"{conversation.usage_ratio():.2%}",
                "max_context_tokens": self._config.max_context_tokens,
                "messages_by_role": roles,
                **extra,
            }

        initial = _snap("initial")

        # When over budget, attach a full message inventory so the log
        # shows exactly what is consuming the context window.
        if ratio_before >= 1.0:
            inventory: list[dict[str, Any]] = []
            for m in conversation.messages:
                content_chars = len(m.content)
                tc_chars = 0
                tool_name = None
                if m.tool_calls:
                    for tc in m.tool_calls:
                        args = tc.get("function", {}).get("arguments", "")
                        tc_chars += len(args) if isinstance(args, str) else len(json.dumps(args))
                    names = [tc.get("function", {}).get("name", "?") for tc in m.tool_calls]
                    tool_name = ", ".join(names)
                elif m.role == "tool" and m.tool_use_id:
                    # Try to find the tool name from the preceding assistant message
                    for prev in conversation.messages:
                        if prev.tool_calls:
                            for tc in prev.tool_calls:
                                if tc.get("id") == m.tool_use_id:
                                    tool_name = tc.get("function", {}).get("name", "?")
                                    break
                        if tool_name:
                            break
                entry: dict[str, Any] = {
                    "seq": m.seq,
                    "role": m.role,
                    "content_chars": content_chars,
                }
                if tc_chars:
                    entry["tool_call_args_chars"] = tc_chars
                if tool_name:
                    entry["tool"] = tool_name
                if m.is_error:
                    entry["is_error"] = True
                if m.phase_id:
                    entry["phase"] = m.phase_id
                # Content preview for the biggest messages
                if content_chars > 2000:
                    entry["preview"] = m.content[:200] + "…"
                inventory.append(entry)
            initial["message_inventory"] = inventory

        debug_steps: list[dict[str, Any]] = [initial]

        # --- Step 1: Prune old tool results (free, no LLM) ---
        protect = max(2000, self._config.max_context_tokens // 12)
        pruned = await conversation.prune_old_tool_results(
@@ -4055,10 +3993,8 @@ class EventLoopNode(NodeProtocol):
            ratio_before * 100,
            conversation.usage_ratio() * 100,
        )
        debug_steps.append(_snap("after_prune", messages_pruned=pruned))
        if not conversation.needs_compaction():
            await self._log_compaction(ctx, conversation, ratio_before)
            self._write_compaction_debug_log(ctx, debug_steps)
            return

        # --- Step 2: Standard structure-preserving compaction (free, no LLM) ---
@@ -4070,14 +4006,8 @@ class EventLoopNode(NodeProtocol):
            keep_recent=4,
            phase_graduated=phase_grad,
        )
        debug_steps.append(_snap(
            "after_structural",
            spillover_dir=spill_dir,
            keep_recent=4,
        ))
        if not conversation.needs_compaction():
            await self._log_compaction(ctx, conversation, ratio_before)
            self._write_compaction_debug_log(ctx, debug_steps)
            return

        # --- Step 3: LLM summary compaction ---
@@ -4100,20 +4030,11 @@ class EventLoopNode(NodeProtocol):
                keep_recent=2,
                phase_graduated=phase_grad,
            )
            debug_steps.append(_snap(
                "after_llm_compact",
                summary_chars=len(summary),
            ))
        except Exception as e:
            logger.warning("LLM compaction failed: %s", e)
            debug_steps.append(_snap(
                "llm_compact_failed",
                error=str(e),
            ))

        if not conversation.needs_compaction():
            await self._log_compaction(ctx, conversation, ratio_before)
            self._write_compaction_debug_log(ctx, debug_steps)
            return

        # --- Step 4: Emergency deterministic summary (LLM failed/unavailable) ---
@@ -4127,12 +4048,7 @@ class EventLoopNode(NodeProtocol):
            keep_recent=1,
            phase_graduated=phase_grad,
        )
        debug_steps.append(_snap(
            "after_emergency",
            summary_chars=len(summary),
        ))
        await self._log_compaction(ctx, conversation, ratio_before)
        self._write_compaction_debug_log(ctx, debug_steps)

    # --- LLM compaction with binary-search splitting ----------------------

@@ -4346,91 +4262,6 @@ class EventLoopNode(NodeProtocol):
            )
        )

    @staticmethod
    def _write_compaction_debug_log(
        ctx: NodeContext,
        steps: list[dict[str, Any]],
    ) -> None:
        """Write detailed compaction analysis to ~/.hive/compaction_log/.

        Only runs when HIVE_COMPACTION_DEBUG is set in the environment.
        Each compaction produces a timestamped markdown file.
        """
        import os

        if not os.environ.get("HIVE_COMPACTION_DEBUG"):
            return

        log_dir = Path.home() / ".hive" / "compaction_log"
        log_dir.mkdir(parents=True, exist_ok=True)

        ts = datetime.now(UTC).strftime("%Y%m%dT%H%M%S_%f")
        node_label = ctx.node_id.replace("/", "_")
        log_path = log_dir / f"{ts}_{node_label}.md"

        lines: list[str] = []
        lines.append(f"# Compaction Debug — {ctx.node_id}")
        lines.append(f"**Time:** {datetime.now(UTC).isoformat()}")
        lines.append(f"**Node:** {ctx.node_spec.name} (`{ctx.node_id}`)")
        if ctx.stream_id:
            lines.append(f"**Stream:** {ctx.stream_id}")
        lines.append("")

        for step in steps:
            name = step.get("name", "unknown")
            lines.append(f"## Step: {name}")
            for key, val in step.items():
                if key == "name":
                    continue
                if key == "messages_by_role":
                    lines.append(f"- **{key}:**")
                    for role, count in val.items():
                        lines.append(f"  - {role}: {count}")
                elif key == "message_inventory":
                    total_chars = sum(e.get("content_chars", 0) + e.get("tool_call_args_chars", 0) for e in val)
                    lines.append(f"### Message Inventory ({len(val)} messages, {total_chars:,} total chars)")
                    lines.append("")
                    # Sort descending by size for the table
                    ranked = sorted(val, key=lambda e: e.get("content_chars", 0) + e.get("tool_call_args_chars", 0), reverse=True)
                    lines.append("| # | seq | role | tool | chars | % of total | flags |")
                    lines.append("|---|-----|------|------|------:|------------|-------|")
                    for i, entry in enumerate(ranked, 1):
                        chars = entry.get("content_chars", 0) + entry.get("tool_call_args_chars", 0)
                        pct = (chars / total_chars * 100) if total_chars else 0
                        tool = entry.get("tool", "")
                        flags = []
                        if entry.get("is_error"):
                            flags.append("error")
                        if entry.get("phase"):
                            flags.append(f"phase={entry['phase']}")
                        lines.append(
                            f"| {i} | {entry['seq']} | {entry['role']} | {tool} "
                            f"| {chars:,} | {pct:.1f}% | {', '.join(flags)} |"
                        )
                    # Previews for large messages
                    large = [e for e in ranked if e.get("preview")]
                    if large:
                        lines.append("")
                        lines.append("#### Large message previews")
                        for entry in large:
                            lines.append(f"\n**seq={entry['seq']}** ({entry['role']}, {entry.get('tool', '')}):")
                            lines.append(f"```\n{entry['preview']}\n```")
                elif key == "discarded_messages":
                    lines.append(f"- **{key}:** ({len(val)} messages)")
                    for msg_info in val[:50]:  # cap at 50
                        lines.append(f"  - seq={msg_info['seq']} role={msg_info['role']} chars={msg_info['chars']}")
                    if len(val) > 50:
                        lines.append(f"  - ... and {len(val) - 50} more")
                else:
                    lines.append(f"- **{key}:** {val}")
            lines.append("")

        try:
            log_path.write_text("\n".join(lines), encoding="utf-8")
            logger.debug("Compaction debug log written to %s", log_path)
        except OSError:
            logger.debug("Failed to write compaction debug log to %s", log_path)

    def _build_emergency_summary(
        self,
        ctx: NodeContext,
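When `HIVE_COMPACTION_DEBUG` is set, `_write_compaction_debug_log` emits one markdown file per compaction under `~/.hive/compaction_log/`. A rough sketch of what such a file looks like, assembled from the `lines.append(...)` calls above (the node id and all numbers are made up for illustration):

```markdown
# Compaction Debug — queen/event_loop
**Time:** 2025-01-01T00:00:00+00:00
**Node:** event_loop (`queen/event_loop`)

## Step: initial
- **message_count:** 142
- **estimated_tokens:** 181204
- **usage_ratio:** 100.67%
- **max_context_tokens:** 180000
- **messages_by_role:**
  - system: 1
  - user: 12
  - assistant: 65
  - tool: 64
```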
@@ -210,16 +210,6 @@ def configure_logging(
    # printed on every single completion call). Warnings and errors still show.
    logging.getLogger("LiteLLM").setLevel(logging.WARNING)

    # Suppress the "Provider List: ..." banner litellm prints to stdout via
    # print() on every completion call. This is independent of log format.
    try:
        import litellm as _litellm

        if hasattr(_litellm, "suppress_debug_info"):
            _litellm.suppress_debug_info = True  # type: ignore[attr-defined]
    except (ImportError, AttributeError):
        pass

    # When in JSON mode, configure known third-party loggers to use JSON formatter
    # This ensures libraries like LiteLLM, httpcore also output clean JSON
    if format == "json":
@@ -242,6 +232,16 @@ def _disable_third_party_colors() -> None:
    os.environ["NO_COLOR"] = "1"
    os.environ["FORCE_COLOR"] = "0"

    # Disable LiteLLM debug/verbose output colors if available
    try:
        import litellm

        # LiteLLM respects NO_COLOR, but we can also suppress debug info
        if hasattr(litellm, "suppress_debug_info"):
            litellm.suppress_debug_info = True  # type: ignore[attr-defined]
    except (ImportError, AttributeError):
        pass


def set_trace_context(**kwargs: Any) -> None:
    """
@@ -1141,7 +1141,10 @@ class AgentRunner:

        # Create LLM provider
        # Uses LiteLLM which auto-detects the provider from model name
        if self.mock_mode:
        # Skip if already injected (e.g. worker agents with a pre-built LLM)
        if self._llm is not None:
            pass  # LLM already configured externally
        elif self.mock_mode:
            # Use mock LLM for testing without real API calls
            from framework.llm.mock import MockLLMProvider

@@ -287,7 +287,17 @@ class SessionManager:
        try:
            # Blocking I/O — load in executor
            loop = asyncio.get_running_loop()
            resolved_model = model or self._model

            # Prioritize: explicit model arg > worker-specific model > session default
            from framework.config import (
                get_preferred_worker_model,
                get_worker_api_base,
                get_worker_api_key,
                get_worker_llm_extra_kwargs,
            )

            worker_model = get_preferred_worker_model()
            resolved_model = model or worker_model or self._model
            runner = await loop.run_in_executor(
                None,
                lambda: AgentRunner.load(
@@ -299,6 +309,22 @@ class SessionManager:
                ),
            )

            # If a worker-specific model is configured, build an LLM provider
            # with the correct worker credentials so _setup() doesn't fall back
            # to the queen's llm config (which may be a different provider).
            if worker_model and not model:
                from framework.llm.litellm import LiteLLMProvider

                worker_api_key = get_worker_api_key()
                worker_api_base = get_worker_api_base()
                worker_extra = get_worker_llm_extra_kwargs()
                runner._llm = LiteLLMProvider(
                    model=resolved_model,
                    api_key=worker_api_key,
                    api_base=worker_api_base,
                    **worker_extra,
                )

            # Setup with session's event bus
            if runner._agent_runtime is None:
                await loop.run_in_executor(
@@ -923,6 +949,7 @@ class SessionManager:
        # then use max+1 as offset so resumed sessions produce monotonically
        # increasing iteration values — preventing frontend message ID collisions.
        iteration_offset = 0
        last_phase = ""
        events_path = queen_dir / "events.jsonl"
        try:
            if events_path.exists():
@@ -934,17 +961,25 @@ class SessionManager:
                            continue
                        try:
                            evt = json.loads(line)
                            it = evt.get("data", {}).get("iteration")
                            data = evt.get("data", {})
                            it = data.get("iteration")
                            if isinstance(it, int) and it > max_iter:
                                max_iter = it
                            # Track the latest queen phase from QUEEN_PHASE_CHANGED events
                            if evt.get("type") == "queen_phase_changed":
                                phase = data.get("phase")
                                if phase:
                                    last_phase = phase
                        except (json.JSONDecodeError, TypeError):
                            continue
            if max_iter >= 0:
                iteration_offset = max_iter + 1
                logger.info(
                    "Session '%s' resuming with iteration_offset=%d (from events.jsonl max)",
                    "Session '%s' resuming with iteration_offset=%d"
                    " (from events.jsonl max), last phase: %s",
                    session.id,
                    iteration_offset,
                    last_phase or "unknown",
                )
        except OSError:
            pass

@@ -1880,6 +1880,9 @@ if ($SelectedProviderId) {
        Write-Host " -> " -NoNewline
        Write-Color -Text $SelectedModel -Color DarkGray
    }
    Write-Color -Text " To use a different model for worker agents, run:" -Color DarkGray
    Write-Host " " -NoNewline
    Write-Color -Text ".\scripts\setup_worker_model.ps1" -Color Cyan
    Write-Host ""
}

@@ -1767,6 +1767,8 @@ if [ -n "$SELECTED_PROVIDER_ID" ]; then
    else
        echo -e " ${CYAN}$SELECTED_PROVIDER_ID${NC} → ${DIM}$SELECTED_MODEL${NC}"
    fi
    echo -e " ${DIM}To use a different model for worker agents, run:${NC}"
    echo -e " ${CYAN}./scripts/setup_worker_model.sh${NC}"
    echo ""
fi

@@ -0,0 +1,940 @@
#Requires -Version 5.1
<#
.SYNOPSIS
    setup_worker_model.ps1 - Configure a separate LLM model for worker agents

.DESCRIPTION
    Worker agents can use a different (e.g. cheaper/faster) model than the
    queen agent. This script writes a "worker_llm" section to
    ~/.hive/configuration.json. If no worker model is configured, workers
    fall back to the default (queen) model.

.NOTES
    Run from the project root: .\scripts\setup_worker_model.ps1
#>

$ErrorActionPreference = "Continue"
$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$ProjectDir = Split-Path -Parent $ScriptDir
$UvHelperPath = Join-Path $ScriptDir "uv-discovery.ps1"
$HiveConfigDir = Join-Path $env:USERPROFILE ".hive"
$HiveConfigFile = Join-Path $HiveConfigDir "configuration.json"
$HiveLlmEndpoint = "https://api.adenhq.com"

. $UvHelperPath

# ============================================================
# Colors / helpers
# ============================================================

function Write-Color {
    param(
        [string]$Text,
        [ConsoleColor]$Color = [ConsoleColor]::White,
        [switch]$NoNewline
    )
    $prev = $Host.UI.RawUI.ForegroundColor
    $Host.UI.RawUI.ForegroundColor = $Color
    if ($NoNewline) { Write-Host $Text -NoNewline }
    else { Write-Host $Text }
    $Host.UI.RawUI.ForegroundColor = $prev
}

function Write-Ok {
    param([string]$Text)
    Write-Color -Text "$([char]0x2B22) $Text" -Color Green
}

function Write-Warn {
    param([string]$Text)
    Write-Color -Text "$([char]0x2B22) $Text" -Color Yellow
}

function Write-Fail {
    param([string]$Text)
    Write-Color -Text " X $Text" -Color Red
}

# ============================================================
# Provider / model data
# ============================================================

$ProviderMap = [ordered]@{
    ANTHROPIC_API_KEY  = @{ Name = "Anthropic (Claude)"; Id = "anthropic" }
    OPENAI_API_KEY     = @{ Name = "OpenAI (GPT)"; Id = "openai" }
    GEMINI_API_KEY     = @{ Name = "Google Gemini"; Id = "gemini" }
    GOOGLE_API_KEY     = @{ Name = "Google AI"; Id = "google" }
    GROQ_API_KEY       = @{ Name = "Groq"; Id = "groq" }
    CEREBRAS_API_KEY   = @{ Name = "Cerebras"; Id = "cerebras" }
    OPENROUTER_API_KEY = @{ Name = "OpenRouter"; Id = "openrouter" }
    MISTRAL_API_KEY    = @{ Name = "Mistral"; Id = "mistral" }
    TOGETHER_API_KEY   = @{ Name = "Together AI"; Id = "together" }
    DEEPSEEK_API_KEY   = @{ Name = "DeepSeek"; Id = "deepseek" }
}

$DefaultModels = @{
    anthropic   = "claude-haiku-4-5-20251001"
    openai      = "gpt-5-mini"
    gemini      = "gemini-3-flash-preview"
    groq        = "moonshotai/kimi-k2-instruct-0905"
    cerebras    = "zai-glm-4.7"
    mistral     = "mistral-large-latest"
    together_ai = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
    deepseek    = "deepseek-chat"
}

# Model choices: array of hashtables per provider
$ModelChoices = @{
    anthropic = @(
        @{ Id = "claude-haiku-4-5-20251001"; Label = "Haiku 4.5 - Fast + cheap (recommended)"; MaxTokens = 8192; MaxContextTokens = 180000 },
        @{ Id = "claude-sonnet-4-20250514"; Label = "Sonnet 4 - Fast + capable"; MaxTokens = 8192; MaxContextTokens = 180000 },
        @{ Id = "claude-sonnet-4-5-20250929"; Label = "Sonnet 4.5 - Best balance"; MaxTokens = 16384; MaxContextTokens = 180000 },
        @{ Id = "claude-opus-4-6"; Label = "Opus 4.6 - Most capable"; MaxTokens = 32768; MaxContextTokens = 180000 }
    )
    openai = @(
        @{ Id = "gpt-5-mini"; Label = "GPT-5 Mini - Fast + cheap (recommended)"; MaxTokens = 16384; MaxContextTokens = 120000 },
        @{ Id = "gpt-5.2"; Label = "GPT-5.2 - Most capable"; MaxTokens = 16384; MaxContextTokens = 120000 }
    )
    gemini = @(
        @{ Id = "gemini-3-flash-preview"; Label = "Gemini 3 Flash - Fast (recommended)"; MaxTokens = 8192; MaxContextTokens = 900000 },
        @{ Id = "gemini-3.1-pro-preview"; Label = "Gemini 3.1 Pro - Best quality"; MaxTokens = 8192; MaxContextTokens = 900000 }
    )
    groq = @(
        @{ Id = "moonshotai/kimi-k2-instruct-0905"; Label = "Kimi K2 - Best quality (recommended)"; MaxTokens = 8192; MaxContextTokens = 120000 },
        @{ Id = "openai/gpt-oss-120b"; Label = "GPT-OSS 120B - Fast reasoning"; MaxTokens = 8192; MaxContextTokens = 120000 }
    )
    cerebras = @(
        @{ Id = "zai-glm-4.7"; Label = "ZAI-GLM 4.7 - Best quality (recommended)"; MaxTokens = 8192; MaxContextTokens = 120000 },
        @{ Id = "qwen3-235b-a22b-instruct-2507"; Label = "Qwen3 235B - Frontier reasoning"; MaxTokens = 8192; MaxContextTokens = 120000 }
    )
}

function Normalize-OpenRouterModelId {
    param([string]$ModelId)
    $normalized = if ($ModelId) { $ModelId.Trim() } else { "" }
    if ($normalized -match '(?i)^openrouter/(.+)$') {
        $normalized = $matches[1]
    }
    return $normalized
}

function Get-ModelSelection {
    param([string]$ProviderId)

    if ($ProviderId -eq "openrouter") {
        $defaultModel = ""
        if ($PrevModel -and $PrevProvider -eq $ProviderId) {
            $defaultModel = Normalize-OpenRouterModelId $PrevModel
        }
        Write-Host ""
        Write-Color -Text "Enter your OpenRouter model id:" -Color White
        Write-Color -Text " Paste from openrouter.ai (example: x-ai/grok-4.20-beta)" -Color DarkGray
        Write-Color -Text " If calls fail with guardrail/privacy errors: openrouter.ai/settings/privacy" -Color DarkGray
        Write-Host ""
        while ($true) {
            if ($defaultModel) {
                $rawModel = Read-Host "Model id [$defaultModel]"
                if ([string]::IsNullOrWhiteSpace($rawModel)) { $rawModel = $defaultModel }
            } else {
                $rawModel = Read-Host "Model id"
            }
            $normalizedModel = Normalize-OpenRouterModelId $rawModel
            if (-not [string]::IsNullOrWhiteSpace($normalizedModel)) {
                $openrouterKey = $null
                if ($SelectedEnvVar) {
                    $openrouterKey = [System.Environment]::GetEnvironmentVariable($SelectedEnvVar, "Process")
                    if (-not $openrouterKey) {
                        $openrouterKey = [System.Environment]::GetEnvironmentVariable($SelectedEnvVar, "User")
                    }
                }

                if ($openrouterKey) {
                    Write-Host " Verifying model id... " -NoNewline
                    try {
                        $modelApiBase = if ($SelectedApiBase) { $SelectedApiBase } else { "https://openrouter.ai/api/v1" }
                        Push-Location $ProjectDir
                        $hcResult = & $UvCmd run python (Join-Path $ProjectDir "scripts/check_llm_key.py") "openrouter" $openrouterKey $modelApiBase $normalizedModel 2>$null
                        Pop-Location
                        $hcJson = $hcResult | ConvertFrom-Json
                        if ($hcJson.valid -eq $true) {
                            if ($hcJson.model) {
                                $normalizedModel = [string]$hcJson.model
                            }
                            Write-Color -Text "ok" -Color Green
                        } elseif ($hcJson.valid -eq $false) {
                            Write-Color -Text "failed" -Color Red
                            Write-Warn $hcJson.message
                            Write-Host ""
                            continue
                        } else {
                            Write-Color -Text "--" -Color Yellow
                            Write-Color -Text " Could not verify model id (network issue). Continuing with your selection." -Color DarkGray
                        }
                    } catch {
                        Pop-Location
                        Write-Color -Text "--" -Color Yellow
                        Write-Color -Text " Could not verify model id (network issue). Continuing with your selection." -Color DarkGray
                    }
                } else {
                    Write-Color -Text " Skipping model verification (OpenRouter key not available in current shell)." -Color DarkGray
                }

                Write-Host ""
                Write-Ok "Model: $normalizedModel"
                return @{ Model = $normalizedModel; MaxTokens = 8192; MaxContextTokens = 120000 }
            }
            Write-Color -Text "Model id cannot be empty." -Color Red
        }
    }

    $choices = $ModelChoices[$ProviderId]
    if (-not $choices -or $choices.Count -eq 0) {
        return @{ Model = $DefaultModels[$ProviderId]; MaxTokens = 8192; MaxContextTokens = 120000 }
    }
    if ($choices.Count -eq 1) {
        return @{ Model = $choices[0].Id; MaxTokens = $choices[0].MaxTokens; MaxContextTokens = $choices[0].MaxContextTokens }
    }

    # Find default index from previous model (if same provider)
    $defaultIdx = "1"
    if ($PrevModel -and $PrevProvider -eq $ProviderId) {
        for ($j = 0; $j -lt $choices.Count; $j++) {
            if ($choices[$j].Id -eq $PrevModel) {
                $defaultIdx = [string]($j + 1)
                break
            }
        }
    }

    Write-Host ""
    Write-Color -Text "Select a model:" -Color White
    Write-Host ""
    for ($i = 0; $i -lt $choices.Count; $i++) {
        Write-Color -Text " $($i + 1)" -Color Cyan -NoNewline
        Write-Host ") $($choices[$i].Label) " -NoNewline
        Write-Color -Text "($($choices[$i].Id))" -Color DarkGray
    }
    Write-Host ""

    while ($true) {
        $raw = Read-Host "Enter choice [$defaultIdx]"
        if ([string]::IsNullOrWhiteSpace($raw)) { $raw = $defaultIdx }
        if ($raw -match '^\d+$') {
            $num = [int]$raw
            if ($num -ge 1 -and $num -le $choices.Count) {
                $sel = $choices[$num - 1]
                Write-Host ""
                Write-Ok "Model: $($sel.Id)"
                return @{ Model = $sel.Id; MaxTokens = $sel.MaxTokens; MaxContextTokens = $sel.MaxContextTokens }
            }
        }
        Write-Color -Text "Invalid choice. Please enter 1-$($choices.Count)" -Color Red
    }
}

# ============================================================
# Main
# ============================================================

$uvInfo = Find-Uv
if (-not $uvInfo) {
    Write-Color -Text "uv not found. Run quickstart.ps1 first." -Color Red
    exit 1
}
$UvCmd = $uvInfo.Path

Write-Host ""
Write-Color -Text "$([char]0x2B22) Worker Model Setup" -Color Yellow
Write-Host ""
Write-Color -Text "Configure a separate LLM model for worker agents." -Color DarkGray
Write-Color -Text "Worker agents will use this model instead of the default queen model." -Color DarkGray
Write-Host ""

# Show current configuration
if (Test-Path $HiveConfigFile) {
    try {
        Push-Location $ProjectDir
        $currentConfig = & $UvCmd run python -c "
from framework.config import get_preferred_model, get_preferred_worker_model
print(f'Queen: {get_preferred_model()}')
wm = get_preferred_worker_model()
print(f'Worker: {wm if wm else chr(34) + ""(same as queen)"" + chr(34)}')
" 2>$null
        Pop-Location
        if ($currentConfig) {
            Write-Color -Text "Current configuration:" -Color White
            foreach ($line in $currentConfig) {
                Write-Color -Text " $line" -Color DarkGray
            }
            Write-Host ""
        }
    } catch {
        Pop-Location
    }
}

# ============================================================
# Configure Worker LLM Provider
# ============================================================

$SelectedProviderId = ""
$SelectedEnvVar = ""
$SelectedModel = ""
$SelectedMaxTokens = 8192
$SelectedMaxContextTokens = 120000
$SelectedApiBase = ""
$SubscriptionMode = ""

# -- Credential detection (silent -- just set flags) ----------
$ClaudeCredDetected = $false
$claudeCredPath = Join-Path $env:USERPROFILE ".claude\.credentials.json"
if (Test-Path $claudeCredPath) { $ClaudeCredDetected = $true }

$CodexCredDetected = $false
$codexAuthPath = Join-Path $env:USERPROFILE ".codex\auth.json"
if (Test-Path $codexAuthPath) { $CodexCredDetected = $true }

$ZaiCredDetected = $false
$zaiKey = [System.Environment]::GetEnvironmentVariable("ZAI_API_KEY", "User")
if (-not $zaiKey) { $zaiKey = $env:ZAI_API_KEY }
if ($zaiKey) { $ZaiCredDetected = $true }

$KimiCredDetected = $false
$kimiConfigPath = Join-Path $env:USERPROFILE ".kimi\config.toml"
if (Test-Path $kimiConfigPath) { $KimiCredDetected = $true }
$kimiKey = [System.Environment]::GetEnvironmentVariable("KIMI_API_KEY", "User")
if (-not $kimiKey) { $kimiKey = $env:KIMI_API_KEY }
if ($kimiKey) { $KimiCredDetected = $true }

$HiveCredDetected = $false
$hiveKey = [System.Environment]::GetEnvironmentVariable("HIVE_API_KEY", "User")
if (-not $hiveKey) { $hiveKey = $env:HIVE_API_KEY }
if ($hiveKey) { $HiveCredDetected = $true }

# Detect API key providers
$ProviderMenuEnvVars = @("ANTHROPIC_API_KEY", "OPENAI_API_KEY", "GEMINI_API_KEY", "GROQ_API_KEY", "CEREBRAS_API_KEY", "OPENROUTER_API_KEY")
$ProviderMenuNames = @("Anthropic (Claude) - Recommended", "OpenAI (GPT)", "Google Gemini - Free tier available", "Groq - Fast, free tier", "Cerebras - Fast, free tier", "OpenRouter - Bring any OpenRouter model")
$ProviderMenuIds = @("anthropic", "openai", "gemini", "groq", "cerebras", "openrouter")
$ProviderMenuUrls = @(
    "https://console.anthropic.com/settings/keys",
    "https://platform.openai.com/api-keys",
    "https://aistudio.google.com/apikey",
    "https://console.groq.com/keys",
    "https://cloud.cerebras.ai/",
    "https://openrouter.ai/keys"
)

# -- Read previous worker_llm configuration (if any) ---------
$PrevProvider = ""
$PrevModel = ""
$PrevEnvVar = ""
$PrevSubMode = ""
if (Test-Path $HiveConfigFile) {
    try {
        $prevConfig = Get-Content -Path $HiveConfigFile -Raw | ConvertFrom-Json
        $prevLlm = $prevConfig.worker_llm
        if ($prevLlm) {
            $PrevProvider = if ($prevLlm.provider) { $prevLlm.provider } else { "" }
            $PrevModel = if ($prevLlm.model) { $prevLlm.model } else { "" }
            $PrevEnvVar = if ($prevLlm.api_key_env_var) { $prevLlm.api_key_env_var } else { "" }
            if ($prevLlm.use_claude_code_subscription) { $PrevSubMode = "claude_code" }
            elseif ($prevLlm.use_codex_subscription) { $PrevSubMode = "codex" }
            elseif ($prevLlm.use_kimi_code_subscription) { $PrevSubMode = "kimi_code" }
            elseif ($prevLlm.api_base -and $prevLlm.api_base -like "*api.z.ai*") { $PrevSubMode = "zai_code" }
            elseif ($prevLlm.api_base -and $prevLlm.api_base -like "*api.kimi.com*") { $PrevSubMode = "kimi_code" }
            elseif ($prevLlm.provider -eq "hive" -or ($prevLlm.api_base -and $prevLlm.api_base -like "*adenhq.com*")) { $PrevSubMode = "hive_llm" }
        }
    } catch { }
}

# Compute default menu number (only if credential is still valid)
$DefaultChoice = ""
if ($PrevSubMode -or $PrevProvider) {
    $prevCredValid = $false
    switch ($PrevSubMode) {
        "claude_code" { if ($ClaudeCredDetected) { $prevCredValid = $true } }
        "zai_code" { if ($ZaiCredDetected) { $prevCredValid = $true } }
        "codex" { if ($CodexCredDetected) { $prevCredValid = $true } }
        "kimi_code" { if ($KimiCredDetected) { $prevCredValid = $true } }
        "hive_llm" { if ($HiveCredDetected) { $prevCredValid = $true } }
        default {
            if ($PrevEnvVar) {
                $envVal = [System.Environment]::GetEnvironmentVariable($PrevEnvVar, "Process")
                if (-not $envVal) { $envVal = [System.Environment]::GetEnvironmentVariable($PrevEnvVar, "User") }
                if ($envVal) { $prevCredValid = $true }
            }
        }
    }
    if ($prevCredValid) {
        switch ($PrevSubMode) {
            "claude_code" { $DefaultChoice = "1" }
            "zai_code" { $DefaultChoice = "2" }
            "codex" { $DefaultChoice = "3" }
            "kimi_code" { $DefaultChoice = "4" }
            "hive_llm" { $DefaultChoice = "5" }
        }
        if (-not $DefaultChoice) {
            switch ($PrevProvider) {
                "anthropic" { $DefaultChoice = "6" }
                "openai" { $DefaultChoice = "7" }
                "gemini" { $DefaultChoice = "8" }
                "groq" { $DefaultChoice = "9" }
                "cerebras" { $DefaultChoice = "10" }
                "openrouter" { $DefaultChoice = "11" }
                "kimi" { $DefaultChoice = "4" }
            }
        }
    }
}

# -- Show unified provider selection menu ---------------------
Write-Color -Text "Select your worker LLM provider:" -Color White
Write-Host ""
Write-Color -Text " Subscription modes (no API key purchase needed):" -Color Cyan

# 1) Claude Code
Write-Host " " -NoNewline
Write-Color -Text "1" -Color Cyan -NoNewline
Write-Host ") Claude Code Subscription " -NoNewline
Write-Color -Text "(use your Claude Max/Pro plan)" -Color DarkGray -NoNewline
if ($ClaudeCredDetected) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }

# 2) ZAI Code
Write-Host " " -NoNewline
Write-Color -Text "2" -Color Cyan -NoNewline
Write-Host ") ZAI Code Subscription " -NoNewline
Write-Color -Text "(use your ZAI Code plan)" -Color DarkGray -NoNewline
if ($ZaiCredDetected) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }

# 3) Codex
Write-Host " " -NoNewline
Write-Color -Text "3" -Color Cyan -NoNewline
Write-Host ") OpenAI Codex Subscription " -NoNewline
Write-Color -Text "(use your Codex/ChatGPT Plus plan)" -Color DarkGray -NoNewline
if ($CodexCredDetected) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }

# 4) Kimi Code
Write-Host " " -NoNewline
Write-Color -Text "4" -Color Cyan -NoNewline
Write-Host ") Kimi Code Subscription " -NoNewline
Write-Color -Text "(use your Kimi Code plan)" -Color DarkGray -NoNewline
if ($KimiCredDetected) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }

# 5) Hive LLM
Write-Host " " -NoNewline
Write-Color -Text "5" -Color Cyan -NoNewline
Write-Host ") Hive LLM " -NoNewline
Write-Color -Text "(use your Hive API key)" -Color DarkGray -NoNewline
if ($HiveCredDetected) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }

Write-Host ""
Write-Color -Text " API key providers:" -Color Cyan

# 6-11) API key providers
for ($idx = 0; $idx -lt $ProviderMenuEnvVars.Count; $idx++) {
    $num = $idx + 6
    $envVal = [System.Environment]::GetEnvironmentVariable($ProviderMenuEnvVars[$idx], "Process")
    if (-not $envVal) { $envVal = [System.Environment]::GetEnvironmentVariable($ProviderMenuEnvVars[$idx], "User") }
    Write-Host " " -NoNewline
    Write-Color -Text "$num" -Color Cyan -NoNewline
    Write-Host ") $($ProviderMenuNames[$idx])" -NoNewline
    if ($envVal) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }
}

$SkipChoice = 6 + $ProviderMenuEnvVars.Count
Write-Host " " -NoNewline
Write-Color -Text "$SkipChoice" -Color Cyan -NoNewline
Write-Host ") Skip for now"
Write-Host ""

if ($DefaultChoice) {
    Write-Color -Text " Previously configured: $PrevProvider/$PrevModel. Press Enter to keep." -Color DarkGray
    Write-Host ""
}

while ($true) {
    if ($DefaultChoice) {
        $raw = Read-Host "Enter choice (1-$SkipChoice) [$DefaultChoice]"
        if ([string]::IsNullOrWhiteSpace($raw)) { $raw = $DefaultChoice }
    } else {
        $raw = Read-Host "Enter choice (1-$SkipChoice)"
    }
    if ($raw -match '^\d+$') {
        $num = [int]$raw
        if ($num -ge 1 -and $num -le $SkipChoice) { break }
    }
    Write-Color -Text "Invalid choice. Please enter 1-$SkipChoice" -Color Red
}

switch ($num) {
    1 {
        # Claude Code Subscription
        if (-not $ClaudeCredDetected) {
            Write-Host ""
            Write-Warn "~/.claude/.credentials.json not found."
            Write-Host " Run 'claude' first to authenticate with your Claude subscription,"
            Write-Host " then run this script again."
            Write-Host ""
            exit 1
        }
        $SubscriptionMode = "claude_code"
        $SelectedProviderId = "anthropic"
        $SelectedModel = "claude-opus-4-6"
        $SelectedMaxTokens = 32768
        $SelectedMaxContextTokens = 180000
        Write-Host ""
        Write-Ok "Using Claude Code subscription"
    }
    2 {
        # ZAI Code Subscription
        $SubscriptionMode = "zai_code"
        $SelectedProviderId = "openai"
        $SelectedEnvVar = "ZAI_API_KEY"
        $SelectedModel = "glm-5"
        $SelectedMaxTokens = 32768
        $SelectedMaxContextTokens = 120000
        Write-Host ""
        Write-Ok "Using ZAI Code subscription"
        Write-Color -Text " Model: glm-5 | API: api.z.ai" -Color DarkGray
    }
    3 {
        # OpenAI Codex Subscription
        if (-not $CodexCredDetected) {
            Write-Host ""
            Write-Warn "Codex credentials not found. Starting OAuth login..."
            Write-Host ""
            try {
                Push-Location $ProjectDir
                & $UvCmd run python (Join-Path $ProjectDir "core\codex_oauth.py") 2>&1
                Pop-Location
                if ($LASTEXITCODE -eq 0) {
                    $CodexCredDetected = $true
                } else {
                    Write-Host ""
                    Write-Fail "OAuth login failed or was cancelled."
                    Write-Host ""
                    Write-Host " Or run 'codex' to authenticate, then run this script again."
                    Write-Host ""
                    $SelectedProviderId = ""
                }
            } catch {
                Pop-Location
                Write-Fail "OAuth login failed: $($_.Exception.Message)"
                $SelectedProviderId = ""
            }
        }
        if ($CodexCredDetected) {
            $SubscriptionMode = "codex"
            $SelectedProviderId = "openai"
            $SelectedModel = "gpt-5.3-codex"
            $SelectedMaxTokens = 16384
            $SelectedMaxContextTokens = 120000
            Write-Host ""
            Write-Ok "Using OpenAI Codex subscription"
        }
    }
    4 {
        # Kimi Code Subscription
        $SubscriptionMode = "kimi_code"
        $SelectedProviderId = "kimi"
        $SelectedEnvVar = "KIMI_API_KEY"
        $SelectedModel = "kimi-k2.5"
        $SelectedMaxTokens = 32768
        $SelectedMaxContextTokens = 120000
        Write-Host ""
        Write-Ok "Using Kimi Code subscription"
        Write-Color -Text " Model: kimi-k2.5 | API: api.kimi.com/coding" -Color DarkGray
    }
    5 {
        # Hive LLM
        $SubscriptionMode = "hive_llm"
        $SelectedProviderId = "hive"
        $SelectedEnvVar = "HIVE_API_KEY"
        $SelectedMaxTokens = 32768
        $SelectedMaxContextTokens = 120000
        Write-Host ""
        Write-Ok "Using Hive LLM"
        Write-Host ""
        Write-Host " Select a model:"
        Write-Host " " -NoNewline; Write-Color -Text "1)" -Color Cyan -NoNewline; Write-Host " queen " -NoNewline; Write-Color -Text "(default - Hive flagship)" -Color DarkGray
        Write-Host " " -NoNewline; Write-Color -Text "2)" -Color Cyan -NoNewline; Write-Host " kimi-2.5"
        Write-Host " " -NoNewline; Write-Color -Text "3)" -Color Cyan -NoNewline; Write-Host " GLM-5"
        Write-Host ""
        $hiveModelChoice = Read-Host " Enter model choice (1-3) [1]"
        if (-not $hiveModelChoice) { $hiveModelChoice = "1" }
        switch ($hiveModelChoice) {
            "2" { $SelectedModel = "kimi-2.5" }
            "3" { $SelectedModel = "GLM-5" }
            default { $SelectedModel = "queen" }
        }
        Write-Color -Text " Model: $SelectedModel | API: $HiveLlmEndpoint" -Color DarkGray
    }
    { $_ -ge 6 -and $_ -le 11 } {
        # API key providers
        $provIdx = $num - 6
        $SelectedEnvVar = $ProviderMenuEnvVars[$provIdx]
        $SelectedProviderId = $ProviderMenuIds[$provIdx]
        $providerName = $ProviderMenuNames[$provIdx] -replace ' - .*', '' # strip description
        $signupUrl = $ProviderMenuUrls[$provIdx]
        if ($SelectedProviderId -eq "openrouter") {
            $SelectedApiBase = "https://openrouter.ai/api/v1"
        } else {
            $SelectedApiBase = ""
        }

        # Prompt for key (allow replacement if already set) with verification + retry
        while ($true) {
            $existingKey = [System.Environment]::GetEnvironmentVariable($SelectedEnvVar, "User")
            if (-not $existingKey) { $existingKey = [System.Environment]::GetEnvironmentVariable($SelectedEnvVar, "Process") }

            if ($existingKey) {
                $masked = $existingKey.Substring(0, [Math]::Min(4, $existingKey.Length)) + "..." + $existingKey.Substring([Math]::Max(0, $existingKey.Length - 4))
                Write-Host ""
                Write-Color -Text " $([char]0x2B22) Current key: $masked" -Color Green
                $apiKey = Read-Host " Press Enter to keep, or paste a new key to replace"
            } else {
                Write-Host ""
                Write-Host "Get your API key from: " -NoNewline
                Write-Color -Text $signupUrl -Color Cyan
                Write-Host ""
                $apiKey = Read-Host "Paste your $providerName API key (or press Enter to skip)"
            }

            if ($apiKey) {
                [System.Environment]::SetEnvironmentVariable($SelectedEnvVar, $apiKey, "User")
                Set-Item -Path "Env:\$SelectedEnvVar" -Value $apiKey
                Write-Host ""
                Write-Ok "API key saved as User environment variable: $SelectedEnvVar"

                # Health check the new key
                Write-Host " Verifying API key... " -NoNewline
                try {
                    Push-Location $ProjectDir
                    if ($SelectedApiBase) {
                        $hcResult = & $UvCmd run python (Join-Path $ProjectDir "scripts/check_llm_key.py") $SelectedProviderId $apiKey $SelectedApiBase 2>$null
                    } else {
                        $hcResult = & $UvCmd run python (Join-Path $ProjectDir "scripts/check_llm_key.py") $SelectedProviderId $apiKey 2>$null
                    }
                    Pop-Location
                    $hcJson = $hcResult | ConvertFrom-Json
                    if ($hcJson.valid -eq $true) {
                        Write-Color -Text "ok" -Color Green
                        break
                    } elseif ($hcJson.valid -eq $false) {
                        Write-Color -Text "failed" -Color Red
                        Write-Warn $hcJson.message
                        # Undo the save so user can retry cleanly
                        [System.Environment]::SetEnvironmentVariable($SelectedEnvVar, $null, "User")
                        Remove-Item -Path "Env:\$SelectedEnvVar" -ErrorAction SilentlyContinue
                        Write-Host ""
                        Read-Host " Press Enter to try again"
                        # loop back to key prompt
                    } else {
                        Write-Color -Text "--" -Color Yellow
                        Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
                        break
                    }
                } catch {
                    Pop-Location
                    Write-Color -Text "--" -Color Yellow
                    Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
                    break
                }
            } elseif (-not $existingKey) {
                # No existing key and user skipped
                Write-Host ""
                Write-Warn "Skipped. Set the environment variable manually when ready:"
                Write-Host " [System.Environment]::SetEnvironmentVariable('$SelectedEnvVar', 'your-key', 'User')"
                $SelectedEnvVar = ""
                $SelectedProviderId = ""
                break
            } else {
                # User pressed Enter with existing key -- keep it
                break
            }
        }
    }
    { $_ -eq $SkipChoice } {
        Write-Host ""
        Write-Warn "Skipped. A worker LLM provider is required for worker agents."
        Write-Host " Run this script again when ready."
        Write-Host ""
        $SelectedEnvVar = ""
        $SelectedProviderId = ""
    }
}

# For ZAI subscription: prompt for API key (allow replacement if already set) with verification + retry
if ($SubscriptionMode -eq "zai_code") {
    while ($true) {
        $existingZai = [System.Environment]::GetEnvironmentVariable("ZAI_API_KEY", "User")
        if (-not $existingZai) { $existingZai = $env:ZAI_API_KEY }

        if ($existingZai) {
            $masked = $existingZai.Substring(0, [Math]::Min(4, $existingZai.Length)) + "..." + $existingZai.Substring([Math]::Max(0, $existingZai.Length - 4))
            Write-Host ""
            Write-Color -Text " $([char]0x2B22) Current ZAI key: $masked" -Color Green
            $apiKey = Read-Host " Press Enter to keep, or paste a new key to replace"
        } else {
            Write-Host ""
            $apiKey = Read-Host "Paste your ZAI API key (or press Enter to skip)"
        }

        if ($apiKey) {
            [System.Environment]::SetEnvironmentVariable("ZAI_API_KEY", $apiKey, "User")
            $env:ZAI_API_KEY = $apiKey
            Write-Host ""
            Write-Ok "ZAI API key saved as User environment variable"

            # Health check the new key
            Write-Host " Verifying ZAI API key... " -NoNewline
            try {
                Push-Location $ProjectDir
                $hcResult = & $UvCmd run python (Join-Path $ProjectDir "scripts/check_llm_key.py") "zai" $apiKey "https://api.z.ai/api/coding/paas/v4" 2>$null
                Pop-Location
                $hcJson = $hcResult | ConvertFrom-Json
                if ($hcJson.valid -eq $true) {
                    Write-Color -Text "ok" -Color Green
                    break
                } elseif ($hcJson.valid -eq $false) {
                    Write-Color -Text "failed" -Color Red
                    Write-Warn $hcJson.message
                    # Undo the save so user can retry cleanly
                    [System.Environment]::SetEnvironmentVariable("ZAI_API_KEY", $null, "User")
                    Remove-Item -Path "Env:\ZAI_API_KEY" -ErrorAction SilentlyContinue
                    Write-Host ""
                    Read-Host " Press Enter to try again"
                    # loop back to key prompt
                } else {
                    Write-Color -Text "--" -Color Yellow
                    Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
                    break
                }
            } catch {
                Pop-Location
                Write-Color -Text "--" -Color Yellow
                Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
                break
            }
        } elseif (-not $existingZai) {
            # No existing key and user skipped
            Write-Host ""
            Write-Warn "Skipped. Add your ZAI API key later:"
            Write-Color -Text " [System.Environment]::SetEnvironmentVariable('ZAI_API_KEY', 'your-key', 'User')" -Color Cyan
            $SelectedEnvVar = ""
            $SelectedProviderId = ""
            $SubscriptionMode = ""
            break
        } else {
            # User pressed Enter with existing key -- keep it
            break
        }
    }
}

# For Kimi Code subscription: prompt for API key with verification + retry
if ($SubscriptionMode -eq "kimi_code") {
    while ($true) {
        $existingKimi = [System.Environment]::GetEnvironmentVariable("KIMI_API_KEY", "User")
        if (-not $existingKimi) { $existingKimi = $env:KIMI_API_KEY }

        if ($existingKimi) {
            $masked = $existingKimi.Substring(0, [Math]::Min(4, $existingKimi.Length)) + "..." + $existingKimi.Substring([Math]::Max(0, $existingKimi.Length - 4))
            Write-Host ""
            Write-Color -Text " $([char]0x2B22) Current Kimi key: $masked" -Color Green
            $apiKey = Read-Host " Press Enter to keep, or paste a new key to replace"
        } else {
            Write-Host ""
            Write-Host "Get your API key from: " -NoNewline
            Write-Color -Text "https://www.kimi.com/code" -Color Cyan
            Write-Host ""
            $apiKey = Read-Host "Paste your Kimi API key (or press Enter to skip)"
        }

        if ($apiKey) {
            [System.Environment]::SetEnvironmentVariable("KIMI_API_KEY", $apiKey, "User")
            $env:KIMI_API_KEY = $apiKey
            Write-Host ""
            Write-Ok "Kimi API key saved as User environment variable"

            # Health check the new key
            Write-Host " Verifying Kimi API key... " -NoNewline
            try {
                Push-Location $ProjectDir
                $hcResult = & $UvCmd run python (Join-Path $ProjectDir "scripts/check_llm_key.py") "kimi" $apiKey "https://api.kimi.com/coding" 2>$null
                Pop-Location
                $hcJson = $hcResult | ConvertFrom-Json
                if ($hcJson.valid -eq $true) {
                    Write-Color -Text "ok" -Color Green
                    break
                } elseif ($hcJson.valid -eq $false) {
                    Write-Color -Text "failed" -Color Red
                    Write-Warn $hcJson.message
                    [System.Environment]::SetEnvironmentVariable("KIMI_API_KEY", $null, "User")
                    Remove-Item -Path "Env:\KIMI_API_KEY" -ErrorAction SilentlyContinue
                    Write-Host ""
                    Read-Host " Press Enter to try again"
                } else {
                    Write-Color -Text "--" -Color Yellow
                    Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
                    break
                }
            } catch {
                Pop-Location
                Write-Color -Text "--" -Color Yellow
                Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
                break
            }
        } elseif (-not $existingKimi) {
            Write-Host ""
            Write-Warn "Skipped. Add your Kimi API key later:"
            Write-Color -Text " [System.Environment]::SetEnvironmentVariable('KIMI_API_KEY', 'your-key', 'User')" -Color Cyan
            $SelectedEnvVar = ""
            $SelectedProviderId = ""
            $SubscriptionMode = ""
            break
        } else {
            break
        }
    }
}

# For Hive LLM: prompt for API key with verification + retry
if ($SubscriptionMode -eq "hive_llm") {
    while ($true) {
        $existingHive = [System.Environment]::GetEnvironmentVariable("HIVE_API_KEY", "User")
        if (-not $existingHive) { $existingHive = $env:HIVE_API_KEY }

        if ($existingHive) {
            $masked = $existingHive.Substring(0, [Math]::Min(4, $existingHive.Length)) + "..." + $existingHive.Substring([Math]::Max(0, $existingHive.Length - 4))
            Write-Host ""
            Write-Color -Text " $([char]0x2B22) Current Hive key: $masked" -Color Green
            Write-Host ""
            $apiKey = Read-Host "Paste a new Hive API key (or press Enter to keep current)"
        } else {
            Write-Host ""
            Write-Host " Get your API key from: " -NoNewline
            Write-Color -Text "https://discord.com/invite/hQdU7QDkgR" -Color Cyan
            Write-Host ""
            $apiKey = Read-Host "Paste your Hive API key (or press Enter to skip)"
        }

        if ($apiKey) {
            [System.Environment]::SetEnvironmentVariable("HIVE_API_KEY", $apiKey, "User")
            $env:HIVE_API_KEY = $apiKey
            Write-Host ""
            Write-Ok "Hive API key saved as User environment variable"

            # Health check the new key
            Write-Host " Verifying Hive API key... " -NoNewline
            try {
                Push-Location $ProjectDir
                $hcResult = & $UvCmd run python (Join-Path $ProjectDir "scripts/check_llm_key.py") "hive" $apiKey "$HiveLlmEndpoint" 2>$null
                Pop-Location
                $hcJson = $hcResult | ConvertFrom-Json
                if ($hcJson.valid -eq $true) {
                    Write-Color -Text "ok" -Color Green
                    break
                } elseif ($hcJson.valid -eq $false) {
                    Write-Color -Text "failed" -Color Red
                    Write-Warn $hcJson.message
                    [System.Environment]::SetEnvironmentVariable("HIVE_API_KEY", $null, "User")
                    Remove-Item -Path "Env:\HIVE_API_KEY" -ErrorAction SilentlyContinue
                    Write-Host ""
                    Read-Host " Press Enter to try again"
                } else {
                    Write-Color -Text "--" -Color Yellow
                    Write-Color -Text " Could not verify key (network issue). The key has been saved." -Color DarkGray
                    break
                }
            } catch {
                Pop-Location
                Write-Color -Text "--" -Color Yellow
                break
            }
        } elseif (-not $existingHive) {
            Write-Host ""
            Write-Warn "Skipped. Add your Hive API key later:"
            Write-Color -Text " [System.Environment]::SetEnvironmentVariable('HIVE_API_KEY', 'your-key', 'User')" -Color Cyan
            $SelectedEnvVar = ""
            $SelectedProviderId = ""
            $SubscriptionMode = ""
            break
        } else {
            break
        }
    }
}

# Prompt for model if not already selected (manual provider path)
if ($SelectedProviderId -and -not $SelectedModel) {
    $modelSel = Get-ModelSelection $SelectedProviderId
    $SelectedModel = $modelSel.Model
    $SelectedMaxTokens = $modelSel.MaxTokens
    $SelectedMaxContextTokens = $modelSel.MaxContextTokens
}

# ============================================================
# Save configuration to worker_llm section
# ============================================================

if ($SelectedProviderId) {
    if (-not $SelectedModel) {
        $SelectedModel = $DefaultModels[$SelectedProviderId]
    }
    Write-Host ""
    Write-Host " Saving worker model configuration... " -NoNewline

    if (-not (Test-Path $HiveConfigDir)) {
        New-Item -ItemType Directory -Path $HiveConfigDir -Force | Out-Null
    }

    try {
        if (Test-Path $HiveConfigFile) {
            $config = Get-Content -Path $HiveConfigFile -Raw | ConvertFrom-Json
        } else {
            $config = @{}
        }
    } catch {
        $config = @{}
    }

    $workerLlm = @{
        provider = $SelectedProviderId
        model = $SelectedModel
        max_tokens = $SelectedMaxTokens
        max_context_tokens = $SelectedMaxContextTokens
    }

    if ($SubscriptionMode -eq "claude_code") {
        $workerLlm["use_claude_code_subscription"] = $true
    } elseif ($SubscriptionMode -eq "codex") {
        $workerLlm["use_codex_subscription"] = $true
    } elseif ($SubscriptionMode -eq "zai_code") {
        $workerLlm["api_base"] = "https://api.z.ai/api/coding/paas/v4"
        $workerLlm["api_key_env_var"] = $SelectedEnvVar
    } elseif ($SubscriptionMode -eq "kimi_code") {
        $workerLlm["api_base"] = "https://api.kimi.com/coding"
        $workerLlm["api_key_env_var"] = $SelectedEnvVar
    } elseif ($SubscriptionMode -eq "hive_llm") {
        $workerLlm["api_base"] = $HiveLlmEndpoint
        $workerLlm["api_key_env_var"] = $SelectedEnvVar
    } elseif ($SelectedProviderId -eq "openrouter") {
        $workerLlm["api_base"] = "https://openrouter.ai/api/v1"
        $workerLlm["api_key_env_var"] = $SelectedEnvVar
    } else {
        $workerLlm["api_key_env_var"] = $SelectedEnvVar
    }

    $config | Add-Member -NotePropertyName "worker_llm" -NotePropertyValue $workerLlm -Force
    $config | ConvertTo-Json -Depth 4 | Set-Content -Path $HiveConfigFile -Encoding UTF8
    Write-Ok "done"
    Write-Color -Text " ~/.hive/configuration.json (worker_llm section)" -Color DarkGray

    Write-Host ""
    Write-Ok "Worker model configured successfully."
    Write-Color -Text " Worker agents will now use: $SelectedProviderId/$SelectedModel" -Color DarkGray
    Write-Color -Text " Run this script again to change, or remove the worker_llm section" -Color DarkGray
    Write-Color -Text " from ~/.hive/configuration.json to revert to the default." -Color DarkGray
    Write-Host ""
}
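As a concrete illustration (not output captured from a real run), choosing option 1 above would leave a `worker_llm` section along these lines in `~/.hive/configuration.json`, using the values the script hard-codes for the Claude Code subscription path:

```json
{
  "worker_llm": {
    "provider": "anthropic",
    "model": "claude-opus-4-6",
    "max_tokens": 32768,
    "max_context_tokens": 180000,
    "use_claude_code_subscription": true
  }
}
```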
(Executable file, +1,115 lines: diff suppressed because it is too large.)
@@ -409,8 +409,6 @@ class BrowserSession:
        We're already inside ``self._lock`` so we can't call ``stop()``.
        This mirrors the teardown logic without re-acquiring the lock.
        """
        _CLOSE_TIMEOUT = 10.0  # seconds

        if self.cdp_port:
            from .port_manager import release_port

@@ -419,21 +417,21 @@ class BrowserSession:

        if self.context:
            try:
                await asyncio.wait_for(self.context.close(), timeout=_CLOSE_TIMEOUT)
                await self.context.close()
            except Exception:
                pass
            self.context = None

        if self.browser:
            try:
                await asyncio.wait_for(self.browser.close(), timeout=_CLOSE_TIMEOUT)
                await self.browser.close()
            except Exception:
                pass
            self.browser = None

        if self._playwright:
            try:
                await asyncio.wait_for(self._playwright.stop(), timeout=_CLOSE_TIMEOUT)
                await self._playwright.stop()
            except Exception:
                pass
            self._playwright = None
@@ -590,10 +588,6 @@ class BrowserSession:

    async def stop(self) -> dict:
        """Stop the browser and clean up resources."""
        # Timeout for each Playwright teardown call — prevents hanging when
        # the browser process is crashed or unresponsive.
        _CLOSE_TIMEOUT = 10.0  # seconds

        async with self._lock:
            # Release CDP port if allocated
            if self.cdp_port:
@@ -604,35 +598,23 @@ class BrowserSession:

            # Close context (works for both persistent and ephemeral)
            if self.context:
                try:
                    await asyncio.wait_for(self.context.close(), timeout=_CLOSE_TIMEOUT)
                except Exception as exc:
                    logger.warning("context.close() failed for profile %r: %s", self.profile, exc)
                await self.context.close()
                self.context = None

            # Agent sessions share a browser — don't close it (other agents depend on it).
            # Only standard sessions own their browser and playwright instances.
            if self.session_type != "agent":
                if self.browser:
                    try:
                        await asyncio.wait_for(self.browser.close(), timeout=_CLOSE_TIMEOUT)
                    except Exception as exc:
                        logger.warning("browser.close() failed for profile %r: %s", self.profile, exc)
                    await self.browser.close()
                    self.browser = None

                if self._playwright:
                    try:
                        await asyncio.wait_for(self._playwright.stop(), timeout=_CLOSE_TIMEOUT)
                    except Exception as exc:
                        logger.warning("playwright.stop() failed for profile %r: %s", self.profile, exc)
                    await self._playwright.stop()
                    self._playwright = None

                # Kill the Chrome subprocess
                if self._chrome_process:
                    try:
                        await self._chrome_process.kill()
                    except Exception as exc:
                        logger.warning("chrome_process.kill() failed for profile %r: %s", self.profile, exc)
                    await self._chrome_process.kill()
                    self._chrome_process = None
            else:
                self.browser = None  # Drop reference to shared browser