fix(openrouter): stabilize quickstart and tool execution

- add cross-platform OpenRouter quickstart setup, config fallbacks, and key validation
- harden LiteLLM/OpenRouter tool execution, duplicate question handling, and worker loading UX
- add backend and frontend regression coverage for OpenRouter flows
Sundaram Kumar Jha
2026-03-14 20:48:58 +05:30
parent ff01c1fd99
commit 9202cbd4d4
20 changed files with 1685 additions and 151 deletions
+13 -2
@@ -47,7 +47,13 @@ def get_preferred_model() -> str:
"""Return the user's preferred LLM model string (e.g. 'anthropic/claude-sonnet-4-20250514')."""
llm = get_hive_config().get("llm", {})
if llm.get("provider") and llm.get("model"):
return f"{llm['provider']}/{llm['model']}"
provider = str(llm["provider"])
model = str(llm["model"]).strip()
# OpenRouter quickstart stores raw model IDs; tolerate pasted "openrouter/<id>" too.
if provider.lower() == "openrouter" and model.lower().startswith("openrouter/"):
model = model[len("openrouter/") :]
if model:
return f"{provider}/{model}"
return "anthropic/claude-sonnet-4-20250514"
@@ -57,6 +63,7 @@ def get_max_tokens() -> int:
DEFAULT_MAX_CONTEXT_TOKENS = 32_000
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
def get_max_context_tokens() -> int:
@@ -138,7 +145,11 @@ def get_api_base() -> str | None:
if llm.get("use_kimi_code_subscription"):
# Kimi Code uses an Anthropic-compatible endpoint (no /v1 suffix).
return "https://api.kimi.com/coding"
return llm.get("api_base")
if llm.get("api_base"):
return llm["api_base"]
if str(llm.get("provider", "")).lower() == "openrouter":
return OPENROUTER_API_BASE
return None
def get_llm_extra_kwargs() -> dict[str, Any]:
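A minimal sketch of the combined fallback (hypothetical usage; the config shown mirrors the regression tests further down):
# Assuming the Hive config resolves to:
#   {"llm": {"provider": "openrouter", "model": "openrouter/x-ai/grok-4.20-beta"}}
from framework.config import get_api_base, get_preferred_model
assert get_preferred_model() == "openrouter/x-ai/grok-4.20-beta"  # pasted prefix stripped once
assert get_api_base() == "https://openrouter.ai/api/v1"  # provider default, no explicit api_base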
+10
@@ -51,6 +51,16 @@ def ensure_credential_key_env() -> None:
if found and value:
os.environ[var_name] = value
logger.debug("Loaded %s from shell config", var_name)
# Also load the currently configured LLM env var even if it's not in CREDENTIAL_SPECS.
# This keeps quickstart-written keys available to fresh processes on Unix shells.
from framework.config import get_hive_config
llm_env_var = str(get_hive_config().get("llm", {}).get("api_key_env_var", "")).strip()
if llm_env_var and not os.environ.get(llm_env_var):
found, value = check_env_var_in_shell_config(llm_env_var)
if found and value:
os.environ[llm_env_var] = value
logger.debug("Loaded configured LLM env var %s from shell config", llm_env_var)
except ImportError:
pass
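A rough illustration of the bootstrap effect on Unix (assumed config value; mirrors the credential tests below):
import os
from framework.credentials.validation import ensure_credential_key_env
# Assuming {"llm": {"api_key_env_var": "OPENROUTER_API_KEY"}} is configured and the
# key is exported only in the shell config, a fresh process picks it up on bootstrap:
ensure_credential_key_env()
assert os.environ.get("OPENROUTER_API_KEY")  # loaded via check_env_var_in_shell_config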
+168 -4
@@ -69,6 +69,53 @@ def _is_context_too_large_error(exc: BaseException) -> bool:
return bool(_CONTEXT_TOO_LARGE_RE.search(str(exc)))
def _normalize_question_text(value: str) -> str:
"""Normalize question text for duplicate-input detection."""
return " ".join(str(value).split()).strip().casefold()
def _build_question_signature(
prompt: str,
options: list[str] | None = None,
) -> str | None:
"""Return a stable signature for a single user-facing question."""
normalized_prompt = _normalize_question_text(prompt)
normalized_options = [
_normalize_question_text(option)
for option in (options or [])
if _normalize_question_text(option)
]
if not normalized_prompt and not normalized_options:
return None
payload = {
"prompt": normalized_prompt,
"options": normalized_options,
}
return f"question:{json.dumps(payload, sort_keys=True, separators=(',', ':'))}"
def _build_question_batch_signature(questions: list[dict[str, Any]]) -> str | None:
"""Return a stable signature for a multi-question widget."""
normalized_questions: list[dict[str, Any]] = []
for question in questions:
prompt = _normalize_question_text(str(question.get("prompt", "")))
options = [
_normalize_question_text(option)
for option in question.get("options", []) or []
if _normalize_question_text(option)
]
normalized_questions.append(
{
"id": _normalize_question_text(str(question.get("id", ""))),
"prompt": prompt,
**({"options": options} if options else {}),
}
)
if not normalized_questions:
return None
return f"batch:{json.dumps(normalized_questions, sort_keys=True, separators=(',', ':'))}"
# ---------------------------------------------------------------------------
# Escalation receiver (temporary routing target for subagent → user input)
# ---------------------------------------------------------------------------
@@ -372,6 +419,10 @@ class EventLoopNode(NodeProtocol):
self._action_plan_emitted: set[str] = set()
# Monotonic counter for spillover file naming (web_search_1.txt, etc.)
self._spill_counter: int = 0
# Prevent weak models from re-asking the exact same question immediately
# after the user already answered it.
self._pending_input_signatures: set[str] = set()
self._recent_answered_input_signatures: set[str] = set()
# Subagent mark_complete: when True, _evaluate returns ACCEPT immediately
self._mark_complete_flag = False
# Counter for subagent instances (1, 2, 3, ...)
@@ -405,6 +456,10 @@ class EventLoopNode(NodeProtocol):
# Verdict counters for runtime logging
_accept_count = _retry_count = _escalate_count = _continue_count = 0
# Per-run question dedupe state should not leak across sessions/runs.
self._pending_input_signatures.clear()
self._recent_answered_input_signatures.clear()
# Client-facing auto-block grace: consecutive text-only turns without
# any real tool call or set_output. Resets on progress.
_cf_text_only_streak = 0
@@ -704,6 +759,7 @@ class EventLoopNode(NodeProtocol):
)
_stream_retry_count = 0
_turn_cancelled = False
_llm_turn_failed_waiting_input = False
while True:
try:
(
@@ -806,12 +862,20 @@ class EventLoopNode(NodeProtocol):
# its arguments.
error_str = str(e).lower()
if "failed to parse tool call" in error_str:
tool_hint = ""
tool_match = re.search(r"for '([^']+)'", str(e))
if tool_match and tool_match.group(1) == "save_agent_draft":
tool_hint = (
" For save_agent_draft specifically: keep node descriptions "
"to one short sentence, omit optional metadata, and prefer "
"the smallest graph that still satisfies the request."
)
await conversation.add_user_message(
"[System: Your previous tool call had malformed "
"JSON arguments (likely truncated). Keep your "
"tool call arguments shorter and simpler. Do NOT "
"repeat the same long argument — summarize or "
"split into multiple calls.]"
f"split into multiple calls.{tool_hint}]"
)
await asyncio.sleep(delay)
@@ -823,6 +887,16 @@ class EventLoopNode(NodeProtocol):
# can retry or adjust the request.
if ctx.node_spec.client_facing:
error_msg = f"LLM call failed: {e}"
_guardrail_phrase = (
"no endpoints available matching your guardrail restrictions "
"and data policy"
)
if _guardrail_phrase in str(e).lower():
error_msg += (
" OpenRouter blocked this model under current privacy settings. "
"Update https://openrouter.ai/settings/privacy or choose another "
"OpenRouter model."
)
logger.error(
"[%s] iter=%d: %s — waiting for user input",
node_id,
@@ -844,6 +918,7 @@ class EventLoopNode(NodeProtocol):
f"[Error: {error_msg}. Please try again.]"
)
await self._await_user_input(ctx, prompt="")
_llm_turn_failed_waiting_input = True
break # exit retry loop, continue outer iteration
# Non-client-facing: crash as before
@@ -894,6 +969,11 @@ class EventLoopNode(NodeProtocol):
await self._await_user_input(ctx, prompt="")
continue # back to top of for-iteration loop
# Client-facing non-transient LLM failures wait for user input and then
# continue the outer loop without touching per-turn token vars.
if _llm_turn_failed_waiting_input:
continue
# 6e'. Feed actual API token count back for accurate estimation
turn_input = turn_tokens.get("input", 0)
if turn_input > 0:
@@ -1301,6 +1381,11 @@ class EventLoopNode(NodeProtocol):
options=ask_user_options,
questions=multi_qs,
)
if got_input and self._pending_input_signatures:
self._recent_answered_input_signatures = set(
self._pending_input_signatures
)
self._pending_input_signatures.clear()
# Emit deferred tool_call_completed for ask_user / ask_user_multiple
deferred = getattr(self, "_deferred_tool_complete", None)
if deferred:
@@ -2147,6 +2232,7 @@ class EventLoopNode(NodeProtocol):
await accumulator.set(key, value)
self._record_learning(key, value)
outputs_set_this_turn.append(key)
self._recent_answered_input_signatures.clear()
await self._publish_output_key_set(stream_id, node_id, key, execution_id)
logged_tool_calls.append(
{
@@ -2163,7 +2249,6 @@ class EventLoopNode(NodeProtocol):
elif tc.tool_name == "ask_user":
# --- Framework-level ask_user handling ---
user_input_requested = True
ask_user_prompt = tc.tool_input.get("question", "")
raw_options = tc.tool_input.get("options", None)
# Defensive: ensure options is a list of strings.
@@ -2200,6 +2285,37 @@ class EventLoopNode(NodeProtocol):
user_input_requested = False
continue
ask_user_signature = _build_question_signature(
str(ask_user_prompt),
ask_user_options,
)
if (
ask_user_signature
and ask_user_signature in self._recent_answered_input_signatures
):
logger.info(
"[%s] blocked duplicate ask_user after answered input: %s",
node_id,
str(ask_user_prompt)[:120],
)
result = ToolResult(
tool_use_id=tc.tool_use_id,
content=(
"ERROR: This question was already asked and answered. "
"Use the user's latest answer already present in the "
"conversation instead of asking again."
),
is_error=True,
)
results_by_id[tc.tool_use_id] = result
user_input_requested = False
continue
user_input_requested = True
self._pending_input_signatures = (
{ask_user_signature} if ask_user_signature else set()
)
# Free-form ask_user (no options): stream the question
# text as a chat message so the user can see it. When
# options are present the QuestionWidget shows the
@@ -2225,7 +2341,6 @@ class EventLoopNode(NodeProtocol):
elif tc.tool_name == "ask_user_multiple":
# --- Framework-level ask_user_multiple ---
user_input_requested = True
raw_questions = tc.tool_input.get("questions", [])
if not isinstance(raw_questions, list) or len(raw_questions) < 2:
result = ToolResult(
@@ -2263,6 +2378,43 @@ class EventLoopNode(NodeProtocol):
}
)
batch_signature = _build_question_batch_signature(questions)
if (
batch_signature
and batch_signature in self._recent_answered_input_signatures
):
logger.info(
"[%s] blocked duplicate ask_user_multiple after answered input",
node_id,
)
result = ToolResult(
tool_use_id=tc.tool_use_id,
content=(
"ERROR: This same question set was already asked and "
"answered. Use the user's latest answers from the "
"conversation instead of asking again."
),
is_error=True,
)
results_by_id[tc.tool_use_id] = result
user_input_requested = False
continue
user_input_requested = True
self._pending_input_signatures = {
signature
for signature in (
_build_question_signature(
str(question.get("prompt", "")),
question.get("options"),
)
for question in questions
)
if signature
}
if batch_signature:
self._pending_input_signatures.add(batch_signature)
# Store as multi-question prompt/options for
# the event emission path
ask_user_prompt = ""
@@ -2280,6 +2432,7 @@ class EventLoopNode(NodeProtocol):
elif tc.tool_name == "escalate":
# --- Framework-level escalate handling ---
self._recent_answered_input_signatures.clear()
reason = str(tc.tool_input.get("reason", "")).strip()
context = str(tc.tool_input.get("context", "")).strip()
@@ -2324,6 +2477,7 @@ class EventLoopNode(NodeProtocol):
elif tc.tool_name == "delegate_to_sub_agent":
# --- Framework-level subagent delegation ---
self._recent_answered_input_signatures.clear()
# Queue for parallel execution in Phase 2
logger.info(
"🔄 LLM requesting subagent delegation: agent_id='%s', task='%s'",
@@ -2336,6 +2490,7 @@ class EventLoopNode(NodeProtocol):
elif tc.tool_name == "report_to_parent":
# --- Report from sub-agent to parent (optionally blocking) ---
self._recent_answered_input_signatures.clear()
reported_to_parent = True
msg = tc.tool_input.get("message", "")
data = tc.tool_input.get("data")
@@ -2390,6 +2545,7 @@ class EventLoopNode(NodeProtocol):
)
results_by_id[tc.tool_use_id] = result
else:
self._recent_answered_input_signatures.clear()
pending_real.append(tc)
# Phase 2a: execute real tools in parallel.
@@ -2545,7 +2701,11 @@ class EventLoopNode(NodeProtocol):
content=result.content,
is_error=result.is_error,
)
if tc.tool_name in ("ask_user", "ask_user_multiple"):
if (
tc.tool_name in ("ask_user", "ask_user_multiple")
and user_input_requested
and not result.is_error
):
# Defer tool_call_completed until after user responds
self._deferred_tool_complete = {
"stream_id": stream_id,
@@ -2689,6 +2849,8 @@ class EventLoopNode(NodeProtocol):
"Always call it after greeting the user, asking a question, or "
"requesting approval. Do NOT call it for status updates or "
"summaries that don't require a response. "
"Never ask the same question again after the user has already "
"answered it; use their latest answer from the conversation. "
"Always include 2-3 predefined options. The UI automatically "
"appends an 'Other' free-text input after your options, so NEVER "
"include catch-all options like 'Custom idea', 'Something else', "
@@ -2747,6 +2909,8 @@ class EventLoopNode(NodeProtocol):
"questions together with a single Submit button. "
"ALWAYS prefer this over ask_user when you have multiple things "
"to clarify. "
"Do NOT repeat the same batch after the user already answered it; "
"use the answers already present in the conversation. "
"IMPORTANT: Do NOT repeat the questions in your text response — "
"the widget renders them. Keep your text to a brief intro only. "
'Example: {"questions": ['
+511 -9
@@ -7,9 +7,12 @@ Groq, and local models.
See: https://docs.litellm.ai/docs/providers
"""
import ast
import asyncio
import json
import logging
import os
import re
import time
from collections.abc import AsyncIterator
from datetime import datetime
@@ -113,11 +116,17 @@ def _patch_litellm_metadata_nonetype() -> None:
if litellm is not None:
_patch_litellm_anthropic_oauth()
_patch_litellm_metadata_nonetype()
litellm.suppress_debug_info = True
if not os.environ.get("LITELLM_LOG"):
logging.getLogger("LiteLLM").setLevel(logging.WARNING)
logging.getLogger("LiteLLM Router").setLevel(logging.WARNING)
logging.getLogger("LiteLLM Proxy").setLevel(logging.WARNING)
RATE_LIMIT_MAX_RETRIES = 10
RATE_LIMIT_BACKOFF_BASE = 2 # seconds
RATE_LIMIT_MAX_DELAY = 120 # seconds - cap to prevent absurd waits
MINIMAX_API_BASE = "https://api.minimax.io/v1"
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
# Providers that accept cache_control on message content blocks.
# Anthropic: native ephemeral caching. MiniMax & Z-AI/GLM: pass-through to their APIs.
@@ -146,6 +155,16 @@ KIMI_API_BASE = "https://api.kimi.com/coding"
# Conversation-structure issues are deterministic — long waits don't help.
EMPTY_STREAM_MAX_RETRIES = 3
EMPTY_STREAM_RETRY_DELAY = 1.0 # seconds
OPENROUTER_TOOL_COMPAT_ERROR_SNIPPETS = (
"no endpoints found that support tool use",
"no endpoints available that support tool use",
"provider routing",
)
OPENROUTER_TOOL_CALL_RE = re.compile(
r"<\|tool_call_start\|>\s*(.*?)\s*<\|tool_call_end\|>",
re.DOTALL,
)
OPENROUTER_TOOL_COMPAT_MODEL_CACHE: set[str] = set()
# Directory for dumping failed requests
FAILED_REQUESTS_DIR = Path.home() / ".hive" / "failed_requests"
@@ -385,6 +404,8 @@ class LiteLLMProvider(LLMProvider):
model_lower = model.lower()
if model_lower.startswith("minimax/") or model_lower.startswith("minimax-"):
return MINIMAX_API_BASE
if model_lower.startswith("openrouter/"):
return OPENROUTER_API_BASE
if model_lower.startswith("kimi/"):
return KIMI_API_BASE
return None
@@ -791,6 +812,464 @@ class LiteLLMProvider(LLMProvider):
model = (self.model or "").lower()
return model.startswith("minimax/") or model.startswith("minimax-")
def _is_openrouter_model(self) -> bool:
"""Return True when the configured model targets OpenRouter."""
model = (self.model or "").lower()
if model.startswith("openrouter/"):
return True
api_base = (self.api_base or "").lower()
return "openrouter.ai/api/v1" in api_base
def _should_use_openrouter_tool_compat(
self,
error: BaseException,
tools: list[Tool] | None,
) -> bool:
"""Return True when OpenRouter rejects native tool use for the model."""
if not tools or not self._is_openrouter_model():
return False
error_text = str(error).lower()
return "openrouter" in error_text and any(
snippet in error_text for snippet in OPENROUTER_TOOL_COMPAT_ERROR_SNIPPETS
)
@staticmethod
def _extract_json_object(text: str) -> dict[str, Any] | None:
"""Extract the first JSON object from a model response."""
candidates = [text.strip()]
stripped = text.strip()
if stripped.startswith("```"):
fence_lines = stripped.splitlines()
if len(fence_lines) >= 3:
candidates.append("\n".join(fence_lines[1:-1]).strip())
decoder = json.JSONDecoder()
for candidate in candidates:
if not candidate:
continue
try:
parsed = json.loads(candidate)
except json.JSONDecodeError:
parsed = None
if isinstance(parsed, dict):
return parsed
for start_idx, char in enumerate(candidate):
if char != "{":
continue
try:
parsed, _ = decoder.raw_decode(candidate[start_idx:])
except json.JSONDecodeError:
continue
if isinstance(parsed, dict):
return parsed
return None
def _parse_openrouter_tool_compat_response(
self,
content: str,
tools: list[Tool],
) -> tuple[str, list[dict[str, Any]]]:
"""Parse JSON tool-compat output into assistant text and tool calls."""
payload = self._extract_json_object(content)
if payload is None:
text_tool_content, text_tool_calls = self._parse_openrouter_text_tool_calls(
content,
tools,
)
if text_tool_calls:
logger.info(
"[openrouter-tool-compat] Parsed textual tool-call markers for %s",
self.model,
)
return text_tool_content, text_tool_calls
logger.info(
"[openrouter-tool-compat] %s returned non-JSON fallback content; "
"treating it as plain text.",
self.model,
)
return content.strip(), []
assistant_text = payload.get("assistant_response")
if not isinstance(assistant_text, str):
assistant_text = payload.get("content")
if not isinstance(assistant_text, str):
assistant_text = payload.get("response")
if not isinstance(assistant_text, str):
assistant_text = ""
tool_calls_raw = payload.get("tool_calls")
if not tool_calls_raw and {"name", "arguments"} <= payload.keys():
tool_calls_raw = [payload]
elif isinstance(payload.get("tool_call"), dict):
tool_calls_raw = [payload["tool_call"]]
if not isinstance(tool_calls_raw, list):
tool_calls_raw = []
allowed_tool_names = {tool.name for tool in tools}
tool_calls: list[dict[str, Any]] = []
compat_prefix = f"openrouter_compat_{time.time_ns()}"
for idx, raw_call in enumerate(tool_calls_raw):
if not isinstance(raw_call, dict):
continue
function_block = raw_call.get("function")
function_name = (
raw_call.get("name")
or raw_call.get("tool_name")
or (function_block.get("name") if isinstance(function_block, dict) else None)
)
if not isinstance(function_name, str) or function_name not in allowed_tool_names:
if function_name:
logger.warning(
"[openrouter-tool-compat] Ignoring unknown tool '%s' for model %s",
function_name,
self.model,
)
continue
arguments = raw_call.get("arguments")
if arguments is None:
arguments = raw_call.get("tool_input")
if arguments is None:
arguments = raw_call.get("input")
if arguments is None and isinstance(function_block, dict):
arguments = function_block.get("arguments")
if arguments is None:
arguments = {}
if isinstance(arguments, str):
try:
arguments = json.loads(arguments)
except json.JSONDecodeError:
arguments = {"_raw": arguments}
elif not isinstance(arguments, dict):
arguments = {"value": arguments}
tool_calls.append(
{
"id": f"{compat_prefix}_{idx}",
"name": function_name,
"input": arguments,
}
)
return assistant_text.strip(), tool_calls
@staticmethod
def _close_truncated_json_fragment(fragment: str) -> str:
"""Close a truncated JSON fragment by balancing quotes/brackets."""
stack: list[str] = []
in_string = False
escaped = False
normalized = fragment.rstrip()
while normalized and normalized[-1] in ",:{[":
normalized = normalized[:-1].rstrip()
for char in normalized:
if in_string:
if escaped:
escaped = False
elif char == "\\":
escaped = True
elif char == '"':
in_string = False
continue
if char == '"':
in_string = True
elif char in "{[":
stack.append(char)
elif char == "}" and stack and stack[-1] == "{":
stack.pop()
elif char == "]" and stack and stack[-1] == "[":
stack.pop()
if in_string:
if escaped:
normalized = normalized[:-1]
normalized += '"'
for opener in reversed(stack):
normalized += "}" if opener == "{" else "]"
return normalized
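# e.g. '{"a":["x","y"' becomes '{"a":["x","y"]}': the open [ is closed first, then the open {.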
def _repair_truncated_tool_arguments(self, raw_arguments: str) -> dict[str, Any] | None:
"""Try to recover a truncated JSON object from tool-call arguments."""
stripped = raw_arguments.strip()
if not stripped or stripped[0] != "{":
return None
max_trim = min(len(stripped), 256)
for trim in range(max_trim + 1):
candidate = stripped[: len(stripped) - trim].rstrip()
if not candidate:
break
candidate = self._close_truncated_json_fragment(candidate)
try:
parsed = json.loads(candidate)
except json.JSONDecodeError:
continue
if isinstance(parsed, dict):
return parsed
return None
def _parse_tool_call_arguments(self, raw_arguments: str, tool_name: str) -> dict[str, Any]:
"""Parse streamed tool arguments, repairing truncation when possible."""
try:
parsed = json.loads(raw_arguments) if raw_arguments else {}
except json.JSONDecodeError:
parsed = None
if isinstance(parsed, dict):
return parsed
repaired = self._repair_truncated_tool_arguments(raw_arguments)
if repaired is not None:
logger.warning(
"[tool-args] Recovered truncated arguments for %s on %s",
tool_name,
self.model,
)
return repaired
raise ValueError(
f"Failed to parse tool call arguments for '{tool_name}' "
"(likely truncated JSON)."
)
def _parse_openrouter_text_tool_calls(
self,
content: str,
tools: list[Tool],
) -> tuple[str, list[dict[str, Any]]]:
"""Parse textual OpenRouter tool-call markers into synthetic tool calls."""
tools_by_name = {tool.name: tool for tool in tools}
compat_prefix = f"openrouter_compat_{time.time_ns()}"
tool_calls: list[dict[str, Any]] = []
for block_index, match in enumerate(OPENROUTER_TOOL_CALL_RE.finditer(content)):
tool_calls.extend(
self._parse_openrouter_text_tool_call_block(
block=match.group(1),
tools_by_name=tools_by_name,
compat_prefix=f"{compat_prefix}_{block_index}",
)
)
stripped_text = OPENROUTER_TOOL_CALL_RE.sub("", content).strip()
return stripped_text, tool_calls
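# e.g. "<|tool_call_start|>[web_search(query='hi')]<|tool_call_end|>" yields one
# synthetic call {"name": "web_search", "input": {"query": "hi"}} (assuming a
# registered web_search tool); surrounding prose is returned as stripped_text.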
def _parse_openrouter_text_tool_call_block(
self,
block: str,
tools_by_name: dict[str, Tool],
compat_prefix: str,
) -> list[dict[str, Any]]:
"""Parse a single textual tool-call block like [tool(arg='x')]."""
try:
parsed = ast.parse(block.strip(), mode="eval").body
except SyntaxError:
return []
call_nodes = parsed.elts if isinstance(parsed, ast.List) else [parsed]
tool_calls: list[dict[str, Any]] = []
for call_index, call_node in enumerate(call_nodes):
if not isinstance(call_node, ast.Call) or not isinstance(call_node.func, ast.Name):
continue
tool_name = call_node.func.id
tool = tools_by_name.get(tool_name)
if tool is None:
continue
try:
tool_input = self._parse_openrouter_text_tool_call_arguments(
call_node=call_node,
tool=tool,
)
except (ValueError, SyntaxError):
continue
tool_calls.append(
{
"id": f"{compat_prefix}_{call_index}",
"name": tool_name,
"input": tool_input,
}
)
return tool_calls
@staticmethod
def _parse_openrouter_text_tool_call_arguments(
call_node: ast.Call,
tool: Tool,
) -> dict[str, Any]:
"""Parse positional/keyword args from a textual tool call."""
properties = tool.parameters.get("properties", {})
positional_keys = list(properties.keys())
tool_input: dict[str, Any] = {}
if len(call_node.args) > len(positional_keys):
raise ValueError("Too many positional args for textual tool call")
for idx, arg_node in enumerate(call_node.args):
tool_input[positional_keys[idx]] = ast.literal_eval(arg_node)
for kwarg in call_node.keywords:
if kwarg.arg is None:
raise ValueError("Star args are not supported in textual tool calls")
tool_input[kwarg.arg] = ast.literal_eval(kwarg.value)
return tool_input
def _build_openrouter_tool_compat_messages(
self,
messages: list[dict[str, Any]],
system: str,
tools: list[Tool],
) -> list[dict[str, Any]]:
"""Build a JSON-only prompt for models without native tool support."""
tool_specs = [
{
"name": tool.name,
"description": tool.description,
"parameters": tool.parameters,
}
for tool in tools
]
compat_instruction = (
"Tool compatibility mode is active because this OpenRouter model does not support "
"native function calling on the routed provider.\n"
"Return exactly one JSON object and nothing else.\n"
'Schema: {"assistant_response": string, '
'"tool_calls": [{"name": string, "arguments": object}]}\n'
"Rules:\n"
"- If a tool is required, put one or more entries in tool_calls "
"and do not invent tool results.\n"
"- If no tool is required, set tool_calls to [] and put the full "
"answer in assistant_response.\n"
"- Only use tool names from the allowed tool list.\n"
"- arguments must always be valid JSON objects.\n"
f"Allowed tools:\n{json.dumps(tool_specs, ensure_ascii=True)}"
)
compat_system = compat_instruction if not system else f"{system}\n\n{compat_instruction}"
full_messages: list[dict[str, Any]] = [{"role": "system", "content": compat_system}]
full_messages.extend(messages)
return [
message
for message in full_messages
if not (
message.get("role") == "assistant"
and not message.get("content")
and not message.get("tool_calls")
)
]
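# A well-formed compat reply the parser above accepts (illustrative values):
#   {"assistant_response": "", "tool_calls": [
#       {"name": "web_search", "arguments": {"query": "Python 3.13 release notes"}}]}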
async def _acomplete_via_openrouter_tool_compat(
self,
messages: list[dict[str, Any]],
system: str,
tools: list[Tool],
max_tokens: int,
) -> LLMResponse:
"""Emulate tool calling via JSON when OpenRouter rejects native tools."""
full_messages = self._build_openrouter_tool_compat_messages(messages, system, tools)
kwargs: dict[str, Any] = {
"model": self.model,
"messages": full_messages,
"max_tokens": max_tokens,
**self.extra_kwargs,
}
if self.api_key:
kwargs["api_key"] = self.api_key
if self.api_base:
kwargs["api_base"] = self.api_base
response = await self._acompletion_with_rate_limit_retry(**kwargs)
raw_content = response.choices[0].message.content or ""
assistant_text, tool_calls = self._parse_openrouter_tool_compat_response(
raw_content,
tools,
)
usage = response.usage
input_tokens = usage.prompt_tokens if usage else 0
output_tokens = usage.completion_tokens if usage else 0
stop_reason = "tool_calls" if tool_calls else (response.choices[0].finish_reason or "stop")
return LLMResponse(
content=assistant_text,
model=response.model or self.model,
input_tokens=input_tokens,
output_tokens=output_tokens,
stop_reason=stop_reason,
raw_response={
"compat_mode": "openrouter_tool_emulation",
"tool_calls": tool_calls,
"response": response,
},
)
async def _stream_via_openrouter_tool_compat(
self,
messages: list[dict[str, Any]],
system: str,
tools: list[Tool],
max_tokens: int,
) -> AsyncIterator[StreamEvent]:
"""Fallback stream for OpenRouter models without native tool support."""
from framework.llm.stream_events import (
FinishEvent,
StreamErrorEvent,
TextDeltaEvent,
TextEndEvent,
ToolCallEvent,
)
logger.info(
"[openrouter-tool-compat] Using compatibility mode for %s",
self.model,
)
try:
response = await self._acomplete_via_openrouter_tool_compat(
messages=messages,
system=system,
tools=tools,
max_tokens=max_tokens,
)
except Exception as e:
yield StreamErrorEvent(error=str(e), recoverable=False)
return
raw_response = response.raw_response if isinstance(response.raw_response, dict) else {}
tool_calls = raw_response.get("tool_calls", [])
if response.content:
yield TextDeltaEvent(content=response.content, snapshot=response.content)
yield TextEndEvent(full_text=response.content)
for tool_call in tool_calls:
yield ToolCallEvent(
tool_use_id=tool_call["id"],
tool_name=tool_call["name"],
tool_input=tool_call["input"],
)
yield FinishEvent(
stop_reason=response.stop_reason,
input_tokens=response.input_tokens,
output_tokens=response.output_tokens,
model=response.model,
)
async def _stream_via_nonstream_completion(
self,
messages: list[dict[str, Any]],
@@ -834,12 +1313,11 @@ class LiteLLMProvider(LLMProvider):
tool_calls = msg.tool_calls or []
for tc in tool_calls:
parsed_args: Any
args = tc.function.arguments if tc.function else ""
try:
parsed_args = json.loads(args) if args else {}
except json.JSONDecodeError:
parsed_args = {"_raw": args}
parsed_args = self._parse_tool_call_arguments(
args,
tc.function.name if tc.function else "",
)
yield ToolCallEvent(
tool_use_id=getattr(tc, "id", ""),
tool_name=tc.function.name if tc.function else "",
@@ -898,6 +1376,20 @@ class LiteLLMProvider(LLMProvider):
yield event
return
if (
tools
and self._is_openrouter_model()
and self.model in OPENROUTER_TOOL_COMPAT_MODEL_CACHE
):
async for event in self._stream_via_openrouter_tool_compat(
messages=messages,
system=system,
tools=tools,
max_tokens=max_tokens,
):
yield event
return
full_messages: list[dict[str, Any]] = []
if system:
sys_msg: dict[str, Any] = {"role": "system", "content": system}
@@ -1044,10 +1536,10 @@ class LiteLLMProvider(LLMProvider):
if choice.finish_reason:
stream_finish_reason = choice.finish_reason
for _idx, tc_data in sorted(tool_calls_acc.items()):
try:
parsed_args = json.loads(tc_data["arguments"])
except (json.JSONDecodeError, KeyError):
parsed_args = {"_raw": tc_data.get("arguments", "")}
parsed_args = self._parse_tool_call_arguments(
tc_data.get("arguments", ""),
tc_data.get("name", ""),
)
tail_events.append(
ToolCallEvent(
tool_use_id=tc_data["id"],
@@ -1228,6 +1720,16 @@ class LiteLLMProvider(LLMProvider):
return
except Exception as e:
if self._should_use_openrouter_tool_compat(e, tools):
OPENROUTER_TOOL_COMPAT_MODEL_CACHE.add(self.model)
async for event in self._stream_via_openrouter_tool_compat(
messages=messages,
system=system,
tools=tools or [],
max_tokens=max_tokens,
):
yield event
return
if _is_stream_transient_error(e) and attempt < RATE_LIMIT_MAX_RETRIES:
wait = _compute_retry_delay(attempt, exception=e)
logger.warning(
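A hedged end-to-end sketch of the truncation repair (inputs are illustrative and mirror the provider tests below):
provider = LiteLLMProvider(model="gpt-4o-mini", api_key="test-key")
parsed = provider._parse_tool_call_arguments(
    '{"question":"Pick one","options":["A","B"',  # stream cut mid-array
    "ask_user",
)
assert parsed == {"question": "Pick one", "options": ["A", "B"]}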
+2
@@ -1350,6 +1350,8 @@ class AgentRunner:
return "MISTRAL_API_KEY"
elif model_lower.startswith("groq/"):
return "GROQ_API_KEY"
elif model_lower.startswith("openrouter/"):
return "OPENROUTER_API_KEY"
elif self._is_local_model(model_lower):
return None # Local models don't need an API key
elif model_lower.startswith("azure/"):
@@ -2234,7 +2234,10 @@ def register_queen_lifecycle_tools(
"with unique colors. No code is generated. "
"Planning-only types (decision, browser/GCU) are dissolved at confirm/build time: "
"decision nodes merge into predecessor's success_criteria with yes/no edges; "
"browser/GCU nodes merge into predecessor's sub_agents list as leaf delegates."
"browser/GCU nodes merge into predecessor's sub_agents list as leaf delegates. "
"Keep arguments compact: use the smallest graph that fits the request, "
"keep node descriptions to one short sentence, and omit optional metadata "
"unless it is necessary for the design."
),
parameters={
"type": "object",
+6 -3
@@ -1,5 +1,6 @@
import { memo, useMemo, useState, useRef, useEffect, useCallback } from "react";
import { Play, Pause, Loader2, CheckCircle2 } from "lucide-react";
import { isRunButtonDisabled, type QueenPhase } from "@/lib/run-button-state";
export type NodeStatus = "running" | "complete" | "pending" | "error" | "looping";
@@ -31,7 +32,7 @@ interface AgentGraphProps {
version?: string;
runState?: RunState;
building?: boolean;
queenPhase?: "planning" | "building" | "staging" | "running";
queenPhase?: QueenPhase;
}
// --- Extracted RunButton so hover state survives parent re-renders ---
@@ -400,6 +401,8 @@ export default function AgentGraph({ nodes, title: _title, onNodeClick, onRun, o
return { layers, cols, maxCols, nodeW, colSpacing, firstColX };
}, [nodes, forwardEdges]);
const runDisabled = isRunButtonDisabled(nodes.length, queenPhase);
if (nodes.length === 0) {
return (
<div className="flex flex-col h-full">
@@ -412,7 +415,7 @@ export default function AgentGraph({ nodes, title: _title, onNodeClick, onRun, o
</span>
)}
</div>
<RunButton runState={runState} disabled={nodes.length === 0 || queenPhase === "building" || queenPhase === "planning"} onRun={handleRun} onPause={onPause ?? (() => {})} btnRef={runBtnRef} />
<RunButton runState={runState} disabled={runDisabled} onRun={handleRun} onPause={onPause ?? (() => {})} btnRef={runBtnRef} />
</div>
<div className="flex-1 flex items-center justify-center px-5">
{building ? (
@@ -748,7 +751,7 @@ export default function AgentGraph({ nodes, title: _title, onNodeClick, onRun, o
</span>
)}
</div>
<RunButton runState={runState} disabled={nodes.length === 0} onRun={handleRun} onPause={onPause ?? (() => {})} btnRef={runBtnRef} />
<RunButton runState={runState} disabled={runDisabled} onRun={handleRun} onPause={onPause ?? (() => {})} btnRef={runBtnRef} />
</div>
{/* Graph */}
+5 -2
@@ -1,6 +1,7 @@
import { useEffect, useMemo, useRef, useState, useCallback } from "react";
import { Loader2 } from "lucide-react";
import type { DraftGraph as DraftGraphData, DraftNode } from "@/api/types";
import { isRunButtonDisabled, type QueenPhase } from "@/lib/run-button-state";
import { RunButton } from "./AgentGraph";
import type { GraphNode, RunState } from "./AgentGraph";
@@ -91,6 +92,7 @@ interface DraftGraphProps {
onPause?: () => void;
/** Current run state — drives the RunButton appearance. */
runState?: RunState;
queenPhase?: QueenPhase;
}
// Layout constants — tuned for a ~500px panel (484px after px-2 padding)
@@ -357,13 +359,14 @@ function Tooltip({ node, style }: { node: DraftNode; style: React.CSSProperties
);
}
export default function DraftGraph({ draft, onNodeClick, flowchartMap, runtimeNodes, onRuntimeNodeClick, building, loading, onRun, onPause, runState = "idle" }: DraftGraphProps) {
export default function DraftGraph({ draft, onNodeClick, flowchartMap, runtimeNodes, onRuntimeNodeClick, building, loading, onRun, onPause, runState = "idle", queenPhase }: DraftGraphProps) {
const [hoveredNode, setHoveredNode] = useState<string | null>(null);
const [mousePos, setMousePos] = useState<{ x: number; y: number } | null>(null);
const containerRef = useRef<HTMLDivElement>(null);
const runBtnRef = useRef<HTMLButtonElement>(null);
const [containerW, setContainerW] = useState(484);
const chrome = useDraftChromeColors();
const runDisabled = isRunButtonDisabled(draft?.nodes.length ?? 0, queenPhase);
// Shift-to-pin tooltip
const shiftHeld = useRef(false);
@@ -1011,7 +1014,7 @@ export default function DraftGraph({ draft, onNodeClick, flowchartMap, runtimeNo
)}
</div>
{onRun && (
<RunButton runState={runState} disabled={draft.nodes.length === 0} onRun={onRun} onPause={onPause ?? (() => {})} btnRef={runBtnRef} />
<RunButton runState={runState} disabled={runDisabled} onRun={onRun} onPause={onPause ?? (() => {})} btnRef={runBtnRef} />
)}
</div>
@@ -0,0 +1,25 @@
import { describe, expect, it } from "vitest";
import { isRunButtonDisabled } from "./run-button-state";
describe("isRunButtonDisabled", () => {
it("disables run when there are no nodes", () => {
expect(isRunButtonDisabled(0, "running")).toBe(true);
});
it("disables run during planning", () => {
expect(isRunButtonDisabled(4, "planning")).toBe(true);
});
it("disables run during building", () => {
expect(isRunButtonDisabled(4, "building")).toBe(true);
});
it("allows run in staging when nodes exist", () => {
expect(isRunButtonDisabled(4, "staging")).toBe(false);
});
it("allows run in running view when nodes exist", () => {
expect(isRunButtonDisabled(4, "running")).toBe(false);
});
});
+12
@@ -0,0 +1,12 @@
export type QueenPhase = "planning" | "building" | "staging" | "running";
export function isRunButtonDisabled(
nodeCount: number,
queenPhase?: QueenPhase,
): boolean {
return (
nodeCount === 0
|| queenPhase === "planning"
|| queenPhase === "building"
);
}
+2 -1
@@ -2822,7 +2822,7 @@ export default function Workspace() {
<div className={`${activeAgentState?.queenPhase === "planning" || activeAgentState?.queenPhase === "building" || activeAgentState?.originalDraft ? "w-[500px] min-w-[400px]" : "w-[300px] min-w-[240px]"} bg-card/30 flex flex-col border-r border-border/30 transition-[width] duration-200`}>
<div className="flex-1 min-h-0">
{activeAgentState?.queenPhase === "planning" || activeAgentState?.queenPhase === "building" ? (
<DraftGraph draft={activeAgentState?.draftGraph ?? null} loading={!activeAgentState?.draftGraph} building={activeAgentState?.queenBuilding} onRun={handleRun} onPause={handlePause} runState={activeAgentState?.workerRunState ?? "idle"} />
<DraftGraph draft={activeAgentState?.draftGraph ?? null} loading={!activeAgentState?.draftGraph} building={activeAgentState?.queenBuilding} onRun={handleRun} onPause={handlePause} runState={activeAgentState?.workerRunState ?? "idle"} queenPhase={activeAgentState?.queenPhase ?? "building"} />
) : activeAgentState?.originalDraft ? (
<DraftGraph
draft={activeAgentState.originalDraft}
@@ -2830,6 +2830,7 @@ export default function Workspace() {
onRun={handleRun}
onPause={handlePause}
runState={activeAgentState?.workerRunState ?? "idle"}
queenPhase={activeAgentState?.queenPhase ?? "building"}
flowchartMap={activeAgentState.flowchartMap ?? undefined}
runtimeNodes={currentGraph.nodes}
onRuntimeNodeClick={(runtimeNodeId) => {
@@ -0,0 +1,61 @@
import importlib.util
from pathlib import Path
def _load_check_llm_key_module():
module_path = Path(__file__).resolve().parents[2] / "scripts" / "check_llm_key.py"
spec = importlib.util.spec_from_file_location("check_llm_key_script", module_path)
module = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(module)
return module
def _run_openrouter_check(monkeypatch, status_code: int):
module = _load_check_llm_key_module()
calls = {}
class FakeResponse:
def __init__(self, code):
self.status_code = code
class FakeClient:
def __init__(self, timeout):
calls["timeout"] = timeout
def __enter__(self):
return self
def __exit__(self, exc_type, exc, tb):
return False
def get(self, endpoint, headers):
calls["endpoint"] = endpoint
calls["headers"] = headers
return FakeResponse(status_code)
monkeypatch.setattr(module.httpx, "Client", FakeClient)
result = module.check_openrouter("test-key")
return result, calls
def test_check_openrouter_200(monkeypatch):
result, calls = _run_openrouter_check(monkeypatch, 200)
assert result == {"valid": True, "message": "OpenRouter API key valid"}
assert calls["endpoint"] == "https://openrouter.ai/api/v1/models"
assert calls["headers"] == {"Authorization": "Bearer test-key"}
def test_check_openrouter_401(monkeypatch):
result, _ = _run_openrouter_check(monkeypatch, 401)
assert result == {"valid": False, "message": "Invalid OpenRouter API key"}
def test_check_openrouter_403(monkeypatch):
result, _ = _run_openrouter_check(monkeypatch, 403)
assert result == {"valid": False, "message": "OpenRouter API key lacks permissions"}
def test_check_openrouter_429(monkeypatch):
result, _ = _run_openrouter_check(monkeypatch, 429)
assert result == {"valid": True, "message": "OpenRouter API key valid"}
+45 -1
@@ -2,7 +2,7 @@
import logging
from framework.config import get_hive_config
from framework.config import get_api_base, get_hive_config, get_preferred_model
class TestGetHiveConfig:
@@ -21,3 +21,47 @@ class TestGetHiveConfig:
assert result == {}
assert "Failed to load Hive config" in caplog.text
assert str(config_file) in caplog.text
class TestOpenRouterConfig:
"""OpenRouter config composition and fallback behavior."""
def test_get_preferred_model_for_openrouter(self, tmp_path, monkeypatch):
config_file = tmp_path / "configuration.json"
config_file.write_text(
'{"llm":{"provider":"openrouter","model":"x-ai/grok-4.20-beta"}}',
encoding="utf-8",
)
monkeypatch.setattr("framework.config.HIVE_CONFIG_FILE", config_file)
assert get_preferred_model() == "openrouter/x-ai/grok-4.20-beta"
def test_get_preferred_model_normalizes_openrouter_prefixed_model(self, tmp_path, monkeypatch):
config_file = tmp_path / "configuration.json"
config_file.write_text(
'{"llm":{"provider":"openrouter","model":"openrouter/x-ai/grok-4.20-beta"}}',
encoding="utf-8",
)
monkeypatch.setattr("framework.config.HIVE_CONFIG_FILE", config_file)
assert get_preferred_model() == "openrouter/x-ai/grok-4.20-beta"
def test_get_api_base_falls_back_to_openrouter_default(self, tmp_path, monkeypatch):
config_file = tmp_path / "configuration.json"
config_file.write_text(
'{"llm":{"provider":"openrouter","model":"x-ai/grok-4.20-beta"}}',
encoding="utf-8",
)
monkeypatch.setattr("framework.config.HIVE_CONFIG_FILE", config_file)
assert get_api_base() == "https://openrouter.ai/api/v1"
def test_get_api_base_keeps_explicit_openrouter_api_base(self, tmp_path, monkeypatch):
config_file = tmp_path / "configuration.json"
config_file.write_text(
'{"llm":{"provider":"openrouter","model":"x-ai/grok-4.20-beta","api_base":"https://proxy.example/v1"}}',
encoding="utf-8",
)
monkeypatch.setattr("framework.config.HIVE_CONFIG_FILE", config_file)
assert get_api_base() == "https://proxy.example/v1"
+70
@@ -0,0 +1,70 @@
import os
import sys
from types import ModuleType, SimpleNamespace
from framework.credentials import key_storage
from framework.credentials.validation import ensure_credential_key_env
def _install_fake_aden_modules(monkeypatch, check_fn, credential_specs):
shell_config_module = ModuleType("aden_tools.credentials.shell_config")
shell_config_module.check_env_var_in_shell_config = check_fn
credentials_module = ModuleType("aden_tools.credentials")
credentials_module.CREDENTIAL_SPECS = credential_specs
monkeypatch.setitem(sys.modules, "aden_tools.credentials.shell_config", shell_config_module)
monkeypatch.setitem(sys.modules, "aden_tools.credentials", credentials_module)
def test_bootstrap_loads_configured_llm_env_var_from_shell_config(monkeypatch):
monkeypatch.setattr(key_storage, "load_credential_key", lambda: None)
monkeypatch.setattr(key_storage, "load_aden_api_key", lambda: None)
monkeypatch.setattr(
"framework.config.get_hive_config",
lambda: {"llm": {"api_key_env_var": "OPENROUTER_API_KEY"}},
)
monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)
monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)
calls = []
def check_env(var_name):
calls.append(var_name)
if var_name == "OPENROUTER_API_KEY":
return True, "or-key-123"
return False, None
_install_fake_aden_modules(
monkeypatch,
check_env,
{"anthropic": SimpleNamespace(env_var="ANTHROPIC_API_KEY")},
)
ensure_credential_key_env()
assert os.environ.get("OPENROUTER_API_KEY") == "or-key-123"
assert "OPENROUTER_API_KEY" in calls
def test_bootstrap_does_not_override_existing_configured_llm_env_var(monkeypatch):
monkeypatch.setattr(key_storage, "load_credential_key", lambda: None)
monkeypatch.setattr(key_storage, "load_aden_api_key", lambda: None)
monkeypatch.setattr(
"framework.config.get_hive_config",
lambda: {"llm": {"api_key_env_var": "OPENROUTER_API_KEY"}},
)
monkeypatch.setenv("OPENROUTER_API_KEY", "already-set")
calls = []
def check_env(var_name):
calls.append(var_name)
return True, "new-value-should-not-apply"
_install_fake_aden_modules(monkeypatch, check_env, {})
ensure_credential_key_env()
assert os.environ.get("OPENROUTER_API_KEY") == "already-set"
assert "OPENROUTER_API_KEY" not in calls
+140
@@ -537,6 +537,118 @@ class TestClientFacingBlocking:
assert llm._call_index >= 2
assert result.output["answer"] == "help provided"
@pytest.mark.asyncio
async def test_duplicate_ask_user_after_answer_is_blocked(
self, runtime, memory, client_spec
):
"""Repeated identical ask_user should not block the user a second time."""
client_spec.output_keys = ["answer"]
llm = MockStreamingLLM(
scenarios=[
tool_call_scenario(
"ask_user",
{
"question": "Proceed with this design?",
"options": ["Yes", "No"],
},
tool_use_id="ask_1",
),
tool_call_scenario(
"ask_user",
{
"question": "Proceed with this design?",
"options": ["Yes", "No"],
},
tool_use_id="ask_2",
),
tool_call_scenario(
"set_output",
{"key": "answer", "value": "accepted"},
tool_use_id="set_1",
),
text_scenario("Proceeding."),
]
)
bus = EventBus()
received = []
async def capture(e):
received.append(e)
bus.subscribe(
event_types=[EventType.CLIENT_INPUT_REQUESTED],
handler=capture,
)
node = EventLoopNode(event_bus=bus, config=LoopConfig(max_iterations=6))
ctx = build_ctx(runtime, client_spec, memory, llm)
async def user_responds():
await asyncio.sleep(0.05)
await node.inject_event("Yes, proceed with this design", is_client_input=True)
task = asyncio.create_task(user_responds())
result = await node.execute(ctx)
await task
assert result.success is True
assert result.output["answer"] == "accepted"
assert len(received) == 1
@pytest.mark.asyncio
async def test_duplicate_ask_user_multiple_after_answer_is_blocked(
self, runtime, memory, client_spec
):
"""Repeated identical ask_user_multiple should not re-open the same widget."""
client_spec.output_keys = ["answer"]
questions = [
{"id": "scope", "prompt": "What scope?", "options": ["Small", "Large"]},
{"id": "format", "prompt": "Which format?", "options": ["JSON", "CSV"]},
]
llm = MockStreamingLLM(
scenarios=[
tool_call_scenario(
"ask_user_multiple",
{"questions": questions},
tool_use_id="ask_multi_1",
),
tool_call_scenario(
"ask_user_multiple",
{"questions": questions},
tool_use_id="ask_multi_2",
),
tool_call_scenario(
"set_output",
{"key": "answer", "value": "captured"},
tool_use_id="set_1",
),
text_scenario("Captured."),
]
)
bus = EventBus()
received = []
async def capture(e):
received.append(e)
bus.subscribe(
event_types=[EventType.CLIENT_INPUT_REQUESTED],
handler=capture,
)
node = EventLoopNode(event_bus=bus, config=LoopConfig(max_iterations=6))
ctx = build_ctx(runtime, client_spec, memory, llm)
async def user_responds():
await asyncio.sleep(0.05)
await node.inject_event("[scope]: Small\n[format]: JSON", is_client_input=True)
task = asyncio.create_task(user_responds())
result = await node.execute(ctx)
await task
assert result.success is True
assert result.output["answer"] == "captured"
assert len(received) == 1
@pytest.mark.asyncio
async def test_client_facing_does_not_block_on_tools(self, runtime, memory):
"""client_facing + tool calls (no ask_user) should NOT block."""
@@ -1530,6 +1642,34 @@ class TestTransientErrorRetry:
await node.execute(ctx)
assert llm._call_index == 1 # only tried once
@pytest.mark.asyncio
async def test_client_facing_non_transient_error_does_not_crash(
self, runtime, node_spec, memory
):
"""Client-facing non-transient errors should wait for input, not crash on token vars."""
node_spec.output_keys = []
node_spec.client_facing = True
llm = ErrorThenSuccessLLM(
error=ValueError("bad request: blocked by policy"),
fail_count=100, # always fails
success_scenario=text_scenario("unreachable"),
)
ctx = build_ctx(runtime, node_spec, memory, llm)
node = EventLoopNode(
config=LoopConfig(
max_iterations=1,
max_stream_retries=0,
stream_retry_backoff_base=0.01,
),
)
node._await_user_input = AsyncMock(return_value=None)
result = await node.execute(ctx)
assert result.success is False
assert "Max iterations" in (result.error or "")
node._await_user_input.assert_awaited_once()
@pytest.mark.asyncio
async def test_transient_error_exhausts_retries(self, runtime, node_spec, memory):
"""Transient errors that exhaust retries should raise."""
+279 -1
@@ -19,7 +19,11 @@ from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from framework.llm.anthropic import AnthropicProvider
from framework.llm.litellm import LiteLLMProvider, _compute_retry_delay
from framework.llm.litellm import (
OPENROUTER_TOOL_COMPAT_MODEL_CACHE,
LiteLLMProvider,
_compute_retry_delay,
)
from framework.llm.provider import LLMProvider, LLMResponse, Tool
@@ -72,6 +76,20 @@ class TestLiteLLMProviderInit:
)
assert provider.api_base == "https://proxy.example/v1"
def test_init_openrouter_defaults_api_base(self):
"""OpenRouter should default to the official OpenAI-compatible endpoint."""
provider = LiteLLMProvider(model="openrouter/x-ai/grok-4.20-beta", api_key="my-key")
assert provider.api_base == "https://openrouter.ai/api/v1"
def test_init_openrouter_keeps_custom_api_base(self):
"""Explicit api_base should win over OpenRouter defaults."""
provider = LiteLLMProvider(
model="openrouter/x-ai/grok-4.20-beta",
api_key="my-key",
api_base="https://proxy.example/v1",
)
assert provider.api_base == "https://proxy.example/v1"
def test_init_ollama_no_key_needed(self):
"""Test that Ollama models don't require API key."""
with patch.dict(os.environ, {}, clear=True):
@@ -192,6 +210,34 @@ class TestToolConversion:
assert result["function"]["parameters"]["properties"]["query"]["type"] == "string"
assert result["function"]["parameters"]["required"] == ["query"]
def test_parse_tool_call_arguments_repairs_truncated_json(self):
"""Truncated JSON fragments should be repaired into valid tool inputs."""
provider = LiteLLMProvider(model="gpt-4o-mini", api_key="test-key")
parsed = provider._parse_tool_call_arguments(
(
'{"question":"What story structure should the agent use?",'
'"options":["3-act structure","Beginning-Middle-End","Random paragraph"'
),
"ask_user",
)
assert parsed == {
"question": "What story structure should the agent use?",
"options": [
"3-act structure",
"Beginning-Middle-End",
"Random paragraph",
],
}
def test_parse_tool_call_arguments_raises_when_unrepairable(self):
"""Completely invalid JSON should fail fast instead of producing _raw loops."""
provider = LiteLLMProvider(model="gpt-4o-mini", api_key="test-key")
with pytest.raises(ValueError, match="Failed to parse tool call arguments"):
provider._parse_tool_call_arguments('{"question": foo', "ask_user")
class TestAnthropicProviderBackwardCompatibility:
"""Test AnthropicProvider backward compatibility with LiteLLM backend."""
@@ -682,6 +728,238 @@ class TestMiniMaxStreamFallback:
assert not LiteLLMProvider(model="gpt-4o-mini", api_key="x")._is_minimax_model()
class TestOpenRouterToolCompatFallback:
"""OpenRouter models should fall back when native tool use is unavailable."""
def teardown_method(self):
OPENROUTER_TOOL_COMPAT_MODEL_CACHE.clear()
@pytest.mark.asyncio
@patch("litellm.acompletion")
async def test_stream_falls_back_to_json_tool_emulation(self, mock_acompletion):
"""OpenRouter tool-use 404s should emit synthetic ToolCallEvents instead of errors."""
from framework.llm.stream_events import FinishEvent, ToolCallEvent
provider = LiteLLMProvider(
model="openrouter/liquid/lfm-2.5-1.2b-thinking:free",
api_key="test-key",
)
tools = [
Tool(
name="web_search",
description="Search the web",
parameters={
"properties": {
"query": {"type": "string"},
"num_results": {"type": "integer"},
},
"required": ["query"],
},
)
]
compat_response = MagicMock()
compat_response.choices = [MagicMock()]
compat_response.choices[0].message.content = (
'{"assistant_response":"","tool_calls":['
'{"name":"web_search","arguments":'
'{"query":"Python 3.13 release notes","num_results":3}}'
"]}"
)
compat_response.choices[0].finish_reason = "stop"
compat_response.model = provider.model
compat_response.usage.prompt_tokens = 18
compat_response.usage.completion_tokens = 9
async def side_effect(*args, **kwargs):
if kwargs.get("stream"):
raise RuntimeError(
'OpenrouterException - {"error":{"message":"No endpoints found '
'that support tool use. To learn more about provider routing, '
'visit: https://openrouter.ai/docs/guides/routing/provider-selection",'
'"code":404}}'
)
return compat_response
mock_acompletion.side_effect = side_effect
events = []
async for event in provider.stream(
messages=[{"role": "user", "content": "Search for the Python 3.13 release notes."}],
system="Use tools when needed.",
tools=tools,
max_tokens=256,
):
events.append(event)
tool_calls = [event for event in events if isinstance(event, ToolCallEvent)]
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == "web_search"
assert tool_calls[0].tool_input == {
"query": "Python 3.13 release notes",
"num_results": 3,
}
assert tool_calls[0].tool_use_id.startswith("openrouter_compat_")
finish_events = [event for event in events if isinstance(event, FinishEvent)]
assert len(finish_events) == 1
assert finish_events[0].stop_reason == "tool_calls"
assert finish_events[0].input_tokens == 18
assert finish_events[0].output_tokens == 9
assert mock_acompletion.call_count == 2
first_call = mock_acompletion.call_args_list[0].kwargs
assert first_call["stream"] is True
assert "tools" in first_call
second_call = mock_acompletion.call_args_list[1].kwargs
assert "tools" not in second_call
assert "Tool compatibility mode is active" in second_call["messages"][0]["content"]
assert provider.model in OPENROUTER_TOOL_COMPAT_MODEL_CACHE
@pytest.mark.asyncio
@patch("litellm.acompletion")
async def test_stream_tool_compat_parses_textual_tool_calls_and_uses_cache(
self,
mock_acompletion,
):
"""Textual tool-call markers should become ToolCallEvents and skip repeat probing."""
from framework.llm.stream_events import ToolCallEvent
provider = LiteLLMProvider(
model="openrouter/liquid/lfm-2.5-1.2b-thinking:free",
api_key="test-key",
)
tools = [
Tool(
name="ask_user_multiple",
description="Ask the user a multiple-choice question",
parameters={
"properties": {
"options": {"type": "array"},
"question": {"type": "string"},
"prompt": {"type": "string"},
},
"required": ["options", "question", "prompt"],
},
)
]
compat_response = MagicMock()
compat_response.choices = [MagicMock()]
compat_response.choices[0].message.content = (
"<|tool_call_start|>"
"[ask_user_multiple(options=['Quartet Collaborator', 'Project Advisor'], "
"question='Who are you?', prompt='Who are you?')]"
"<|tool_call_end|>"
)
compat_response.choices[0].finish_reason = "stop"
compat_response.model = provider.model
compat_response.usage.prompt_tokens = 10
compat_response.usage.completion_tokens = 5
call_state = {"count": 0}
async def side_effect(*args, **kwargs):
call_state["count"] += 1
if kwargs.get("stream"):
raise RuntimeError(
'OpenrouterException - {"error":{"message":"No endpoints found '
'that support tool use.","code":404}}'
)
return compat_response
mock_acompletion.side_effect = side_effect
first_events = []
async for event in provider.stream(
messages=[{"role": "user", "content": "Who are you?"}],
system="Use tools when needed.",
tools=tools,
max_tokens=128,
):
first_events.append(event)
tool_calls = [event for event in first_events if isinstance(event, ToolCallEvent)]
assert len(tool_calls) == 1
assert tool_calls[0].tool_name == "ask_user_multiple"
assert tool_calls[0].tool_input == {
"options": ["Quartet Collaborator", "Project Advisor"],
"question": "Who are you?",
"prompt": "Who are you?",
}
second_events = []
async for event in provider.stream(
messages=[{"role": "user", "content": "Who are you?"}],
system="Use tools when needed.",
tools=tools,
max_tokens=128,
):
second_events.append(event)
second_tool_calls = [event for event in second_events if isinstance(event, ToolCallEvent)]
assert len(second_tool_calls) == 1
assert mock_acompletion.call_count == 3
assert mock_acompletion.call_args_list[0].kwargs["stream"] is True
assert "stream" not in mock_acompletion.call_args_list[1].kwargs
assert "stream" not in mock_acompletion.call_args_list[2].kwargs
@pytest.mark.asyncio
@patch("litellm.acompletion")
async def test_stream_tool_compat_treats_non_json_as_plain_text(self, mock_acompletion):
"""If fallback output is not valid JSON, preserve it as assistant text."""
from framework.llm.stream_events import FinishEvent, TextDeltaEvent, ToolCallEvent
provider = LiteLLMProvider(
model="openrouter/liquid/lfm-2.5-1.2b-thinking:free",
api_key="test-key",
)
tools = [
Tool(
name="web_search",
description="Search the web",
parameters={"properties": {"query": {"type": "string"}}, "required": ["query"]},
)
]
compat_response = MagicMock()
compat_response.choices = [MagicMock()]
compat_response.choices[0].message.content = "I can answer directly without tools."
compat_response.choices[0].finish_reason = "stop"
compat_response.model = provider.model
compat_response.usage.prompt_tokens = 12
compat_response.usage.completion_tokens = 6
async def side_effect(*args, **kwargs):
if kwargs.get("stream"):
raise RuntimeError(
'OpenrouterException - {"error":{"message":"No endpoints found '
'that support tool use.","code":404}}'
)
return compat_response
mock_acompletion.side_effect = side_effect
events = []
async for event in provider.stream(
messages=[{"role": "user", "content": "Say hello."}],
system="Be concise.",
tools=tools,
max_tokens=128,
):
events.append(event)
text_events = [event for event in events if isinstance(event, TextDeltaEvent)]
assert len(text_events) == 1
assert text_events[0].snapshot == "I can answer directly without tools."
assert not any(isinstance(event, ToolCallEvent) for event in events)
finish_events = [event for event in events if isinstance(event, FinishEvent)]
assert len(finish_events) == 1
assert finish_events[0].stop_reason == "stop"
# ---------------------------------------------------------------------------
# AgentRunner._is_local_model — parameterized tests
# ---------------------------------------------------------------------------
@@ -21,3 +21,8 @@ def test_minimax_provider_prefix_maps_to_minimax_api_key():
def test_minimax_model_name_prefix_maps_to_minimax_api_key():
runner = _runner_for_unit_test()
assert runner._get_api_key_env_var("minimax-chat") == "MINIMAX_API_KEY"
def test_openrouter_provider_prefix_maps_to_openrouter_api_key():
runner = _runner_for_unit_test()
assert runner._get_api_key_env_var("openrouter/x-ai/grok-4.20-beta") == "OPENROUTER_API_KEY"
+127 -50
View File
@@ -783,6 +783,7 @@ $ProviderMap = [ordered]@{
GOOGLE_API_KEY = @{ Name = "Google AI"; Id = "google" }
GROQ_API_KEY = @{ Name = "Groq"; Id = "groq" }
CEREBRAS_API_KEY = @{ Name = "Cerebras"; Id = "cerebras" }
OPENROUTER_API_KEY = @{ Name = "OpenRouter"; Id = "openrouter" }
MISTRAL_API_KEY = @{ Name = "Mistral"; Id = "mistral" }
TOGETHER_API_KEY = @{ Name = "Together AI"; Id = "together" }
DEEPSEEK_API_KEY = @{ Name = "DeepSeek"; Id = "deepseek" }
@@ -825,9 +826,45 @@ $ModelChoices = @{
)
}
function Normalize-OpenRouterModelId {
param([string]$ModelId)
$normalized = if ($ModelId) { $ModelId.Trim() } else { "" }
if ($normalized -match '(?i)^openrouter/(.+)$') {
$normalized = $matches[1]
}
return $normalized
}
function Get-ModelSelection {
param([string]$ProviderId)
if ($ProviderId -eq "openrouter") {
$defaultModel = ""
if ($PrevModel -and $PrevProvider -eq $ProviderId) {
$defaultModel = Normalize-OpenRouterModelId $PrevModel
}
Write-Host ""
Write-Color -Text "Enter your OpenRouter model id:" -Color White
Write-Color -Text " Paste from openrouter.ai (example: x-ai/grok-4.20-beta)" -Color DarkGray
Write-Color -Text " If calls fail with guardrail/privacy errors: openrouter.ai/settings/privacy" -Color DarkGray
Write-Host ""
while ($true) {
if ($defaultModel) {
$rawModel = Read-Host "Model id [$defaultModel]"
if ([string]::IsNullOrWhiteSpace($rawModel)) { $rawModel = $defaultModel }
} else {
$rawModel = Read-Host "Model id"
}
$normalizedModel = Normalize-OpenRouterModelId $rawModel
if (-not [string]::IsNullOrWhiteSpace($normalizedModel)) {
Write-Host ""
Write-Ok "Model: $normalizedModel"
return @{ Model = $normalizedModel; MaxTokens = 8192; MaxContextTokens = 120000 }
}
Write-Color -Text "Model id cannot be empty." -Color Red
}
}
$choices = $ModelChoices[$ProviderId]
if (-not $choices -or $choices.Count -eq 0) {
return @{ Model = $DefaultModels[$ProviderId]; MaxTokens = 8192; MaxContextTokens = 120000 }
@@ -888,6 +925,7 @@ $SelectedEnvVar = ""
$SelectedModel = ""
$SelectedMaxTokens = 8192
$SelectedMaxContextTokens = 120000
$SelectedApiBase = ""
$SubscriptionMode = ""
# ── Credential detection (silent — just set flags) ───────────
@@ -912,15 +950,16 @@ if (-not $kimiKey) { $kimiKey = $env:KIMI_API_KEY }
if ($kimiKey) { $KimiCredDetected = $true }
# Detect API key providers
$ProviderMenuEnvVars = @("ANTHROPIC_API_KEY", "OPENAI_API_KEY", "GEMINI_API_KEY", "GROQ_API_KEY", "CEREBRAS_API_KEY")
$ProviderMenuNames = @("Anthropic (Claude) - Recommended", "OpenAI (GPT)", "Google Gemini - Free tier available", "Groq - Fast, free tier", "Cerebras - Fast, free tier")
$ProviderMenuIds = @("anthropic", "openai", "gemini", "groq", "cerebras")
$ProviderMenuEnvVars = @("ANTHROPIC_API_KEY", "OPENAI_API_KEY", "GEMINI_API_KEY", "GROQ_API_KEY", "CEREBRAS_API_KEY", "OPENROUTER_API_KEY")
$ProviderMenuNames = @("Anthropic (Claude) - Recommended", "OpenAI (GPT)", "Google Gemini - Free tier available", "Groq - Fast, free tier", "Cerebras - Fast, free tier", "OpenRouter - Bring any OpenRouter model")
$ProviderMenuIds = @("anthropic", "openai", "gemini", "groq", "cerebras", "openrouter")
$ProviderMenuUrls = @(
"https://console.anthropic.com/settings/keys",
"https://platform.openai.com/api-keys",
"https://aistudio.google.com/apikey",
"https://console.groq.com/keys",
"https://cloud.cerebras.ai/"
"https://cloud.cerebras.ai/",
"https://openrouter.ai/keys"
)
# ── Read previous configuration (if any) ──────────────────────
@@ -976,6 +1015,7 @@ if ($PrevSubMode -or $PrevProvider) {
"gemini" { $DefaultChoice = "7" }
"groq" { $DefaultChoice = "8" }
"cerebras" { $DefaultChoice = "9" }
"openrouter" { $DefaultChoice = "10" }
"kimi" { $DefaultChoice = "4" }
}
}
@@ -1018,7 +1058,7 @@ if ($KimiCredDetected) { Write-Color -Text " (credential detected)" -Color Gree
Write-Host ""
Write-Color -Text " API key providers:" -Color Cyan
# 5-9) API key providers
# 5-10) API key providers
for ($idx = 0; $idx -lt $ProviderMenuEnvVars.Count; $idx++) {
$num = $idx + 5
$envVal = [System.Environment]::GetEnvironmentVariable($ProviderMenuEnvVars[$idx], "Process")
@@ -1029,8 +1069,9 @@ for ($idx = 0; $idx -lt $ProviderMenuEnvVars.Count; $idx++) {
if ($envVal) { Write-Color -Text " (credential detected)" -Color Green } else { Write-Host "" }
}
$SkipChoice = 5 + $ProviderMenuEnvVars.Count
Write-Host " " -NoNewline
Write-Color -Text "10" -Color Cyan -NoNewline
Write-Color -Text "$SkipChoice" -Color Cyan -NoNewline
Write-Host ") Skip for now"
Write-Host ""
@@ -1041,16 +1082,16 @@ if ($DefaultChoice) {
while ($true) {
if ($DefaultChoice) {
$raw = Read-Host "Enter choice (1-10) [$DefaultChoice]"
$raw = Read-Host "Enter choice (1-$SkipChoice) [$DefaultChoice]"
if ([string]::IsNullOrWhiteSpace($raw)) { $raw = $DefaultChoice }
} else {
$raw = Read-Host "Enter choice (1-10)"
$raw = Read-Host "Enter choice (1-$SkipChoice)"
}
if ($raw -match '^\d+$') {
$num = [int]$raw
if ($num -ge 1 -and $num -le 10) { break }
if ($num -ge 1 -and $num -le $SkipChoice) { break }
}
Write-Color -Text "Invalid choice. Please enter 1-10" -Color Red
Write-Color -Text "Invalid choice. Please enter 1-$SkipChoice" -Color Red
}
switch ($num) {
@@ -1129,13 +1170,18 @@ switch ($num) {
Write-Ok "Using Kimi Code subscription"
Write-Color -Text " Model: kimi-k2.5 | API: api.kimi.com/coding" -Color DarkGray
}
{ $_ -ge 5 -and $_ -le 9 } {
{ $_ -ge 5 -and $_ -le 10 } {
# API key providers
$provIdx = $num - 5
$SelectedEnvVar = $ProviderMenuEnvVars[$provIdx]
$SelectedProviderId = $ProviderMenuIds[$provIdx]
$providerName = $ProviderMenuNames[$provIdx] -replace ' - .*', '' # strip description
$signupUrl = $ProviderMenuUrls[$provIdx]
if ($SelectedProviderId -eq "openrouter") {
$SelectedApiBase = "https://openrouter.ai/api/v1"
} else {
$SelectedApiBase = ""
}
# Prompt for key (allow replacement if already set) with verification + retry
while ($true) {
@@ -1164,7 +1210,11 @@ switch ($num) {
# Health check the new key
Write-Host " Verifying API key... " -NoNewline
try {
$hcResult = & uv run python (Join-Path $ScriptDir "scripts/check_llm_key.py") $SelectedProviderId $apiKey 2>$null
if ($SelectedApiBase) {
$hcResult = & uv run python (Join-Path $ScriptDir "scripts/check_llm_key.py") $SelectedProviderId $apiKey $SelectedApiBase 2>$null
} else {
$hcResult = & uv run python (Join-Path $ScriptDir "scripts/check_llm_key.py") $SelectedProviderId $apiKey 2>$null
}
$hcJson = $hcResult | ConvertFrom-Json
if ($hcJson.valid -eq $true) {
Write-Color -Text "ok" -Color Green
@@ -1202,7 +1252,7 @@ switch ($num) {
}
}
}
10 {
{ $_ -eq $SkipChoice } {
Write-Host ""
Write-Warn "Skipped. An LLM API key is required to test and use worker agents."
Write-Host " Add your API key later by running:"
@@ -1383,11 +1433,45 @@ if ($SelectedProviderId) {
} elseif ($SubscriptionMode -eq "kimi_code") {
$config.llm["api_base"] = "https://api.kimi.com/coding"
$config.llm["api_key_env_var"] = $SelectedEnvVar
} elseif ($SelectedProviderId -eq "openrouter") {
$config.llm["api_base"] = "https://openrouter.ai/api/v1"
$config.llm["api_key_env_var"] = $SelectedEnvVar
} else {
$config.llm["api_key_env_var"] = $SelectedEnvVar
}
$config | ConvertTo-Json -Depth 4 | Set-Content -Path $HiveConfigFile -Encoding UTF8
$expectedApiBase = ""
if ($SubscriptionMode -eq "zai_code") {
$expectedApiBase = "https://api.z.ai/api/coding/paas/v4"
} elseif ($SubscriptionMode -eq "kimi_code") {
$expectedApiBase = "https://api.kimi.com/coding"
} elseif ($SelectedProviderId -eq "openrouter") {
$expectedApiBase = "https://openrouter.ai/api/v1"
}
try {
$savedConfig = Get-Content -Path $HiveConfigFile -Raw | ConvertFrom-Json
$savedLlm = $savedConfig.llm
$verifyOk = $savedLlm -and $savedLlm.provider -eq $SelectedProviderId -and $savedLlm.model -eq $SelectedModel
if ($SelectedEnvVar) {
$verifyOk = $verifyOk -and $savedLlm.api_key_env_var -eq $SelectedEnvVar
}
if ($expectedApiBase) {
$verifyOk = $verifyOk -and $savedLlm.api_base -eq $expectedApiBase
}
if (-not $verifyOk) {
throw "Saved configuration mismatch"
}
} catch {
Write-Fail "configuration verification failed"
Write-Color -Text " Could not persist ~/.hive/configuration.json with the selected LLM settings." -Color Yellow
exit 1
}
Write-Ok "done"
Write-Color -Text " ~/.hive/configuration.json" -Color DarkGray
}
@@ -1682,6 +1766,9 @@ if ($SelectedProviderId) {
Write-Color -Text " API: api.z.ai (OpenAI-compatible)" -Color DarkGray
} elseif ($SubscriptionMode -eq "codex") {
Write-Ok "OpenAI Codex Subscription -> $SelectedModel"
} elseif ($SelectedProviderId -eq "openrouter") {
Write-Ok "OpenRouter API Key -> $SelectedModel"
Write-Color -Text " API: openrouter.ai/api/v1 (OpenAI-compatible)" -Color DarkGray
} else {
Write-Color -Text " $SelectedProviderId" -Color Cyan -NoNewline
Write-Host " -> " -NoNewline
@@ -1712,45 +1799,35 @@ if ($CodexAvailable) {
Write-Host ""
}
# Auto-launch dashboard or show manual instructions
if ($FrontendBuilt) {
Write-Color -Text "Launching dashboard..." -Color White
Write-Host ""
Write-Color -Text " Starting server on http://localhost:8787" -Color DarkGray
Write-Color -Text " Press Ctrl+C to stop" -Color DarkGray
Write-Host ""
& (Join-Path $ScriptDir "hive.ps1") open
} else {
Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow
Write-Host ""
Write-Color -Text " IMPORTANT: Restart your terminal now!" -Color Yellow
Write-Host ""
Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow
Write-Host ""
Write-Host 'Environment variables (uv, API keys) are now configured, but you need to'
Write-Host 'restart your terminal for them to take effect in new sessions.'
Write-Host ""
# Setup-only mode: quickstart never auto-launches the dashboard.
Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow
Write-Host ""
Write-Color -Text " IMPORTANT: Restart your terminal now!" -Color Yellow
Write-Host ""
Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow
Write-Host ""
Write-Host 'Environment variables (uv, API keys) are now configured, but you need to'
Write-Host 'restart your terminal for them to take effect in new sessions.'
Write-Host ""
Write-Color -Text "Run an Agent:" -Color White
Write-Host ""
Write-Host " Launch the interactive dashboard to browse and run agents:"
Write-Host " You can start an example agent or an agent built by yourself:"
Write-Color -Text " .\hive.ps1 tui" -Color Cyan
Write-Host ""
Write-Color -Text "Run an Agent:" -Color White
Write-Host ""
Write-Host " Launch the interactive dashboard when you're ready:"
Write-Color -Text " hive open" -Color Cyan
Write-Host ""
if ($SelectedProviderId -or $credKey) {
Write-Color -Text "Note:" -Color White
Write-Host "- uv has been added to your User PATH"
if ($SelectedProviderId -and $SelectedEnvVar) {
Write-Host "- $SelectedEnvVar is set for LLM access"
}
if ($credKey) {
Write-Host "- HIVE_CREDENTIAL_KEY is set for credential encryption"
}
Write-Host "- All variables will persist across reboots"
Write-Host ""
if ($SelectedProviderId -or $credKey) {
Write-Color -Text "Note:" -Color White
Write-Host "- uv has been added to your User PATH"
if ($SelectedProviderId -and $SelectedEnvVar) {
Write-Host "- $SelectedEnvVar is set for LLM access"
}
Write-Color -Text 'Run .\quickstart.ps1 again to reconfigure.' -Color DarkGray
if ($credKey) {
Write-Host "- HIVE_CREDENTIAL_KEY is set for credential encryption"
}
Write-Host "- All variables will persist across reboots"
Write-Host ""
}
Write-Color -Text 'Run .\quickstart.ps1 again to reconfigure.' -Color DarkGray
Write-Host ""
+181 -77
View File
@@ -385,6 +385,7 @@ if [ "$USE_ASSOC_ARRAYS" = true ]; then
["GOOGLE_API_KEY"]="Google AI"
["GROQ_API_KEY"]="Groq"
["CEREBRAS_API_KEY"]="Cerebras"
["OPENROUTER_API_KEY"]="OpenRouter"
["MISTRAL_API_KEY"]="Mistral"
["TOGETHER_API_KEY"]="Together AI"
["DEEPSEEK_API_KEY"]="DeepSeek"
@@ -398,6 +399,7 @@ if [ "$USE_ASSOC_ARRAYS" = true ]; then
["GOOGLE_API_KEY"]="google"
["GROQ_API_KEY"]="groq"
["CEREBRAS_API_KEY"]="cerebras"
["OPENROUTER_API_KEY"]="openrouter"
["MISTRAL_API_KEY"]="mistral"
["TOGETHER_API_KEY"]="together"
["DEEPSEEK_API_KEY"]="deepseek"
@@ -521,9 +523,9 @@ if [ "$USE_ASSOC_ARRAYS" = true ]; then
}
else
# Bash 3.2 - use parallel indexed arrays
PROVIDER_ENV_VARS=(ANTHROPIC_API_KEY OPENAI_API_KEY MINIMAX_API_KEY GEMINI_API_KEY GOOGLE_API_KEY GROQ_API_KEY CEREBRAS_API_KEY MISTRAL_API_KEY TOGETHER_API_KEY DEEPSEEK_API_KEY)
PROVIDER_DISPLAY_NAMES=("Anthropic (Claude)" "OpenAI (GPT)" "MiniMax" "Google Gemini" "Google AI" "Groq" "Cerebras" "Mistral" "Together AI" "DeepSeek")
PROVIDER_ID_LIST=(anthropic openai minimax gemini google groq cerebras mistral together deepseek)
PROVIDER_ENV_VARS=(ANTHROPIC_API_KEY OPENAI_API_KEY MINIMAX_API_KEY GEMINI_API_KEY GOOGLE_API_KEY GROQ_API_KEY CEREBRAS_API_KEY OPENROUTER_API_KEY MISTRAL_API_KEY TOGETHER_API_KEY DEEPSEEK_API_KEY)
PROVIDER_DISPLAY_NAMES=("Anthropic (Claude)" "OpenAI (GPT)" "MiniMax" "Google Gemini" "Google AI" "Groq" "Cerebras" "OpenRouter" "Mistral" "Together AI" "DeepSeek")
PROVIDER_ID_LIST=(anthropic openai minimax gemini google groq cerebras openrouter mistral together deepseek)
# Default models by provider id (parallel arrays)
MODEL_PROVIDER_IDS=(anthropic openai minimax gemini groq cerebras mistral together_ai deepseek)
@@ -701,10 +703,57 @@ detect_shell_rc() {
SHELL_RC_FILE=$(detect_shell_rc)
SHELL_NAME=$(basename "$SHELL")
# Normalize user-pasted OpenRouter model IDs:
# - trim whitespace
# - strip leading "openrouter/" if present
normalize_openrouter_model_id() {
local raw="$1"
# Trim leading/trailing whitespace
raw="${raw#"${raw%%[![:space:]]*}"}"
raw="${raw%"${raw##*[![:space:]]}"}"
if [[ "$raw" =~ ^[Oo][Pp][Ee][Nn][Rr][Oo][Uu][Tt][Ee][Rr]/(.+)$ ]]; then
raw="${BASH_REMATCH[1]}"
fi
printf '%s' "$raw"
}
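Both quickstart scripts and the Python config layer must agree on this normalization. A small parity check, assuming a `normalize(s)` callable wired to whichever implementation is under test (the callable name is hypothetical):

CASES = [
    ("openrouter/x-ai/grok-4.20-beta", "x-ai/grok-4.20-beta"),
    ("OpenRouter/x-ai/grok-4.20-beta", "x-ai/grok-4.20-beta"),  # prefix match is case-insensitive
    ("  x-ai/grok-4.20-beta  ", "x-ai/grok-4.20-beta"),         # surrounding whitespace is trimmed
    ("x-ai/grok-4.20-beta", "x-ai/grok-4.20-beta"),             # already-normalized ids pass through
]

def check_normalizer(normalize):
    # e.g. check_normalizer(lambda s: subprocess-driven call into the shell function
    for raw, expected in CASES:
        assert normalize(raw) == expected, (raw, normalize(raw))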
# Prompt the user to choose a model for their selected provider.
# Sets SELECTED_MODEL, SELECTED_MAX_TOKENS, and SELECTED_MAX_CONTEXT_TOKENS.
prompt_model_selection() {
local provider_id="$1"
if [ "$provider_id" = "openrouter" ]; then
local default_model=""
if [ -n "$PREV_MODEL" ] && [ "$provider_id" = "$PREV_PROVIDER" ]; then
default_model="$(normalize_openrouter_model_id "$PREV_MODEL")"
fi
echo ""
echo -e "${BOLD}Enter your OpenRouter model id:${NC}"
echo -e " ${DIM}Paste from openrouter.ai (example: x-ai/grok-4.20-beta)${NC}"
echo -e " ${DIM}If calls fail with guardrail/privacy errors: openrouter.ai/settings/privacy${NC}"
echo ""
local input_model=""
while true; do
if [ -n "$default_model" ]; then
read -r -p "Model id [$default_model]: " input_model || true
input_model="${input_model:-$default_model}"
else
read -r -p "Model id: " input_model || true
fi
local normalized_model
normalized_model="$(normalize_openrouter_model_id "$input_model")"
if [ -n "$normalized_model" ]; then
SELECTED_MODEL="$normalized_model"
SELECTED_MAX_TOKENS=8192
SELECTED_MAX_CONTEXT_TOKENS=120000
echo ""
echo -e "${GREEN}${NC} Model: ${DIM}$SELECTED_MODEL${NC}"
return
fi
echo -e "${RED}Model id cannot be empty.${NC}"
done
fi
local count
count="$(get_model_choice_count "$provider_id")"
@@ -798,10 +847,14 @@ save_configuration() {
max_context_tokens=120000
fi
mkdir -p "$HIVE_CONFIG_DIR"
$PYTHON_CMD -c "
uv run python -c "
import json
from datetime import datetime, timezone
from pathlib import Path
cfg_path = Path.home() / '.hive' / 'configuration.json'
cfg_path.parent.mkdir(parents=True, exist_ok=True)
config = {
'llm': {
'provider': '$provider_id',
@@ -810,7 +863,7 @@ config = {
'max_context_tokens': $max_context_tokens,
'api_key_env_var': '$env_var'
},
'created_at': '$(date -u +"%Y-%m-%dT%H:%M:%S+00:00")'
'created_at': datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S+00:00')
}
if '$use_claude_code_sub' == 'true':
config['llm']['use_claude_code_subscription'] = True
@@ -822,10 +875,42 @@ if '$use_codex_sub' == 'true':
config['llm'].pop('api_key_env_var', None)
if '$api_base':
config['llm']['api_base'] = '$api_base'
with open('$HIVE_CONFIG_FILE', 'w') as f:
tmp_path = cfg_path.parent / (cfg_path.name + '.tmp')
with open(tmp_path, 'w', encoding='utf-8') as f:
json.dump(config, f, indent=2)
tmp_path.replace(cfg_path)
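# Atomic replace: a crash mid-write leaves the old config intact, never a truncated file.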
print(json.dumps(config, indent=2))
" 2>/dev/null
"
}
# Verify that configuration was persisted exactly as selected.
# Args: provider_id env_var model [api_base]
verify_configuration() {
local provider_id="$1"
local env_var="$2"
local model="$3"
local api_base="${4:-}"
uv run python -c "
import json
import sys
from pathlib import Path
cfg_path = Path.home() / '.hive' / 'configuration.json'
with open(cfg_path, encoding='utf-8-sig') as f:
cfg = json.load(f)
llm = cfg.get('llm', {})
ok = (llm.get('provider') == '$provider_id' and llm.get('model') == '$model')
if '$env_var':
ok = ok and (llm.get('api_key_env_var') == '$env_var')
if '$api_base':
ok = ok and (llm.get('api_base') == '$api_base')
if not ok:
print(json.dumps(llm, indent=2))
sys.exit(1)
"
}
# Source shell rc file to pick up existing env vars (temporarily disable set -e)
@@ -901,10 +986,12 @@ PREV_MODEL=""
PREV_ENV_VAR=""
PREV_SUB_MODE=""
if [ -f "$HIVE_CONFIG_FILE" ]; then
eval "$($PYTHON_CMD -c "
eval "$(uv run python -c "
import json, sys
from pathlib import Path
try:
with open('$HIVE_CONFIG_FILE') as f:
cfg_path = Path.home() / '.hive' / 'configuration.json'
with open(cfg_path, encoding='utf-8-sig') as f:
c = json.load(f)
llm = c.get('llm', {})
print(f'PREV_PROVIDER={llm.get(\"provider\", \"\")}')
@@ -954,6 +1041,7 @@ if [ -n "$PREV_SUB_MODE" ] || [ -n "$PREV_PROVIDER" ]; then
gemini) DEFAULT_CHOICE=8 ;;
groq) DEFAULT_CHOICE=9 ;;
cerebras) DEFAULT_CHOICE=10 ;;
openrouter) DEFAULT_CHOICE=11 ;;
minimax) DEFAULT_CHOICE=4 ;;
kimi) DEFAULT_CHOICE=5 ;;
esac
@@ -1004,10 +1092,10 @@ fi
echo ""
echo -e " ${CYAN}${BOLD}API key providers:${NC}"
# 6-10) API key providers — show (credential detected) if key already set
PROVIDER_MENU_ENVS=(ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY GROQ_API_KEY CEREBRAS_API_KEY)
PROVIDER_MENU_NAMES=("Anthropic (Claude) - Recommended" "OpenAI (GPT)" "Google Gemini - Free tier available" "Groq - Fast, free tier" "Cerebras - Fast, free tier")
for idx in 0 1 2 3 4; do
# 6-11) API key providers — show (credential detected) if key already set
PROVIDER_MENU_ENVS=(ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY GROQ_API_KEY CEREBRAS_API_KEY OPENROUTER_API_KEY)
PROVIDER_MENU_NAMES=("Anthropic (Claude) - Recommended" "OpenAI (GPT)" "Google Gemini - Free tier available" "Groq - Fast, free tier" "Cerebras - Fast, free tier" "OpenRouter - Bring any OpenRouter model")
for idx in "${!PROVIDER_MENU_ENVS[@]}"; do
num=$((idx + 6))
env_var="${PROVIDER_MENU_ENVS[$idx]}"
if [ -n "${!env_var}" ]; then
@@ -1017,7 +1105,8 @@ for idx in 0 1 2 3 4; do
fi
done
echo -e " ${CYAN}11)${NC} Skip for now"
SKIP_CHOICE=$((6 + ${#PROVIDER_MENU_ENVS[@]}))
echo -e " ${CYAN}$SKIP_CHOICE)${NC} Skip for now"
echo ""
if [ -n "$DEFAULT_CHOICE" ]; then
@@ -1027,15 +1116,15 @@ fi
while true; do
if [ -n "$DEFAULT_CHOICE" ]; then
read -r -p "Enter choice (1-11) [$DEFAULT_CHOICE]: " choice || true
read -r -p "Enter choice (1-$SKIP_CHOICE) [$DEFAULT_CHOICE]: " choice || true
choice="${choice:-$DEFAULT_CHOICE}"
else
read -r -p "Enter choice (1-11): " choice || true
read -r -p "Enter choice (1-$SKIP_CHOICE): " choice || true
fi
if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le 11 ]; then
if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le "$SKIP_CHOICE" ]; then
break
fi
echo -e "${RED}Invalid choice. Please enter 1-11${NC}"
echo -e "${RED}Invalid choice. Please enter 1-$SKIP_CHOICE${NC}"
done
case $choice in
@@ -1162,6 +1251,13 @@ case $choice in
SIGNUP_URL="https://cloud.cerebras.ai/"
;;
11)
SELECTED_ENV_VAR="OPENROUTER_API_KEY"
SELECTED_PROVIDER_ID="openrouter"
SELECTED_API_BASE="https://openrouter.ai/api/v1"
PROVIDER_NAME="OpenRouter"
SIGNUP_URL="https://openrouter.ai/keys"
;;
"$SKIP_CHOICE")
echo ""
echo -e "${YELLOW}Skipped.${NC} An LLM API key is required to test and use worker agents."
echo -e "Add your API key later by running:"
@@ -1202,7 +1298,7 @@ if { [ -z "$SUBSCRIPTION_MODE" ] || [ "$SUBSCRIPTION_MODE" = "minimax_code" ] ||
echo -e "${GREEN}${NC} API key saved to $SHELL_RC_FILE"
# Health check the new key
echo -n " Verifying API key... "
if { [ "$SUBSCRIPTION_MODE" = "minimax_code" ] || [ "$SUBSCRIPTION_MODE" = "kimi_code" ]; } && [ -n "${SELECTED_API_BASE:-}" ]; then
if [ -n "${SELECTED_API_BASE:-}" ]; then
HC_RESULT=$(uv run python "$SCRIPT_DIR/scripts/check_llm_key.py" "$SELECTED_PROVIDER_ID" "$API_KEY" "$SELECTED_API_BASE" 2>/dev/null) || true
else
HC_RESULT=$(uv run python "$SCRIPT_DIR/scripts/check_llm_key.py" "$SELECTED_PROVIDER_ID" "$API_KEY" 2>/dev/null) || true
@@ -1314,18 +1410,37 @@ fi
if [ -n "$SELECTED_PROVIDER_ID" ]; then
echo ""
echo -n " Saving configuration... "
SAVE_OK=true
if [ "$SUBSCRIPTION_MODE" = "claude_code" ]; then
save_configuration "$SELECTED_PROVIDER_ID" "" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "true" "" > /dev/null
save_configuration "$SELECTED_PROVIDER_ID" "" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "true" "" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "codex" ]; then
save_configuration "$SELECTED_PROVIDER_ID" "" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "" "true" > /dev/null
save_configuration "$SELECTED_PROVIDER_ID" "" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "" "true" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "zai_code" ]; then
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "https://api.z.ai/api/coding/paas/v4" > /dev/null
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "https://api.z.ai/api/coding/paas/v4" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "minimax_code" ]; then
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "$SELECTED_API_BASE" > /dev/null
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "$SELECTED_API_BASE" > /dev/null || SAVE_OK=false
elif [ "$SUBSCRIPTION_MODE" = "kimi_code" ]; then
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "$SELECTED_API_BASE" > /dev/null
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "$SELECTED_API_BASE" > /dev/null || SAVE_OK=false
elif [ "$SELECTED_PROVIDER_ID" = "openrouter" ]; then
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" "" "$SELECTED_API_BASE" > /dev/null || SAVE_OK=false
else
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" > /dev/null
save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" "$SELECTED_MAX_CONTEXT_TOKENS" > /dev/null || SAVE_OK=false
fi
if [ "$SAVE_OK" = false ]; then
echo -e "${RED}failed${NC}"
echo -e "${YELLOW} Could not write ~/.hive/configuration.json. Please rerun quickstart.${NC}"
exit 1
fi
VERIFY_API_BASE=""
if [ "$SUBSCRIPTION_MODE" = "zai_code" ]; then
VERIFY_API_BASE="https://api.z.ai/api/coding/paas/v4"
elif [ "$SUBSCRIPTION_MODE" = "minimax_code" ] || [ "$SUBSCRIPTION_MODE" = "kimi_code" ] || [ "$SELECTED_PROVIDER_ID" = "openrouter" ]; then
VERIFY_API_BASE="${SELECTED_API_BASE:-}"
fi
if ! verify_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$VERIFY_API_BASE"; then
echo -e "${RED}failed${NC}"
echo -e "${YELLOW} Configuration verification failed for ~/.hive/configuration.json.${NC}"
exit 1
fi
echo -e "${GREEN}${NC}"
echo -e " ${DIM}~/.hive/configuration.json${NC}"
@@ -1340,24 +1455,24 @@ echo ""
echo -e "${GREEN}${NC} Browser automation enabled"
# Patch gcu_enabled into configuration.json
if [ -f "$HIVE_CONFIG_FILE" ]; then
uv run python -c "
uv run python -c "
import json
with open('$HIVE_CONFIG_FILE') as f:
config = json.load(f)
from datetime import datetime, timezone
from pathlib import Path
cfg_path = Path.home() / '.hive' / 'configuration.json'
cfg_path.parent.mkdir(parents=True, exist_ok=True)
if cfg_path.exists():
with open(cfg_path, encoding='utf-8-sig') as f:
config = json.load(f)
else:
config = {'created_at': datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S+00:00')}
config['gcu_enabled'] = True
with open('$HIVE_CONFIG_FILE', 'w') as f:
tmp_path = cfg_path.parent / (cfg_path.name + '.tmp')
with open(tmp_path, 'w', encoding='utf-8') as f:
json.dump(config, f, indent=2)
tmp_path.replace(cfg_path)
"
else
mkdir -p "$HIVE_CONFIG_DIR"
uv run python -c "
import json
config = {'gcu_enabled': True, 'created_at': '$(date -u +"%Y-%m-%dT%H:%M:%S+00:00")'}
with open('$HIVE_CONFIG_FILE', 'w') as f:
json.dump(config, f, indent=2)
"
fi
echo ""
@@ -1557,6 +1672,9 @@ if [ -n "$SELECTED_PROVIDER_ID" ]; then
elif [ "$SUBSCRIPTION_MODE" = "minimax_code" ]; then
echo -e " ${GREEN}${NC} MiniMax Coding Key → ${DIM}$SELECTED_MODEL${NC}"
echo -e " ${DIM}API: api.minimax.io/v1 (OpenAI-compatible)${NC}"
elif [ "$SELECTED_PROVIDER_ID" = "openrouter" ]; then
echo -e " ${GREEN}${NC} OpenRouter API Key → ${DIM}$SELECTED_MODEL${NC}"
echo -e " ${DIM}API: openrouter.ai/api/v1 (OpenAI-compatible)${NC}"
else
echo -e " ${CYAN}$SELECTED_PROVIDER_ID${NC}${DIM}$SELECTED_MODEL${NC}"
fi
@@ -1601,40 +1719,26 @@ if [ "$CODEX_AVAILABLE" = true ]; then
echo ""
fi
# Auto-launch dashboard if frontend was built
if [ "$FRONTEND_BUILT" = true ]; then
echo -e "${BOLD}Launching dashboard...${NC}"
echo ""
echo -e " ${DIM}Starting server on http://localhost:8787${NC}"
echo -e " ${DIM}Press Ctrl+C to stop${NC}"
echo ""
echo -e " ${DIM}Tip: You can restart the dashboard anytime with:${NC} ${CYAN}hive open${NC}"
echo ""
# exec replaces the quickstart process with hive open
exec "$SCRIPT_DIR/hive" open
else
# No frontend — show manual instructions
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${BOLD}⚠️ IMPORTANT: Load your new configuration${NC}"
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""
echo -e " Your API keys have been saved to ${CYAN}$SHELL_RC_FILE${NC}"
echo -e " To use them, either:"
echo ""
echo -e " ${GREEN}Option 1:${NC} Source your shell config now:"
echo -e " ${CYAN}source $SHELL_RC_FILE${NC}"
echo ""
echo -e " ${GREEN}Option 2:${NC} Open a new terminal window"
echo ""
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""
# Setup-only mode: quickstart never auto-launches the dashboard.
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${BOLD}IMPORTANT: Load your new configuration${NC}"
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""
echo -e " Your API keys have been saved to ${CYAN}$SHELL_RC_FILE${NC}"
echo -e " To use them, either:"
echo ""
echo -e " ${GREEN}Option 1:${NC} Source your shell config now:"
echo -e " ${CYAN}source $SHELL_RC_FILE${NC}"
echo ""
echo -e " ${GREEN}Option 2:${NC} Open a new terminal window"
echo ""
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""
echo -e "${BOLD}Run an Agent:${NC}"
echo ""
echo -e " Launch the interactive dashboard to browse and run agents:"
echo -e " You can start an example agent or an agent built by yourself:"
echo -e " ${CYAN}hive open${NC}"
echo ""
echo -e "${DIM}Run ./quickstart.sh again to reconfigure.${NC}"
echo ""
fi
echo -e "${BOLD}Run an Agent:${NC}"
echo ""
echo -e " Launch the interactive dashboard when you're ready:"
echo -e " ${CYAN}hive open${NC}"
echo ""
echo -e "${DIM}Run ./quickstart.sh again to reconfigure.${NC}"
echo ""
+19
View File
@@ -56,6 +56,22 @@ def check_openai_compatible(api_key: str, endpoint: str, name: str) -> dict:
return {"valid": False, "message": f"{name} API returned status {r.status_code}"}
def check_openrouter(
api_key: str, api_base: str = "https://openrouter.ai/api/v1", **_: str
) -> dict:
"""Validate OpenRouter key against GET /models."""
endpoint = f"{api_base.rstrip('/')}/models"
with httpx.Client(timeout=TIMEOUT) as client:
r = client.get(endpoint, headers={"Authorization": f"Bearer {api_key}"})
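        # 429 means the request authenticated but was rate limited, so the key itself is valid.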
if r.status_code in (200, 429):
return {"valid": True, "message": "OpenRouter API key valid"}
if r.status_code == 401:
return {"valid": False, "message": "Invalid OpenRouter API key"}
if r.status_code == 403:
return {"valid": False, "message": "OpenRouter API key lacks permissions"}
return {"valid": False, "message": f"OpenRouter API returned status {r.status_code}"}
def check_minimax(
api_key: str, api_base: str = "https://api.minimax.io/v1", **_: str
) -> dict:
@@ -129,6 +145,7 @@ PROVIDERS = {
"cerebras": lambda key, **kw: check_openai_compatible(
key, "https://api.cerebras.ai/v1/models", "Cerebras"
),
"openrouter": lambda key, **kw: check_openrouter(key),
"minimax": lambda key, **kw: check_minimax(key),
# Kimi For Coding uses an Anthropic-compatible endpoint; check via /v1/messages
# with empty messages (same as check_anthropic, triggers 400 not 401).
@@ -157,6 +174,8 @@ def main() -> None:
try:
if api_base and provider_id == "minimax":
result = check_minimax(api_key, api_base)
elif api_base and provider_id == "openrouter":
result = check_openrouter(api_key, api_base)
elif api_base and provider_id == "kimi":
# Kimi uses an Anthropic-compatible endpoint; check via /v1/messages
result = check_anthropic_compatible(