fix(openrouter): harden quickstart setup and model validation

This commit is contained in:
Sundaram Kumar Jha
2026-03-18 10:39:58 +05:30
parent 80dfb429d7
commit 4e2951854b
5 changed files with 418 additions and 194 deletions
+23 -9
View File
@@ -11,7 +11,6 @@ import ast
import asyncio import asyncio
import json import json
import logging import logging
import os
import re import re
import time import time
from collections.abc import AsyncIterator from collections.abc import AsyncIterator
@@ -133,11 +132,6 @@ def _patch_litellm_metadata_nonetype() -> None:
if litellm is not None: if litellm is not None:
_patch_litellm_anthropic_oauth() _patch_litellm_anthropic_oauth()
_patch_litellm_metadata_nonetype() _patch_litellm_metadata_nonetype()
litellm.suppress_debug_info = True
if not os.environ.get("LITELLM_LOG"):
logging.getLogger("LiteLLM").setLevel(logging.WARNING)
logging.getLogger("LiteLLM Router").setLevel(logging.WARNING)
logging.getLogger("LiteLLM Proxy").setLevel(logging.WARNING)
RATE_LIMIT_MAX_RETRIES = 10 RATE_LIMIT_MAX_RETRIES = 10
RATE_LIMIT_BACKOFF_BASE = 2 # seconds RATE_LIMIT_BACKOFF_BASE = 2 # seconds
@@ -181,7 +175,9 @@ OPENROUTER_TOOL_CALL_RE = re.compile(
r"<\|tool_call_start\|>\s*(.*?)\s*<\|tool_call_end\|>", r"<\|tool_call_start\|>\s*(.*?)\s*<\|tool_call_end\|>",
re.DOTALL, re.DOTALL,
) )
OPENROUTER_TOOL_COMPAT_MODEL_CACHE: set[str] = set() OPENROUTER_TOOL_COMPAT_CACHE_TTL_SECONDS = 3600
# OpenRouter routing can change over time, so tool-compat caching must expire.
OPENROUTER_TOOL_COMPAT_MODEL_CACHE: dict[str, float] = {}
# Directory for dumping failed requests # Directory for dumping failed requests
FAILED_REQUESTS_DIR = Path.home() / ".hive" / "failed_requests" FAILED_REQUESTS_DIR = Path.home() / ".hive" / "failed_requests"
@@ -224,6 +220,24 @@ def _prune_failed_request_dumps(max_files: int = MAX_FAILED_REQUEST_DUMPS) -> No
pass # Best-effort — never block the caller pass # Best-effort — never block the caller
def _remember_openrouter_tool_compat_model(model: str) -> None:
    """Record that *model* needs the OpenRouter tool-compat fallback.

    The entry carries an expiry deadline so the model is re-probed after
    OPENROUTER_TOOL_COMPAT_CACHE_TTL_SECONDS, since OpenRouter routing
    can change over time.
    """
    deadline = time.monotonic() + OPENROUTER_TOOL_COMPAT_CACHE_TTL_SECONDS
    OPENROUTER_TOOL_COMPAT_MODEL_CACHE[model] = deadline
def _is_openrouter_tool_compat_cached(model: str) -> bool:
    """Return True while a tool-compat cache entry for *model* is unexpired."""
    deadline = OPENROUTER_TOOL_COMPAT_MODEL_CACHE.get(model)
    if deadline is None:
        return False
    if time.monotonic() < deadline:
        return True
    # Expired: evict the stale entry so the model gets probed again.
    OPENROUTER_TOOL_COMPAT_MODEL_CACHE.pop(model, None)
    return False
def _dump_failed_request( def _dump_failed_request(
model: str, model: str,
kwargs: dict[str, Any], kwargs: dict[str, Any],
@@ -1457,7 +1471,7 @@ class LiteLLMProvider(LLMProvider):
if ( if (
tools tools
and self._is_openrouter_model() and self._is_openrouter_model()
and self.model in OPENROUTER_TOOL_COMPAT_MODEL_CACHE and _is_openrouter_tool_compat_cached(self.model)
): ):
async for event in self._stream_via_openrouter_tool_compat( async for event in self._stream_via_openrouter_tool_compat(
messages=messages, messages=messages,
@@ -1799,7 +1813,7 @@ class LiteLLMProvider(LLMProvider):
except Exception as e: except Exception as e:
if self._should_use_openrouter_tool_compat(e, tools): if self._should_use_openrouter_tool_compat(e, tools):
OPENROUTER_TOOL_COMPAT_MODEL_CACHE.add(self.model) _remember_openrouter_tool_compat_model(self.model)
async for event in self._stream_via_openrouter_tool_compat( async for event in self._stream_via_openrouter_tool_compat(
messages=messages, messages=messages,
system=system, system=system,
+86 -12
View File
@@ -39,7 +39,12 @@ def _run_openrouter_check(monkeypatch, status_code: int):
return result, calls return result, calls
def _run_openrouter_model_check(monkeypatch, status_code: int, payload: dict | None = None): def _run_openrouter_model_check(
monkeypatch,
status_code: int,
payload: dict | None = None,
model: str = "openai/gpt-4o-mini",
):
module = _load_check_llm_key_module() module = _load_check_llm_key_module()
calls = {} calls = {}
@@ -64,14 +69,13 @@ def _run_openrouter_model_check(monkeypatch, status_code: int, payload: dict | N
def __exit__(self, exc_type, exc, tb): def __exit__(self, exc_type, exc, tb):
return False return False
def post(self, endpoint, headers, json): def get(self, endpoint, headers):
calls["endpoint"] = endpoint calls["endpoint"] = endpoint
calls["headers"] = headers calls["headers"] = headers
calls["json"] = json
return FakeResponse(status_code) return FakeResponse(status_code)
monkeypatch.setattr(module.httpx, "Client", FakeClient) monkeypatch.setattr(module.httpx, "Client", FakeClient)
result = module.check_openrouter_model("test-key", "openai/gpt-4o-mini") result = module.check_openrouter_model("test-key", model)
return result, calls return result, calls
@@ -98,18 +102,88 @@ def test_check_openrouter_429(monkeypatch):
def test_check_openrouter_model_200(monkeypatch): def test_check_openrouter_model_200(monkeypatch):
result, calls = _run_openrouter_model_check(monkeypatch, 200) result, calls = _run_openrouter_model_check(
monkeypatch,
200,
{
"data": [
{
"id": "openai/gpt-4o-mini",
"canonical_slug": "openai/gpt-4o-mini",
}
]
},
)
assert result == { assert result == {
"valid": True, "valid": True,
"message": "OpenRouter model is available: openai/gpt-4o-mini", "message": "OpenRouter model is available: openai/gpt-4o-mini",
"model": "openai/gpt-4o-mini",
} }
assert calls["endpoint"] == "https://openrouter.ai/api/v1/chat/completions" assert calls["endpoint"] == "https://openrouter.ai/api/v1/models/user"
assert calls["headers"] == { assert calls["headers"] == {"Authorization": "Bearer test-key"}
"Authorization": "Bearer test-key",
"Content-Type": "application/json",
def test_check_openrouter_model_200_matches_canonical_slug(monkeypatch):
    # Requesting a model by its canonical slug must succeed even when the
    # catalog lists a different primary id for the same entry.
    payload = {
        "data": [
            {
                "id": "mistralai/mistral-small-4",
                "canonical_slug": "mistralai/mistral-small-2603",
            }
        ]
    }
    result, _ = _run_openrouter_model_check(
        monkeypatch, 200, payload, model="mistralai/mistral-small-2603"
    )
    expected = {
        "valid": True,
        "message": "OpenRouter model is available: mistralai/mistral-small-2603",
        "model": "mistralai/mistral-small-2603",
    }
    assert result == expected
def test_check_openrouter_model_200_sanitizes_pasted_unicode(monkeypatch):
    # A pasted id containing a zero-width space (U+200B), non-breaking
    # hyphens (U+2011), and a stray "openrouter/" prefix must still match
    # the plain ASCII catalog slug.
    payload = {
        "data": [
            {
                "id": "z-ai/glm-5-turbo",
                "canonical_slug": "z-ai/glm-5-turbo",
            }
        ]
    }
    result, _ = _run_openrouter_model_check(
        monkeypatch, 200, payload, model="openrouter/z-ai\u200b/glm\u20115\u2011turbo"
    )
    expected = {
        "valid": True,
        "message": "OpenRouter model is available: z-ai/glm-5-turbo",
        "model": "z-ai/glm-5-turbo",
    }
    assert result == expected
def test_check_openrouter_model_200_not_found_with_suggestions(monkeypatch):
    # A near-miss id is rejected, and the closest catalog entry is offered
    # as a suggestion in the error message.
    payload = {"data": [{"id": "z-ai/glm-5-turbo"}, {"id": "z-ai/glm-4.6v"}]}
    result, _ = _run_openrouter_model_check(
        monkeypatch, 200, payload, model="z-ai/glm-5-turb"
    )
    assert result == {
        "valid": False,
        "message": (
            "OpenRouter model is not available for this key/settings: z-ai/glm-5-turb. "
            "Closest matches: z-ai/glm-5-turbo"
        ),
    }
assert calls["json"]["model"] == "openai/gpt-4o-mini"
assert calls["json"]["max_tokens"] == 1
def test_check_openrouter_model_404_with_error_message(monkeypatch): def test_check_openrouter_model_404_with_error_message(monkeypatch):
@@ -121,7 +195,7 @@ def test_check_openrouter_model_404_with_error_message(monkeypatch):
assert result == { assert result == {
"valid": False, "valid": False,
"message": ( "message": (
"OpenRouter model is not available: openai/gpt-4o-mini. " "OpenRouter model is not available for this key/settings: openai/gpt-4o-mini. "
"No endpoints available for this model" "No endpoints available for this model"
), ),
} }
+63 -57
View File
@@ -867,6 +867,9 @@ function Get-ModelSelection {
$hcResult = & uv run python (Join-Path $ScriptDir "scripts/check_llm_key.py") "openrouter" $openrouterKey $modelApiBase $normalizedModel 2>$null $hcResult = & uv run python (Join-Path $ScriptDir "scripts/check_llm_key.py") "openrouter" $openrouterKey $modelApiBase $normalizedModel 2>$null
$hcJson = $hcResult | ConvertFrom-Json $hcJson = $hcResult | ConvertFrom-Json
if ($hcJson.valid -eq $true) { if ($hcJson.valid -eq $true) {
if ($hcJson.model) {
$normalizedModel = [string]$hcJson.model
}
Write-Color -Text "ok" -Color Green Write-Color -Text "ok" -Color Green
} elseif ($hcJson.valid -eq $false) { } elseif ($hcJson.valid -eq $false) {
Write-Color -Text "failed" -Color Red Write-Color -Text "failed" -Color Red
@@ -1575,37 +1578,6 @@ if ($SelectedProviderId) {
} }
$config | ConvertTo-Json -Depth 4 | Set-Content -Path $HiveConfigFile -Encoding UTF8 $config | ConvertTo-Json -Depth 4 | Set-Content -Path $HiveConfigFile -Encoding UTF8
$expectedApiBase = ""
if ($SubscriptionMode -eq "zai_code") {
$expectedApiBase = "https://api.z.ai/api/coding/paas/v4"
} elseif ($SubscriptionMode -eq "kimi_code") {
$expectedApiBase = "https://api.kimi.com/coding"
} elseif ($SelectedProviderId -eq "openrouter") {
$expectedApiBase = "https://openrouter.ai/api/v1"
}
try {
$savedConfig = Get-Content -Path $HiveConfigFile -Raw | ConvertFrom-Json
$savedLlm = $savedConfig.llm
$verifyOk = $savedLlm -and $savedLlm.provider -eq $SelectedProviderId -and $savedLlm.model -eq $SelectedModel
if ($SelectedEnvVar) {
$verifyOk = $verifyOk -and $savedLlm.api_key_env_var -eq $SelectedEnvVar
}
if ($expectedApiBase) {
$verifyOk = $verifyOk -and $savedLlm.api_base -eq $expectedApiBase
}
if (-not $verifyOk) {
throw "Saved configuration mismatch"
}
} catch {
Write-Fail "configuration verification failed"
Write-Color -Text " Could not persist ~/.hive/configuration.json with the selected LLM settings." -Color Yellow
exit 1
}
Write-Ok "done" Write-Ok "done"
Write-Color -Text " ~/.hive/configuration.json" -Color DarkGray Write-Color -Text " ~/.hive/configuration.json" -Color DarkGray
} }
@@ -1933,35 +1905,69 @@ if ($CodexAvailable) {
Write-Host "" Write-Host ""
} }
# Setup-only mode: quickstart never auto-launches the dashboard. # Setup-only mode: show manual instructions
Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow if ($FrontendBuilt) {
Write-Host "" Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow
Write-Color -Text " IMPORTANT: Restart your terminal now!" -Color Yellow Write-Host ""
Write-Host "" Write-Color -Text " IMPORTANT: Restart your terminal now!" -Color Yellow
Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow Write-Host ""
Write-Host "" Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow
Write-Host 'Environment variables (uv, API keys) are now configured, but you need to' Write-Host ""
Write-Host 'restart your terminal for them to take effect in new sessions.' Write-Host 'Environment variables (uv, API keys) are now configured, but you need to'
Write-Host "" Write-Host 'restart your terminal for them to take effect in new sessions.'
Write-Host ""
Write-Color -Text "Run an Agent:" -Color White Write-Color -Text "Run an Agent:" -Color White
Write-Host "" Write-Host ""
Write-Host " Launch the interactive dashboard when you're ready:" Write-Host " Quickstart only sets things up. Launch the dashboard when you're ready:"
Write-Color -Text " hive open" -Color Cyan Write-Color -Text " hive open" -Color Cyan
Write-Host "" Write-Host ""
if ($SelectedProviderId -or $credKey) { if ($SelectedProviderId -or $credKey) {
Write-Color -Text "Note:" -Color White Write-Color -Text "Note:" -Color White
Write-Host "- uv has been added to your User PATH" Write-Host "- uv has been added to your User PATH"
if ($SelectedProviderId -and $SelectedEnvVar) { if ($SelectedProviderId -and $SelectedEnvVar) {
Write-Host "- $SelectedEnvVar is set for LLM access" Write-Host "- $SelectedEnvVar is set for LLM access"
}
if ($credKey) {
Write-Host "- HIVE_CREDENTIAL_KEY is set for credential encryption"
}
Write-Host "- All variables will persist across reboots"
Write-Host ""
} }
if ($credKey) {
Write-Host "- HIVE_CREDENTIAL_KEY is set for credential encryption" Write-Color -Text 'Run .\quickstart.ps1 again to reconfigure.' -Color DarkGray
Write-Host ""
} else {
Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow
Write-Host ""
Write-Color -Text " IMPORTANT: Restart your terminal now!" -Color Yellow
Write-Host ""
Write-Color -Text "═══════════════════════════════════════════════════════" -Color Yellow
Write-Host ""
Write-Host 'Environment variables (uv, API keys) are now configured, but you need to'
Write-Host 'restart your terminal for them to take effect in new sessions.'
Write-Host ""
Write-Color -Text "Run an Agent:" -Color White
Write-Host ""
Write-Host " Frontend build was skipped or failed. Once the dashboard is available, launch it with:"
Write-Color -Text " hive open" -Color Cyan
Write-Host ""
if ($SelectedProviderId -or $credKey) {
Write-Color -Text "Note:" -Color White
Write-Host "- uv has been added to your User PATH"
if ($SelectedProviderId -and $SelectedEnvVar) {
Write-Host "- $SelectedEnvVar is set for LLM access"
}
if ($credKey) {
Write-Host "- HIVE_CREDENTIAL_KEY is set for credential encryption"
}
Write-Host "- All variables will persist across reboots"
Write-Host ""
} }
Write-Host "- All variables will persist across reboots"
Write-Color -Text 'Run .\quickstart.ps1 again to reconfigure.' -Color DarkGray
Write-Host "" Write-Host ""
} }
Write-Color -Text 'Run .\quickstart.ps1 again to reconfigure.' -Color DarkGray
Write-Host ""
+131 -102
View File
@@ -46,7 +46,6 @@ prompt_yes_no() {
else else
prompt="$prompt [y/N] " prompt="$prompt [y/N] "
fi fi
read -r -p "$prompt" response read -r -p "$prompt" response
response="${response:-$default}" response="${response:-$default}"
[[ "$response" =~ ^[Yy] ]] [[ "$response" =~ ^[Yy] ]]
@@ -741,12 +740,17 @@ prompt_model_selection() {
local model_hc_result="" local model_hc_result=""
local model_hc_valid="" local model_hc_valid=""
local model_hc_msg="" local model_hc_msg=""
local model_hc_canonical=""
local model_hc_base="${SELECTED_API_BASE:-https://openrouter.ai/api/v1}" local model_hc_base="${SELECTED_API_BASE:-https://openrouter.ai/api/v1}"
echo -n " Verifying model id... " echo -n " Verifying model id... "
model_hc_result="$(uv run python "$SCRIPT_DIR/scripts/check_llm_key.py" "openrouter" "$openrouter_key" "$model_hc_base" "$normalized_model" 2>/dev/null)" || true model_hc_result="$(uv run python "$SCRIPT_DIR/scripts/check_llm_key.py" "openrouter" "$openrouter_key" "$model_hc_base" "$normalized_model" 2>/dev/null)" || true
model_hc_valid="$(echo "$model_hc_result" | $PYTHON_CMD -c "import json,sys; print(json.loads(sys.stdin.read()).get('valid',''))" 2>/dev/null)" || true model_hc_valid="$(echo "$model_hc_result" | $PYTHON_CMD -c "import json,sys; print(json.loads(sys.stdin.read()).get('valid',''))" 2>/dev/null)" || true
model_hc_msg="$(echo "$model_hc_result" | $PYTHON_CMD -c "import json,sys; print(json.loads(sys.stdin.read()).get('message',''))" 2>/dev/null)" || true model_hc_msg="$(echo "$model_hc_result" | $PYTHON_CMD -c "import json,sys; print(json.loads(sys.stdin.read()).get('message',''))" 2>/dev/null)" || true
model_hc_canonical="$(echo "$model_hc_result" | $PYTHON_CMD -c "import json,sys; print(json.loads(sys.stdin.read()).get('model',''))" 2>/dev/null)" || true
if [ "$model_hc_valid" = "True" ]; then if [ "$model_hc_valid" = "True" ]; then
if [ -n "$model_hc_canonical" ]; then
normalized_model="$model_hc_canonical"
fi
echo -e "${GREEN}ok${NC}" echo -e "${GREEN}ok${NC}"
elif [ "$model_hc_valid" = "False" ]; then elif [ "$model_hc_valid" = "False" ]; then
echo -e "${RED}failed${NC}" echo -e "${RED}failed${NC}"
@@ -865,70 +869,73 @@ save_configuration() {
max_context_tokens=120000 max_context_tokens=120000
fi fi
uv run python -c " uv run python - \
import json "$provider_id" \
from datetime import datetime, timezone "$env_var" \
from pathlib import Path "$model" \
"$max_tokens" \
cfg_path = Path.home() / '.hive' / 'configuration.json' "$max_context_tokens" \
cfg_path.parent.mkdir(parents=True, exist_ok=True) "$use_claude_code_sub" \
"$api_base" \
config = { "$use_codex_sub" \
'llm': { "$(date -u +"%Y-%m-%dT%H:%M:%S+00:00")" 2>/dev/null <<'PY'
'provider': '$provider_id',
'model': '$model',
'max_tokens': $max_tokens,
'max_context_tokens': $max_context_tokens,
'api_key_env_var': '$env_var'
},
'created_at': datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S+00:00')
}
if '$use_claude_code_sub' == 'true':
config['llm']['use_claude_code_subscription'] = True
# No api_key_env_var needed for Claude Code subscription
config['llm'].pop('api_key_env_var', None)
if '$use_codex_sub' == 'true':
config['llm']['use_codex_subscription'] = True
# No api_key_env_var needed for Codex subscription
config['llm'].pop('api_key_env_var', None)
if '$api_base':
config['llm']['api_base'] = '$api_base'
tmp_path = cfg_path.parent / (cfg_path.name + '.tmp')
with open(tmp_path, 'w', encoding='utf-8') as f:
json.dump(config, f, indent=2)
tmp_path.replace(cfg_path)
print(json.dumps(config, indent=2))
"
}
# Verify that configuration was persisted exactly as selected.
# Args: provider_id env_var model [api_base]
verify_configuration() {
local provider_id="$1"
local env_var="$2"
local model="$3"
local api_base="${4:-}"
uv run python -c "
import json import json
import sys import sys
from pathlib import Path from pathlib import Path
cfg_path = Path.home() / '.hive' / 'configuration.json' (
with open(cfg_path, encoding='utf-8-sig') as f: provider_id,
cfg = json.load(f) env_var,
llm = cfg.get('llm', {}) model,
max_tokens,
max_context_tokens,
use_claude_code_sub,
api_base,
use_codex_sub,
created_at,
) = sys.argv[1:10]
ok = (llm.get('provider') == '$provider_id' and llm.get('model') == '$model') cfg_path = Path.home() / ".hive" / "configuration.json"
if '$env_var': cfg_path.parent.mkdir(parents=True, exist_ok=True)
ok = ok and (llm.get('api_key_env_var') == '$env_var')
if '$api_base':
ok = ok and (llm.get('api_base') == '$api_base')
if not ok: try:
print(json.dumps(llm, indent=2)) with open(cfg_path, encoding="utf-8-sig") as f:
sys.exit(1) config = json.load(f)
" except (OSError, json.JSONDecodeError):
config = {}
config["llm"] = {
"provider": provider_id,
"model": model,
"max_tokens": int(max_tokens),
"max_context_tokens": int(max_context_tokens),
"api_key_env_var": env_var,
}
config["created_at"] = created_at
if use_claude_code_sub == "true":
config["llm"]["use_claude_code_subscription"] = True
config["llm"].pop("api_key_env_var", None)
else:
config["llm"].pop("use_claude_code_subscription", None)
if use_codex_sub == "true":
config["llm"]["use_codex_subscription"] = True
config["llm"].pop("api_key_env_var", None)
else:
config["llm"].pop("use_codex_subscription", None)
if api_base:
config["llm"]["api_base"] = api_base
else:
config["llm"].pop("api_base", None)
tmp_path = cfg_path.with_name(cfg_path.name + ".tmp")
with open(tmp_path, "w", encoding="utf-8") as f:
json.dump(config, f, indent=2)
tmp_path.replace(cfg_path)
print(json.dumps(config, indent=2))
PY
} }
# Source shell rc file to pick up existing env vars (temporarily disable set -e) # Source shell rc file to pick up existing env vars (temporarily disable set -e)
@@ -1009,28 +1016,36 @@ PREV_MODEL=""
PREV_ENV_VAR="" PREV_ENV_VAR=""
PREV_SUB_MODE="" PREV_SUB_MODE=""
if [ -f "$HIVE_CONFIG_FILE" ]; then if [ -f "$HIVE_CONFIG_FILE" ]; then
eval "$(uv run python -c " eval "$(uv run python - 2>/dev/null <<'PY'
import json, sys import json
from pathlib import Path from pathlib import Path
cfg_path = Path.home() / ".hive" / "configuration.json"
try: try:
cfg_path = Path.home() / '.hive' / 'configuration.json' with open(cfg_path, encoding="utf-8-sig") as f:
with open(cfg_path, encoding='utf-8-sig') as f:
c = json.load(f) c = json.load(f)
llm = c.get('llm', {}) llm = c.get("llm", {})
print(f'PREV_PROVIDER={llm.get(\"provider\", \"\")}') print(f"PREV_PROVIDER={llm.get(\"provider\", \"\")}")
print(f'PREV_MODEL={llm.get(\"model\", \"\")}') print(f"PREV_MODEL={llm.get(\"model\", \"\")}")
print(f'PREV_ENV_VAR={llm.get(\"api_key_env_var\", \"\")}') print(f"PREV_ENV_VAR={llm.get(\"api_key_env_var\", \"\")}")
sub = '' sub = ""
if llm.get('use_claude_code_subscription'): sub = 'claude_code' if llm.get("use_claude_code_subscription"):
elif llm.get('use_codex_subscription'): sub = 'codex' sub = "claude_code"
elif llm.get('use_kimi_code_subscription'): sub = 'kimi_code' elif llm.get("use_codex_subscription"):
elif llm.get('provider', '') == 'minimax' or 'api.minimax.io' in llm.get('api_base', ''): sub = 'minimax_code' sub = "codex"
elif llm.get('provider', '') == 'hive' or 'adenhq.com' in llm.get('api_base', ''): sub = 'hive_llm' elif llm.get("use_kimi_code_subscription"):
elif 'api.z.ai' in llm.get('api_base', ''): sub = 'zai_code' sub = "kimi_code"
print(f'PREV_SUB_MODE={sub}') elif llm.get("provider", "") == "minimax" or "api.minimax.io" in llm.get("api_base", ""):
sub = "minimax_code"
elif llm.get("provider", "") == "hive" or "adenhq.com" in llm.get("api_base", ""):
sub = "hive_llm"
elif "api.z.ai" in llm.get("api_base", ""):
sub = "zai_code"
print(f"PREV_SUB_MODE={sub}")
except Exception: except Exception:
pass pass
" 2>/dev/null)" || true PY
)" || true
fi fi
# Compute default menu number from previous config (only if credential is still valid) # Compute default menu number from previous config (only if credential is still valid)
@@ -1494,17 +1509,6 @@ if [ -n "$SELECTED_PROVIDER_ID" ]; then
echo -e "${YELLOW} Could not write ~/.hive/configuration.json. Please rerun quickstart.${NC}" echo -e "${YELLOW} Could not write ~/.hive/configuration.json. Please rerun quickstart.${NC}"
exit 1 exit 1
fi fi
VERIFY_API_BASE=""
if [ "$SUBSCRIPTION_MODE" = "zai_code" ]; then
VERIFY_API_BASE="https://api.z.ai/api/coding/paas/v4"
elif [ "$SUBSCRIPTION_MODE" = "minimax_code" ] || [ "$SUBSCRIPTION_MODE" = "kimi_code" ] || [ "$SELECTED_PROVIDER_ID" = "openrouter" ]; then
VERIFY_API_BASE="${SELECTED_API_BASE:-}"
fi
if ! verify_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$VERIFY_API_BASE"; then
echo -e "${RED}failed${NC}"
echo -e "${YELLOW} Configuration verification failed for ~/.hive/configuration.json.${NC}"
exit 1
fi
echo -e "${GREEN}${NC}" echo -e "${GREEN}${NC}"
echo -e " ${DIM}~/.hive/configuration.json${NC}" echo -e " ${DIM}~/.hive/configuration.json${NC}"
fi fi
@@ -1518,24 +1522,46 @@ echo ""
echo -e "${GREEN}${NC} Browser automation enabled" echo -e "${GREEN}${NC} Browser automation enabled"
# Patch gcu_enabled into configuration.json # Patch gcu_enabled into configuration.json
uv run python -c " if [ -f "$HIVE_CONFIG_FILE" ]; then
if ! uv run python - <<'PY'
import json import json
from datetime import datetime, timezone
from pathlib import Path from pathlib import Path
cfg_path = Path.home() / '.hive' / 'configuration.json' cfg_path = Path.home() / ".hive" / "configuration.json"
cfg_path.parent.mkdir(parents=True, exist_ok=True) with open(cfg_path, encoding="utf-8-sig") as f:
if cfg_path.exists(): config = json.load(f)
with open(cfg_path, encoding='utf-8-sig') as f: config["gcu_enabled"] = True
config = json.load(f) tmp_path = cfg_path.with_name(cfg_path.name + ".tmp")
else: with open(tmp_path, "w", encoding="utf-8") as f:
config = {'created_at': datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S+00:00')}
config['gcu_enabled'] = True
tmp_path = cfg_path.parent / (cfg_path.name + '.tmp')
with open(tmp_path, 'w', encoding='utf-8') as f:
json.dump(config, f, indent=2) json.dump(config, f, indent=2)
tmp_path.replace(cfg_path) tmp_path.replace(cfg_path)
" PY
then
echo -e "${RED}failed${NC}"
echo -e "${YELLOW} Could not update ~/.hive/configuration.json with browser automation settings.${NC}"
exit 1
fi
else
if ! uv run python - "$(date -u +"%Y-%m-%dT%H:%M:%S+00:00")" <<'PY'
import json
import sys
from pathlib import Path
cfg_path = Path.home() / ".hive" / "configuration.json"
cfg_path.parent.mkdir(parents=True, exist_ok=True)
config = {
"gcu_enabled": True,
"created_at": sys.argv[1],
}
with open(cfg_path, "w", encoding="utf-8") as f:
json.dump(config, f, indent=2)
PY
then
echo -e "${RED}failed${NC}"
echo -e "${YELLOW} Could not create ~/.hive/configuration.json for browser automation settings.${NC}"
exit 1
fi
fi
echo "" echo ""
@@ -1782,7 +1808,6 @@ if [ "$CODEX_AVAILABLE" = true ]; then
echo "" echo ""
fi fi
# Setup-only mode: quickstart never auto-launches the dashboard.
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${BOLD}IMPORTANT: Load your new configuration${NC}" echo -e "${BOLD}IMPORTANT: Load your new configuration${NC}"
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
@@ -1800,7 +1825,11 @@ echo ""
echo -e "${BOLD}Run an Agent:${NC}" echo -e "${BOLD}Run an Agent:${NC}"
echo "" echo ""
echo -e " Launch the interactive dashboard when you're ready:" if [ "$FRONTEND_BUILT" = true ]; then
echo -e " Quickstart only sets things up. Launch the dashboard when you're ready:"
else
echo -e " Frontend build was skipped or failed. Once the dashboard is available, launch it with:"
fi
echo -e " ${CYAN}hive open${NC}" echo -e " ${CYAN}hive open${NC}"
echo "" echo ""
echo -e "${DIM}Run ./quickstart.sh again to reconfigure.${NC}" echo -e "${DIM}Run ./quickstart.sh again to reconfigure.${NC}"
+115 -14
View File
@@ -12,13 +12,31 @@ Output: single JSON line {"valid": bool, "message": str}
""" """
import json import json
import re
import sys import sys
import unicodedata
from difflib import get_close_matches
import httpx import httpx
from framework.config import HIVE_LLM_ENDPOINT from framework.config import HIVE_LLM_ENDPOINT
TIMEOUT = 10.0 TIMEOUT = 10.0
# Translation table mapping Unicode look-alike separator characters — hyphen
# and slash variants that commonly survive copy-paste from docs or chat
# clients — to their ASCII equivalents. Used via str.translate() when
# sanitizing pasted OpenRouter model IDs.
OPENROUTER_SEPARATOR_TRANSLATION = str.maketrans(
    {
        "\u2010": "-",  # hyphen
        "\u2011": "-",  # non-breaking hyphen
        "\u2012": "-",  # figure dash
        "\u2013": "-",  # en dash
        "\u2014": "-",  # em dash
        "\u2015": "-",  # horizontal bar
        "\u2212": "-",  # minus sign
        "\u2044": "/",  # fraction slash
        "\u2215": "/",  # division slash
        "\u29F8": "/",  # big solidus
        "\uFF0F": "/",  # fullwidth solidus
    }
)
def _extract_error_message(response: httpx.Response) -> str: def _extract_error_message(response: httpx.Response) -> str:
@@ -44,6 +62,77 @@ def _extract_error_message(response: httpx.Response) -> str:
return "" return ""
def _sanitize_openrouter_model_id(value: str) -> str:
    """Sanitize pasted OpenRouter model IDs into a comparable slug."""
    cleaned = unicodedata.normalize("NFKC", value or "")
    # Drop invisible control (Cc) and format (Cf) characters — e.g. the
    # zero-width spaces that survive copy-paste from rendered pages.
    visible = [
        ch for ch in cleaned if unicodedata.category(ch) not in {"Cc", "Cf"}
    ]
    # Map Unicode hyphen/slash look-alikes to ASCII, then strip whitespace.
    cleaned = "".join(visible).translate(OPENROUTER_SEPARATOR_TRANSLATION)
    cleaned = re.sub(r"\s+", "", cleaned)
    # Tolerate a pasted provider prefix such as "openrouter/vendor/model".
    if cleaned.casefold().startswith("openrouter/"):
        cleaned = cleaned.split("/", 1)[1]
    return cleaned
def _normalize_openrouter_model_id(value: str) -> str:
    """Normalize OpenRouter model IDs for exact/alias matching."""
    sanitized = _sanitize_openrouter_model_id(value)
    return sanitized.casefold()
def _extract_openrouter_model_lookup(payload: object) -> dict[str, str]:
    """Map normalized model IDs/aliases to a preferred canonical display slug.

    Args:
        payload: Parsed JSON body of the OpenRouter models endpoint; expected
            to be a dict with a ``data`` list of model entries.

    Returns:
        Mapping from each normalized id/alias to the display slug that should
        be shown to the user. Returns an empty dict for any malformed payload.
    """
    if not isinstance(payload, dict):
        return {}
    data = payload.get("data")
    if not isinstance(data, list):
        return {}
    lookup: dict[str, str] = {}
    for item in data:
        if not isinstance(item, dict):
            continue
        # Sanitize each raw value exactly once (previously it was sanitized
        # twice: once in the comprehension filter and again for the value).
        candidates: list[str] = []
        for raw in (item.get("id"), item.get("canonical_slug")):
            if isinstance(raw, str):
                slug = _sanitize_openrouter_model_id(raw)
                if slug:
                    candidates.append(slug)
        if not candidates:
            continue
        # Prefer the canonical slug (listed last) as the display form; fall
        # back to the primary id when no canonical slug is present.
        preferred_slug = candidates[-1]
        for candidate in candidates:
            lookup[_normalize_openrouter_model_id(candidate)] = preferred_slug
    return lookup
def _format_openrouter_model_unavailable_message(
    model: str, available_model_lookup: dict[str, str]
) -> str:
    """Return a helpful not-found message with close-match suggestions."""
    matches = get_close_matches(
        _normalize_openrouter_model_id(model),
        list(available_model_lookup),
        n=1,
        cutoff=0.6,
    )
    message = f"OpenRouter model is not available for this key/settings: {model}"
    if matches:
        # Resolve normalized keys back to their display slugs.
        display = ", ".join(available_model_lookup[key] for key in matches)
        message = f"{message}. Closest matches: {display}"
    return message
def check_anthropic(api_key: str, **_: str) -> dict: def check_anthropic(api_key: str, **_: str) -> dict:
"""Send empty messages to trigger 400 without consuming tokens.""" """Send empty messages to trigger 400 without consuming tokens."""
with httpx.Client(timeout=TIMEOUT) as client: with httpx.Client(timeout=TIMEOUT) as client:
@@ -103,23 +192,32 @@ def check_openrouter_model(
api_base: str = "https://openrouter.ai/api/v1", api_base: str = "https://openrouter.ai/api/v1",
**_: str, **_: str,
) -> dict: ) -> dict:
"""Validate that an OpenRouter model ID is routable with this key.""" """Validate that an OpenRouter model ID is available to this key/settings."""
endpoint = f"{api_base.rstrip('/')}/chat/completions" requested_model = _sanitize_openrouter_model_id(model)
endpoint = f"{api_base.rstrip('/')}/models/user"
with httpx.Client(timeout=TIMEOUT) as client: with httpx.Client(timeout=TIMEOUT) as client:
r = client.post( r = client.get(
endpoint, endpoint,
headers={ headers={"Authorization": f"Bearer {api_key}"},
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
},
json={
"model": model,
"messages": [{"role": "user", "content": "Hello, are you working?"}],
"max_tokens": 1,
},
) )
if r.status_code == 200: if r.status_code == 200:
return {"valid": True, "message": f"OpenRouter model is available: {model}"} available_model_lookup = _extract_openrouter_model_lookup(r.json())
matched_model = available_model_lookup.get(
_normalize_openrouter_model_id(requested_model)
)
if matched_model:
return {
"valid": True,
"message": f"OpenRouter model is available: {matched_model}",
"model": matched_model,
}
return {
"valid": False,
"message": _format_openrouter_model_unavailable_message(
requested_model, available_model_lookup
),
}
if r.status_code == 429: if r.status_code == 429:
return { return {
"valid": True, "valid": True,
@@ -132,7 +230,10 @@ def check_openrouter_model(
detail = _extract_error_message(r) detail = _extract_error_message(r)
if r.status_code in (400, 404, 422): if r.status_code in (400, 404, 422):
base = f"OpenRouter model is not available: {model}" base = (
"OpenRouter model is not available for this key/settings: "
f"{requested_model}"
)
return {"valid": False, "message": f"{base}. {detail}" if detail else base} return {"valid": False, "message": f"{base}. {detail}" if detail else base}
suffix = f": {detail}" if detail else "" suffix = f": {detail}" if detail else ""