Merge pull request #4304 from adenhq/fix/init-config

model selection + max_tokens in quickstart
Timothy @aden
2026-02-09 20:11:55 -08:00
committed by GitHub
10 changed files with 373 additions and 151 deletions
@@ -1,33 +1,8 @@
"""Runtime configuration.""" """Runtime configuration."""
import json from dataclasses import dataclass
from dataclasses import dataclass, field
from pathlib import Path
def _load_preferred_model() -> str:
"""Load preferred model from ~/.hive/configuration.json."""
config_path = Path.home() / ".hive" / "configuration.json"
if config_path.exists():
try:
with open(config_path) as f:
config = json.load(f)
llm = config.get("llm", {})
if llm.get("provider") and llm.get("model"):
return f"{llm['provider']}/{llm['model']}"
except Exception:
pass
return "anthropic/claude-sonnet-4-20250514"
@dataclass
class RuntimeConfig:
model: str = field(default_factory=_load_preferred_model)
temperature: float = 0.7
max_tokens: int = 40000
api_key: str | None = None
api_base: str | None = None
from framework.config import RuntimeConfig
default_config = RuntimeConfig() default_config = RuntimeConfig()
+64
@@ -0,0 +1,64 @@
"""Shared Hive configuration utilities.
Centralises reading of ~/.hive/configuration.json so that the runner
and every agent template share one implementation instead of copy-pasting
helper functions.
"""
import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any
from framework.graph.edge import DEFAULT_MAX_TOKENS
# ---------------------------------------------------------------------------
# Low-level config file access
# ---------------------------------------------------------------------------
HIVE_CONFIG_FILE = Path.home() / ".hive" / "configuration.json"
def get_hive_config() -> dict[str, Any]:
"""Load hive configuration from ~/.hive/configuration.json."""
if not HIVE_CONFIG_FILE.exists():
return {}
try:
with open(HIVE_CONFIG_FILE) as f:
return json.load(f)
except (json.JSONDecodeError, OSError):
return {}
# ---------------------------------------------------------------------------
# Derived helpers
# ---------------------------------------------------------------------------
def get_preferred_model() -> str:
"""Return the user's preferred LLM model string (e.g. 'anthropic/claude-sonnet-4-20250514')."""
llm = get_hive_config().get("llm", {})
if llm.get("provider") and llm.get("model"):
return f"{llm['provider']}/{llm['model']}"
return "anthropic/claude-sonnet-4-20250514"
def get_max_tokens() -> int:
"""Return the configured max_tokens, falling back to DEFAULT_MAX_TOKENS."""
return get_hive_config().get("llm", {}).get("max_tokens", DEFAULT_MAX_TOKENS)
# ---------------------------------------------------------------------------
# RuntimeConfig shared across agent templates
# ---------------------------------------------------------------------------
@dataclass
class RuntimeConfig:
"""Agent runtime configuration loaded from ~/.hive/configuration.json."""
model: str = field(default_factory=get_preferred_model)
temperature: float = 0.7
max_tokens: int = field(default_factory=get_max_tokens)
api_key: str | None = None
api_base: str | None = None
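For orientation, a short sketch of how the new shared module is meant to be consumed, using only the names defined above (the override values are illustrative):

# Illustrative usage of the shared config module (values are made up):
from framework.config import RuntimeConfig, get_max_tokens, get_preferred_model

cfg = RuntimeConfig()    # defaults resolved from ~/.hive/configuration.json
print(cfg.model)         # e.g. "anthropic/claude-sonnet-4-20250514" if unset
print(cfg.max_tokens)    # llm.max_tokens from the file, else DEFAULT_MAX_TOKENS

# Explicit arguments still override the file-based defaults:
fast = RuntimeConfig(model="anthropic/claude-haiku-4-5-20251001", max_tokens=1024)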
+2 -1
@@ -9,7 +9,7 @@ from framework.graph.client_io import (
 from framework.graph.code_sandbox import CodeSandbox, safe_eval, safe_exec
 from framework.graph.context_handoff import ContextHandoff, HandoffContext
 from framework.graph.conversation import ConversationStore, Message, NodeConversation
-from framework.graph.edge import EdgeCondition, EdgeSpec, GraphSpec
+from framework.graph.edge import DEFAULT_MAX_TOKENS, EdgeCondition, EdgeSpec, GraphSpec
 from framework.graph.event_loop_node import (
     EventLoopNode,
     JudgeProtocol,
@@ -58,6 +58,7 @@ __all__ = [
     "EdgeSpec",
     "EdgeCondition",
     "GraphSpec",
+    "DEFAULT_MAX_TOKENS",
     # Executor (fixed graph)
     "GraphExecutor",
     # Plan (flexible execution)
+14 -2
@@ -24,10 +24,12 @@ given the current goal, context, and execution state.
 from enum import StrEnum
 from typing import Any
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, model_validator
 
 from framework.graph.safe_eval import safe_eval
 
+DEFAULT_MAX_TOKENS = 8192
+
 
 class EdgeCondition(StrEnum):
     """When an edge should be traversed."""
@@ -424,7 +426,7 @@ class GraphSpec(BaseModel):
     # Default LLM settings
     default_model: str = "claude-haiku-4-5-20251001"
-    max_tokens: int = 1024
+    max_tokens: int = Field(default=None)  # resolved by _resolve_max_tokens validator
 
     # Cleanup LLM for JSON extraction fallback (fast/cheap model preferred)
     # If not set, uses CEREBRAS_API_KEY -> cerebras/llama-3.3-70b or
@@ -447,6 +449,16 @@ class GraphSpec(BaseModel):
     model_config = {"extra": "allow"}
 
+    @model_validator(mode="before")
+    @classmethod
+    def _resolve_max_tokens(cls, values: Any) -> Any:
+        """Resolve max_tokens from the global config store when not explicitly set."""
+        if isinstance(values, dict) and values.get("max_tokens") is None:
+            from framework.config import get_max_tokens
+
+            values["max_tokens"] = get_max_tokens()
+        return values
+
     def get_node(self, node_id: str) -> Any | None:
         """Get a node by ID."""
         for node in self.nodes:
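The `Field(default=None)` plus `mode="before"` validator pattern is hard to read in diff form. Here is a self-contained sketch of the same mechanics; every name below is illustrative, and the real validator calls `framework.config.get_max_tokens()` instead of a constant:

from typing import Any
from pydantic import BaseModel, Field, model_validator

FALLBACK = 8192  # stand-in for DEFAULT_MAX_TOKENS / get_max_tokens()

class Spec(BaseModel):
    max_tokens: int = Field(default=None)

    @model_validator(mode="before")
    @classmethod
    def _resolve(cls, values: Any) -> Any:
        # Runs before field validation, so it can replace the None default.
        if isinstance(values, dict) and values.get("max_tokens") is None:
            values["max_tokens"] = FALLBACK
        return values

assert Spec().max_tokens == 8192                 # unset -> resolved from config
assert Spec(max_tokens=2048).max_tokens == 2048  # explicit value wins

The lazy `from framework.config import get_max_tokens` inside the validator avoids a circular import, since `framework.config` itself imports `DEFAULT_MAX_TOKENS` from this module.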
+14 -21
@@ -8,8 +8,15 @@ from dataclasses import dataclass, field
 from pathlib import Path
 from typing import TYPE_CHECKING, Any
 
+from framework.config import get_hive_config, get_preferred_model
 from framework.graph import Goal
-from framework.graph.edge import AsyncEntryPointSpec, EdgeCondition, EdgeSpec, GraphSpec
+from framework.graph.edge import (
+    DEFAULT_MAX_TOKENS,
+    AsyncEntryPointSpec,
+    EdgeCondition,
+    EdgeSpec,
+    GraphSpec,
+)
 from framework.graph.executor import ExecutionResult, GraphExecutor
 from framework.graph.node import NodeSpec
 from framework.llm.provider import LLMProvider, Tool
@@ -28,9 +35,6 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
-# Configuration paths
-HIVE_CONFIG_FILE = Path.home() / ".hive" / "configuration.json"
-
 
 def _ensure_credential_key_env() -> None:
     """Load HIVE_CREDENTIAL_KEY from shell config if not already in environment.
@@ -60,17 +64,6 @@ def _ensure_credential_key_env() -> None:
 CLAUDE_CREDENTIALS_FILE = Path.home() / ".claude" / ".credentials.json"
 
 
-def get_hive_config() -> dict[str, Any]:
-    """Load hive configuration from ~/.hive/configuration.json."""
-    if not HIVE_CONFIG_FILE.exists():
-        return {}
-    try:
-        with open(HIVE_CONFIG_FILE) as f:
-            return json.load(f)
-    except (json.JSONDecodeError, OSError):
-        return {}
-
-
 def get_claude_code_token() -> str | None:
     """
     Get the OAuth token from Claude Code subscription.
@@ -268,11 +261,7 @@ class AgentRunner:
     @staticmethod
     def _resolve_default_model() -> str:
         """Resolve the default model from ~/.hive/configuration.json."""
-        config = get_hive_config()
-        llm = config.get("llm", {})
-        if llm.get("provider") and llm.get("model"):
-            return f"{llm['provider']}/{llm['model']}"
-        return "anthropic/claude-sonnet-4-20250514"
+        return get_preferred_model()
 
     def __init__(
         self,
@@ -425,7 +414,11 @@ class AgentRunner:
         if agent_config and hasattr(agent_config, "model"):
             model = agent_config.model
 
-        max_tokens = getattr(agent_config, "max_tokens", 1024) if agent_config else 1024
+        if agent_config and hasattr(agent_config, "max_tokens"):
+            max_tokens = agent_config.max_tokens
+        else:
+            hive_config = get_hive_config()
+            max_tokens = hive_config.get("llm", {}).get("max_tokens", DEFAULT_MAX_TOKENS)
 
         # Build GraphSpec from module-level variables
         graph = GraphSpec(
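Spelled out, the runner's new resolution order for max_tokens is: per-agent config, then the global file, then the framework constant. A minimal sketch of that chain (the helper name is illustrative, not part of the diff):

from framework.config import get_hive_config
from framework.graph.edge import DEFAULT_MAX_TOKENS

def resolve_max_tokens(agent_config) -> int:  # illustrative helper
    # 1. Per-agent override (exports/<agent>/config.py)
    if agent_config is not None and hasattr(agent_config, "max_tokens"):
        return agent_config.max_tokens
    # 2. Global default from ~/.hive/configuration.json,
    # 3. else the framework-wide DEFAULT_MAX_TOKENS (8192)
    return get_hive_config().get("llm", {}).get("max_tokens", DEFAULT_MAX_TOKENS)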
+27 -6
@@ -5,12 +5,31 @@ Aden Hive is a Python-based agent framework. Configuration is handled through en
 ## Configuration Overview
 
 ```
+~/.hive/configuration.json (global defaults: provider, model, max_tokens)
 Environment variables (API keys, runtime flags)
 Agent config.py (per-agent settings: model, tools, storage)
 pyproject.toml (package metadata and dependencies)
 .mcp.json (MCP server connections)
 ```
 
+## Global Configuration (~/.hive/configuration.json)
+
+The `quickstart.sh` script creates this file during setup. It stores the default LLM provider, model, and max_tokens used by all agents unless overridden in an agent's own `config.py`.
+
+```json
+{
+  "llm": {
+    "provider": "anthropic",
+    "model": "claude-sonnet-4-5-20250929",
+    "max_tokens": 8192,
+    "api_key_env_var": "ANTHROPIC_API_KEY"
+  },
+  "created_at": "2026-01-15T12:00:00+00:00"
+}
+```
+
+The default `max_tokens` value (8192) is defined as `DEFAULT_MAX_TOKENS` in `framework.graph.edge` and re-exported from `framework.graph`. Each agent's `RuntimeConfig` reads from this file at startup. To change defaults, either re-run `quickstart.sh` or edit the file directly.
+
 ## Environment Variables
 
 ### LLM Providers (at least one required for real execution)
@@ -61,14 +80,16 @@ Each agent package in `exports/` contains its own `config.py`:
 ```python
 # exports/my_agent/config.py
 CONFIG = {
-    "model": "claude-haiku-4-5-20251001",  # Default LLM model
-    "max_tokens": 4096,
+    "model": "anthropic/claude-sonnet-4-5-20250929",  # Default LLM model
+    "max_tokens": 8192,  # default: DEFAULT_MAX_TOKENS from framework.graph
     "temperature": 0.7,
     "tools": ["web_search", "pdf_read"],  # MCP tools to enable
     "storage_path": "/tmp/my_agent",  # Runtime data location
 }
 ```
 
+If `model` or `max_tokens` are omitted, the agent loads defaults from `~/.hive/configuration.json`.
+
 ### Agent Graph Specification
 
 Agent behavior is defined in `agent.json` (or constructed in `agent.py`):
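A quick way to sanity-check the layering the docs describe, assuming only the file layout above (the function is illustrative, not framework API):

import json
from pathlib import Path

def effective_model(agent_cfg: dict) -> str:  # illustrative, not framework API
    if agent_cfg.get("model"):                # per-agent config.py wins
        return agent_cfg["model"]
    cfg_file = Path.home() / ".hive" / "configuration.json"
    llm = json.loads(cfg_file.read_text()).get("llm", {})
    return f"{llm['provider']}/{llm['model']}"  # global default

print(effective_model({}))  # e.g. "anthropic/claude-sonnet-4-5-20250929"
print(effective_model({"model": "anthropic/claude-haiku-4-5-20251001"}))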
@@ -1,33 +1,8 @@
"""Runtime configuration.""" """Runtime configuration."""
import json from dataclasses import dataclass
from dataclasses import dataclass, field
from pathlib import Path
def _load_preferred_model() -> str:
"""Load preferred model from ~/.hive/configuration.json."""
config_path = Path.home() / ".hive" / "configuration.json"
if config_path.exists():
try:
with open(config_path) as f:
config = json.load(f)
llm = config.get("llm", {})
if llm.get("provider") and llm.get("model"):
return f"{llm['provider']}/{llm['model']}"
except Exception:
pass
return "anthropic/claude-sonnet-4-20250514"
@dataclass
class RuntimeConfig:
model: str = field(default_factory=_load_preferred_model)
temperature: float = 0.7
max_tokens: int = 40000
api_key: str | None = None
api_base: str | None = None
from framework.config import RuntimeConfig
default_config = RuntimeConfig() default_config = RuntimeConfig()
@@ -1,33 +1,8 @@
"""Runtime configuration.""" """Runtime configuration."""
import json from dataclasses import dataclass
from dataclasses import dataclass, field
from pathlib import Path
def _load_preferred_model() -> str:
"""Load preferred model from ~/.hive/configuration.json."""
config_path = Path.home() / ".hive" / "configuration.json"
if config_path.exists():
try:
with open(config_path) as f:
config = json.load(f)
llm = config.get("llm", {})
if llm.get("provider") and llm.get("model"):
return f"{llm['provider']}/{llm['model']}"
except Exception:
pass
return "anthropic/claude-sonnet-4-20250514"
@dataclass
class RuntimeConfig:
model: str = field(default_factory=_load_preferred_model)
temperature: float = 0.7
max_tokens: int = 40000
api_key: str | None = None
api_base: str | None = None
from framework.config import RuntimeConfig
default_config = RuntimeConfig() default_config = RuntimeConfig()
+2 -27
@@ -1,33 +1,8 @@
"""Runtime configuration.""" """Runtime configuration."""
import json from dataclasses import dataclass
from dataclasses import dataclass, field
from pathlib import Path
def _load_preferred_model() -> str:
"""Load preferred model from ~/.hive/configuration.json."""
config_path = Path.home() / ".hive" / "configuration.json"
if config_path.exists():
try:
with open(config_path) as f:
config = json.load(f)
llm = config.get("llm", {})
if llm.get("provider") and llm.get("model"):
return f"{llm['provider']}/{llm['model']}"
except Exception:
pass
return "anthropic/claude-sonnet-4-20250514"
@dataclass
class RuntimeConfig:
model: str = field(default_factory=_load_preferred_model)
temperature: float = 0.7
max_tokens: int = 40000
api_key: str | None = None
api_base: str | None = None
from framework.config import RuntimeConfig
default_config = RuntimeConfig() default_config = RuntimeConfig()
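Reconstructed from the hunks above, each agent template's runtime-config module now reduces to roughly the following (the `dataclass` import presumably survives for template-specific definitions later in each file):

"""Runtime configuration."""

from dataclasses import dataclass

from framework.config import RuntimeConfig

default_config = RuntimeConfig()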
+244 -13
@@ -303,9 +303,9 @@ if [ "$USE_ASSOC_ARRAYS" = true ]; then
     )
 
     declare -A DEFAULT_MODELS=(
-        ["anthropic"]="claude-sonnet-4-5-20250929"
-        ["openai"]="gpt-4o"
-        ["gemini"]="gemini-3.0-flash-preview"
+        ["anthropic"]="claude-opus-4-6"
+        ["openai"]="gpt-5.2"
+        ["gemini"]="gemini-3-flash-preview"
         ["groq"]="moonshotai/kimi-k2-instruct-0905"
         ["cerebras"]="zai-glm-4.7"
         ["mistral"]="mistral-large-latest"
@@ -313,6 +313,65 @@ if [ "$USE_ASSOC_ARRAYS" = true ]; then
         ["deepseek"]="deepseek-chat"
     )
+    # Model choices per provider: composite-key associative arrays
+    # Keys: "provider:index" -> value
+    declare -A MODEL_CHOICES_ID=(
+        ["anthropic:0"]="claude-opus-4-6"
+        ["anthropic:1"]="claude-sonnet-4-5-20250929"
+        ["anthropic:2"]="claude-sonnet-4-20250514"
+        ["anthropic:3"]="claude-haiku-4-5-20251001"
+        ["openai:0"]="gpt-5.2"
+        ["openai:1"]="gpt-5-mini"
+        ["openai:2"]="gpt-5-nano"
+        ["gemini:0"]="gemini-3-flash-preview"
+        ["gemini:1"]="gemini-3-pro-preview"
+        ["groq:0"]="moonshotai/kimi-k2-instruct-0905"
+        ["groq:1"]="openai/gpt-oss-120b"
+        ["cerebras:0"]="zai-glm-4.7"
+        ["cerebras:1"]="qwen3-235b-a22b-instruct-2507"
+    )
+
+    declare -A MODEL_CHOICES_LABEL=(
+        ["anthropic:0"]="Opus 4.6 - Most capable (recommended)"
+        ["anthropic:1"]="Sonnet 4.5 - Best balance"
+        ["anthropic:2"]="Sonnet 4 - Fast + capable"
+        ["anthropic:3"]="Haiku 4.5 - Fast + cheap"
+        ["openai:0"]="GPT-5.2 - Most capable (recommended)"
+        ["openai:1"]="GPT-5 Mini - Fast + cheap"
+        ["openai:2"]="GPT-5 Nano - Fastest"
+        ["gemini:0"]="Gemini 3 Flash - Fast (recommended)"
+        ["gemini:1"]="Gemini 3 Pro - Best quality"
+        ["groq:0"]="Kimi K2 - Best quality (recommended)"
+        ["groq:1"]="GPT-OSS 120B - Fast reasoning"
+        ["cerebras:0"]="ZAI-GLM 4.7 - Best quality (recommended)"
+        ["cerebras:1"]="Qwen3 235B - Frontier reasoning"
+    )
+
+    # NOTE: 8192 should match DEFAULT_MAX_TOKENS in core/framework/graph/edge.py
+    declare -A MODEL_CHOICES_MAXTOKENS=(
+        ["anthropic:0"]=8192
+        ["anthropic:1"]=8192
+        ["anthropic:2"]=8192
+        ["anthropic:3"]=8192
+        ["openai:0"]=16384
+        ["openai:1"]=16384
+        ["openai:2"]=16384
+        ["gemini:0"]=8192
+        ["gemini:1"]=8192
+        ["groq:0"]=8192
+        ["groq:1"]=8192
+        ["cerebras:0"]=8192
+        ["cerebras:1"]=8192
+    )
+
+    declare -A MODEL_CHOICES_COUNT=(
+        ["anthropic"]=4
+        ["openai"]=3
+        ["gemini"]=2
+        ["groq"]=2
+        ["cerebras"]=2
+    )
+
     # Helper functions for Bash 4+
     get_provider_name() {
         echo "${PROVIDER_NAMES[$1]}"
@@ -325,6 +384,22 @@ if [ "$USE_ASSOC_ARRAYS" = true ]; then
     get_default_model() {
         echo "${DEFAULT_MODELS[$1]}"
     }
+
+    get_model_choice_count() {
+        echo "${MODEL_CHOICES_COUNT[$1]:-0}"
+    }
+
+    get_model_choice_id() {
+        echo "${MODEL_CHOICES_ID[$1:$2]}"
+    }
+
+    get_model_choice_label() {
+        echo "${MODEL_CHOICES_LABEL[$1:$2]}"
+    }
+
+    get_model_choice_maxtokens() {
+        echo "${MODEL_CHOICES_MAXTOKENS[$1:$2]}"
+    }
 else
     # Bash 3.2 - use parallel indexed arrays
     PROVIDER_ENV_VARS=(ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY GOOGLE_API_KEY GROQ_API_KEY CEREBRAS_API_KEY MISTRAL_API_KEY TOGETHER_API_KEY DEEPSEEK_API_KEY)
@@ -333,7 +408,7 @@ else
     # Default models by provider id (parallel arrays)
     MODEL_PROVIDER_IDS=(anthropic openai gemini groq cerebras mistral together_ai deepseek)
-    MODEL_DEFAULTS=("claude-sonnet-4-5-20250929" "gpt-4o" "gemini-3.0-flash-preview" "moonshotai/kimi-k2-instruct-0905" "zai-glm-4.7" "mistral-large-latest" "meta-llama/Llama-3.3-70B-Instruct-Turbo" "deepseek-chat")
+    MODEL_DEFAULTS=("claude-opus-4-6" "gpt-5.2" "gemini-3-flash-preview" "moonshotai/kimi-k2-instruct-0905" "zai-glm-4.7" "mistral-large-latest" "meta-llama/Llama-3.3-70B-Instruct-Turbo" "deepseek-chat")
 
     # Helper: get provider display name for an env var
     get_provider_name() {
@@ -373,6 +448,82 @@ else
             i=$((i + 1))
         done
     }
+
+    # Model choices per provider - flat parallel arrays with provider offsets
+    # Provider order: anthropic(4), openai(3), gemini(2), groq(2), cerebras(2)
+    MC_PROVIDERS=(anthropic anthropic anthropic anthropic openai openai openai gemini gemini groq groq cerebras cerebras)
+    MC_IDS=("claude-opus-4-6" "claude-sonnet-4-5-20250929" "claude-sonnet-4-20250514" "claude-haiku-4-5-20251001" "gpt-5.2" "gpt-5-mini" "gpt-5-nano" "gemini-3-flash-preview" "gemini-3-pro-preview" "moonshotai/kimi-k2-instruct-0905" "openai/gpt-oss-120b" "zai-glm-4.7" "qwen3-235b-a22b-instruct-2507")
+    MC_LABELS=("Opus 4.6 - Most capable (recommended)" "Sonnet 4.5 - Best balance" "Sonnet 4 - Fast + capable" "Haiku 4.5 - Fast + cheap" "GPT-5.2 - Most capable (recommended)" "GPT-5 Mini - Fast + cheap" "GPT-5 Nano - Fastest" "Gemini 3 Flash - Fast (recommended)" "Gemini 3 Pro - Best quality" "Kimi K2 - Best quality (recommended)" "GPT-OSS 120B - Fast reasoning" "ZAI-GLM 4.7 - Best quality (recommended)" "Qwen3 235B - Frontier reasoning")
+    # NOTE: 8192 should match DEFAULT_MAX_TOKENS in core/framework/graph/edge.py
+    MC_MAXTOKENS=(8192 8192 8192 8192 16384 16384 16384 8192 8192 8192 8192 8192 8192)
+
+    # Helper: get number of model choices for a provider
+    get_model_choice_count() {
+        local provider_id="$1"
+        local count=0
+        local i=0
+        while [ $i -lt ${#MC_PROVIDERS[@]} ]; do
+            if [ "${MC_PROVIDERS[$i]}" = "$provider_id" ]; then
+                count=$((count + 1))
+            fi
+            i=$((i + 1))
+        done
+        echo "$count"
+    }
+
+    # Helper: get model choice id by provider and index (0-based within provider)
+    get_model_choice_id() {
+        local provider_id="$1"
+        local idx="$2"
+        local count=0
+        local i=0
+        while [ $i -lt ${#MC_PROVIDERS[@]} ]; do
+            if [ "${MC_PROVIDERS[$i]}" = "$provider_id" ]; then
+                if [ $count -eq "$idx" ]; then
+                    echo "${MC_IDS[$i]}"
+                    return
+                fi
+                count=$((count + 1))
+            fi
+            i=$((i + 1))
+        done
+    }
+
+    # Helper: get model choice label by provider and index
+    get_model_choice_label() {
+        local provider_id="$1"
+        local idx="$2"
+        local count=0
+        local i=0
+        while [ $i -lt ${#MC_PROVIDERS[@]} ]; do
+            if [ "${MC_PROVIDERS[$i]}" = "$provider_id" ]; then
+                if [ $count -eq "$idx" ]; then
+                    echo "${MC_LABELS[$i]}"
+                    return
+                fi
+                count=$((count + 1))
+            fi
+            i=$((i + 1))
+        done
+    }
+
+    # Helper: get model choice max_tokens by provider and index
+    get_model_choice_maxtokens() {
+        local provider_id="$1"
+        local idx="$2"
+        local count=0
+        local i=0
+        while [ $i -lt ${#MC_PROVIDERS[@]} ]; do
+            if [ "${MC_PROVIDERS[$i]}" = "$provider_id" ]; then
+                if [ $count -eq "$idx" ]; then
+                    echo "${MC_MAXTOKENS[$i]}"
+                    return
+                fi
+                count=$((count + 1))
+            fi
+            i=$((i + 1))
+        done
+    }
 fi
 
 # Configuration directory
@@ -411,12 +562,74 @@ detect_shell_rc() {
 SHELL_RC_FILE=$(detect_shell_rc)
 SHELL_NAME=$(basename "$SHELL")
 
+# Prompt the user to choose a model for their selected provider.
+# Sets SELECTED_MODEL and SELECTED_MAX_TOKENS.
+prompt_model_selection() {
+    local provider_id="$1"
+    local count
+    count="$(get_model_choice_count "$provider_id")"
+
+    if [ "$count" -eq 0 ]; then
+        # No curated choices for this provider (e.g. Mistral, DeepSeek)
+        SELECTED_MODEL="$(get_default_model "$provider_id")"
+        SELECTED_MAX_TOKENS=8192
+        return
+    fi
+
+    if [ "$count" -eq 1 ]; then
+        # Only one choice — auto-select
+        SELECTED_MODEL="$(get_model_choice_id "$provider_id" 0)"
+        SELECTED_MAX_TOKENS="$(get_model_choice_maxtokens "$provider_id" 0)"
+        return
+    fi
+
+    # Multiple choices — show menu
+    echo ""
+    echo -e "${BOLD}Select a model:${NC}"
+    echo ""
+    local i=0
+    while [ $i -lt "$count" ]; do
+        local label
+        label="$(get_model_choice_label "$provider_id" "$i")"
+        local mid
+        mid="$(get_model_choice_id "$provider_id" "$i")"
+        local num=$((i + 1))
+        echo -e "  ${CYAN}$num)${NC} $label ${DIM}($mid)${NC}"
+        i=$((i + 1))
+    done
+    echo ""
+
+    local choice
+    while true; do
+        read -r -p "Enter choice [1]: " choice
+        choice="${choice:-1}"
+        if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le "$count" ]; then
+            local idx=$((choice - 1))
+            SELECTED_MODEL="$(get_model_choice_id "$provider_id" "$idx")"
+            SELECTED_MAX_TOKENS="$(get_model_choice_maxtokens "$provider_id" "$idx")"
+            echo ""
+            echo -e "${GREEN}✓${NC} Model: ${DIM}$SELECTED_MODEL${NC}"
+            return
+        fi
+        echo -e "${RED}Invalid choice. Please enter 1-$count${NC}"
+    done
+}
+
 # Function to save configuration
 save_configuration() {
     local provider_id="$1"
     local env_var="$2"
-    local model
-    model="$(get_default_model "$provider_id")"
+    local model="$3"
+    local max_tokens="$4"
+
+    # Fallbacks if not provided
+    if [ -z "$model" ]; then
+        model="$(get_default_model "$provider_id")"
+    fi
+    if [ -z "$max_tokens" ]; then
+        max_tokens=8192
+    fi
 
     mkdir -p "$HIVE_CONFIG_DIR"
@@ -426,6 +639,7 @@ config = {
     'llm': {
         'provider': '$provider_id',
         'model': '$model',
+        'max_tokens': $max_tokens,
         'api_key_env_var': '$env_var'
     },
     'created_at': '$(date -u +"%Y-%m-%dT%H:%M:%S+00:00")'
@@ -449,6 +663,8 @@ FOUND_PROVIDERS=() # Display names for UI
 FOUND_ENV_VARS=()        # Corresponding env var names
 SELECTED_PROVIDER_ID=""  # Will hold the chosen provider ID
 SELECTED_ENV_VAR=""      # Will hold the chosen env var
+SELECTED_MODEL=""        # Will hold the chosen model ID
+SELECTED_MAX_TOKENS=8192 # Will hold the chosen max_tokens
 
 if [ "$USE_ASSOC_ARRAYS" = true ]; then
     # Bash 4+ - iterate over associative array keys
@@ -486,6 +702,8 @@ if [ ${#FOUND_PROVIDERS[@]} -gt 0 ]; then
echo "" echo ""
echo -e "${GREEN}${NC} Using ${FOUND_PROVIDERS[0]}" echo -e "${GREEN}${NC} Using ${FOUND_PROVIDERS[0]}"
prompt_model_selection "$SELECTED_PROVIDER_ID"
fi fi
else else
# Multiple providers found, let user pick one # Multiple providers found, let user pick one
@@ -498,28 +716,34 @@ if [ ${#FOUND_PROVIDERS[@]} -gt 0 ]; then
echo -e " ${CYAN}$i)${NC} $provider" echo -e " ${CYAN}$i)${NC} $provider"
i=$((i + 1)) i=$((i + 1))
done done
echo -e " ${CYAN}$i)${NC} Other"
max_choice=$i
echo "" echo ""
while true; do while true; do
read -r -p "Enter choice (1-${#FOUND_PROVIDERS[@]}): " choice read -r -p "Enter choice (1-$max_choice): " choice
if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le "${#FOUND_PROVIDERS[@]}" ]; then if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le "$max_choice" ]; then
if [ "$choice" -eq "$max_choice" ]; then
# Fall through to the manual provider selection below
break
fi
idx=$((choice - 1)) idx=$((choice - 1))
SELECTED_ENV_VAR="${FOUND_ENV_VARS[$idx]}" SELECTED_ENV_VAR="${FOUND_ENV_VARS[$idx]}"
SELECTED_PROVIDER_ID="$(get_provider_id "$SELECTED_ENV_VAR")" SELECTED_PROVIDER_ID="$(get_provider_id "$SELECTED_ENV_VAR")"
echo "" echo ""
echo -e "${GREEN}${NC} Selected: ${FOUND_PROVIDERS[$idx]}" echo -e "${GREEN}${NC} Selected: ${FOUND_PROVIDERS[$idx]}"
prompt_model_selection "$SELECTED_PROVIDER_ID"
break break
fi fi
echo -e "${RED}Invalid choice. Please enter 1-${#FOUND_PROVIDERS[@]}${NC}" echo -e "${RED}Invalid choice. Please enter 1-$max_choice${NC}"
done done
fi fi
fi fi
if [ -z "$SELECTED_PROVIDER_ID" ]; then if [ -z "$SELECTED_PROVIDER_ID" ]; then
echo "No API keys found. Let's configure one."
echo "" echo ""
prompt_choice "Select your LLM provider:" \ prompt_choice "Select your LLM provider:" \
"Anthropic (Claude) - Recommended" \ "Anthropic (Claude) - Recommended" \
"OpenAI (GPT)" \ "OpenAI (GPT)" \
@@ -595,11 +819,16 @@ if [ -z "$SELECTED_PROVIDER_ID" ]; then
     fi
 fi
 
+# Prompt for model if not already selected (manual provider path)
+if [ -n "$SELECTED_PROVIDER_ID" ] && [ -z "$SELECTED_MODEL" ]; then
+    prompt_model_selection "$SELECTED_PROVIDER_ID"
+fi
+
 # Save configuration if a provider was selected
 if [ -n "$SELECTED_PROVIDER_ID" ]; then
     echo ""
     echo -n "  Saving configuration... "
-    save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" > /dev/null
+    save_configuration "$SELECTED_PROVIDER_ID" "$SELECTED_ENV_VAR" "$SELECTED_MODEL" "$SELECTED_MAX_TOKENS" > /dev/null
     echo -e "${GREEN}✓${NC}"
     echo -e "  ${DIM}~/.hive/configuration.json${NC}"
 fi
@@ -781,7 +1010,9 @@ echo ""
 # Show configured provider
 if [ -n "$SELECTED_PROVIDER_ID" ]; then
-    SELECTED_MODEL="$(get_default_model "$SELECTED_PROVIDER_ID")"
+    if [ -z "$SELECTED_MODEL" ]; then
+        SELECTED_MODEL="$(get_default_model "$SELECTED_PROVIDER_ID")"
+    fi
     echo -e "${BOLD}Default LLM:${NC}"
     echo -e "  ${CYAN}$SELECTED_PROVIDER_ID${NC}${DIM}$SELECTED_MODEL${NC}"
     echo ""