diff --git a/.claude/settings.local.json b/.claude/settings.local.json new file mode 100644 index 00000000..c30ad53c --- /dev/null +++ b/.claude/settings.local.json @@ -0,0 +1,19 @@ +{ + "permissions": { + "allow": [ + "Bash(npm install:*)", + "Bash(npm test:*)", + "Skill(building-agents-construction)", + "Skill(building-agents-construction:*)", + "Bash(PYTHONPATH=core:exports pytest:*)", + "mcp__agent-builder__create_session", + "mcp__agent-builder__get_session_status", + "mcp__agent-builder__set_goal", + "mcp__agent-builder__list_mcp_servers", + "mcp__agent-builder__test_node", + "mcp__agent-builder__add_node", + "mcp__agent-builder__add_edge", + "mcp__agent-builder__validate_graph" + ] + } +} diff --git a/.claude/skills/building-agents-construction/SKILL.md b/.claude/skills/building-agents-construction/SKILL.md index bc149711..22e637d6 100644 --- a/.claude/skills/building-agents-construction/SKILL.md +++ b/.claude/skills/building-agents-construction/SKILL.md @@ -16,15 +16,151 @@ Step-by-step guide for building goal-driven agent packages. **Prerequisites:** Read `building-agents-core` for fundamental concepts. -## Step-by-Step Guide +## CRITICAL: entry_points Format Reference -### Step 1: Create Package Structure +**⚠️ Common Mistake Prevention:** -When user requests an agent, **immediately create the package**: +The `entry_points` parameter in GraphSpec has a specific format that is easy to get wrong. This section exists because this mistake has caused production bugs. + +### Correct Format ```python -# 1. 
Create directory +entry_points = {"start": "first-node-id"} +``` + +**Examples from working agents:** + +```python +# From exports/outbound_sales_agent/agent.py +entry_node = "lead-qualification" +entry_points = {"start": "lead-qualification"} + +# From exports/support_ticket_agent/agent.py (FIXED) +entry_node = "parse-ticket" +entry_points = {"start": "parse-ticket"} +``` + +### WRONG Formats (DO NOT USE) + +```python +# ❌ WRONG: Using node ID as key with input keys as value +entry_points = { + "parse-ticket": ["ticket_content", "customer_id", "ticket_id"] +} +# Error: ValidationError: Input should be a valid string, got list + +# ❌ WRONG: Using set instead of dict +entry_points = {"parse-ticket"} +# Error: ValidationError: Input should be a valid dictionary, got set + +# ❌ WRONG: Missing "start" key +entry_points = {"entry": "parse-ticket"} +# Error: Graph execution fails, cannot find entry point +``` + +### Validation Check + +After writing graph configuration, ALWAYS validate: + +```python +# Check 1: Must be a dict +assert isinstance(entry_points, dict), f"entry_points must be dict, got {type(entry_points)}" + +# Check 2: Must have "start" key +assert "start" in entry_points, f"entry_points must have 'start' key, got keys: {entry_points.keys()}" + +# Check 3: "start" value must match entry_node +assert entry_points["start"] == entry_node, f"entry_points['start']={entry_points['start']} must match entry_node={entry_node}" + +# Check 4: Value must be a string (node ID) +assert isinstance(entry_points["start"], str), f"entry_points['start'] must be string, got {type(entry_points['start'])}" +``` + +**Why this matters:** GraphSpec uses Pydantic validation. The wrong format causes ValidationError at runtime, which blocks all agent execution and tests. This bug is not caught until you try to run the agent. + +## Building Session Management with MCP + +**MANDATORY**: Use the agent-builder MCP server's BuildSession system for automatic bookkeeping and persistence. 
+ +### Available MCP Session Tools + +```python +# Create new session (call FIRST before building) +mcp__agent-builder__create_session(name="Support Ticket Agent") +# Returns: session_id, automatically sets as active session + +# Get current session status (use for progress tracking) +status = mcp__agent-builder__get_session_status() +# Returns: { +# "session_id": "build_20250122_...", +# "name": "Support Ticket Agent", +# "has_goal": true, +# "node_count": 5, +# "edge_count": 7, +# "nodes": ["parse-ticket", "categorize", ...], +# "edges": [("parse-ticket", "categorize"), ...] +# } + +# List all saved sessions +mcp__agent-builder__list_sessions() + +# Load previous session +mcp__agent-builder__load_session_by_id(session_id="build_...") + +# Delete session +mcp__agent-builder__delete_session(session_id="build_...") +``` + +### How MCP Session Works + +The BuildSession class (in `core/framework/mcp/agent_builder_server.py`) automatically: +- **Persists to disk** after every operation (`_save_session()` called automatically) +- **Tracks all components**: goal, nodes, edges, mcp_servers +- **Maintains timestamps**: created_at, last_modified +- **Stores to**: `~/.claude-code-agent-builder/sessions/` + +When you call MCP tools like: +- `mcp__agent-builder__set_goal(...)` - Automatically added to session.goal and saved +- `mcp__agent-builder__add_node(...)` - Automatically added to session.nodes and saved +- `mcp__agent-builder__add_edge(...)` - Automatically added to session.edges and saved + +**No manual bookkeeping needed** - the MCP server handles it all! 
+ +### Show Progress to User + +```python +# Get session status to show progress +status = json.loads(mcp__agent-builder__get_session_status()) + +print(f"\n📊 Building Progress:") +print(f" Session: {status['name']}") +print(f" Goal defined: {status['has_goal']}") +print(f" Nodes: {status['node_count']}") +print(f" Edges: {status['edge_count']}") +print(f" Nodes added: {', '.join(status['nodes'])}") +``` + +**Benefits:** +- Automatic persistence - survive crashes/restarts +- Clear audit trail - all operations logged +- Session resume - continue from where you left off +- Progress tracking built-in +- No manual state management needed + +## Step-by-Step Guide + +### Step 1: Create Building Session & Package Structure + +When user requests an agent, **immediately create MCP session and package**: + +```python +# 0. FIRST: Create MCP building session agent_name = "technical_research_agent" # snake_case +session_result = mcp__agent-builder__create_session(name=agent_name.replace('_', ' ').title()) +session_id = json.loads(session_result)["session_id"] +print(f"✅ Created building session: {session_id}") + +# 1. Create directory package_path = f"exports/{agent_name}" Bash(f"mkdir -p {package_path}/nodes") @@ -174,14 +310,22 @@ Edit( Open exports/technical_research_agent/agent.py to see the goal! ``` +**Note:** Goal is automatically tracked in MCP session. Use `mcp__agent-builder__get_session_status()` to check progress. + ### Step 3: Add Nodes (Incremental) -**⚠️ IMPORTANT:** Before adding any node with tools, you MUST: +**⚠️ CRITICAL VALIDATION REQUIREMENTS:** +Before adding any node with tools: 1. Call `mcp__agent-builder__list_mcp_tools()` to discover available tools 2. Verify each tool exists in the response 3. If a tool doesn't exist, inform the user and ask how to proceed +After writing each node: +4. **MANDATORY**: Validate with `mcp__agent-builder__test_node()` before proceeding +5. **MANDATORY**: Check MCP session status to track progress +6. 
Only proceed to next node after validation passes + For each node, **write immediately after approval**: ```python @@ -234,24 +378,36 @@ Open exports/technical_research_agent/nodes/__init__.py to see it! **Repeat for each node.** User watches the file grow. -#### Optional: Validate Node with MCP Tools +#### MANDATORY: Validate Each Node with MCP Tools -After writing a node, you can optionally use MCP tools for validation: +After writing EVERY node, you MUST validate before proceeding: ```python -# Node is already written to file. Now validate it: -mcp__agent-builder__test_node( +# Node is already written to file. Now VALIDATE IT (REQUIRED): +validation_result = json.loads(mcp__agent-builder__test_node( node_id="analyze-request", test_input='{"query": "test query"}', mock_llm_response='{"analysis": "mock output"}' -) +)) -# Returns validation result showing node behavior -# This is OPTIONAL - for bookkeeping/validation only -# The node already exists in the file! +# Check validation result +if validation_result["valid"]: + # Show user validation passed + print(f"✅ Node validation passed: analyze-request") + + # Show session progress + status = json.loads(mcp__agent-builder__get_session_status()) + print(f"📊 Session progress: {status['node_count']} nodes added") +else: + # STOP - Do not proceed until fixed + print(f"❌ Node validation FAILED:") + for error in validation_result["errors"]: + print(f" - {error}") + print("⚠️ Must fix node before proceeding to next component") + # Ask user how to proceed ``` -**Key Point:** The node was written to `nodes/__init__.py` FIRST. The MCP tool is just for validation. +**CRITICAL:** Do NOT proceed to the next node until validation passes. Bugs caught here prevent wasted work later. 
### Step 4: Connect Edges @@ -282,10 +438,15 @@ Edit( ) # Write entry points and terminal nodes +# ⚠️ CRITICAL: entry_points format must be {"start": "node_id"} +# Common mistake: {"node_id": ["input_keys"]} is WRONG +# Correct format: {"start": "first-node-id"} +# Reference: See exports/outbound_sales_agent/agent.py for example + graph_config = f''' # Graph configuration entry_node = "{entry_node_id}" -entry_points = {entry_points} +entry_points = {{"start": "{entry_node_id}"}} # CRITICAL: Must be {{"start": "node-id"}} pause_nodes = {pause_nodes} terminal_nodes = {terminal_nodes} @@ -311,23 +472,101 @@ Edit( 5 edges connecting 6 nodes ``` -#### Optional: Validate Graph Structure +#### MANDATORY: Validate Graph Structure -After writing edges, optionally validate with MCP tools: +After writing edges, you MUST validate before proceeding to finalization: ```python -# Edges already written to agent.py. Now validate structure: -mcp__agent-builder__validate_graph() +# Edges already written to agent.py. Now VALIDATE STRUCTURE (REQUIRED): +graph_validation = json.loads(mcp__agent-builder__validate_graph()) -# Returns: unreachable nodes, missing connections, etc. 
-# This is OPTIONAL - for validation only +# Check for structural issues +if graph_validation["valid"]: + print("✅ Graph structure validated successfully") + + # Show session summary + status = json.loads(mcp__agent-builder__get_session_status()) + print(f" - Nodes: {status['node_count']}") + print(f" - Edges: {status['edge_count']}") + print(f" - Entry point: {entry_node_id}") +else: + print("❌ Graph validation FAILED:") + for error in graph_validation["errors"]: + print(f" ERROR: {error}") + print("\n⚠️ Must fix graph structure before finalizing agent") + # Ask user how to proceed + +# Additional validation: Check entry_points format +if not isinstance(entry_points, dict): + print("❌ CRITICAL ERROR: entry_points must be a dict") + print(f" Current value: {entry_points} (type: {type(entry_points)})") + print(" Correct format: {'start': 'node-id'}") + # STOP - This is the mistake that caused the support_ticket_agent bug + +if entry_points.get("start") != entry_node_id: + print("❌ CRITICAL ERROR: entry_points['start'] must match entry_node") + print(f" entry_points: {entry_points}") + print(f" entry_node: {entry_node_id}") + print(" They must be consistent!") ``` +**CRITICAL:** Do NOT proceed to Step 5 (finalization) until graph validation passes. This checkpoint prevents structural bugs from reaching production. 
+ ### Step 5: Finalize Agent Class -Write the agent class: +**Pre-flight checks before finalization:** ```python +# MANDATORY: Verify all validations passed before finalizing +print("\n🔍 Pre-finalization Checklist:") + +# Get current session status +status = json.loads(mcp__agent-builder__get_session_status()) + +checks_passed = True + +# Check 1: Goal defined +if not status["has_goal"]: + print("❌ No goal defined") + checks_passed = False +else: + print(f"✅ Goal defined: {status['goal_name']}") + +# Check 2: Nodes added +if status["node_count"] == 0: + print("❌ No nodes added") + checks_passed = False +else: + print(f"✅ {status['node_count']} nodes added: {', '.join(status['nodes'])}") + +# Check 3: Edges added +if status["edge_count"] == 0: + print("❌ No edges added") + checks_passed = False +else: + print(f"✅ {status['edge_count']} edges added") + +# Check 4: Entry points format correct +if not isinstance(entry_points, dict) or "start" not in entry_points: + print("❌ CRITICAL: entry_points format incorrect") + print(f" Current: {entry_points}") + print(" Required: {'start': 'node-id'}") + checks_passed = False +else: + print(f"✅ Entry points valid: {entry_points}") + +if not checks_passed: + print("\n⚠️ CANNOT PROCEED to finalization until all checks pass") + print(" Fix the issues above first") + # Ask user how to proceed or stop here + return + +print("\n✅ All pre-flight checks passed - proceeding to finalization\n") +``` + +Write the agent class: + +````python agent_class_code = f''' class {agent_class_name}: @@ -500,7 +739,7 @@ python -m {agent_name} run --input '{{"key": "value"}}' # Interactive shell python -m {agent_name} shell -``` +```` ## As Python Module @@ -516,17 +755,19 @@ result = await default_agent.run({{"key": "value"}}) - `nodes/__init__.py` - Node definitions - `config.py` - Runtime configuration - `__main__.py` - CLI interface -''' + ''' Write( - file_path=f"{package_path}/README.md", - content=readme_content 
+file_path=f"{package_path}/README.md", +content=readme_content ) + ``` **Show user:** ``` + ✅ Agent class written to agent.py ✅ Package exports finalized in __init__.py ✅ README.md generated @@ -534,11 +775,28 @@ Write( 🎉 Agent complete: exports/technical_research_agent/ Commands: - python -m technical_research_agent info - python -m technical_research_agent validate - python -m technical_research_agent run --input '{"topic": "..."}' +python -m technical_research_agent info +python -m technical_research_agent validate +python -m technical_research_agent run --input '{"topic": "..."}' ``` +**Final session summary:** + +```python +# Show final MCP session status +status = json.loads(mcp__agent-builder__get_session_status()) + +print("\n📊 Build Session Summary:") +print(f" Session ID: {status['session_id']}") +print(f" Agent: {status['name']}") +print(f" Goal: {status['goal_name']}") +print(f" Nodes: {status['node_count']}") +print(f" Edges: {status['edge_count']}") +print(f" MCP Servers: {status['mcp_servers_count']}") +print("\n✅ Agent construction complete with full validation") +print(f"\nSession saved to: ~/.claude-code-agent-builder/sessions/{status['session_id']}.json") +```` + ## CLI Template ```python @@ -623,7 +881,7 @@ def shell(): if __name__ == "__main__": cli() ''' -``` +```` ## Testing During Build @@ -677,11 +935,13 @@ response = AskUserQuestion( After completing construction: **If agent structure complete:** + - Validate: `python -m agent_name validate` - Test basic execution: `python -m agent_name info` - Proceed to testing-agent skill for comprehensive tests **If implementation needed:** + - Check for STATUS.md or IMPLEMENTATION_GUIDE.md in agent directory - May need Python functions or MCP tool integration diff --git a/.gitignore b/.gitignore index 776000c8..196a9a09 100644 --- a/.gitignore +++ b/.gitignore @@ -9,12 +9,10 @@ workdir/ .next/ out/ -# Environment files (generated from config.yaml) +# Environment files .env .env.local .env.*.local 
-honeycomb/.env -hive/.env # User configuration (copied from .example) config.yaml diff --git a/.mcp.json b/.mcp.json index b3a8cd04..b58d7b4a 100644 --- a/.mcp.json +++ b/.mcp.json @@ -5,13 +5,13 @@ "args": ["-m", "framework.mcp.agent_builder_server"], "cwd": "core", "env": { - "PYTHONPATH": "../aden-tools/src" + "PYTHONPATH": "../tools/src" } }, - "aden-tools": { + "tools": { "command": "python", "args": ["mcp_server.py", "--stdio"], - "cwd": "aden-tools", + "cwd": "tools", "env": { "PYTHONPATH": "src" } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2e5df4bc..a83094bf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ -# Contributing to Hive +# Contributing to Aden Agent Framework -Thank you for your interest in contributing to Hive! This document provides guidelines and information for contributors. +Thank you for your interest in contributing to the Aden Agent Framework! This document provides guidelines and information for contributors. ## Code of Conduct @@ -12,24 +12,21 @@ By participating in this project, you agree to abide by our [Code of Conduct](CO 2. Clone your fork: `git clone https://github.com/YOUR_USERNAME/hive.git` 3. Create a feature branch: `git checkout -b feature/your-feature-name` 4. Make your changes -5. Run tests: `npm run test` +5. Run tests: `PYTHONPATH=core:exports python -m pytest` 6. Commit your changes following our commit conventions 7. 
Push to your fork and submit a Pull Request ## Development Setup ```bash -# Install dependencies -npm install +# Install Python packages +./scripts/setup-python.sh -# Copy configuration -cp config.yaml.example config.yaml +# Verify installation +python -c "import framework; import aden_tools; print('✓ Setup complete')" -# Generate environment files -npm run setup - -# Start development environment -docker compose up +# Install Claude Code skills (optional) +./quickstart.sh ``` ## Commit Convention @@ -77,28 +74,33 @@ feat(component): add new feature description ## Project Structure -- `honeycomb/` - React frontend application -- `hive/` - Node.js backend API +- `core/` - Core framework (agent runtime, graph executor, protocols) +- `tools/` - MCP Tools Package (19 tools for agent capabilities) +- `exports/` - Agent packages and examples - `docs/` - Documentation - `scripts/` - Build and utility scripts +- `.claude/` - Claude Code skills for building/testing agents ## Code Style -- Use TypeScript for all new code -- Follow existing code patterns +- Use Python 3.11+ for all new code +- Follow PEP 8 style guide +- Add type hints to function signatures +- Write docstrings for classes and public functions - Use meaningful variable and function names -- Add comments for complex logic - Keep functions focused and small ## Testing ```bash -# Run all tests -npm run test +# Run all tests for the framework +cd core && python -m pytest -# Run tests for a specific package -npm run test --workspace=honeycomb -npm run test --workspace=hive +# Run all tests for tools +cd tools && python -m pytest + +# Run tests for a specific agent +PYTHONPATH=core:exports python -m agent_name test ``` ## Questions? diff --git a/DEVELOPER.md b/DEVELOPER.md index ae6c1cec..875c905f 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -1,43 +1,39 @@ # Developer Guide -This comprehensive guide covers everything you need to know to work on the Hive monorepo effectively. 
+This guide covers everything you need to know to develop with the Aden Agent Framework. ## Table of Contents 1. [Repository Overview](#repository-overview) 2. [Initial Setup](#initial-setup) 3. [Project Structure](#project-structure) -4. [Configuration System](#configuration-system) -5. [Development Workflow](#development-workflow) -6. [Working with the Frontend (honeycomb)](#working-with-the-frontend-honeycomb) -7. [Working with the Backend (hive)](#working-with-the-backend-hive) -8. [Docker Development](#docker-development) -9. [Testing](#testing) -10. [Code Style & Conventions](#code-style--conventions) -11. [Git Workflow](#git-workflow) -12. [Debugging](#debugging) -13. [Common Tasks](#common-tasks) -14. [Troubleshooting](#troubleshooting) +4. [Building Agents](#building-agents) +5. [Testing Agents](#testing-agents) +6. [Code Style & Conventions](#code-style--conventions) +7. [Git Workflow](#git-workflow) +8. [Common Tasks](#common-tasks) +9. [Troubleshooting](#troubleshooting) --- ## Repository Overview -Hive is a monorepo containing two main packages: +Aden Agent Framework is a Python-based system for building goal-driven, self-improving AI agents. -| Package | Directory | Description | Tech Stack | -|---------|-----------|-------------|------------| -| **honeycomb** | `/honeycomb` | Frontend web application | React 18, TypeScript, Vite | -| **hive** | `/hive` | Backend API server | Node.js, Express, TypeScript | - -The repository uses **npm workspaces** to manage dependencies across packages from a single root `package.json`. 
+| Package | Directory | Description | Tech Stack | +| ------------- | ---------- | -------------------------------------------- | ----------------- | +| **framework** | `/core` | Core runtime, graph executor, protocols | Python 3.11+ | +| **tools** | `/tools` | 19 MCP tools for agent capabilities | Python 3.11+ | +| **exports** | `/exports` | Agent packages and examples | Python 3.11+ | +| **skills** | `.claude` | Claude Code skills for building/testing | Markdown | ### Key Principles -- **Single source of configuration**: Edit `config.yaml` once, environment files are auto-generated -- **Consistent tooling**: Both packages use TypeScript with strict mode -- **Docker-first**: Production deployments use containerized builds -- **Developer ergonomics**: Hot reload, clear error messages, minimal setup +- **Goal-Driven Development**: Define objectives, framework generates agent graphs +- **Self-Improving**: Agents adapt and evolve based on failures +- **SDK-Wrapped Nodes**: Built-in memory, monitoring, and tool access +- **Human-in-the-Loop**: Intervention points for human oversight +- **Production-Ready**: Evaluation, testing, and deployment infrastructure --- @@ -47,18 +43,17 @@ The repository uses **npm workspaces** to manage dependencies across packages fr Ensure you have installed: -- **Node.js v20+** - [Download](https://nodejs.org/) or use nvm: `nvm install 20` -- **npm v10+** - Comes with Node.js 20 -- **Docker v20.10+** - [Download](https://docs.docker.com/get-docker/) -- **Docker Compose v2+** - Included with Docker Desktop +- **Python 3.11+** - [Download](https://www.python.org/downloads/) (3.12 or 3.13 recommended) +- **pip** - Package installer for Python (comes with Python) +- **git** - Version control +- **Claude Code** - [Install](https://docs.anthropic.com/claude/docs/claude-code) (optional, for using building skills) Verify installation: ```bash -node --version # Should be v20.x.x -npm --version # Should be 10.x.x -docker --version # Should be 20.10+ 
-docker compose version # Should be v2.x.x +python --version # Should be 3.11+ +pip --version # Should be latest +git --version # Any recent version ``` ### Step-by-Step Setup @@ -68,41 +63,55 @@ docker compose version # Should be v2.x.x git clone https://github.com/adenhq/hive.git cd hive -# 2. Create your configuration file -cp config.yaml.example config.yaml - -# 3. (Optional) Edit config.yaml with your settings -# Most defaults work out of the box - -# 4. Run the automated setup -npm run setup +# 2. Run automated Python setup +./scripts/setup-python.sh ``` -The `setup` script performs these actions: -1. Installs all dependencies for root, honeycomb, and hive -2. Generates `.env` files from your `config.yaml` -3. Reports any issues +The setup script performs these actions: -### AI Agent Tools (Optional) +1. Checks Python version (3.10+ required, 3.11+ recommended) +2. Installs `framework` package from `/core` (editable mode) +3. Installs `aden_tools` package from `/tools` (editable mode) +4. Fixes package compatibility (upgrades openai for litellm) +5. Verifies all installations -If working with the agent framework: +### API Keys (Optional) + +For running agents with real LLMs: ```bash -# Set up aden-tools credentials -cd aden-tools -cp .env.example .env -# Edit .env with your ANTHROPIC_API_KEY and BRAVE_SEARCH_API_KEY +# Add to your shell profile (~/.bashrc, ~/.zshrc, etc.) 
+export ANTHROPIC_API_KEY="your-key-here" +export OPENAI_API_KEY="your-key-here" # Optional +export BRAVE_SEARCH_API_KEY="your-key-here" # Optional, for web search tool ``` +Get API keys: +- **Anthropic**: [console.anthropic.com](https://console.anthropic.com/) +- **OpenAI**: [platform.openai.com](https://platform.openai.com/) +- **Brave Search**: [brave.com/search/api](https://brave.com/search/api/) + +### Install Claude Code Skills + +```bash +# Install building-agents and testing-agent skills +./quickstart.sh +``` + +This installs: +- `/building-agents` - Build new goal-driven agents +- `/testing-agent` - Test agents with evaluation framework + ### Verify Setup ```bash -# Build both packages to verify everything works -npm run build +# Verify package imports +python -c "import framework; print('✓ framework OK')" +python -c "import aden_tools; print('✓ aden_tools OK')" +python -c "import litellm; print('✓ litellm OK')" -# Or run in development mode -npm run dev -w honeycomb # Terminal 1: Frontend at http://localhost:3000 -npm run dev -w hive # Terminal 2: Backend at http://localhost:4000 +# Run an example agent +PYTHONPATH=core:exports python -m support_ticket_agent validate ``` --- @@ -110,819 +119,325 @@ npm run dev -w hive # Terminal 2: Backend at http://localhost:4000 ## Project Structure ``` -hive/ # Repository root +hive/ # Repository root │ -├── .github/ # GitHub configuration +├── .github/ # GitHub configuration │ ├── workflows/ -│ │ ├── ci.yml # Runs on every PR: lint, test, build -│ │ └── release.yml # Runs on tags: publish Docker images -│ ├── ISSUE_TEMPLATE/ # Bug report & feature request templates -│ ├── PULL_REQUEST_TEMPLATE.md # PR description template -│ └── CODEOWNERS # Auto-assign reviewers +│ │ ├── ci.yml # Runs on every PR +│ │ └── release.yml # Runs on tags +│ ├── ISSUE_TEMPLATE/ # Bug report & feature request templates +│ ├── PULL_REQUEST_TEMPLATE.md # PR description template +│ └── CODEOWNERS # Auto-assign reviewers │ -├── docs/ # 
Documentation -│ ├── getting-started.md # Quick start guide -│ ├── configuration.md # Configuration reference -│ └── architecture.md # System architecture +├── .claude/ # Claude Code Skills +│ └── skills/ +│ ├── building-agents/ # Skills for building agents +│ │ ├── SKILL.md # Main skill definition +│ │ ├── building-agents-core/ +│ │ ├── building-agents-patterns/ +│ │ └── building-agents-construction/ +│ ├── testing-agent/ # Skills for testing agents +│ │ └── SKILL.md +│ └── agent-workflow/ # Complete workflow orchestration │ -├── honeycomb/ # FRONTEND PACKAGE +├── core/ # CORE FRAMEWORK PACKAGE +│ ├── framework/ # Main package code +│ │ ├── runner/ # AgentRunner - loads and runs agents +│ │ ├── executor/ # GraphExecutor - executes node graphs +│ │ ├── protocols/ # Standard protocols (hooks, tracing, etc.) +│ │ ├── llm/ # LLM provider integrations (Anthropic, OpenAI, etc.) +│ │ ├── memory/ # Memory systems (STM, LTM/RLM) +│ │ ├── tools/ # Tool registry and management +│ │ └── __init__.py +│ ├── pyproject.toml # Package metadata and dependencies +│ ├── requirements.txt # Python dependencies +│ ├── README.md # Framework documentation +│ ├── MCP_INTEGRATION_GUIDE.md # MCP server integration guide +│ └── docs/ # Protocol documentation +│ +├── tools/ # TOOLS PACKAGE (19 MCP tools) │ ├── src/ -│ │ ├── components/ # Reusable UI components -│ │ ├── hooks/ # Custom React hooks -│ │ │ └── useApi.ts # Hook for API calls -│ │ ├── pages/ # Route-level page components -│ │ │ ├── HomePage.tsx -│ │ │ └── NotFoundPage.tsx -│ │ ├── services/ # External service clients -│ │ │ └── api.ts # Backend API client -│ │ ├── styles/ # Global CSS -│ │ │ └── index.css -│ │ ├── types/ # TypeScript type definitions -│ │ │ └── index.ts -│ │ ├── utils/ # Utility functions -│ │ │ └── index.ts -│ │ ├── App.tsx # Root component with routing -│ │ ├── main.tsx # Application entry point -│ │ └── vite-env.d.ts # Vite type declarations -│ ├── public/ # Static assets (copied as-is) -│ │ └── favicon.svg -│ 
├── index.html # HTML template -│ ├── nginx.conf # Production nginx config -│ ├── package.json # Package dependencies & scripts -│ ├── tsconfig.json # TypeScript configuration -│ ├── tsconfig.node.json # TypeScript config for Vite -│ ├── vite.config.ts # Vite bundler configuration -│ ├── Dockerfile # Production Docker build -│ ├── Dockerfile.dev # Development Docker build -│ └── .env.example # Environment variable template +│ │ └── aden_tools/ +│ │ ├── tools/ # Individual tool implementations +│ │ │ ├── web_search_tool/ +│ │ │ ├── web_scrape_tool/ +│ │ │ ├── file_system_toolkits/ +│ │ │ └── ... # 19 tools total +│ │ ├── mcp_server.py # HTTP MCP server +│ │ └── __init__.py +│ ├── pyproject.toml # Package metadata +│ ├── requirements.txt # Python dependencies +│ └── README.md # Tools documentation │ -├── hive/ # BACKEND PACKAGE -│ ├── src/ -│ │ ├── config/ # Configuration loading -│ │ │ └── index.ts # Env var parsing & validation -│ │ ├── controllers/ # Request handlers (business logic) -│ │ ├── middleware/ # Express middleware -│ │ │ └── errorHandler.ts # Global error handling -│ │ ├── models/ # Data models / database schemas -│ │ ├── routes/ # API route definitions -│ │ │ ├── api.ts # /api/* routes -│ │ │ └── health.ts # Health check endpoints -│ │ ├── services/ # Business logic services -│ │ ├── types/ # TypeScript type definitions -│ │ │ └── index.ts -│ │ ├── utils/ # Utility functions -│ │ │ └── logger.ts # Structured logging -│ │ ├── index.ts # Application entry point -│ │ └── server.ts # Express server setup -│ ├── package.json # Package dependencies & scripts -│ ├── tsconfig.json # TypeScript configuration -│ ├── Dockerfile # Production Docker build -│ ├── Dockerfile.dev # Development Docker build -│ └── .env.example # Environment variable template +├── exports/ # AGENT PACKAGES +│ ├── support_ticket_agent/ # Example: Support ticket handler +│ ├── market_research_agent/ # Example: Market research +│ ├── outbound_sales_agent/ # Example: Sales outreach +│ ├── 
personal_assistant_agent/ # Example: Personal assistant +│ └── ... # More agent examples │ -├── scripts/ # Build & utility scripts -│ ├── setup.sh # First-time setup script -│ └── generate-env.ts # Generates .env from config.yaml +├── docs/ # Documentation +│ ├── getting-started.md # Quick start guide +│ ├── configuration.md # Configuration reference +│ ├── architecture.md # System architecture +│ └── articles/ # Technical articles │ -├── config.yaml.example # Configuration template (copy to config.yaml) -├── config.yaml # Your local configuration (git-ignored) -├── docker-compose.yml # Production Docker Compose -├── docker-compose.override.yml.example # Dev overrides template -├── docker-compose.override.yml # Your local dev overrides (git-ignored) +├── scripts/ # Build & utility scripts +│ ├── setup-python.sh # Python environment setup +│ └── setup.sh # Legacy setup script │ -├── package.json # Root package.json (workspaces config) -├── package-lock.json # Dependency lock file -├── tsconfig.base.json # Shared TypeScript settings -│ -├── .gitignore # Git ignore rules -├── .editorconfig # Editor formatting rules -├── .dockerignore # Docker ignore rules -│ -├── README.md # Project overview -├── DEVELOPER.md # This file -├── CONTRIBUTING.md # Contribution guidelines -├── CHANGELOG.md # Version history -├── LICENSE # Apache 2.0 License -├── CODE_OF_CONDUCT.md # Community guidelines -└── SECURITY.md # Security policy +├── quickstart.sh # Install Claude Code skills +├── ENVIRONMENT_SETUP.md # Complete Python setup guide +├── README.md # Project overview +├── DEVELOPER.md # This file +├── CONTRIBUTING.md # Contribution guidelines +├── CHANGELOG.md # Version history +├── ROADMAP.md # Product roadmap +├── LICENSE # Apache 2.0 License +├── CODE_OF_CONDUCT.md # Community guidelines +└── SECURITY.md # Security policy ``` --- -## Configuration System +## Building Agents -### How It Works +### Using Claude Code Skills -Instead of managing multiple `.env` files, you edit a 
single `config.yaml`: - -``` -┌─────────────────┐ -│ config.yaml │ ← You edit this one file -└────────┬────────┘ - │ - ▼ -┌─────────────────┐ -│ generate-env.ts │ ← Script transforms YAML to .env -└────────┬────────┘ - │ - ├──────────────────┬──────────────────┐ - ▼ ▼ ▼ -┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ -│ /.env │ │ /honeycomb/.env │ │ /hive/.env │ -│ (Docker Compose)│ │ (Frontend) │ │ (Backend) │ -└─────────────────┘ └─────────────────┘ └─────────────────┘ -``` - -### Configuration Reference - -The `config.yaml` file structure: - -```yaml -# =========================================== -# Application Configuration -# =========================================== - -# Application metadata -app: - name: hive # Used in logs and API responses - environment: development # development | staging | production - -# Server configuration -server: - frontend: - port: 3000 # Frontend port - host: "0.0.0.0" # Bind address - backend: - port: 4000 # Backend API port - host: "0.0.0.0" # Bind address - -# API configuration -api: - prefix: /api # API route prefix - cors: - origins: # Allowed CORS origins - - "http://localhost:3000" - - "http://localhost:4000" - -# Logging configuration -logging: - level: debug # debug | info | warn | error - format: pretty # pretty | json - -# Security settings -security: - jwt: - secret: "change-me-in-production-use-min-32-chars" - expiresIn: "7d" # Token expiration - -# Database configuration (when needed) -database: - host: localhost - port: 5432 - name: hive - user: postgres - password: postgres - -# Feature flags (optional) -features: - enableMetrics: true - enableSwagger: true -``` - -### Regenerating Environment Files - -After editing `config.yaml`, regenerate the `.env` files: +The fastest way to build agents is using the Claude Code skills: ```bash -npm run generate:env +# Install skills (one-time) +./quickstart.sh + +# Build a new agent +claude> /building-agents + +# Test the agent +claude> /testing-agent ``` -This 
is required because: -- Docker Compose reads from `.env` files -- Vite reads frontend env vars from `/honeycomb/.env` -- Node.js reads backend env vars from `/hive/.env` +### Agent Development Workflow ---- +1. **Define Your Goal** + ``` + claude> /building-agents + Enter goal: "Build an agent that processes customer support tickets" + ``` -## Development Workflow +2. **Design the Workflow** + - The skill guides you through defining nodes + - Each node is a unit of work (LLM call, function, router) + - Edges define how execution flows -### Option 1: Local Development (Recommended for Active Development) +3. **Generate the Agent** + - The skill generates a complete Python package in `exports/` + - Includes: `agent.json`, `tools.py`, `README.md` -Best for rapid iteration with instant hot reload: +4. **Validate the Agent** + ```bash + PYTHONPATH=core:exports python -m your_agent_name validate + ``` -```bash -# Terminal 1: Start frontend -npm run dev -w honeycomb +5. **Test the Agent** + ``` + claude> /testing-agent + ``` -# Terminal 2: Start backend -npm run dev -w hive -``` +### Manual Agent Development -| Service | URL | Hot Reload | -|---------|-----|------------| -| Frontend | http://localhost:3000 | Yes (Vite HMR) | -| Backend | http://localhost:4000 | Yes (tsx watch) | -| API Health | http://localhost:4000/health | - | +If you prefer to build agents manually: -### Option 2: Docker Development - -Best for testing Docker builds or when you need consistent environments: - -```bash -# Copy development overrides -cp docker-compose.override.yml.example docker-compose.override.yml - -# Start containers with hot reload -docker compose up - -# Or in detached mode -docker compose up -d - -# View logs -docker compose logs -f - -# Stop containers -docker compose down -``` - -### Option 3: Mixed Mode - -Run backend in Docker, frontend locally (useful for frontend-focused work): - -```bash -# Start only backend in Docker -docker compose up hive -d - -# Run frontend locally 
-npm run dev -w honeycomb -``` - -### Available NPM Scripts - -**Root level** (run from repository root): - -| Command | Description | -|---------|-------------| -| `npm run setup` | First-time setup (install + generate env) | -| `npm run generate:env` | Regenerate .env files from config.yaml | -| `npm run build` | Build all packages | -| `npm run build -w honeycomb` | Build frontend only | -| `npm run build -w hive` | Build backend only | -| `npm run lint` | Lint all packages | -| `npm run test` | Run all tests | -| `npm run clean` | Remove node_modules and build artifacts | - -**Frontend** (`/honeycomb`): - -| Command | Description | -|---------|-------------| -| `npm run dev` | Start Vite dev server with HMR | -| `npm run build` | Type-check and build for production | -| `npm run preview` | Preview production build locally | -| `npm run lint` | Lint with ESLint | -| `npm run test` | Run tests with Vitest | -| `npm run test:coverage` | Run tests with coverage report | - -**Backend** (`/hive`): - -| Command | Description | -|---------|-------------| -| `npm run dev` | Start with hot reload (tsx watch) | -| `npm run build` | Compile TypeScript to JavaScript | -| `npm run start` | Run compiled JavaScript | -| `npm run lint` | Lint with ESLint | -| `npm run test` | Run tests with Vitest | -| `npm run test:coverage` | Run tests with coverage report | - ---- - -## Working with the Frontend (honeycomb) - -### Tech Stack - -- **React 18** - UI library with hooks -- **TypeScript** - Type safety -- **Vite** - Build tool with instant HMR -- **React Router v6** - Client-side routing -- **Vitest** - Testing framework - -### Adding a New Page - -1. Create the page component: - -```tsx -// honeycomb/src/pages/UsersPage.tsx -import { useEffect, useState } from 'react'; -import { useApi } from '../hooks/useApi'; - -export function UsersPage() { - const { data, loading, error } = useApi('/api/users'); - - if (loading) return
<div>Loading...</div>
; - if (error) return
<div>Error: {error.message}</div>
; - - return ( -
-

<h1>Users</h1>

- -
- ); -} -``` - -2. Add the route in `App.tsx`: - -```tsx -// honeycomb/src/App.tsx -import { UsersPage } from './pages/UsersPage'; - -// Inside Routes: -} /> -``` - -### Adding a New Component - -```tsx -// honeycomb/src/components/Button.tsx -interface ButtonProps { - children: React.ReactNode; - onClick?: () => void; - variant?: 'primary' | 'secondary'; -} - -export function Button({ children, onClick, variant = 'primary' }: ButtonProps) { - return ( - - ); -} -``` - -### Making API Calls - -Use the provided `useApi` hook or the `api` service: - -```tsx -// Using the hook (recommended for components) -import { useApi } from '../hooks/useApi'; - -function MyComponent() { - const { data, loading, error, refetch } = useApi('/api/endpoint'); - // ... -} - -// Using the service directly (for non-component code) -import { api } from '../services/api'; - -async function fetchData() { - const response = await api.get('/api/endpoint'); - return response.data; -} -``` - -### Environment Variables in Frontend - -Access environment variables using `import.meta.env`: - -```tsx -// Only VITE_* prefixed variables are exposed to the frontend -const apiUrl = import.meta.env.VITE_API_URL; -const appName = import.meta.env.VITE_APP_NAME; -``` - -**Important**: Never put secrets in frontend environment variables. They are bundled into the JavaScript and visible to users. - -### Path Aliases - -Use `@/` to import from the `src` directory: - -```tsx -// Instead of: -import { Button } from '../../../components/Button'; - -// Use: -import { Button } from '@/components/Button'; -``` - ---- - -## Working with the Backend (hive) - -### Tech Stack - -- **Node.js 20** - Runtime -- **Express** - Web framework -- **TypeScript** - Type safety -- **tsx** - TypeScript execution with hot reload -- **Zod** - Runtime validation (recommended) -- **Vitest** - Testing framework - -### Adding a New API Endpoint - -1. 
Create the route file: - -```typescript -// hive/src/routes/users.ts -import { Router } from 'express'; -import type { Request, Response } from 'express'; - -const router = Router(); - -// GET /api/users -router.get('/', async (req: Request, res: Response) => { - try { - const users = await getUsersFromDatabase(); - res.json(users); - } catch (error) { - res.status(500).json({ error: 'Failed to fetch users' }); - } -}); - -// GET /api/users/:id -router.get('/:id', async (req: Request, res: Response) => { - const { id } = req.params; - try { - const user = await getUserById(id); - if (!user) { - return res.status(404).json({ error: 'User not found' }); - } - res.json(user); - } catch (error) { - res.status(500).json({ error: 'Failed to fetch user' }); - } -}); - -// POST /api/users -router.post('/', async (req: Request, res: Response) => { - const { name, email } = req.body; - try { - const user = await createUser({ name, email }); - res.status(201).json(user); - } catch (error) { - res.status(500).json({ error: 'Failed to create user' }); - } -}); - -export default router; -``` - -2. Register the route in `api.ts`: - -```typescript -// hive/src/routes/api.ts -import usersRouter from './users'; - -// Add to the router: -router.use('/users', usersRouter); -``` - -### Request Validation with Zod - -```typescript -// hive/src/routes/users.ts -import { z } from 'zod'; - -const createUserSchema = z.object({ - name: z.string().min(1).max(100), - email: z.string().email(), - age: z.number().int().positive().optional(), -}); - -router.post('/', async (req: Request, res: Response) => { - const result = createUserSchema.safeParse(req.body); - - if (!result.success) { - return res.status(400).json({ - error: 'Validation failed', - details: result.error.issues - }); - } - - const { name, email, age } = result.data; - // ... 
create user -}); -``` - -### Adding Middleware - -```typescript -// hive/src/middleware/auth.ts -import type { Request, Response, NextFunction } from 'express'; - -export function requireAuth(req: Request, res: Response, next: NextFunction) { - const token = req.headers.authorization?.replace('Bearer ', ''); - - if (!token) { - return res.status(401).json({ error: 'Authentication required' }); - } - - try { - const decoded = verifyToken(token); - req.user = decoded; - next(); - } catch { - res.status(401).json({ error: 'Invalid token' }); - } -} - -// Usage in routes: -router.get('/protected', requireAuth, (req, res) => { - res.json({ user: req.user }); -}); -``` - -### Logging - -Use the built-in logger for consistent structured logging: - -```typescript -import { logger } from '../utils/logger'; - -// Different log levels -logger.debug('Detailed debug info', { userId: 123 }); -logger.info('User logged in', { userId: 123 }); -logger.warn('Rate limit approaching', { currentRate: 95 }); -logger.error('Database connection failed', { error: err.message }); -``` - -### Environment Variables in Backend - -Access via `process.env` or the config module: - -```typescript -// Direct access -const port = process.env.PORT || 4000; - -// Or via config (recommended - adds validation) -import { config } from '../config'; -const port = config.port; -``` - ---- - -## Docker Development - -### Docker Compose Files - -| File | Purpose | -|------|---------| -| `docker-compose.yml` | Base configuration (production-like) | -| `docker-compose.override.yml` | Development overrides (hot reload, debug ports) | - -When you run `docker compose up`, Docker automatically merges both files. 
- -### Building Images - -```bash -# Build all images -docker compose build - -# Build specific service -docker compose build honeycomb -docker compose build hive - -# Build with no cache (fresh build) -docker compose build --no-cache -``` - -### Running Containers - -```bash -# Start all services -docker compose up - -# Start in background -docker compose up -d - -# Start specific service -docker compose up hive - -# View logs -docker compose logs -f -docker compose logs -f hive # Specific service - -# Stop all services -docker compose down - -# Stop and remove volumes -docker compose down -v -``` - -### Debugging in Docker - -The development override exposes debug ports: - -- **Backend debug port**: 9229 (Node.js inspector) - -To debug the backend in VS Code: - -1. Add to `.vscode/launch.json`: - -```json +```python +# exports/my_agent/agent.json { - "version": "0.2.0", - "configurations": [ + "goal": { + "goal_id": "support_ticket", + "name": "Support Ticket Handler", + "description": "Process customer support tickets", + "success_criteria": "Ticket is categorized, prioritized, and routed correctly" + }, + "nodes": [ { - "name": "Attach to Docker", - "type": "node", - "request": "attach", - "port": 9229, - "address": "localhost", - "localRoot": "${workspaceFolder}/hive", - "remoteRoot": "/app", - "restart": true + "node_id": "analyze", + "name": "Analyze Ticket", + "node_type": "llm", + "system_prompt": "Analyze this support ticket...", + "input_keys": ["ticket_content"], + "output_keys": ["category", "priority"] + } + ], + "edges": [ + { + "edge_id": "start_to_analyze", + "source": "START", + "target": "analyze", + "condition": "on_success" } ] } ``` -2. Start containers: `docker compose up` -3. 
In VS Code, press F5 or select "Attach to Docker" - -### Useful Docker Commands +### Running Agents ```bash -# Execute command in running container -docker compose exec hive sh -docker compose exec honeycomb sh +# Validate agent structure +PYTHONPATH=core:exports python -m agent_name validate -# View container resource usage -docker stats +# Show agent information +PYTHONPATH=core:exports python -m agent_name info -# Remove all stopped containers -docker container prune +# Run agent with input +PYTHONPATH=core:exports python -m agent_name run --input '{ + "ticket_content": "My login is broken", + "customer_id": "CUST-123" +}' -# Remove unused images -docker image prune +# Run in mock mode (no LLM calls) +PYTHONPATH=core:exports python -m agent_name run --mock --input '{...}' ``` --- -## Testing +## Testing Agents -### Running Tests +### Using the Testing Agent Skill ```bash -# Run all tests -npm run test - -# Run tests for specific package -npm run test -w honeycomb -npm run test -w hive - -# Run with coverage -npm run test:coverage -w honeycomb -npm run test:coverage -w hive - -# Run in watch mode (re-runs on file changes) -cd honeycomb && npm run test -- --watch -cd hive && npm run test -- --watch +# Run tests for an agent +claude> /testing-agent ``` -### Writing Frontend Tests +This generates and runs: +- **Constraint tests** - Verify agent respects constraints +- **Success tests** - Verify agent achieves success criteria +- **Integration tests** - End-to-end workflows -```tsx -// honeycomb/src/components/Button.test.tsx -import { render, screen, fireEvent } from '@testing-library/react'; -import { describe, it, expect, vi } from 'vitest'; -import { Button } from './Button'; +### Manual Testing -describe('Button', () => { - it('renders children', () => { - render(); - expect(screen.getByText('Click me')).toBeInTheDocument(); - }); +```bash +# Run all tests for an agent +PYTHONPATH=core:exports python -m agent_name test - it('calls onClick when clicked', () => { 
- const handleClick = vi.fn(); - render(); +# Run specific test type +PYTHONPATH=core:exports python -m agent_name test --type constraint +PYTHONPATH=core:exports python -m agent_name test --type success - fireEvent.click(screen.getByText('Click me')); +# Run with parallel execution +PYTHONPATH=core:exports python -m agent_name test --parallel 4 - expect(handleClick).toHaveBeenCalledTimes(1); - }); -}); +# Fail fast (stop on first failure) +PYTHONPATH=core:exports python -m agent_name test --fail-fast ``` -### Writing Backend Tests +### Writing Custom Tests -```typescript -// hive/src/routes/health.test.ts -import { describe, it, expect } from 'vitest'; -import request from 'supertest'; -import { app } from '../server'; +```python +# exports/my_agent/tests/test_custom.py +import pytest +from framework.runner import AgentRunner -describe('Health Routes', () => { - it('GET /health returns healthy status', async () => { - const response = await request(app).get('/health'); +def test_ticket_categorization(): + """Test that tickets are categorized correctly""" + runner = AgentRunner.from_file("exports/my_agent/agent.json") - expect(response.status).toBe(200); - expect(response.body).toMatchObject({ - status: 'healthy', - }); - }); + result = runner.run({ + "ticket_content": "I can't log in to my account" + }) - it('GET /health/ready returns ready status', async () => { - const response = await request(app).get('/health/ready'); - - expect(response.status).toBe(200); - expect(response.body.ready).toBe(true); - }); -}); + assert result["category"] == "authentication" + assert result["priority"] in ["high", "medium", "low"] ``` --- ## Code Style & Conventions -### TypeScript +### Python Code Style -- **Strict mode enabled** - No implicit any, strict null checks -- **Explicit return types** on exported functions -- **Interface over type** for object shapes (unless unions needed) -- **Readonly** where possible +- **PEP 8** - Follow Python style guide +- **Type hints** - Use 
for function signatures and class attributes +- **Docstrings** - Document classes and public functions +- **Black** - Code formatter (run with `black .`) -```typescript -// Good -interface User { - readonly id: string; - name: string; - email: string; -} +```python +# Good +from typing import Optional, Dict, Any -export function getUser(id: string): Promise { - // ... -} +def process_ticket( + ticket_content: str, + customer_id: str, + priority: Optional[str] = None +) -> Dict[str, Any]: + """ + Process a customer support ticket. -// Avoid -export function getUser(id) { // Missing types - // ... -} + Args: + ticket_content: The content of the ticket + customer_id: The customer's ID + priority: Optional priority override + + Returns: + Dictionary with processing results + """ + # Implementation + return {"status": "processed", "id": ticket_id} + +# Avoid +def process_ticket(ticket_content, customer_id, priority=None): + # No types, no docstring + return {"status": "processed", "id": ticket_id} ``` -### React Components +### Agent Package Structure -- **Functional components** only (no class components) -- **Named exports** for components -- **Props interface** defined above component - -```tsx -// Good -interface ButtonProps { - children: React.ReactNode; - onClick?: () => void; -} - -export function Button({ children, onClick }: ButtonProps) { - return ; -} - -// Avoid -export default function({ children, onClick }) { // Missing types, default export - return ; -} +``` +my_agent/ +├── __init__.py # Package initialization +├── __main__.py # CLI entry point +├── agent.json # Agent definition (nodes, edges, goal) +├── tools.py # Custom tools (optional) +├── mcp_servers.json # MCP server config (optional) +├── README.md # Agent documentation +└── tests/ # Test files + ├── __init__.py + ├── test_constraint.py # Constraint tests + └── test_success.py # Success criteria tests ``` ### File Naming -| Type | Convention | Example | -|------|------------|---------| -| 
Components | PascalCase | `UserCard.tsx` | -| Hooks | camelCase with `use` prefix | `useAuth.ts` | -| Utilities | camelCase | `formatDate.ts` | -| Types | PascalCase | `User.ts` or in `types/index.ts` | -| Tests | Same as file + `.test` | `UserCard.test.tsx` | -| Styles | Same as component | `UserCard.css` | +| Type | Convention | Example | +| ------------------- | ------------------------ | --------------------------- | +| Modules | snake_case | `ticket_handler.py` | +| Classes | PascalCase | `TicketHandler` | +| Functions/Variables | snake_case | `process_ticket()` | +| Constants | UPPER_SNAKE_CASE | `MAX_RETRIES = 3` | +| Test files | `test_` prefix | `test_ticket_handler.py` | +| Agent packages | snake_case | `support_ticket_agent/` | ### Import Order -1. External packages -2. Internal absolute imports (`@/...`) -3. Relative imports -4. Style imports +1. Standard library +2. Third-party packages +3. Framework imports +4. Local imports -```tsx -// External -import { useState, useEffect } from 'react'; -import { useNavigate } from 'react-router-dom'; +```python +# Standard library +import json +from typing import Dict, Any -// Internal absolute -import { Button } from '@/components/Button'; -import { useApi } from '@/hooks/useApi'; +# Third-party +import litellm +from pydantic import BaseModel -// Relative -import { formatUserName } from './utils'; +# Framework +from framework.runner import AgentRunner +from framework.context import NodeContext -// Styles -import './UserCard.css'; +# Local +from .tools import custom_tool ``` --- @@ -952,6 +467,7 @@ Follow [Conventional Commits](https://www.conventionalcommits.org/): ``` **Types:** + - `feat` - New feature - `fix` - Bug fix - `docs` - Documentation only @@ -991,11 +507,13 @@ chore(deps): update React to 18.2.0 ### Frontend Debugging **React Developer Tools:** + 1. Install the [React DevTools browser extension](https://react.dev/learn/react-developer-tools) 2. Open browser DevTools → React tab 3. 
Inspect component tree, props, state, and hooks **VS Code Debugging:** + 1. Add Chrome debug configuration to `.vscode/launch.json`: ```json @@ -1014,6 +532,7 @@ chore(deps): update React to 18.2.0 ### Backend Debugging **VS Code Debugging:** + 1. Add Node debug configuration: ```json @@ -1032,13 +551,14 @@ chore(deps): update React to 18.2.0 3. Press F5 to start debugging **Logging:** + ```typescript -import { logger } from '../utils/logger'; +import { logger } from "../utils/logger"; // Add debug logs -logger.debug('Processing request', { +logger.debug("Processing request", { userId: req.user.id, - body: req.body + body: req.body, }); ``` @@ -1046,80 +566,122 @@ logger.debug('Processing request', { ## Common Tasks -### Adding a New Dependency +### Adding Python Dependencies ```bash -# Add to frontend -npm install -w honeycomb +# Add to core framework +cd core +pip install +# Then add to requirements.txt or pyproject.toml -# Add to backend -npm install -w hive +# Add to tools package +cd tools +pip install +# Then add to requirements.txt or pyproject.toml -# Add dev dependency -npm install -D -w honeycomb - -# Add to root (shared tooling) -npm install -D -w . +# Reinstall in editable mode +pip install -e . ``` -### Updating Dependencies +### Creating a New Agent ```bash -# Check for outdated packages -npm outdated +# Option 1: Use Claude Code skill (recommended) +claude> /building-agents -# Update all to latest minor/patch -npm update +# Option 2: Copy from example +cp -r exports/support_ticket_agent exports/my_new_agent +cd exports/my_new_agent +# Edit agent.json, tools.py, README.md -# Update specific package -npm install @latest -w honeycomb +# Option 3: Use the agent builder MCP tools (advanced) +# See core/MCP_BUILDER_TOOLS_GUIDE.md ``` -### Adding Environment Variables +### Adding Custom Tools to an Agent -1. 
Add to `config.yaml.example` (template): +```python +# exports/my_agent/tools.py +from typing import Dict, Any -```yaml -myService: - apiKey: "your-api-key-here" +def my_custom_tool(param1: str, param2: int) -> Dict[str, Any]: + """ + Description of what this tool does. + + Args: + param1: Description of param1 + param2: Description of param2 + + Returns: + Dictionary with tool results + """ + # Implementation + return {"result": "success", "data": ...} + +# Register tool in agent.json +{ + "nodes": [ + { + "node_id": "use_tool", + "node_type": "function", + "tools": ["my_custom_tool"], + ... + } + ] +} ``` -2. Add to your local `config.yaml`: - -```yaml -myService: - apiKey: "actual-api-key" -``` - -3. Update `scripts/generate-env.ts` to output the new variable - -4. Regenerate env files: +### Adding MCP Server Integration ```bash -npm run generate:env +# 1. Create mcp_servers.json in your agent package +# exports/my_agent/mcp_servers.json +{ + "tools": { + "transport": "stdio", + "command": "python", + "args": ["-m", "aden_tools.mcp_server"], + "cwd": "tools/", + "description": "File system and web tools" + } +} + +# 2. Reference tools in agent.json +{ + "nodes": [ + { + "node_id": "search", + "tools": ["web_search", "web_scrape"], + ... + } + ] +} ``` -5. Access in code: - -```typescript -// Backend -const apiKey = process.env.MY_SERVICE_API_KEY; - -// Frontend (must be prefixed with VITE_) -const apiKey = import.meta.env.VITE_MY_SERVICE_API_KEY; -``` - -### Database Migrations (when added) +### Setting Environment Variables ```bash -# Create a new migration -npm run migration:create -w hive -- --name add-users-table +# Add to your shell profile (~/.bashrc, ~/.zshrc, etc.) 
+export ANTHROPIC_API_KEY="your-key-here" +export OPENAI_API_KEY="your-key-here" +export BRAVE_SEARCH_API_KEY="your-key-here" -# Run pending migrations -npm run migration:run -w hive +# Or create .env file (not committed to git) +echo 'ANTHROPIC_API_KEY=your-key-here' >> .env +``` -# Rollback last migration -npm run migration:rollback -w hive +### Debugging Agent Execution + +```python +# Add debug logging to your agent +import logging +logging.basicConfig(level=logging.DEBUG) + +# Run with verbose output +PYTHONPATH=core:exports python -m agent_name run --input '{...}' --verbose + +# Use mock mode to test without LLM calls +PYTHONPATH=core:exports python -m agent_name run --mock --input '{...}' ``` --- @@ -1206,4 +768,4 @@ npm run test -w honeycomb -- --clearCache --- -*Happy coding!* 🐝 +_Happy coding!_ 🐝 diff --git a/ENVIRONMENT_SETUP.md b/ENVIRONMENT_SETUP.md new file mode 100644 index 00000000..8a518874 --- /dev/null +++ b/ENVIRONMENT_SETUP.md @@ -0,0 +1,347 @@ +# Agent Development Environment Setup + +Complete setup guide for building and running goal-driven agents with the Aden Agent Framework. + +## Quick Setup + +```bash +# Run the automated setup script +./scripts/setup-python.sh +``` + +This will: + +- Check Python version (requires 3.10+, recommends 3.11+) +- Install the core framework package (`framework`) +- Install the tools package (`aden_tools`) +- Fix package compatibility issues (openai + litellm) +- Verify all installations + +## Manual Setup (Alternative) + +If you prefer to set up manually or the script fails: + +### 1. Install Core Framework + +```bash +cd core +pip install -e . +``` + +### 2. Install Tools Package + +```bash +cd tools +pip install -e . +``` + +### 3. Upgrade OpenAI Package + +```bash +# litellm requires openai >= 1.0.0 +pip install --upgrade "openai>=1.0.0" +``` + +### 4. 
Verify Installation + +```bash +python -c "import framework; print('✓ framework OK')" +python -c "import aden_tools; print('✓ aden_tools OK')" +python -c "import litellm; print('✓ litellm OK')" +``` + +## Requirements + +### Python Version + +- **Minimum:** Python 3.10 +- **Recommended:** Python 3.11 or 3.12 +- **Tested on:** Python 3.11, 3.12, 3.13 + +### System Requirements + +- pip (latest version) +- 2GB+ RAM +- Internet connection (for LLM API calls) + +### API Keys (Optional) + +For running agents with real LLMs: + +```bash +export ANTHROPIC_API_KEY="your-key-here" +``` + +## Running Agents + +All agent commands must be run from the project root with `PYTHONPATH` set: + +```bash +# From /home/timothy/oss/hive/ directory +PYTHONPATH=core:exports python -m agent_name COMMAND +``` + +### Example: Support Ticket Agent + +```bash +# Validate agent structure +PYTHONPATH=core:exports python -m support_ticket_agent validate + +# Show agent information +PYTHONPATH=core:exports python -m support_ticket_agent info + +# Run agent with input +PYTHONPATH=core:exports python -m support_ticket_agent run --input '{ + "ticket_content": "My login is broken. Error 401.", + "customer_id": "CUST-123", + "ticket_id": "TKT-456" +}' + +# Run in mock mode (no LLM calls) +PYTHONPATH=core:exports python -m support_ticket_agent run --mock --input '{...}' +``` + +### Example: Other Agents + +```bash +# Market Research Agent +PYTHONPATH=core:exports python -m market_research_agent info + +# Outbound Sales Agent +PYTHONPATH=core:exports python -m outbound_sales_agent validate + +# Personal Assistant Agent +PYTHONPATH=core:exports python -m personal_assistant_agent run --input '{...}' +``` + +## Building New Agents + +Use Claude Code CLI with the agent building skills: + +### 1. Install Skills (One-time) + +```bash +./quickstart.sh +``` + +This installs: + +- `/building-agents` - Build new agents +- `/testing-agent` - Test agents + +### 2. 
Build an Agent + +``` +claude> /building-agents +``` + +Follow the prompts to: + +1. Define your agent's goal +2. Design the workflow nodes +3. Connect edges +4. Generate the agent package + +### 3. Test Your Agent + +``` +claude> /testing-agent +``` + +Creates comprehensive test suites for your agent. + +## Troubleshooting + +### "ModuleNotFoundError: No module named 'framework'" + +**Solution:** Install the core package: + +```bash +cd core && pip install -e . +``` + +### "ModuleNotFoundError: No module named 'aden_tools'" + +**Solution:** Install the tools package: + +```bash +cd tools && pip install -e . +``` + +Or run the setup script: + +```bash +./scripts/setup-python.sh +``` + +### "ModuleNotFoundError: No module named 'openai.\_models'" + +**Cause:** Outdated `openai` package (0.27.x) incompatible with `litellm` + +**Solution:** Upgrade openai: + +```bash +pip install --upgrade "openai>=1.0.0" +``` + +### "No module named 'support_ticket_agent'" + +**Cause:** Not running from project root or missing PYTHONPATH + +**Solution:** Ensure you're in `/home/timothy/oss/hive/` and use: + +```bash +PYTHONPATH=core:exports python -m support_ticket_agent validate +``` + +### Agent imports fail with "broken installation" + +**Symptom:** `pip list` shows packages pointing to non-existent directories + +**Solution:** Reinstall packages properly: + +```bash +# Remove broken installations +pip uninstall -y framework tools aden-tools + +# Reinstall correctly +cd /home/timothy/oss/hive +./scripts/setup-python.sh +``` + +## Package Structure + +The Hive framework consists of three Python packages: + +``` +hive/ +├── core/ # Core framework (runtime, graph executor, LLM providers) +│ ├── framework/ +│ ├── pyproject.toml +│ └── requirements.txt +│ +├── tools/ # Tools and MCP servers +│ ├── src/ +│ │ └── aden_tools/ # Actual package location +│ ├── pyproject.toml +│ └── README.md +│ +└── exports/ # Agent packages (your agents go here) + ├── support_ticket_agent/ + ├── 
market_research_agent/ + ├── outbound_sales_agent/ + └── personal_assistant_agent/ +``` + +### Why PYTHONPATH is Required + +The packages are installed in **editable mode** (`pip install -e`), which means: + +- `framework` and `aden_tools` are globally importable (no PYTHONPATH needed) +- `exports` is NOT installed as a package (PYTHONPATH required) + +This design allows agents in `exports/` to be: + +- Developed independently +- Version controlled separately +- Deployed as standalone packages + +## Development Workflow + +### 1. Setup (Once) + +```bash +./scripts/setup-python.sh +``` + +### 2. Build Agent (Claude Code) + +``` +claude> /building-agents +Enter goal: "Build an agent that processes customer support tickets" +``` + +### 3. Validate Agent + +```bash +PYTHONPATH=core:exports python -m support_ticket_agent validate +``` + +### 4. Test Agent + +``` +claude> /testing-agent +``` + +### 5. Run Agent + +```bash +PYTHONPATH=core:exports python -m support_ticket_agent run --input '{...}' +``` + +## IDE Setup + +### VSCode + +Add to `.vscode/settings.json`: + +```json +{ + "python.analysis.extraPaths": [ + "${workspaceFolder}/core", + "${workspaceFolder}/exports" + ], + "python.autoComplete.extraPaths": [ + "${workspaceFolder}/core", + "${workspaceFolder}/exports" + ] +} +``` + +### PyCharm + +1. Open Project Settings → Project Structure +2. Mark `core` as Sources Root +3. Mark `exports` as Sources Root + +## Environment Variables + +### Required for LLM Operations + +```bash +export ANTHROPIC_API_KEY="sk-ant-..." 
+``` + +### Optional Configuration + +```bash +# Credentials storage location (default: ~/.aden/credentials) +export ADEN_CREDENTIALS_PATH="/custom/path" + +# Agent storage location (default: /tmp) +export AGENT_STORAGE_PATH="/custom/storage" +``` + +## Additional Resources + +- **Framework Documentation:** [core/README.md](core/README.md) +- **Tools Documentation:** [tools/README.md](tools/README.md) +- **Example Agents:** [exports/](exports/) +- **Agent Building Guide:** [.claude/skills/building-agents-construction/SKILL.md](.claude/skills/building-agents-construction/SKILL.md) +- **Testing Guide:** [.claude/skills/testing-agent/SKILL.md](.claude/skills/testing-agent/SKILL.md) + +## Contributing + +When contributing agent packages: + +1. Place agents in `exports/agent_name/` +2. Follow the standard agent structure (see existing agents) +3. Include README.md with usage instructions +4. Add tests if using `/testing-agent` +5. Document required environment variables + +## Support + +- **Issues:** https://github.com/adenhq/hive/issues +- **Discord:** https://discord.com/invite/MXE49hrKDk +- **Documentation:** https://docs.adenhq.com/ diff --git a/README.es.md b/README.es.md new file mode 100644 index 00000000..0ebf5aa5 --- /dev/null +++ b/README.es.md @@ -0,0 +1,339 @@ +

+ Hive Banner +

+ +

+ English | + 简体中文 | + Español | + Português | + 日本語 | + Русский +

+ +[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/adenhq/hive/blob/main/LICENSE) +[![Y Combinator](https://img.shields.io/badge/Y%20Combinator-Aden-orange)](https://www.ycombinator.com/companies/aden) +[![Docker Pulls](https://img.shields.io/docker/pulls/adenhq/hive?logo=Docker&labelColor=%23528bff)](https://hub.docker.com/u/adenhq) +[![Discord](https://img.shields.io/discord/1172610340073242735?logo=discord&labelColor=%235462eb&logoColor=%23f5f5f5&color=%235462eb)](https://discord.com/invite/MXE49hrKDk) +[![Twitter Follow](https://img.shields.io/twitter/follow/teamaden?logo=X&color=%23f5f5f5)](https://x.com/aden_hq) +[![LinkedIn](https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff)](https://www.linkedin.com/company/teamaden/) + +

+ AI Agents + Multi-Agent + Goal-Driven + HITL + Production +

+

+ OpenAI + Anthropic + Gemini + MCP +

+ +## Descripción General + +Construye agentes de IA confiables y auto-mejorables sin codificar flujos de trabajo. Define tu objetivo a través de una conversación con un agente de codificación, y el framework genera un grafo de nodos con código de conexión creado dinámicamente. Cuando algo falla, el framework captura los datos del error, evoluciona el agente a través del agente de codificación y lo vuelve a desplegar. Los nodos de intervención humana integrados, la gestión de credenciales y el monitoreo en tiempo real te dan control sin sacrificar la adaptabilidad. + +Visita [adenhq.com](https://adenhq.com) para documentación completa, ejemplos y guías. + +## ¿Qué es Aden? + +

+ Aden Architecture +

+ +Aden es una plataforma para construir, desplegar, operar y adaptar agentes de IA: + +- **Construir** - Un Agente de Codificación genera Agentes de Trabajo especializados (Ventas, Marketing, Operaciones) a partir de objetivos en lenguaje natural +- **Desplegar** - Despliegue headless con integración CI/CD y gestión completa del ciclo de vida de API +- **Operar** - Monitoreo en tiempo real, observabilidad y guardarraíles de ejecución mantienen los agentes confiables +- **Adaptar** - Evaluación continua, supervisión y adaptación aseguran que los agentes mejoren con el tiempo +- **Infraestructura** - Memoria compartida, integraciones LLM, herramientas y habilidades impulsan cada agente + +## Enlaces Rápidos + +- **[Documentación](https://docs.adenhq.com/)** - Guías completas y referencia de API +- **[Guía de Auto-Hospedaje](https://docs.adenhq.com/getting-started/quickstart)** - Despliega Hive en tu infraestructura +- **[Registro de Cambios](https://github.com/adenhq/hive/releases)** - Últimas actualizaciones y versiones + +- **[Reportar Problemas](https://github.com/adenhq/hive/issues)** - Reportes de bugs y solicitudes de funciones + +## Inicio Rápido + +### Prerrequisitos + +- [Python 3.11+](https://www.python.org/downloads/) - Para desarrollo de agentes +- [Docker](https://docs.docker.com/get-docker/) (v20.10+) - Opcional, para herramientas en contenedores + +### Instalación + +```bash +# Clonar el repositorio +git clone https://github.com/adenhq/hive.git +cd hive + +# Ejecutar configuración del entorno Python +./scripts/setup-python.sh +``` + +Esto instala: +- **framework** - Runtime del agente principal y ejecutor de grafos +- **aden_tools** - 19 herramientas MCP para capacidades de agentes +- Todas las dependencias requeridas + +### Construye Tu Primer Agente + +```bash +# Instalar habilidades de Claude Code (una vez) +./quickstart.sh + +# Construir un agente usando Claude Code +claude> /building-agents + +# Probar tu agente +claude> /testing-agent + +# 
Ejecutar tu agente +PYTHONPATH=core:exports python -m your_agent_name run --input '{...}' +``` + +**[📖 Guía de Configuración Completa](ENVIRONMENT_SETUP.md)** - Instrucciones detalladas para desarrollo de agentes + +## Características + +- **Desarrollo Orientado a Objetivos** - Define objetivos en lenguaje natural; el agente de codificación genera el grafo de agentes y el código de conexión para lograrlos +- **Agentes Auto-Adaptables** - El framework captura fallos, actualiza objetivos y actualiza el grafo de agentes +- **Conexiones de Nodos Dinámicas** - Sin aristas predefinidas; el código de conexión es generado por cualquier LLM capaz basado en tus objetivos +- **Nodos Envueltos en SDK** - Cada nodo obtiene memoria compartida, memoria RLM local, monitoreo, herramientas y acceso LLM de serie +- **Humano en el Bucle** - Nodos de intervención que pausan la ejecución para entrada humana con tiempos de espera y escalación configurables +- **Observabilidad en Tiempo Real** - Streaming WebSocket para monitoreo en vivo de ejecución de agentes, decisiones y comunicación entre nodos +- **Control de Costos y Presupuesto** - Establece límites de gasto, limitadores y políticas de degradación automática de modelos +- **Listo para Producción** - Auto-hospedable, construido para escala y confiabilidad + +## Por Qué Aden + +Los frameworks de agentes tradicionales requieren que diseñes manualmente flujos de trabajo, definas interacciones de agentes y manejes fallos de forma reactiva. Aden invierte este paradigma—**describes resultados, y el sistema se construye solo**. + +```mermaid +flowchart LR + subgraph BUILD["🏗️ BUILD"] + GOAL["Define Goal
+ Success Criteria"] --> NODES["Add Nodes
LLM/Router/Function"] + NODES --> EDGES["Connect Edges
on_success/failure/conditional"] + EDGES --> TEST["Test & Validate"] --> APPROVE["Approve & Export"] + end + + subgraph EXPORT["📦 EXPORT"] + direction TB + JSON["agent.json
(GraphSpec)"] + TOOLS["tools.py
(Functions)"] + MCP["mcp_servers.json
(Integrations)"] + end + + subgraph RUN["🚀 RUNTIME"] + LOAD["AgentRunner
Load + Parse"] --> SETUP["Setup Runtime
+ ToolRegistry"] + SETUP --> EXEC["GraphExecutor
Execute Nodes"] + + subgraph DECISION["Decision Recording"] + DEC1["runtime.decide()
intent → options → choice"] + DEC2["runtime.record_outcome()
success, result, metrics"] + end + end + + subgraph INFRA["⚙️ INFRASTRUCTURE"] + CTX["NodeContext
memory • llm • tools"] + STORE[("FileStorage
Runs & Decisions")] + end + + APPROVE --> EXPORT + EXPORT --> LOAD + EXEC --> DECISION + EXEC --> CTX + DECISION --> STORE + STORE -.->|"Analyze & Improve"| NODES + + style BUILD fill:#ffbe42,stroke:#cc5d00,stroke-width:3px,color:#333 + style EXPORT fill:#fff59d,stroke:#ed8c00,stroke-width:2px,color:#333 + style RUN fill:#ffb100,stroke:#cc5d00,stroke-width:3px,color:#333 + style DECISION fill:#ffcc80,stroke:#ed8c00,stroke-width:2px,color:#333 + style INFRA fill:#e8763d,stroke:#cc5d00,stroke-width:3px,color:#fff + style STORE fill:#ed8c00,stroke:#cc5d00,stroke-width:2px,color:#fff +``` + +### La Ventaja de Aden + +| Frameworks Tradicionales | Aden | +|--------------------------|------| +| Codificar flujos de trabajo de agentes | Describir objetivos en lenguaje natural | +| Definición manual de grafos | Grafos de agentes auto-generados | +| Manejo reactivo de errores | Auto-evolución proactiva | +| Configuraciones de herramientas estáticas | Nodos dinámicos envueltos en SDK | +| Configuración de monitoreo separada | Observabilidad en tiempo real integrada | +| Gestión de presupuesto DIY | Controles de costos y degradación integrados | + +### Cómo Funciona + +1. **Define Tu Objetivo** → Describe lo que quieres lograr en lenguaje simple +2. **El Agente de Codificación Genera** → Crea el grafo de agentes, código de conexión y casos de prueba +3. **Los Trabajadores Ejecutan** → Los nodos envueltos en SDK se ejecutan con observabilidad completa y acceso a herramientas +4. **El Plano de Control Monitorea** → Métricas en tiempo real, aplicación de presupuesto, gestión de políticas +5. **Auto-Mejora** → En caso de fallo, el sistema evoluciona el grafo y lo vuelve a desplegar automáticamente + +## Cómo se Compara Aden + +Aden adopta un enfoque fundamentalmente diferente al desarrollo de agentes. 
Mientras que la mayoría de los frameworks requieren que codifiques flujos de trabajo o definas manualmente grafos de agentes, Aden usa un **agente de codificación para generar todo tu sistema de agentes** a partir de objetivos en lenguaje natural. Cuando los agentes fallan, el framework no solo registra errores—**evoluciona automáticamente el grafo de agentes** y lo vuelve a desplegar. + +> **Nota:** Para la tabla de comparación detallada de frameworks y preguntas frecuentes, consulta el [README.md](README.md) en inglés. + +### Cuándo Elegir Aden + +Elige Aden cuando necesites: + +- Agentes que **se auto-mejoren a partir de fallos** sin intervención manual +- **Desarrollo orientado a objetivos** donde describes resultados, no flujos de trabajo +- **Confiabilidad en producción** con recuperación y redespliegue automáticos +- **Iteración rápida** en arquitecturas de agentes sin reescribir código +- **Observabilidad completa** con monitoreo en tiempo real y supervisión humana + +Elige otros frameworks cuando necesites: + +- **Flujos de trabajo predecibles y con tipos seguros** (PydanticAI, Mastra) +- **RAG y procesamiento de documentos** (LlamaIndex, Haystack) +- **Investigación sobre emergencia de agentes** (CAMEL) +- **Voz/multimodal en tiempo real** (TEN Framework) +- **Encadenamiento simple de componentes** (LangChain, Swarm) + +## Estructura del Proyecto + +``` +hive/ +├── core/ # Framework principal - Runtime de agentes, ejecutor de grafos, protocolos +├── tools/ # Paquete de Herramientas MCP - 19 herramientas para capacidades de agentes +├── exports/ # Paquetes de Agentes - Agentes pre-construidos y ejemplos +├── docs/ # Documentación y guías +├── scripts/ # Scripts de construcción y utilidades +├── .claude/ # Habilidades de Claude Code para construir agentes +├── ENVIRONMENT_SETUP.md # Guía de configuración de Python para desarrollo de agentes +├── DEVELOPER.md # Guía del desarrollador +├── CONTRIBUTING.md # Directrices de contribución +└── ROADMAP.md # Hoja 
de ruta del producto +``` + +## Desarrollo + +### Desarrollo de Agentes en Python + +Para construir y ejecutar agentes orientados a objetivos con el framework: + +```bash +# Configuración única +./scripts/setup-python.sh + +# Esto instala: +# - paquete framework (runtime principal) +# - paquete aden_tools (19 herramientas MCP) +# - Todas las dependencias + +# Construir nuevos agentes usando habilidades de Claude Code +claude> /building-agents + +# Probar agentes +claude> /testing-agent + +# Ejecutar agentes +PYTHONPATH=core:exports python -m agent_name run --input '{...}' +``` + +Consulta [ENVIRONMENT_SETUP.md](ENVIRONMENT_SETUP.md) para instrucciones de configuración completas. + +## Documentación + +- **[Guía del Desarrollador](DEVELOPER.md)** - Guía completa para desarrolladores +- [Primeros Pasos](docs/getting-started.md) - Instrucciones de configuración rápida +- [Guía de Configuración](docs/configuration.md) - Todas las opciones de configuración +- [Visión General de Arquitectura](docs/architecture.md) - Diseño y estructura del sistema + +## Hoja de Ruta + +El Framework de Agentes Aden tiene como objetivo ayudar a los desarrolladores a construir agentes auto-adaptativos orientados a resultados. 
Encuentra nuestra hoja de ruta aquí + +[ROADMAP.md](ROADMAP.md) + +```mermaid +timeline + title Aden Agent Framework Roadmap + section Foundation + Architecture : Node-Based Architecture : Python SDK : LLM Integration (OpenAI, Anthropic, Google) : Communication Protocol + Coding Agent : Goal Creation Session : Worker Agent Creation : MCP Tools Integration + Worker Agent : Human-in-the-Loop : Callback Handlers : Intervention Points : Streaming Interface + Tools : File Use : Memory (STM/LTM) : Web Search : Web Scraper : Audit Trail + Core : Eval System : Pydantic Validation : Docker Deployment : Documentation : Sample Agents + section Expansion + Intelligence : Guardrails : Streaming Mode : Semantic Search + Platform : JavaScript SDK : Custom Tool Integrator : Credential Store + Deployment : Self-Hosted : Cloud Services : CI/CD Pipeline + Templates : Sales Agent : Marketing Agent : Analytics Agent : Training Agent : Smart Form Agent +``` + +## Comunidad y Soporte + +Usamos [Discord](https://discord.com/invite/MXE49hrKDk) para soporte, solicitudes de funciones y discusiones de la comunidad. + +- Discord - [Únete a nuestra comunidad](https://discord.com/invite/MXE49hrKDk) +- Twitter/X - [@adenhq](https://x.com/aden_hq) +- LinkedIn - [Página de la Empresa](https://www.linkedin.com/company/teamaden/) + +## Contribuir + +¡Damos la bienvenida a las contribuciones! Por favor consulta [CONTRIBUTING.md](CONTRIBUTING.md) para las directrices. + +1. Haz fork del repositorio +2. Crea tu rama de funcionalidad (`git checkout -b feature/amazing-feature`) +3. Haz commit de tus cambios (`git commit -m 'Add amazing feature'`) +4. Haz push a la rama (`git push origin feature/amazing-feature`) +5. Abre un Pull Request + +## Únete a Nuestro Equipo + +**¡Estamos contratando!** Únete a nosotros en roles de ingeniería, investigación y comercialización. 
+ +[Ver Posiciones Abiertas](https://jobs.adenhq.com/a8cec478-cdbc-473c-bbd4-f4b7027ec193/applicant) + +## Seguridad + +Para preocupaciones de seguridad, por favor consulta [SECURITY.md](SECURITY.md). + +## Licencia + +Este proyecto está licenciado bajo la Licencia Apache 2.0 - consulta el archivo [LICENSE](LICENSE) para más detalles. + +## Preguntas Frecuentes (FAQ) + +> **Nota:** Para las preguntas frecuentes completas, consulta el [README.md](README.md) en inglés. + +**P: ¿Aden depende de LangChain u otros frameworks de agentes?** + +No. Aden está construido desde cero sin dependencias de LangChain, CrewAI u otros frameworks de agentes. El framework está diseñado para ser ligero y flexible, generando grafos de agentes dinámicamente en lugar de depender de componentes predefinidos. + +**P: ¿Qué proveedores de LLM soporta Aden?** + +Aden soporta más de 100 proveedores de LLM a través de la integración de LiteLLM, incluyendo OpenAI (GPT-4, GPT-4o), Anthropic (modelos Claude), Google Gemini, Mistral, Groq y muchos más. Simplemente configura la variable de entorno de la clave API apropiada y especifica el nombre del modelo. + +**P: ¿Aden es de código abierto?** + +Sí, Aden es completamente de código abierto bajo la Licencia Apache 2.0. Fomentamos activamente las contribuciones y colaboración de la comunidad. + +**P: ¿Qué hace que Aden sea diferente de otros frameworks de agentes?** + +Aden genera todo tu sistema de agentes a partir de objetivos en lenguaje natural usando un agente de codificación—no codificas flujos de trabajo ni defines grafos manualmente. Cuando los agentes fallan, el framework captura automáticamente los datos del fallo, evoluciona el grafo de agentes y lo vuelve a desplegar. Este ciclo de auto-mejora es único de Aden. + +**P: ¿Aden soporta flujos de trabajo con humano en el bucle?** + +Sí, Aden soporta completamente flujos de trabajo con humano en el bucle a través de nodos de intervención que pausan la ejecución para entrada humana. 
Estos incluyen tiempos de espera configurables y políticas de escalación, permitiendo colaboración fluida entre expertos humanos y agentes de IA. + +--- + +

+ Hecho con 🔥 Pasión en San Francisco +

diff --git a/README.ja.md b/README.ja.md new file mode 100644 index 00000000..12e09508 --- /dev/null +++ b/README.ja.md @@ -0,0 +1,339 @@ +

+ Hive Banner +

+ +

+ English | + 简体中文 | + Español | + Português | + 日本語 | + Русский +

+ +[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/adenhq/hive/blob/main/LICENSE) +[![Y Combinator](https://img.shields.io/badge/Y%20Combinator-Aden-orange)](https://www.ycombinator.com/companies/aden) +[![Docker Pulls](https://img.shields.io/docker/pulls/adenhq/hive?logo=Docker&labelColor=%23528bff)](https://hub.docker.com/u/adenhq) +[![Discord](https://img.shields.io/discord/1172610340073242735?logo=discord&labelColor=%235462eb&logoColor=%23f5f5f5&color=%235462eb)](https://discord.com/invite/MXE49hrKDk) +[![Twitter Follow](https://img.shields.io/twitter/follow/teamaden?logo=X&color=%23f5f5f5)](https://x.com/aden_hq) +[![LinkedIn](https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff)](https://www.linkedin.com/company/teamaden/) + +

+ AI Agents + Multi-Agent + Goal-Driven + HITL + Production +

+

+ OpenAI + Anthropic + Gemini + MCP +

+ +## 概要 + +ワークフローをハードコーディングせずに、信頼性の高い自己改善型AIエージェントを構築できます。コーディングエージェントとの会話を通じて目標を定義すると、フレームワークが動的に作成された接続コードを持つノードグラフを生成します。問題が発生すると、フレームワークは障害データをキャプチャし、コーディングエージェントを通じてエージェントを進化させ、再デプロイします。組み込みのヒューマンインザループノード、認証情報管理、リアルタイムモニタリングにより、適応性を損なうことなく制御を維持できます。 + +完全なドキュメント、例、ガイドについては [adenhq.com](https://adenhq.com) をご覧ください。 + +## Adenとは + +

+ Aden Architecture +

+ +Adenは、AIエージェントの構築、デプロイ、運用、適応のためのプラットフォームです: + +- **構築** - コーディングエージェントが自然言語の目標から専門的なワーカーエージェント(セールス、マーケティング、オペレーション)を生成 +- **デプロイ** - CI/CD統合と完全なAPIライフサイクル管理を備えたヘッドレスデプロイメント +- **運用** - リアルタイムモニタリング、可観測性、ランタイムガードレールがエージェントの信頼性を維持 +- **適応** - 継続的な評価、監督、適応により、エージェントは時間とともに改善 +- **インフラ** - 共有メモリ、LLM統合、ツール、スキルがすべてのエージェントを支援 + +## クイックリンク + +- **[ドキュメント](https://docs.adenhq.com/)** - 完全なガイドとAPIリファレンス +- **[セルフホスティングガイド](https://docs.adenhq.com/getting-started/quickstart)** - インフラストラクチャへのHiveデプロイ +- **[変更履歴](https://github.com/adenhq/hive/releases)** - 最新の更新とリリース + +- **[問題を報告](https://github.com/adenhq/hive/issues)** - バグレポートと機能リクエスト + +## クイックスタート + +### 前提条件 + +- [Python 3.11+](https://www.python.org/downloads/) - エージェント開発用 +- [Docker](https://docs.docker.com/get-docker/) (v20.10+) - オプション、コンテナ化されたツール用 + +### インストール + +```bash +# リポジトリをクローン +git clone https://github.com/adenhq/hive.git +cd hive + +# Python環境セットアップを実行 +./scripts/setup-python.sh +``` + +これにより以下がインストールされます: +- **framework** - コアエージェントランタイムとグラフエグゼキュータ +- **aden_tools** - エージェント機能のための19個のMCPツール +- すべての必要な依存関係 + +### 最初のエージェントを構築 + +```bash +# Claude Codeスキルをインストール(1回のみ) +./quickstart.sh + +# Claude Codeを使用してエージェントを構築 +claude> /building-agents + +# エージェントをテスト +claude> /testing-agent + +# エージェントを実行 +PYTHONPATH=core:exports python -m your_agent_name run --input '{...}' +``` + +**[📖 完全セットアップガイド](ENVIRONMENT_SETUP.md)** - エージェント開発の詳細な手順 + +## 機能 + +- **目標駆動開発** - 自然言語で目標を定義;コーディングエージェントがそれを達成するためのエージェントグラフと接続コードを生成 +- **自己適応エージェント** - フレームワークが障害をキャプチャし、目標を更新し、エージェントグラフを更新 +- **動的ノード接続** - 事前定義されたエッジなし;接続コードは目標に基づいて任意の対応LLMによって生成 +- **SDKラップノード** - すべてのノードが共有メモリ、ローカルRLMメモリ、モニタリング、ツール、LLMアクセスを標準装備 +- **ヒューマンインザループ** - 設定可能なタイムアウトとエスカレーションを備えた、人間の入力のために実行を一時停止する介入ノード +- **リアルタイム可観測性** - エージェント実行、決定、ノード間通信のライブモニタリングのためのWebSocketストリーミング +- **コストと予算管理** - 支出制限、スロットル、自動モデル劣化ポリシーを設定 +- **本番環境対応** - セルフホスト可能、スケールと信頼性のために構築 + +## なぜAdenか + 
+従来のエージェントフレームワークでは、ワークフローを手動で設計し、エージェントの相互作用を定義し、障害を事後的に処理する必要があります。Adenはこのパラダイムを逆転させます—**結果を記述すれば、システムが自ら構築します**。 + +```mermaid +flowchart LR + subgraph BUILD["🏗️ BUILD"] + GOAL["Define Goal
+ Success Criteria"] --> NODES["Add Nodes
LLM/Router/Function"] + NODES --> EDGES["Connect Edges
on_success/failure/conditional"] + EDGES --> TEST["Test & Validate"] --> APPROVE["Approve & Export"] + end + + subgraph EXPORT["📦 EXPORT"] + direction TB + JSON["agent.json
(GraphSpec)"] + TOOLS["tools.py
(Functions)"] + MCP["mcp_servers.json
(Integrations)"] + end + + subgraph RUN["🚀 RUNTIME"] + LOAD["AgentRunner
Load + Parse"] --> SETUP["Setup Runtime
+ ToolRegistry"] + SETUP --> EXEC["GraphExecutor
Execute Nodes"] + + subgraph DECISION["Decision Recording"] + DEC1["runtime.decide()
intent → options → choice"] + DEC2["runtime.record_outcome()
success, result, metrics"] + end + end + + subgraph INFRA["⚙️ INFRASTRUCTURE"] + CTX["NodeContext
memory • llm • tools"] + STORE[("FileStorage
Runs & Decisions")] + end + + APPROVE --> EXPORT + EXPORT --> LOAD + EXEC --> DECISION + EXEC --> CTX + DECISION --> STORE + STORE -.->|"Analyze & Improve"| NODES + + style BUILD fill:#ffbe42,stroke:#cc5d00,stroke-width:3px,color:#333 + style EXPORT fill:#fff59d,stroke:#ed8c00,stroke-width:2px,color:#333 + style RUN fill:#ffb100,stroke:#cc5d00,stroke-width:3px,color:#333 + style DECISION fill:#ffcc80,stroke:#ed8c00,stroke-width:2px,color:#333 + style INFRA fill:#e8763d,stroke:#cc5d00,stroke-width:3px,color:#fff + style STORE fill:#ed8c00,stroke:#cc5d00,stroke-width:2px,color:#fff +``` + +### Adenの優位性 + +| 従来のフレームワーク | Aden | +|----------------------|------| +| エージェントワークフローをハードコード | 自然言語で目標を記述 | +| 手動でグラフを定義 | 自動生成されるエージェントグラフ | +| 事後的なエラー処理 | プロアクティブな自己進化 | +| 静的なツール設定 | 動的なSDKラップノード | +| 別途モニタリング設定 | 組み込みのリアルタイム可観測性 | +| DIY予算管理 | 統合されたコスト制御と劣化 | + +### 仕組み + +1. **目標を定義** → 達成したいことを平易な言葉で記述 +2. **コーディングエージェントが生成** → エージェントグラフ、接続コード、テストケースを作成 +3. **ワーカーが実行** → SDKラップノードが完全な可観測性とツールアクセスで実行 +4. **コントロールプレーンが監視** → リアルタイムメトリクス、予算執行、ポリシー管理 +5. 
**自己改善** → 障害時、システムがグラフを進化させ自動的に再デプロイ + +## Adenの比較 + +Adenはエージェント開発に根本的に異なるアプローチを採用しています。ほとんどのフレームワークがワークフローをハードコードするか、エージェントグラフを手動で定義することを要求するのに対し、Adenは**コーディングエージェントを使用して自然言語の目標からエージェントシステム全体を生成**します。エージェントが失敗した場合、フレームワークは単にエラーをログに記録するだけでなく—**自動的にエージェントグラフを進化させ**、再デプロイします。 + +> **注意:** 詳細なフレームワーク比較表とよくある質問については、英語の[README.md](README.md)を参照してください。 + +### Adenを選ぶべきとき + +Adenを選択する場合: + +- 手動介入なしに**失敗から自己改善する**エージェントが必要 +- ワークフローではなく結果を記述する**目標駆動開発**が必要 +- 自動回復と再デプロイを備えた**本番環境の信頼性**が必要 +- コードを書き直すことなくエージェントアーキテクチャを**迅速に反復**する必要がある +- リアルタイムモニタリングと人間の監督を備えた**完全な可観測性**が必要 + +他のフレームワークを選択する場合: + +- **型安全で予測可能なワークフロー**(PydanticAI、Mastra) +- **RAGとドキュメント処理**(LlamaIndex、Haystack) +- **エージェント創発の研究**(CAMEL) +- **リアルタイム音声/マルチモーダル**(TEN Framework) +- **シンプルなコンポーネント連鎖**(LangChain、Swarm) + +## プロジェクト構造 + +``` +hive/ +├── core/ # コアフレームワーク - エージェントランタイム、グラフエグゼキュータ、プロトコル +├── tools/ # MCPツールパッケージ - エージェント機能のための19個のツール +├── exports/ # エージェントパッケージ - 事前構築されたエージェントと例 +├── docs/ # ドキュメントとガイド +├── scripts/ # ビルドとユーティリティスクリプト +├── .claude/ # エージェント構築用のClaude Codeスキル +├── ENVIRONMENT_SETUP.md # エージェント開発用のPythonセットアップガイド +├── DEVELOPER.md # 開発者ガイド +├── CONTRIBUTING.md # 貢献ガイドライン +└── ROADMAP.md # プロダクトロードマップ +``` + +## 開発 + +### Pythonエージェント開発 + +フレームワークで目標駆動エージェントを構築および実行するには: + +```bash +# 1回限りのセットアップ +./scripts/setup-python.sh + +# これにより以下がインストールされます: +# - frameworkパッケージ(コアランタイム) +# - aden_toolsパッケージ(19個のMCPツール) +# - すべての依存関係 + +# Claude Codeスキルを使用して新しいエージェントを構築 +claude> /building-agents + +# エージェントをテスト +claude> /testing-agent + +# エージェントを実行 +PYTHONPATH=core:exports python -m agent_name run --input '{...}' +``` + +完全なセットアップ手順については、[ENVIRONMENT_SETUP.md](ENVIRONMENT_SETUP.md)を参照してください。 + +## ドキュメント + +- **[開発者ガイド](DEVELOPER.md)** - 開発者向け総合ガイド +- [はじめに](docs/getting-started.md) - クイックセットアップ手順 +- [設定ガイド](docs/configuration.md) - すべての設定オプション +- [アーキテクチャ概要](docs/architecture.md) - システム設計と構造 + +## ロードマップ + +Adenエージェントフレームワークは、開発者が結果志向で自己適応するエージェントを構築できるよう支援することを目指しています。ロードマップはこちらをご覧ください + 
+[ROADMAP.md](ROADMAP.md) + +```mermaid +timeline + title Aden Agent Framework Roadmap + section Foundation + Architecture : Node-Based Architecture : Python SDK : LLM Integration (OpenAI, Anthropic, Google) : Communication Protocol + Coding Agent : Goal Creation Session : Worker Agent Creation : MCP Tools Integration + Worker Agent : Human-in-the-Loop : Callback Handlers : Intervention Points : Streaming Interface + Tools : File Use : Memory (STM/LTM) : Web Search : Web Scraper : Audit Trail + Core : Eval System : Pydantic Validation : Docker Deployment : Documentation : Sample Agents + section Expansion + Intelligence : Guardrails : Streaming Mode : Semantic Search + Platform : JavaScript SDK : Custom Tool Integrator : Credential Store + Deployment : Self-Hosted : Cloud Services : CI/CD Pipeline + Templates : Sales Agent : Marketing Agent : Analytics Agent : Training Agent : Smart Form Agent +``` + +## コミュニティとサポート + +サポート、機能リクエスト、コミュニティディスカッションには[Discord](https://discord.com/invite/MXE49hrKDk)を使用しています。 + +- Discord - [コミュニティに参加](https://discord.com/invite/MXE49hrKDk) +- Twitter/X - [@adenhq](https://x.com/aden_hq) +- LinkedIn - [会社ページ](https://www.linkedin.com/company/teamaden/) + +## 貢献 + +貢献を歓迎します!ガイドラインについては[CONTRIBUTING.md](CONTRIBUTING.md)をご覧ください。 + +1. リポジトリをフォーク +2. 機能ブランチを作成 (`git checkout -b feature/amazing-feature`) +3. 変更をコミット (`git commit -m 'Add amazing feature'`) +4. ブランチにプッシュ (`git push origin feature/amazing-feature`) +5. 
プルリクエストを開く + +## チームに参加 + +**採用中です!** エンジニアリング、リサーチ、マーケティングの役職で私たちに参加してください。 + +[オープンポジションを見る](https://jobs.adenhq.com/a8cec478-cdbc-473c-bbd4-f4b7027ec193/applicant) + +## セキュリティ + +セキュリティに関する懸念については、[SECURITY.md](SECURITY.md)をご覧ください。 + +## ライセンス + +このプロジェクトはApache License 2.0の下でライセンスされています - 詳細は[LICENSE](LICENSE)ファイルをご覧ください。 + +## よくある質問 (FAQ) + +> **注意:** よくある質問の完全版については、英語の[README.md](README.md)を参照してください。 + +**Q: AdenはLangChainや他のエージェントフレームワークに依存していますか?** + +いいえ。AdenはLangChain、CrewAI、その他のエージェントフレームワークに依存せずにゼロから構築されています。フレームワークは軽量で柔軟に設計されており、事前定義されたコンポーネントに依存するのではなく、エージェントグラフを動的に生成します。 + +**Q: AdenはどのLLMプロバイダーをサポートしていますか?** + +AdenはLiteLLM統合を通じて100以上のLLMプロバイダーをサポートしており、OpenAI(GPT-4、GPT-4o)、Anthropic(Claudeモデル)、Google Gemini、Mistral、Groqなどが含まれます。適切なAPIキー環境変数を設定し、モデル名を指定するだけです。 + +**Q: Adenはオープンソースですか?** + +はい、AdenはApache License 2.0の下で完全にオープンソースです。コミュニティの貢献とコラボレーションを積極的に奨励しています。 + +**Q: Adenは他のエージェントフレームワークと何が違いますか?** + +Adenはコーディングエージェントを使用して自然言語の目標からエージェントシステム全体を生成します—ワークフローをハードコードしたり、グラフを手動で定義したりする必要はありません。エージェントが失敗すると、フレームワークは自動的に障害データをキャプチャし、エージェントグラフを進化させ、再デプロイします。この自己改善ループはAden独自のものです。 + +**Q: Adenはヒューマンインザループワークフローをサポートしていますか?** + +はい、Adenは人間の入力のために実行を一時停止する介入ノードを通じて、ヒューマンインザループワークフローを完全にサポートしています。設定可能なタイムアウトとエスカレーションポリシーが含まれており、人間の専門家とAIエージェントのシームレスなコラボレーションを可能にします。 + +--- + +

+ サンフランシスコで 🔥 情熱を込めて作成 +

diff --git a/README.md b/README.md index 17670f21..932a98bc 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,15 @@ Hive Banner

+

+ English | + 简体中文 | + Español | + Português | + 日本語 | + Русский +

+ [![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/adenhq/hive/blob/main/LICENSE) [![Y Combinator](https://img.shields.io/badge/Y%20Combinator-Aden-orange)](https://www.ycombinator.com/companies/aden) [![Docker Pulls](https://img.shields.io/docker/pulls/adenhq/hive?logo=Docker&labelColor=%23528bff)](https://hub.docker.com/u/adenhq) @@ -29,6 +38,20 @@ Build reliable, self-improving AI agents without hardcoding workflows. Define yo Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and guides. +## What is Aden + +

+ Aden Architecture +

+ +Aden is a platform for building, deploying, operating, and adapting AI agents: + +- **Build** - A Coding Agent generates specialized Worker Agents (Sales, Marketing, Ops) from natural language goals +- **Deploy** - Headless deployment with CI/CD integration and full API lifecycle management +- **Operate** - Real-time monitoring, observability, and runtime guardrails keep agents reliable +- **Adapt** - Continuous evaluation, supervision, and adaptation ensure agents improve over time +- **Infra** - Shared memory, LLM integrations, tools, and skills power every agent + ## Quick Links - **[Documentation](https://docs.adenhq.com/)** - Complete guides and API reference @@ -41,8 +64,8 @@ Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and ### Prerequisites -- [Docker](https://docs.docker.com/get-docker/) (v20.10+) -- [Docker Compose](https://docs.docker.com/compose/install/) (v2.0+) +- [Python 3.11+](https://www.python.org/downloads/) for agent development +- [Docker](https://docs.docker.com/get-docker/) (v20.10+) - Optional, for containerized tools ### Installation @@ -51,19 +74,32 @@ Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and git clone https://github.com/adenhq/hive.git cd hive -# Copy and configure -cp config.yaml.example config.yaml - -# Run setup and start services -npm run setup -docker compose up +# Run Python environment setup +./scripts/setup-python.sh ``` -**Access the application:** +This installs: +- **framework** - Core agent runtime and graph executor +- **aden_tools** - 19 MCP tools for agent capabilities +- All required dependencies -- Dashboard: http://localhost:3000 -- API: http://localhost:4000 -- Health: http://localhost:4000/health +### Build Your First Agent + +```bash +# Install Claude Code skills (one-time) +./quickstart.sh + +# Build an agent using Claude Code +claude> /building-agents + +# Test your agent +claude> /testing-agent + +# Run your agent +PYTHONPATH=core:exports 
python -m your_agent_name run --input '{...}' +``` + +**[📖 Complete Setup Guide](ENVIRONMENT_SETUP.md)** - Detailed instructions for agent development ## Features @@ -127,14 +163,14 @@ flowchart LR ### The Aden Advantage -| Traditional Frameworks | Aden | -|------------------------|------| -| Hardcode agent workflows | Describe goals in natural language | -| Manual graph definition | Auto-generated agent graphs | -| Reactive error handling | Proactive self-evolution | -| Static tool configurations | Dynamic SDK-wrapped nodes | -| Separate monitoring setup | Built-in real-time observability | -| DIY budget management | Integrated cost controls & degradation | +| Traditional Frameworks | Aden | +| -------------------------- | -------------------------------------- | +| Hardcode agent workflows | Describe goals in natural language | +| Manual graph definition | Auto-generated agent graphs | +| Reactive error handling | Proactive self-evolution | +| Static tool configurations | Dynamic SDK-wrapped nodes | +| Separate monitoring setup | Built-in real-time observability | +| DIY budget management | Integrated cost controls & degradation | ### How It Works @@ -150,20 +186,21 @@ Aden takes a fundamentally different approach to agent development. 
While most f ### Comparison Table -| Framework | Category | Approach | Aden Difference | -|-----------|----------|----------|-----------------| -| **LangChain, LlamaIndex, Haystack** | Component Libraries | Predefined components for RAG/LLM apps; manual connection logic | Generates entire graph and connection code upfront | -| **CrewAI, AutoGen, Swarm** | Multi-Agent Orchestration | Role-based agents with predefined collaboration patterns | Dynamically creates agents/connections; adapts on failure | -| **PydanticAI, Mastra, Agno** | Type-Safe Frameworks | Structured outputs and validation for known workflows | Evolving workflows; structure emerges through iteration | -| **Agent Zero, Letta** | Personal AI Assistants | Memory and learning; OS-as-tool or stateful memory focus | Production multi-agent systems with self-healing | -| **CAMEL** | Research Framework | Emergent behavior in large-scale simulations (up to 1M agents) | Production-oriented with reliable execution and recovery | -| **TEN Framework, Genkit** | Infrastructure Frameworks | Real-time multimodal (TEN) or full-stack AI (Genkit) | Higher abstraction—generates and evolves agent logic | -| **GPT Engineer, Motia** | Code Generation | Code from specs (GPT Engineer) or "Step" primitive (Motia) | Self-adapting graphs with automatic failure recovery | -| **Trading Agents** | Domain-Specific | Hardcoded trading firm roles on LangGraph | Domain-agnostic; generates structures for any use case | +| Framework | Category | Approach | Aden Difference | +| ----------------------------------- | ------------------------- | --------------------------------------------------------------- | --------------------------------------------------------- | +| **LangChain, LlamaIndex, Haystack** | Component Libraries | Predefined components for RAG/LLM apps; manual connection logic | Generates entire graph and connection code upfront | +| **CrewAI, AutoGen, Swarm** | Multi-Agent Orchestration | Role-based agents with predefined 
collaboration patterns | Dynamically creates agents/connections; adapts on failure | +| **PydanticAI, Mastra, Agno** | Type-Safe Frameworks | Structured outputs and validation for known workflows | Evolving workflows; structure emerges through iteration | +| **Agent Zero, Letta** | Personal AI Assistants | Memory and learning; OS-as-tool or stateful memory focus | Production multi-agent systems with self-healing | +| **CAMEL** | Research Framework | Emergent behavior in large-scale simulations (up to 1M agents) | Production-oriented with reliable execution and recovery | +| **TEN Framework, Genkit** | Infrastructure Frameworks | Real-time multimodal (TEN) or full-stack AI (Genkit) | Higher abstraction—generates and evolves agent logic | +| **GPT Engineer, Motia** | Code Generation | Code from specs (GPT Engineer) or "Step" primitive (Motia) | Self-adapting graphs with automatic failure recovery | +| **Trading Agents** | Domain-Specific | Hardcoded trading firm roles on LangGraph | Domain-agnostic; generates structures for any use case | ### When to Choose Aden Choose Aden when you need: + - Agents that **self-improve from failures** without manual intervention - **Goal-driven development** where you describe outcomes, not workflows - **Production reliability** with automatic recovery and redeployment @@ -171,6 +208,7 @@ Choose Aden when you need: - **Full observability** with real-time monitoring and human oversight Choose other frameworks when you need: + - **Type-safe, predictable workflows** (PydanticAI, Mastra) - **RAG and document processing** (LlamaIndex, Haystack) - **Research on agent emergence** (CAMEL) @@ -181,13 +219,13 @@ Choose other frameworks when you need: ``` hive/ -├── honeycomb/ # Frontend Dashboard -├── hive/ # Backend API Server -├── aden-tools/ # MCP Tools Package - 19 tools for agent capabilities +├── core/ # Core framework - Agent runtime, graph executor, protocols +├── tools/ # MCP Tools Package - 19 tools for agent capabilities +├── 
exports/ # Agent packages - Pre-built agents and examples ├── docs/ # Documentation and guides ├── scripts/ # Build and utility scripts -├── config.yaml.example # Configuration template -├── docker-compose.yml # Container orchestration +├── .claude/ # Claude Code skills for building agents +├── ENVIRONMENT_SETUP.md # Python setup guide for agent development ├── DEVELOPER.md # Developer guide ├── CONTRIBUTING.md # Contribution guidelines └── ROADMAP.md # Product roadmap @@ -195,31 +233,30 @@ hive/ ## Development -### Local Development with Hot Reload +### Python Agent Development + +For building and running goal-driven agents with the framework: ```bash -# Copy development overrides -cp docker-compose.override.yml.example docker-compose.override.yml +# One-time setup +./scripts/setup-python.sh -# Start with hot reload enabled -docker compose up +# This installs: +# - framework package (core runtime) +# - aden_tools package (19 MCP tools) +# - All dependencies + +# Build new agents using Claude Code skills +claude> /building-agents + +# Test agents +claude> /testing-agent + +# Run agents +PYTHONPATH=core:exports python -m agent_name run --input '{...}' ``` -### Running Without Docker - -```bash -# Install dependencies -npm install - -# Generate environment files -npm run generate:env - -# Start frontend (in honeycomb/) -cd honeycomb && npm run dev - -# Start backend (in hive/) -cd hive && npm run dev -``` +See [ENVIRONMENT_SETUP.md](ENVIRONMENT_SETUP.md) for complete setup instructions. ## Documentation @@ -290,11 +327,11 @@ No. Aden is built from the ground up with no dependencies on LangChain, CrewAI, **Q: What LLM providers does Aden support?** -Aden supports OpenAI (GPT-4, GPT-4o), Anthropic (Claude models), and Google Gemini out of the box. The architecture is provider-agnostic through SDK abstraction, with LiteLLM integration on the roadmap for expanded model support. 
+Aden supports 100+ LLM providers through LiteLLM integration, including OpenAI (GPT-4, GPT-4o), Anthropic (Claude models), Google Gemini, Mistral, Groq, and many more. Simply set the appropriate API key environment variable and specify the model name. **Q: Can I use Aden with local AI models like Ollama?** -Local model support through LiteLLM integration is on our roadmap. The SDK's provider-agnostic design means adding local model support will be straightforward once implemented. +Yes! Aden supports local models through LiteLLM. Simply use the model name format `ollama/model-name` (e.g., `ollama/llama3`, `ollama/mistral`) and ensure Ollama is running locally. **Q: What makes Aden different from other agent frameworks?** diff --git a/README.pt.md b/README.pt.md new file mode 100644 index 00000000..6725de43 --- /dev/null +++ b/README.pt.md @@ -0,0 +1,339 @@ +

+ Hive Banner +

+ +

+ English | + 简体中文 | + Español | + Português | + 日本語 | + Русский +

+ +[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/adenhq/hive/blob/main/LICENSE) +[![Y Combinator](https://img.shields.io/badge/Y%20Combinator-Aden-orange)](https://www.ycombinator.com/companies/aden) +[![Docker Pulls](https://img.shields.io/docker/pulls/adenhq/hive?logo=Docker&labelColor=%23528bff)](https://hub.docker.com/u/adenhq) +[![Discord](https://img.shields.io/discord/1172610340073242735?logo=discord&labelColor=%235462eb&logoColor=%23f5f5f5&color=%235462eb)](https://discord.com/invite/MXE49hrKDk) +[![Twitter Follow](https://img.shields.io/twitter/follow/teamaden?logo=X&color=%23f5f5f5)](https://x.com/aden_hq) +[![LinkedIn](https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff)](https://www.linkedin.com/company/teamaden/) + +

+ AI Agents + Multi-Agent + Goal-Driven + HITL + Production +

+

+ OpenAI + Anthropic + Gemini + MCP +

+ +## Visão Geral + +Construa agentes de IA confiáveis e auto-aperfeiçoáveis sem codificar fluxos de trabalho. Defina seu objetivo através de uma conversa com um agente de codificação, e o framework gera um grafo de nós com código de conexão criado dinamicamente. Quando algo quebra, o framework captura dados de falha, evolui o agente através do agente de codificação e reimplanta. Nós de intervenção humana integrados, gerenciamento de credenciais e monitoramento em tempo real dão a você controle sem sacrificar a adaptabilidade. + +Visite [adenhq.com](https://adenhq.com) para documentação completa, exemplos e guias. + +## O que é Aden + +

+ Aden Architecture +

+ +Aden é uma plataforma para construir, implantar, operar e adaptar agentes de IA: + +- **Construir** - Um Agente de Codificação gera Agentes de Trabalho especializados (Vendas, Marketing, Operações) a partir de objetivos em linguagem natural +- **Implantar** - Implantação headless com integração CI/CD e gerenciamento completo do ciclo de vida de API +- **Operar** - Monitoramento em tempo real, observabilidade e guardrails de runtime mantêm os agentes confiáveis +- **Adaptar** - Avaliação contínua, supervisão e adaptação garantem que os agentes melhorem ao longo do tempo +- **Infraestrutura** - Memória compartilhada, integrações LLM, ferramentas e habilidades alimentam cada agente + +## Links Rápidos + +- **[Documentação](https://docs.adenhq.com/)** - Guias completos e referência de API +- **[Guia de Auto-Hospedagem](https://docs.adenhq.com/getting-started/quickstart)** - Implante o Hive em sua infraestrutura +- **[Changelog](https://github.com/adenhq/hive/releases)** - Últimas atualizações e versões + +- **[Reportar Problemas](https://github.com/adenhq/hive/issues)** - Relatórios de bugs e solicitações de funcionalidades + +## Início Rápido + +### Pré-requisitos + +- [Python 3.11+](https://www.python.org/downloads/) - Para desenvolvimento de agentes +- [Docker](https://docs.docker.com/get-docker/) (v20.10+) - Opcional, para ferramentas containerizadas + +### Instalação + +```bash +# Clonar o repositório +git clone https://github.com/adenhq/hive.git +cd hive + +# Executar configuração do ambiente Python +./scripts/setup-python.sh +``` + +Isto instala: +- **framework** - Runtime do agente principal e executor de grafos +- **aden_tools** - 19 ferramentas MCP para capacidades de agentes +- Todas as dependências necessárias + +### Construa Seu Primeiro Agente + +```bash +# Instalar habilidades do Claude Code (uma vez) +./quickstart.sh + +# Construir um agente usando Claude Code +claude> /building-agents + +# Testar seu agente +claude> /testing-agent + +# Executar seu 
agente +PYTHONPATH=core:exports python -m your_agent_name run --input '{...}' +``` + +**[📖 Guia Completo de Configuração](ENVIRONMENT_SETUP.md)** - Instruções detalhadas para desenvolvimento de agentes + +## Funcionalidades + +- **Desenvolvimento Orientado a Objetivos** - Defina objetivos em linguagem natural; o agente de codificação gera o grafo de agentes e código de conexão para alcançá-los +- **Agentes Auto-Adaptáveis** - Framework captura falhas, atualiza objetivos e atualiza o grafo de agentes +- **Conexões de Nós Dinâmicas** - Sem arestas predefinidas; código de conexão é gerado por qualquer LLM capaz baseado em seus objetivos +- **Nós Envolvidos em SDK** - Cada nó recebe memória compartilhada, memória RLM local, monitoramento, ferramentas e acesso LLM prontos para uso +- **Humano no Loop** - Nós de intervenção que pausam a execução para entrada humana com timeouts e escalonamento configuráveis +- **Observabilidade em Tempo Real** - Streaming WebSocket para monitoramento ao vivo de execução de agentes, decisões e comunicação entre nós +- **Controle de Custo e Orçamento** - Defina limites de gastos, throttles e políticas de degradação automática de modelo +- **Pronto para Produção** - Auto-hospedável, construído para escala e confiabilidade + +## Por que Aden + +Frameworks de agentes tradicionais exigem que você projete manualmente fluxos de trabalho, defina interações de agentes e lide com falhas reativamente. Aden inverte esse paradigma—**você descreve resultados, e o sistema se constrói sozinho**. + +```mermaid +flowchart LR + subgraph BUILD["🏗️ BUILD"] + GOAL["Define Goal
+ Success Criteria"] --> NODES["Add Nodes
LLM/Router/Function"] + NODES --> EDGES["Connect Edges
on_success/failure/conditional"] + EDGES --> TEST["Test & Validate"] --> APPROVE["Approve & Export"] + end + + subgraph EXPORT["📦 EXPORT"] + direction TB + JSON["agent.json
(GraphSpec)"] + TOOLS["tools.py
(Functions)"] + MCP["mcp_servers.json
(Integrations)"] + end + + subgraph RUN["🚀 RUNTIME"] + LOAD["AgentRunner
Load + Parse"] --> SETUP["Setup Runtime
+ ToolRegistry"] + SETUP --> EXEC["GraphExecutor
Execute Nodes"] + + subgraph DECISION["Decision Recording"] + DEC1["runtime.decide()
intent → options → choice"] + DEC2["runtime.record_outcome()
success, result, metrics"] + end + end + + subgraph INFRA["⚙️ INFRASTRUCTURE"] + CTX["NodeContext
memory • llm • tools"] + STORE[("FileStorage
Runs & Decisions")] + end + + APPROVE --> EXPORT + EXPORT --> LOAD + EXEC --> DECISION + EXEC --> CTX + DECISION --> STORE + STORE -.->|"Analyze & Improve"| NODES + + style BUILD fill:#ffbe42,stroke:#cc5d00,stroke-width:3px,color:#333 + style EXPORT fill:#fff59d,stroke:#ed8c00,stroke-width:2px,color:#333 + style RUN fill:#ffb100,stroke:#cc5d00,stroke-width:3px,color:#333 + style DECISION fill:#ffcc80,stroke:#ed8c00,stroke-width:2px,color:#333 + style INFRA fill:#e8763d,stroke:#cc5d00,stroke-width:3px,color:#fff + style STORE fill:#ed8c00,stroke:#cc5d00,stroke-width:2px,color:#fff +``` + +### A Vantagem Aden + +| Frameworks Tradicionais | Aden | +|-------------------------|------| +| Codificar fluxos de trabalho de agentes | Descrever objetivos em linguagem natural | +| Definição manual de grafos | Grafos de agentes auto-gerados | +| Tratamento reativo de erros | Auto-evolução proativa | +| Configurações de ferramentas estáticas | Nós dinâmicos envolvidos em SDK | +| Configuração de monitoramento separada | Observabilidade em tempo real integrada | +| Gerenciamento de orçamento DIY | Controles de custo e degradação integrados | + +### Como Funciona + +1. **Defina Seu Objetivo** → Descreva o que você quer alcançar em linguagem simples +2. **Agente de Codificação Gera** → Cria o grafo de agentes, código de conexão e casos de teste +3. **Workers Executam** → Nós envolvidos em SDK executam com observabilidade completa e acesso a ferramentas +4. **Plano de Controle Monitora** → Métricas em tempo real, aplicação de orçamento, gerenciamento de políticas +5. **Auto-Aperfeiçoamento** → Em caso de falha, o sistema evolui o grafo e reimplanta automaticamente + +## Como Aden se Compara + +Aden adota uma abordagem fundamentalmente diferente para o desenvolvimento de agentes. 
Enquanto a maioria dos frameworks exige que você codifique fluxos de trabalho ou defina manualmente grafos de agentes, Aden usa um **agente de codificação para gerar todo o seu sistema de agentes** a partir de objetivos em linguagem natural. Quando os agentes falham, o framework não apenas registra erros—**ele evolui automaticamente o grafo de agentes** e reimplanta. + +> **Nota:** Para a tabela de comparação detalhada de frameworks e perguntas frequentes, consulte o [README.md](README.md) em inglês. + +### Quando Escolher Aden + +Escolha Aden quando você precisar de: + +- Agentes que **se auto-aperfeiçoam a partir de falhas** sem intervenção manual +- **Desenvolvimento orientado a objetivos** onde você descreve resultados, não fluxos de trabalho +- **Confiabilidade em produção** com recuperação e reimplantação automáticas +- **Iteração rápida** em arquiteturas de agentes sem reescrever código +- **Observabilidade completa** com monitoramento em tempo real e supervisão humana + +Escolha outros frameworks quando você precisar de: + +- **Fluxos de trabalho previsíveis e type-safe** (PydanticAI, Mastra) +- **RAG e processamento de documentos** (LlamaIndex, Haystack) +- **Pesquisa sobre emergência de agentes** (CAMEL) +- **Voz/multimodal em tempo real** (TEN Framework) +- **Encadeamento simples de componentes** (LangChain, Swarm) + +## Estrutura do Projeto + +``` +hive/ +├── core/ # Framework principal - Runtime de agentes, executor de grafos, protocolos +├── tools/ # Pacote de Ferramentas MCP - 19 ferramentas para capacidades de agentes +├── exports/ # Pacotes de Agentes - Agentes pré-construídos e exemplos +├── docs/ # Documentação e guias +├── scripts/ # Scripts de build e utilitários +├── .claude/ # Habilidades Claude Code para construir agentes +├── ENVIRONMENT_SETUP.md # Guia de configuração Python para desenvolvimento de agentes +├── DEVELOPER.md # Guia do desenvolvedor +├── CONTRIBUTING.md # Diretrizes de contribuição +└── ROADMAP.md # Roadmap do produto +``` + 
+## Desenvolvimento + +### Desenvolvimento de Agentes Python + +Para construir e executar agentes orientados a objetivos com o framework: + +```bash +# Configuração única +./scripts/setup-python.sh + +# Isto instala: +# - pacote framework (runtime principal) +# - pacote aden_tools (19 ferramentas MCP) +# - Todas as dependências + +# Construir novos agentes usando habilidades Claude Code +claude> /building-agents + +# Testar agentes +claude> /testing-agent + +# Executar agentes +PYTHONPATH=core:exports python -m agent_name run --input '{...}' +``` + +Consulte [ENVIRONMENT_SETUP.md](ENVIRONMENT_SETUP.md) para instruções completas de configuração. + +## Documentação + +- **[Guia do Desenvolvedor](DEVELOPER.md)** - Guia abrangente para desenvolvedores +- [Começando](docs/getting-started.md) - Instruções de configuração rápida +- [Guia de Configuração](docs/configuration.md) - Todas as opções de configuração +- [Visão Geral da Arquitetura](docs/architecture.md) - Design e estrutura do sistema + +## Roadmap + +O Aden Agent Framework visa ajudar desenvolvedores a construir agentes auto-adaptativos orientados a resultados. 
Encontre nosso roadmap aqui + +[ROADMAP.md](ROADMAP.md) + +```mermaid +timeline + title Aden Agent Framework Roadmap + section Foundation + Architecture : Node-Based Architecture : Python SDK : LLM Integration (OpenAI, Anthropic, Google) : Communication Protocol + Coding Agent : Goal Creation Session : Worker Agent Creation : MCP Tools Integration + Worker Agent : Human-in-the-Loop : Callback Handlers : Intervention Points : Streaming Interface + Tools : File Use : Memory (STM/LTM) : Web Search : Web Scraper : Audit Trail + Core : Eval System : Pydantic Validation : Docker Deployment : Documentation : Sample Agents + section Expansion + Intelligence : Guardrails : Streaming Mode : Semantic Search + Platform : JavaScript SDK : Custom Tool Integrator : Credential Store + Deployment : Self-Hosted : Cloud Services : CI/CD Pipeline + Templates : Sales Agent : Marketing Agent : Analytics Agent : Training Agent : Smart Form Agent +``` + +## Comunidade e Suporte + +Usamos [Discord](https://discord.com/invite/MXE49hrKDk) para suporte, solicitações de funcionalidades e discussões da comunidade. + +- Discord - [Junte-se à nossa comunidade](https://discord.com/invite/MXE49hrKDk) +- Twitter/X - [@adenhq](https://x.com/aden_hq) +- LinkedIn - [Página da Empresa](https://www.linkedin.com/company/teamaden/) + +## Contribuindo + +Aceitamos contribuições! Por favor, consulte [CONTRIBUTING.md](CONTRIBUTING.md) para diretrizes. + +1. Faça fork do repositório +2. Crie sua branch de funcionalidade (`git checkout -b feature/amazing-feature`) +3. Faça commit das suas alterações (`git commit -m 'Add amazing feature'`) +4. Faça push para a branch (`git push origin feature/amazing-feature`) +5. Abra um Pull Request + +## Junte-se ao Nosso Time + +**Estamos contratando!** Junte-se a nós em funções de engenharia, pesquisa e go-to-market. 
+ +[Ver Posições Abertas](https://jobs.adenhq.com/a8cec478-cdbc-473c-bbd4-f4b7027ec193/applicant) + +## Segurança + +Para questões de segurança, por favor consulte [SECURITY.md](SECURITY.md). + +## Licença + +Este projeto está licenciado sob a Licença Apache 2.0 - veja o arquivo [LICENSE](LICENSE) para detalhes. + +## Perguntas Frequentes (FAQ) + +> **Nota:** Para as perguntas frequentes completas, consulte o [README.md](README.md) em inglês. + +**P: O Aden depende do LangChain ou outros frameworks de agentes?** + +Não. O Aden é construído do zero sem dependências do LangChain, CrewAI ou outros frameworks de agentes. O framework é projetado para ser leve e flexível, gerando grafos de agentes dinamicamente em vez de depender de componentes predefinidos. + +**P: Quais provedores de LLM o Aden suporta?** + +O Aden suporta mais de 100 provedores de LLM através da integração LiteLLM, incluindo OpenAI (GPT-4, GPT-4o), Anthropic (modelos Claude), Google Gemini, Mistral, Groq e muitos mais. Simplesmente configure a variável de ambiente da chave API apropriada e especifique o nome do modelo. + +**P: O Aden é open-source?** + +Sim, o Aden é totalmente open-source sob a Licença Apache 2.0. Incentivamos ativamente contribuições e colaboração da comunidade. + +**P: O que torna o Aden diferente de outros frameworks de agentes?** + +O Aden gera todo o seu sistema de agentes a partir de objetivos em linguagem natural usando um agente de codificação—você não codifica fluxos de trabalho nem define grafos manualmente. Quando os agentes falham, o framework captura automaticamente os dados de falha, evolui o grafo de agentes e reimplanta. Este loop de auto-aperfeiçoamento é único do Aden. + +**P: O Aden suporta fluxos de trabalho com humano no loop?** + +Sim, o Aden suporta totalmente fluxos de trabalho com humano no loop através de nós de intervenção que pausam a execução para entrada humana. 
Estes incluem timeouts configuráveis e políticas de escalonamento, permitindo colaboração perfeita entre especialistas humanos e agentes de IA. + +--- + +

+ Feito com 🔥 Paixão em San Francisco +

diff --git a/README.ru.md b/README.ru.md new file mode 100644 index 00000000..524af454 --- /dev/null +++ b/README.ru.md @@ -0,0 +1,339 @@ +

+ Hive Banner +

+ +

+ English | + 简体中文 | + Español | + Português | + 日本語 | + Русский +

+ +[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/adenhq/hive/blob/main/LICENSE) +[![Y Combinator](https://img.shields.io/badge/Y%20Combinator-Aden-orange)](https://www.ycombinator.com/companies/aden) +[![Docker Pulls](https://img.shields.io/docker/pulls/adenhq/hive?logo=Docker&labelColor=%23528bff)](https://hub.docker.com/u/adenhq) +[![Discord](https://img.shields.io/discord/1172610340073242735?logo=discord&labelColor=%235462eb&logoColor=%23f5f5f5&color=%235462eb)](https://discord.com/invite/MXE49hrKDk) +[![Twitter Follow](https://img.shields.io/twitter/follow/teamaden?logo=X&color=%23f5f5f5)](https://x.com/aden_hq) +[![LinkedIn](https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff)](https://www.linkedin.com/company/teamaden/) + +

+ AI Agents + Multi-Agent + Goal-Driven + HITL + Production +

+

+ OpenAI + Anthropic + Gemini + MCP +

+ +## Обзор + +Создавайте надёжных, самосовершенствующихся ИИ-агентов без жёсткого кодирования рабочих процессов. Определите свою цель через разговор с кодирующим агентом, и фреймворк сгенерирует граф узлов с динамически созданным кодом соединений. Когда что-то ломается, фреймворк захватывает данные об ошибке, эволюционирует агента через кодирующего агента и переразвёртывает. Встроенные узлы человеческого вмешательства, управление учётными данными и мониторинг в реальном времени дают вам контроль без ущерба для адаптивности. + +Посетите [adenhq.com](https://adenhq.com) для полной документации, примеров и руководств. + +## Что такое Aden + +

+ Aden Architecture +

+ +Aden — это платформа для создания, развёртывания, эксплуатации и адаптации ИИ-агентов: + +- **Создание** - Кодирующий агент генерирует специализированных рабочих агентов (продажи, маркетинг, операции) из целей на естественном языке +- **Развёртывание** - Headless-развёртывание с интеграцией CI/CD и полным управлением жизненным циклом API +- **Эксплуатация** - Мониторинг в реальном времени, наблюдаемость и защитные барьеры времени выполнения обеспечивают надёжность агентов +- **Адаптация** - Непрерывная оценка, контроль и адаптация гарантируют улучшение агентов со временем +- **Инфраструктура** - Общая память, интеграции LLM, инструменты и навыки питают каждого агента + +## Быстрые ссылки + +- **[Документация](https://docs.adenhq.com/)** - Полные руководства и справочник API +- **[Руководство по самостоятельному хостингу](https://docs.adenhq.com/getting-started/quickstart)** - Разверните Hive в своей инфраструктуре +- **[История изменений](https://github.com/adenhq/hive/releases)** - Последние обновления и релизы + +- **[Сообщить о проблеме](https://github.com/adenhq/hive/issues)** - Отчёты об ошибках и запросы функций + +## Быстрый старт + +### Предварительные требования + +- [Python 3.11+](https://www.python.org/downloads/) - Для разработки агентов +- [Docker](https://docs.docker.com/get-docker/) (v20.10+) - Опционально, для контейнеризованных инструментов + +### Установка + +```bash +# Клонировать репозиторий +git clone https://github.com/adenhq/hive.git +cd hive + +# Запустить настройку окружения Python +./scripts/setup-python.sh +``` + +Это установит: +- **framework** - Основная среда выполнения агентов и исполнитель графов +- **aden_tools** - 19 инструментов MCP для возможностей агентов +- Все необходимые зависимости + +### Создайте своего первого агента + +```bash +# Установить навыки Claude Code (один раз) +./quickstart.sh + +# Создать агента с помощью Claude Code +claude> /building-agents + +# Протестировать агента +claude> /testing-agent + +# Запустить 
агента +PYTHONPATH=core:exports python -m your_agent_name run --input '{...}' +``` + +**[📖 Полное руководство по настройке](ENVIRONMENT_SETUP.md)** - Подробные инструкции для разработки агентов + +## Функции + +- **Целеориентированная разработка** - Определяйте цели на естественном языке; кодирующий агент генерирует граф агентов и код соединений для их достижения +- **Самоадаптирующиеся агенты** - Фреймворк захватывает сбои, обновляет цели и обновляет граф агентов +- **Динамические соединения узлов** - Без предопределённых рёбер; код соединений генерируется любым способным LLM на основе ваших целей +- **Узлы, обёрнутые SDK** - Каждый узел получает общую память, локальную RLM-память, мониторинг, инструменты и доступ к LLM из коробки +- **Человек в контуре** - Узлы вмешательства, которые приостанавливают выполнение для человеческого ввода с настраиваемыми таймаутами и эскалацией +- **Наблюдаемость в реальном времени** - WebSocket-стриминг для живого мониторинга выполнения агентов, решений и межузловой коммуникации +- **Контроль затрат и бюджета** - Устанавливайте лимиты расходов, ограничения и политики автоматической деградации модели +- **Готовность к продакшену** - Возможность самостоятельного хостинга, создан для масштабирования и надёжности + +## Почему Aden + +Традиционные фреймворки агентов требуют ручного проектирования рабочих процессов, определения взаимодействий агентов и реактивной обработки сбоев. Aden переворачивает эту парадигму — **вы описываете результаты, и система строит себя сама**. + +```mermaid +flowchart LR + subgraph BUILD["🏗️ BUILD"] + GOAL["Define Goal
+ Success Criteria"] --> NODES["Add Nodes
LLM/Router/Function"] + NODES --> EDGES["Connect Edges
on_success/failure/conditional"] + EDGES --> TEST["Test & Validate"] --> APPROVE["Approve & Export"] + end + + subgraph EXPORT["📦 EXPORT"] + direction TB + JSON["agent.json
(GraphSpec)"] + TOOLS["tools.py
(Functions)"] + MCP["mcp_servers.json
(Integrations)"] + end + + subgraph RUN["🚀 RUNTIME"] + LOAD["AgentRunner
Load + Parse"] --> SETUP["Setup Runtime
+ ToolRegistry"] + SETUP --> EXEC["GraphExecutor
Execute Nodes"] + + subgraph DECISION["Decision Recording"] + DEC1["runtime.decide()
intent → options → choice"] + DEC2["runtime.record_outcome()
success, result, metrics"] + end + end + + subgraph INFRA["⚙️ INFRASTRUCTURE"] + CTX["NodeContext
memory • llm • tools"] + STORE[("FileStorage
Runs & Decisions")] + end + + APPROVE --> EXPORT + EXPORT --> LOAD + EXEC --> DECISION + EXEC --> CTX + DECISION --> STORE + STORE -.->|"Analyze & Improve"| NODES + + style BUILD fill:#ffbe42,stroke:#cc5d00,stroke-width:3px,color:#333 + style EXPORT fill:#fff59d,stroke:#ed8c00,stroke-width:2px,color:#333 + style RUN fill:#ffb100,stroke:#cc5d00,stroke-width:3px,color:#333 + style DECISION fill:#ffcc80,stroke:#ed8c00,stroke-width:2px,color:#333 + style INFRA fill:#e8763d,stroke:#cc5d00,stroke-width:3px,color:#fff + style STORE fill:#ed8c00,stroke:#cc5d00,stroke-width:2px,color:#fff +``` + +### Преимущество Aden + +| Традиционные фреймворки | Aden | +|-------------------------|------| +| Жёсткое кодирование рабочих процессов | Описание целей на естественном языке | +| Ручное определение графов | Автоматически генерируемые графы агентов | +| Реактивная обработка ошибок | Проактивная самоэволюция | +| Статические конфигурации инструментов | Динамические узлы, обёрнутые SDK | +| Отдельная настройка мониторинга | Встроенная наблюдаемость в реальном времени | +| DIY управление бюджетом | Интегрированный контроль затрат и деградация | + +### Как это работает + +1. **Определите цель** → Опишите, чего хотите достичь, простым языком +2. **Кодирующий агент генерирует** → Создаёт граф агентов, код соединений и тестовые случаи +3. **Рабочие выполняют** → Узлы, обёрнутые SDK, работают с полной наблюдаемостью и доступом к инструментам +4. **Плоскость управления мониторит** → Метрики в реальном времени, применение бюджета, управление политиками +5. **Самосовершенствование** → При сбое система эволюционирует граф и автоматически переразвёртывает + +## Сравнение Aden + +Aden использует принципиально иной подход к разработке агентов. В то время как большинство фреймворков требуют жёсткого кодирования рабочих процессов или ручного определения графов агентов, Aden использует **кодирующего агента для генерации всей системы агентов** из целей на естественном языке. 
Когда агенты терпят неудачу, фреймворк не просто регистрирует ошибки — он **автоматически эволюционирует граф агентов** и переразвёртывает. + +> **Примечание:** Для подробной таблицы сравнения фреймворков и часто задаваемых вопросов обратитесь к английской версии [README.md](README.md). + +### Когда выбирать Aden + +Выбирайте Aden, когда вам нужны: + +- Агенты, которые **самосовершенствуются на основе сбоев** без ручного вмешательства +- **Целеориентированная разработка**, где вы описываете результаты, а не рабочие процессы +- **Надёжность продакшена** с автоматическим восстановлением и переразвёртыванием +- **Быстрая итерация** архитектур агентов без переписывания кода +- **Полная наблюдаемость** с мониторингом в реальном времени и человеческим надзором + +Выбирайте другие фреймворки, когда вам нужны: + +- **Предсказуемые, типобезопасные рабочие процессы** (PydanticAI, Mastra) +- **RAG и обработка документов** (LlamaIndex, Haystack) +- **Исследование эмерджентности агентов** (CAMEL) +- **Голос/мультимодальность в реальном времени** (TEN Framework) +- **Простое связывание компонентов** (LangChain, Swarm) + +## Структура проекта + +``` +hive/ +├── core/ # Основной фреймворк - Среда выполнения агентов, исполнитель графов, протоколы +├── tools/ # Пакет инструментов MCP - 19 инструментов для возможностей агентов +├── exports/ # Пакеты агентов - Предварительно созданные агенты и примеры +├── docs/ # Документация и руководства +├── scripts/ # Скрипты сборки и утилиты +├── .claude/ # Навыки Claude Code для создания агентов +├── ENVIRONMENT_SETUP.md # Руководство по настройке Python для разработки агентов +├── DEVELOPER.md # Руководство разработчика +├── CONTRIBUTING.md # Руководство по участию +└── ROADMAP.md # Дорожная карта продукта +``` + +## Разработка + +### Разработка агентов на Python + +Для создания и запуска целеориентированных агентов с помощью фреймворка: + +```bash +# Одноразовая настройка +./scripts/setup-python.sh + +# Это установит: +# - пакет framework 
(основная среда выполнения) +# - пакет aden_tools (19 инструментов MCP) +# - Все зависимости + +# Создать новых агентов с помощью навыков Claude Code +claude> /building-agents + +# Протестировать агентов +claude> /testing-agent + +# Запустить агентов +PYTHONPATH=core:exports python -m agent_name run --input '{...}' +``` + +Обратитесь к [ENVIRONMENT_SETUP.md](ENVIRONMENT_SETUP.md) для полных инструкций по настройке. + +## Документация + +- **[Руководство разработчика](DEVELOPER.md)** - Полное руководство для разработчиков +- [Начало работы](docs/getting-started.md) - Инструкции по быстрой настройке +- [Руководство по конфигурации](docs/configuration.md) - Все опции конфигурации +- [Обзор архитектуры](docs/architecture.md) - Дизайн и структура системы + +## Дорожная карта + +Aden Agent Framework призван помочь разработчикам создавать самоадаптирующихся агентов, ориентированных на результат. Найдите нашу дорожную карту здесь + +[ROADMAP.md](ROADMAP.md) + +```mermaid +timeline + title Aden Agent Framework Roadmap + section Foundation + Architecture : Node-Based Architecture : Python SDK : LLM Integration (OpenAI, Anthropic, Google) : Communication Protocol + Coding Agent : Goal Creation Session : Worker Agent Creation : MCP Tools Integration + Worker Agent : Human-in-the-Loop : Callback Handlers : Intervention Points : Streaming Interface + Tools : File Use : Memory (STM/LTM) : Web Search : Web Scraper : Audit Trail + Core : Eval System : Pydantic Validation : Docker Deployment : Documentation : Sample Agents + section Expansion + Intelligence : Guardrails : Streaming Mode : Semantic Search + Platform : JavaScript SDK : Custom Tool Integrator : Credential Store + Deployment : Self-Hosted : Cloud Services : CI/CD Pipeline + Templates : Sales Agent : Marketing Agent : Analytics Agent : Training Agent : Smart Form Agent +``` + +## Сообщество и поддержка + +Мы используем [Discord](https://discord.com/invite/MXE49hrKDk) для поддержки, запросов функций и обсуждений 
сообщества. + +- Discord - [Присоединиться к сообществу](https://discord.com/invite/MXE49hrKDk) +- Twitter/X - [@adenhq](https://x.com/aden_hq) +- LinkedIn - [Страница компании](https://www.linkedin.com/company/teamaden/) + +## Участие в разработке + +Мы приветствуем вклад! Пожалуйста, ознакомьтесь с [CONTRIBUTING.md](CONTRIBUTING.md) для руководств. + +1. Сделайте форк репозитория +2. Создайте ветку функции (`git checkout -b feature/amazing-feature`) +3. Зафиксируйте изменения (`git commit -m 'Add amazing feature'`) +4. Отправьте в ветку (`git push origin feature/amazing-feature`) +5. Откройте Pull Request + +## Присоединяйтесь к команде + +**Мы нанимаем!** Присоединяйтесь к нам на позициях в инженерии, исследованиях и выходе на рынок. + +[Посмотреть открытые позиции](https://jobs.adenhq.com/a8cec478-cdbc-473c-bbd4-f4b7027ec193/applicant) + +## Безопасность + +По вопросам безопасности, пожалуйста, обратитесь к [SECURITY.md](SECURITY.md). + +## Лицензия + +Этот проект лицензирован под лицензией Apache 2.0 - см. файл [LICENSE](LICENSE) для деталей. + +## Часто задаваемые вопросы (FAQ) + +> **Примечание:** Для полных часто задаваемых вопросов обратитесь к английской версии [README.md](README.md). + +**В: Зависит ли Aden от LangChain или других фреймворков агентов?** + +Нет. Aden построен с нуля без зависимостей от LangChain, CrewAI или других фреймворков агентов. Фреймворк разработан лёгким и гибким, динамически генерируя графы агентов вместо того, чтобы полагаться на предопределённые компоненты. + +**В: Каких провайдеров LLM поддерживает Aden?** + +Aden поддерживает более 100 провайдеров LLM через интеграцию LiteLLM, включая OpenAI (GPT-4, GPT-4o), Anthropic (модели Claude), Google Gemini, Mistral, Groq и многих других. Просто настройте соответствующую переменную окружения API-ключа и укажите имя модели. + +**В: Aden с открытым исходным кодом?** + +Да, Aden полностью с открытым исходным кодом под лицензией Apache 2.0. 
Мы активно поощряем вклад и сотрудничество сообщества. + +**В: Что делает Aden отличным от других фреймворков агентов?** + +Aden генерирует всю систему агентов из целей на естественном языке, используя кодирующего агента — вы не кодируете рабочие процессы и не определяете графы вручную. Когда агенты терпят неудачу, фреймворк автоматически захватывает данные о сбое, эволюционирует граф агентов и переразвёртывает. Этот цикл самосовершенствования уникален для Aden. + +**В: Поддерживает ли Aden рабочие процессы с человеком в контуре?** + +Да, Aden полностью поддерживает рабочие процессы с человеком в контуре через узлы вмешательства, которые приостанавливают выполнение для человеческого ввода. Они включают настраиваемые таймауты и политики эскалации, обеспечивая бесшовное сотрудничество между экспертами-людьми и ИИ-агентами. + +--- + +

+ Сделано с 🔥 Страстью в Сан-Франциско +

diff --git a/README.zh-CN.md b/README.zh-CN.md new file mode 100644 index 00000000..5608e199 --- /dev/null +++ b/README.zh-CN.md @@ -0,0 +1,339 @@ +

+ Hive Banner +

+ +

+ English | + 简体中文 | + Español | + Português | + 日本語 | + Русский +

+ +[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/adenhq/hive/blob/main/LICENSE) +[![Y Combinator](https://img.shields.io/badge/Y%20Combinator-Aden-orange)](https://www.ycombinator.com/companies/aden) +[![Docker Pulls](https://img.shields.io/docker/pulls/adenhq/hive?logo=Docker&labelColor=%23528bff)](https://hub.docker.com/u/adenhq) +[![Discord](https://img.shields.io/discord/1172610340073242735?logo=discord&labelColor=%235462eb&logoColor=%23f5f5f5&color=%235462eb)](https://discord.com/invite/MXE49hrKDk) +[![Twitter Follow](https://img.shields.io/twitter/follow/teamaden?logo=X&color=%23f5f5f5)](https://x.com/aden_hq) +[![LinkedIn](https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff)](https://www.linkedin.com/company/teamaden/) + +

+ AI Agents + Multi-Agent + Goal-Driven + HITL + Production +

+

+ OpenAI + Anthropic + Gemini + MCP +

+ +## 概述 + +构建可靠的、自我改进的 AI 智能体,无需硬编码工作流。通过与编码智能体对话来定义目标,框架会生成带有动态创建连接代码的节点图。当出现问题时,框架会捕获故障数据,通过编码智能体进化智能体,并重新部署。内置的人机协作节点、凭证管理和实时监控让您在保持适应性的同时拥有完全控制权。 + +访问 [adenhq.com](https://adenhq.com) 获取完整文档、示例和指南。 + +## 什么是 Aden + +

+ Aden Architecture +

+ +Aden 是一个用于构建、部署、运营和适应 AI 智能体的平台: + +- **构建** - 编码智能体根据自然语言目标生成专业的工作智能体(销售、营销、运营) +- **部署** - 无头部署,支持 CI/CD 集成和完整的 API 生命周期管理 +- **运营** - 实时监控、可观测性和运行时护栏确保智能体可靠运行 +- **适应** - 持续评估、监督和适应确保智能体随时间改进 +- **基础设施** - 共享内存、LLM 集成、工具和技能为每个智能体提供支持 + +## 快速链接 + +- **[文档](https://docs.adenhq.com/)** - 完整指南和 API 参考 +- **[自托管指南](https://docs.adenhq.com/getting-started/quickstart)** - 在您的基础设施上部署 Hive +- **[更新日志](https://github.com/adenhq/hive/releases)** - 最新更新和版本 + +- **[报告问题](https://github.com/adenhq/hive/issues)** - Bug 报告和功能请求 + +## 快速开始 + +### 前置要求 + +- [Python 3.11+](https://www.python.org/downloads/) - 用于智能体开发 +- [Docker](https://docs.docker.com/get-docker/) (v20.10+) - 可选,用于容器化工具 + +### 安装 + +```bash +# 克隆仓库 +git clone https://github.com/adenhq/hive.git +cd hive + +# 运行 Python 环境设置 +./scripts/setup-python.sh +``` + +这将安装: +- **framework** - 核心智能体运行时和图执行器 +- **aden_tools** - 19 个 MCP 工具提供智能体能力 +- 所有必需的依赖项 + +### 构建您的第一个智能体 + +```bash +# 安装 Claude Code 技能(一次性) +./quickstart.sh + +# 使用 Claude Code 构建智能体 +claude> /building-agents + +# 测试您的智能体 +claude> /testing-agent + +# 运行您的智能体 +PYTHONPATH=core:exports python -m your_agent_name run --input '{...}' +``` + +**[📖 完整设置指南](ENVIRONMENT_SETUP.md)** - 智能体开发的详细说明 + +## 功能特性 + +- **目标驱动开发** - 用自然语言定义目标;编码智能体生成智能体图和连接代码来实现它们 +- **自适应智能体** - 框架捕获故障,更新目标并更新智能体图 +- **动态节点连接** - 没有预定义边;连接代码由任何有能力的 LLM 根据您的目标生成 +- **SDK 封装节点** - 每个节点开箱即用地获得共享内存、本地 RLM 内存、监控、工具和 LLM 访问 +- **人机协作** - 干预节点暂停执行以等待人工输入,支持可配置的超时和升级 +- **实时可观测性** - WebSocket 流式传输用于实时监控智能体执行、决策和节点间通信 +- **成本与预算控制** - 设置支出限制、节流和自动模型降级策略 +- **生产就绪** - 可自托管,为规模和可靠性而构建 + +## 为什么选择 Aden + +传统智能体框架要求您手动设计工作流、定义智能体交互并被动处理故障。Aden 颠覆了这一范式——**您描述结果,系统自动构建自己**。 + +```mermaid +flowchart LR + subgraph BUILD["🏗️ BUILD"] + GOAL["Define Goal
+ Success Criteria"] --> NODES["Add Nodes
LLM/Router/Function"] + NODES --> EDGES["Connect Edges
on_success/failure/conditional"] + EDGES --> TEST["Test & Validate"] --> APPROVE["Approve & Export"] + end + + subgraph EXPORT["📦 EXPORT"] + direction TB + JSON["agent.json
(GraphSpec)"] + TOOLS["tools.py
(Functions)"] + MCP["mcp_servers.json
(Integrations)"] + end + + subgraph RUN["🚀 RUNTIME"] + LOAD["AgentRunner
Load + Parse"] --> SETUP["Setup Runtime
+ ToolRegistry"] + SETUP --> EXEC["GraphExecutor
Execute Nodes"] + + subgraph DECISION["Decision Recording"] + DEC1["runtime.decide()
intent → options → choice"] + DEC2["runtime.record_outcome()
success, result, metrics"] + end + end + + subgraph INFRA["⚙️ INFRASTRUCTURE"] + CTX["NodeContext
memory • llm • tools"] + STORE[("FileStorage
Runs & Decisions")] + end + + APPROVE --> EXPORT + EXPORT --> LOAD + EXEC --> DECISION + EXEC --> CTX + DECISION --> STORE + STORE -.->|"Analyze & Improve"| NODES + + style BUILD fill:#ffbe42,stroke:#cc5d00,stroke-width:3px,color:#333 + style EXPORT fill:#fff59d,stroke:#ed8c00,stroke-width:2px,color:#333 + style RUN fill:#ffb100,stroke:#cc5d00,stroke-width:3px,color:#333 + style DECISION fill:#ffcc80,stroke:#ed8c00,stroke-width:2px,color:#333 + style INFRA fill:#e8763d,stroke:#cc5d00,stroke-width:3px,color:#fff + style STORE fill:#ed8c00,stroke:#cc5d00,stroke-width:2px,color:#fff +``` + +### Aden 的优势 + +| 传统框架 | Aden | +|----------|------| +| 硬编码智能体工作流 | 用自然语言描述目标 | +| 手动图定义 | 自动生成智能体图 | +| 被动错误处理 | 主动自我进化 | +| 静态工具配置 | 动态 SDK 封装节点 | +| 单独设置监控 | 内置实时可观测性 | +| DIY 预算管理 | 集成成本控制和降级 | + +### 工作原理 + +1. **定义目标** → 用简单英语描述您想要实现的目标 +2. **编码智能体生成** → 创建智能体图、连接代码和测试用例 +3. **工作节点执行** → SDK 封装节点以完全可观测性和工具访问运行 +4. **控制平面监控** → 实时指标、预算执行、策略管理 +5. **自我改进** → 失败时,系统进化图并自动重新部署 + +## Aden 与其他框架的比较 + +Aden 在智能体开发方面采取了根本不同的方法。虽然大多数框架要求您硬编码工作流或手动定义智能体图,但 Aden 使用**编码智能体从自然语言目标生成整个智能体系统**。当智能体失败时,框架不仅记录错误——它会**自动进化智能体图**并重新部署。 + +> **注意:** 详细的框架比较表和常见问题解答,请参阅英文版 [README.md](README.md)。 + +### 何时选择 Aden + +选择 Aden 当您需要: + +- 智能体从失败中**自我改进**而无需人工干预 +- **目标驱动的开发**,您描述结果而非工作流 +- 具有自动恢复和重新部署的**生产可靠性** +- 无需重写代码即可**快速迭代**智能体架构 +- 具有实时监控和人工监督的**完整可观测性** + +选择其他框架当您需要: + +- **类型安全、可预测的工作流**(PydanticAI、Mastra) +- **RAG 和文档处理**(LlamaIndex、Haystack) +- **智能体涌现的研究**(CAMEL) +- **实时语音/多模态**(TEN Framework) +- **简单的组件链接**(LangChain、Swarm) + +## 项目结构 + +``` +hive/ +├── core/ # 核心框架 - 智能体运行时、图执行器、协议 +├── tools/ # MCP 工具包 - 19 个工具提供智能体能力 +├── exports/ # 智能体包 - 预构建的智能体和示例 +├── docs/ # 文档和指南 +├── scripts/ # 构建和实用脚本 +├── .claude/ # Claude Code 技能用于构建智能体 +├── ENVIRONMENT_SETUP.md # 智能体开发的 Python 设置指南 +├── DEVELOPER.md # 开发者指南 +├── CONTRIBUTING.md # 贡献指南 +└── ROADMAP.md # 产品路线图 +``` + +## 开发 + +### Python 智能体开发 + +使用框架构建和运行目标驱动的智能体: + +```bash +# 一次性设置 +./scripts/setup-python.sh + +# 这将安装: +# - framework 
包(核心运行时) +# - aden_tools 包(19 个 MCP 工具) +# - 所有依赖项 + +# 使用 Claude Code 技能构建新智能体 +claude> /building-agents + +# 测试智能体 +claude> /testing-agent + +# 运行智能体 +PYTHONPATH=core:exports python -m agent_name run --input '{...}' +``` + +完整设置说明请参阅 [ENVIRONMENT_SETUP.md](ENVIRONMENT_SETUP.md)。 + +## 文档 + +- **[开发者指南](DEVELOPER.md)** - 开发者综合指南 +- [入门指南](docs/getting-started.md) - 快速设置说明 +- [配置指南](docs/configuration.md) - 所有配置选项 +- [架构概述](docs/architecture.md) - 系统设计和结构 + +## 路线图 + +Aden 智能体框架旨在帮助开发者构建面向结果的、自适应的智能体。请在此查看我们的路线图 + +[ROADMAP.md](ROADMAP.md) + +```mermaid +timeline + title Aden Agent Framework Roadmap + section Foundation + Architecture : Node-Based Architecture : Python SDK : LLM Integration (OpenAI, Anthropic, Google) : Communication Protocol + Coding Agent : Goal Creation Session : Worker Agent Creation : MCP Tools Integration + Worker Agent : Human-in-the-Loop : Callback Handlers : Intervention Points : Streaming Interface + Tools : File Use : Memory (STM/LTM) : Web Search : Web Scraper : Audit Trail + Core : Eval System : Pydantic Validation : Docker Deployment : Documentation : Sample Agents + section Expansion + Intelligence : Guardrails : Streaming Mode : Semantic Search + Platform : JavaScript SDK : Custom Tool Integrator : Credential Store + Deployment : Self-Hosted : Cloud Services : CI/CD Pipeline + Templates : Sales Agent : Marketing Agent : Analytics Agent : Training Agent : Smart Form Agent +``` + +## 社区与支持 + +我们使用 [Discord](https://discord.com/invite/MXE49hrKDk) 进行支持、功能请求和社区讨论。 + +- Discord - [加入我们的社区](https://discord.com/invite/MXE49hrKDk) +- Twitter/X - [@adenhq](https://x.com/aden_hq) +- LinkedIn - [公司主页](https://www.linkedin.com/company/teamaden/) + +## 贡献 + +我们欢迎贡献!请参阅 [CONTRIBUTING.md](CONTRIBUTING.md) 了解指南。 + +1. Fork 仓库 +2. 创建功能分支 (`git checkout -b feature/amazing-feature`) +3. 提交更改 (`git commit -m 'Add amazing feature'`) +4. 推送到分支 (`git push origin feature/amazing-feature`) +5. 
创建 Pull Request + +## 加入我们的团队 + +**我们正在招聘!** 加入我们的工程、研究和市场推广团队。 + +[查看开放职位](https://jobs.adenhq.com/a8cec478-cdbc-473c-bbd4-f4b7027ec193/applicant) + +## 安全 + +有关安全问题,请参阅 [SECURITY.md](SECURITY.md)。 + +## 许可证 + +本项目采用 Apache License 2.0 许可证 - 详情请参阅 [LICENSE](LICENSE) 文件。 + +## 常见问题 (FAQ) + +> **注意:** 完整的常见问题解答,请参阅英文版 [README.md](README.md)。 + +**问:Aden 是否依赖 LangChain 或其他智能体框架?** + +不。Aden 从头开始构建,不依赖 LangChain、CrewAI 或其他智能体框架。该框架设计精简灵活,动态生成智能体图而非依赖预定义组件。 + +**问:Aden 支持哪些 LLM 提供商?** + +Aden 通过 LiteLLM 集成支持 100 多个 LLM 提供商,包括 OpenAI(GPT-4、GPT-4o)、Anthropic(Claude 模型)、Google Gemini、Mistral、Groq 等。只需设置适当的 API 密钥环境变量并指定模型名称即可。 + +**问:Aden 是开源的吗?** + +是的,Aden 在 Apache License 2.0 下完全开源。我们积极鼓励社区贡献和协作。 + +**问:Aden 与其他智能体框架有何不同?** + +Aden 使用编码智能体从自然语言目标生成整个智能体系统——您无需硬编码工作流或手动定义图。当智能体失败时,框架会自动捕获故障数据、进化智能体图并重新部署。这种自我改进循环是 Aden 独有的。 + +**问:Aden 支持人机协作工作流吗?** + +是的,Aden 通过干预节点完全支持人机协作工作流,这些节点会暂停执行以等待人工输入。包括可配置的超时和升级策略,实现人类专家与 AI 智能体的无缝协作。 + +--- + +

+ 用 🔥 热情打造于旧金山 +

diff --git a/ROADMAP.md b/ROADMAP.md index d2816244..d5e888b2 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -126,7 +126,7 @@ timeline - [ ] Docker container standardization - [ ] Headless backend execution - [ ] Exposed API for frontend attachment -- [ ] Local monitoring & observability (from hive repo) +- [ ] Local monitoring & observability - [ ] Basic lifecycle APIs (Start, Stop, Pause, Resume) ### Deployment (Cloud) diff --git a/aden-tools/src/aden_tools/tools/file_read_tool/README.md b/aden-tools/src/aden_tools/tools/file_read_tool/README.md deleted file mode 100644 index 7ee31712..00000000 --- a/aden-tools/src/aden_tools/tools/file_read_tool/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# File Read Tool - -Read contents of local files with encoding support. - -## Description - -Use for reading configs, data files, source code, logs, or any text file. Returns file content along with path, name, size, and encoding metadata. - -## Arguments - -| Argument | Type | Required | Default | Description | -|----------|------|----------|---------|-------------| -| `file_path` | str | Yes | - | Path to the file to read (absolute or relative) | -| `encoding` | str | No | `utf-8` | File encoding (utf-8, latin-1, etc.) | -| `max_size` | int | No | `10000000` | Maximum file size to read in bytes (default 10MB) | - -## Environment Variables - -This tool does not require any environment variables. 
- -## Error Handling - -Returns error dicts for common issues: -- `File not found: ` - File does not exist -- `Not a file: ` - Path points to a directory -- `File too large: bytes (max: )` - File exceeds max_size limit -- `Failed to decode file with encoding ''` - Wrong encoding specified -- `Permission denied: ` - No read access to file diff --git a/aden-tools/src/aden_tools/tools/file_read_tool/__init__.py b/aden-tools/src/aden_tools/tools/file_read_tool/__init__.py deleted file mode 100644 index a02fc205..00000000 --- a/aden-tools/src/aden_tools/tools/file_read_tool/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -"""File Read Tool - Read contents of local files.""" -from .file_read_tool import register_tools - -__all__ = ["register_tools"] diff --git a/aden-tools/src/aden_tools/tools/file_read_tool/file_read_tool.py b/aden-tools/src/aden_tools/tools/file_read_tool/file_read_tool.py deleted file mode 100644 index 0d2ce90b..00000000 --- a/aden-tools/src/aden_tools/tools/file_read_tool/file_read_tool.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -File Read Tool - Read contents of local files. - -Supports reading text files with various encodings. -Returns file content along with metadata. -""" -from __future__ import annotations - -from pathlib import Path - -from fastmcp import FastMCP - - -def register_tools(mcp: FastMCP) -> None: - """Register file read tools with the MCP server.""" - - @mcp.tool() - def file_read( - file_path: str, - encoding: str = "utf-8", - max_size: int = 10_000_000, - ) -> dict: - """ - Read the contents of a local file. - - Use for reading configs, data files, source code, logs, or any text file. - Returns file content along with path, name, size, and encoding. - - Args: - file_path: Path to the file to read (absolute or relative) - encoding: File encoding (utf-8, latin-1, etc.) 
- max_size: Maximum file size to read in bytes (default 10MB) - - Returns: - Dict with file content and metadata, or error dict - """ - try: - path = Path(file_path).resolve() - - # Check if file exists - if not path.exists(): - return {"error": f"File not found: {file_path}"} - - # Check if it's a file (not directory) - if not path.is_file(): - return {"error": f"Not a file: {file_path}"} - - # Check file size - file_size = path.stat().st_size - if max_size > 0 and file_size > max_size: - return { - "error": f"File too large: {file_size} bytes (max: {max_size})", - "file_size": file_size, - } - - # Read the file - content = path.read_text(encoding=encoding) - - return { - "path": str(path), - "name": path.name, - "content": content, - "size": len(content), - "encoding": encoding, - } - - except UnicodeDecodeError as e: - return { - "error": f"Failed to decode file with encoding '{encoding}': {str(e)}", - "suggestion": "Try a different encoding like 'latin-1' or 'cp1252'", - } - except PermissionError: - return {"error": f"Permission denied: {file_path}"} - except Exception as e: - return {"error": f"Failed to read file: {str(e)}"} diff --git a/aden-tools/src/aden_tools/tools/file_write_tool/README.md b/aden-tools/src/aden_tools/tools/file_write_tool/README.md deleted file mode 100644 index 9a692be4..00000000 --- a/aden-tools/src/aden_tools/tools/file_write_tool/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# File Write Tool - -Write content to local files with encoding support. - -## Description - -Can create new files or overwrite/append to existing ones. Use for saving data, creating configs, writing reports, or exporting results. Optionally creates parent directories if they don't exist. 
- -## Arguments - -| Argument | Type | Required | Default | Description | -|----------|------|----------|---------|-------------| -| `file_path` | str | Yes | - | Path to the file to write (absolute or relative) | -| `content` | str | Yes | - | Content to write to the file | -| `encoding` | str | No | `utf-8` | File encoding (utf-8, latin-1, etc.) | -| `mode` | str | No | `write` | Write mode - 'write' (overwrite) or 'append' | -| `create_dirs` | bool | No | `True` | Create parent directories if they don't exist | - -## Environment Variables - -This tool does not require any environment variables. - -## Error Handling - -Returns error dicts for common issues: -- `Parent directory does not exist: ` - Parent dir missing and create_dirs=False -- `Invalid mode: . Use 'write' or 'append'.` - Invalid mode specified -- `Permission denied: ` - No write access to file/directory -- `OS error writing file: ` - Filesystem error diff --git a/aden-tools/src/aden_tools/tools/file_write_tool/__init__.py b/aden-tools/src/aden_tools/tools/file_write_tool/__init__.py deleted file mode 100644 index 15380945..00000000 --- a/aden-tools/src/aden_tools/tools/file_write_tool/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -"""File Write Tool - Create or update local files.""" -from .file_write_tool import register_tools - -__all__ = ["register_tools"] diff --git a/aden-tools/src/aden_tools/tools/file_write_tool/file_write_tool.py b/aden-tools/src/aden_tools/tools/file_write_tool/file_write_tool.py deleted file mode 100644 index cb3c5f07..00000000 --- a/aden-tools/src/aden_tools/tools/file_write_tool/file_write_tool.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -File Write Tool - Create or update local files. - -Supports writing text files with various encodings. -Can create directories if they don't exist. 
-""" -from __future__ import annotations - -from pathlib import Path - -from fastmcp import FastMCP - - -def register_tools(mcp: FastMCP) -> None: - """Register file write tools with the MCP server.""" - - @mcp.tool() - def file_write( - file_path: str, - content: str, - encoding: str = "utf-8", - mode: str = "write", - create_dirs: bool = True, - ) -> dict: - """ - Write content to a local file. - - Can create new files or overwrite/append to existing ones. - Use for saving data, creating configs, writing reports, or exporting results. - - Args: - file_path: Path to the file to write (absolute or relative) - content: Content to write to the file - encoding: File encoding (utf-8, latin-1, etc.) - mode: Write mode - 'write' (overwrite) or 'append' - create_dirs: Create parent directories if they don't exist - - Returns: - Dict with write result or error dict - """ - try: - path = Path(file_path).resolve() - - # Create parent directories if requested - if create_dirs: - path.parent.mkdir(parents=True, exist_ok=True) - elif not path.parent.exists(): - return {"error": f"Parent directory does not exist: {path.parent}"} - - # Determine write mode - if mode == "append": - write_mode = "a" - elif mode == "write": - write_mode = "w" - else: - return {"error": f"Invalid mode: {mode}. 
Use 'write' or 'append'."} - - # Check if we're overwriting - existed = path.exists() - previous_size = path.stat().st_size if existed else 0 - - # Write the file - with open(path, write_mode, encoding=encoding) as f: - f.write(content) - - new_size = path.stat().st_size - - return { - "path": str(path), - "name": path.name, - "bytes_written": len(content.encode(encoding)), - "total_size": new_size, - "mode": mode, - "created": not existed, - "previous_size": previous_size if existed else None, - } - - except PermissionError: - return {"error": f"Permission denied: {file_path}"} - except OSError as e: - return {"error": f"OS error writing file: {str(e)}"} - except Exception as e: - return {"error": f"Failed to write file: {str(e)}"} diff --git a/aden-tools/tests/tools/test_file_read_tool.py b/aden-tools/tests/tools/test_file_read_tool.py deleted file mode 100644 index c9902584..00000000 --- a/aden-tools/tests/tools/test_file_read_tool.py +++ /dev/null @@ -1,96 +0,0 @@ -"""Tests for file_read tool (FastMCP).""" -import pytest -from pathlib import Path - -from fastmcp import FastMCP -from aden_tools.tools.file_read_tool import register_tools - - -@pytest.fixture -def file_read_fn(mcp: FastMCP): - """Register and return the file_read tool function.""" - register_tools(mcp) - # Access the registered tool's function directly - return mcp._tool_manager._tools["file_read"].fn - - -class TestFileReadTool: - """Tests for file_read tool.""" - - def test_read_existing_file(self, file_read_fn, sample_text_file: Path): - """Reading an existing file returns content and metadata.""" - result = file_read_fn(file_path=str(sample_text_file)) - - assert "error" not in result - assert result["content"] == "Hello, World!\nLine 2\nLine 3" - assert result["name"] == "test.txt" - assert result["encoding"] == "utf-8" - assert "size" in result - - def test_read_file_not_found(self, file_read_fn, tmp_path: Path): - """Reading a non-existent file returns an error dict.""" - missing_file = 
tmp_path / "does_not_exist.txt" - - result = file_read_fn(file_path=str(missing_file)) - - assert "error" in result - assert "not found" in result["error"].lower() - - def test_read_directory_returns_error(self, file_read_fn, tmp_path: Path): - """Reading a directory (not a file) returns an error.""" - result = file_read_fn(file_path=str(tmp_path)) - - assert "error" in result - assert "not a file" in result["error"].lower() - - def test_read_file_too_large(self, file_read_fn, tmp_path: Path): - """Reading a file exceeding max_size returns an error.""" - large_file = tmp_path / "large.txt" - large_file.write_text("x" * 1000) - - result = file_read_fn(file_path=str(large_file), max_size=100) - - assert "error" in result - assert "too large" in result["error"].lower() - assert "file_size" in result - - def test_read_with_no_size_limit(self, file_read_fn, tmp_path: Path): - """Reading with max_size=0 allows any file size.""" - large_file = tmp_path / "large.txt" - content = "x" * 100_000 - large_file.write_text(content) - - # max_size=0 means no limit in the implementation - result = file_read_fn(file_path=str(large_file), max_size=0) - - assert "error" not in result - assert result["content"] == content - - def test_read_with_different_encoding(self, file_read_fn, tmp_path: Path): - """Reading with a specific encoding works.""" - latin_file = tmp_path / "latin.txt" - # Write bytes directly with latin-1 encoding - latin_file.write_bytes("café".encode("latin-1")) - - result = file_read_fn(file_path=str(latin_file), encoding="latin-1") - - assert "error" not in result - assert result["content"] == "café" - assert result["encoding"] == "latin-1" - - def test_read_with_wrong_encoding_returns_error(self, file_read_fn, tmp_path: Path): - """Reading with wrong encoding returns helpful error.""" - # Create a file with bytes that aren't valid UTF-8 - binary_file = tmp_path / "binary.txt" - binary_file.write_bytes(b"\xff\xfe") - - result = 
file_read_fn(file_path=str(binary_file), encoding="utf-8") - - assert "error" in result - assert "suggestion" in result - - def test_returns_absolute_path(self, file_read_fn, sample_text_file: Path): - """Result includes the absolute path.""" - result = file_read_fn(file_path=str(sample_text_file)) - - assert result["path"] == str(sample_text_file.resolve()) diff --git a/aden-tools/tests/tools/test_file_write_tool.py b/aden-tools/tests/tools/test_file_write_tool.py deleted file mode 100644 index ed2a3d37..00000000 --- a/aden-tools/tests/tools/test_file_write_tool.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Tests for file_write tool (FastMCP).""" -import pytest -from pathlib import Path - -from fastmcp import FastMCP -from aden_tools.tools.file_write_tool import register_tools - - -@pytest.fixture -def file_write_fn(mcp: FastMCP): - """Register and return the file_write tool function.""" - register_tools(mcp) - return mcp._tool_manager._tools["file_write"].fn - - -class TestFileWriteTool: - """Tests for file_write tool.""" - - def test_write_creates_new_file(self, file_write_fn, tmp_path: Path): - """Writing to a new file creates it with content.""" - new_file = tmp_path / "new.txt" - - result = file_write_fn(file_path=str(new_file), content="Hello, World!") - - assert "error" not in result - assert result["created"] is True - assert result["name"] == "new.txt" - assert new_file.read_text() == "Hello, World!" 
- - def test_write_overwrites_existing(self, file_write_fn, tmp_path: Path): - """Writing to existing file overwrites by default.""" - existing = tmp_path / "existing.txt" - existing.write_text("old content") - - result = file_write_fn(file_path=str(existing), content="new content") - - assert "error" not in result - assert result["created"] is False - assert result["previous_size"] is not None - assert existing.read_text() == "new content" - - def test_write_appends_to_existing(self, file_write_fn, tmp_path: Path): - """Writing with mode='append' adds to existing content.""" - existing = tmp_path / "existing.txt" - existing.write_text("line1\n") - - result = file_write_fn(file_path=str(existing), content="line2\n", mode="append") - - assert "error" not in result - assert result["mode"] == "append" - assert existing.read_text() == "line1\nline2\n" - - def test_write_creates_parent_dirs(self, file_write_fn, tmp_path: Path): - """Writing with create_dirs=True creates missing directories.""" - deep_path = tmp_path / "nested" / "dirs" / "file.txt" - - result = file_write_fn(file_path=str(deep_path), content="content", create_dirs=True) - - assert "error" not in result - assert deep_path.exists() - assert deep_path.read_text() == "content" - - def test_write_fails_without_parent_dir(self, file_write_fn, tmp_path: Path): - """Writing with create_dirs=False fails if parent doesn't exist.""" - missing_dir = tmp_path / "missing" / "file.txt" - - result = file_write_fn(file_path=str(missing_dir), content="content", create_dirs=False) - - assert "error" in result - assert "parent directory" in result["error"].lower() - - def test_write_invalid_mode(self, file_write_fn, tmp_path: Path): - """Writing with invalid mode returns error.""" - result = file_write_fn( - file_path=str(tmp_path / "test.txt"), - content="content", - mode="invalid" - ) - - assert "error" in result - assert "invalid mode" in result["error"].lower() - - def test_write_returns_bytes_written(self, 
file_write_fn, tmp_path: Path): - """Result includes accurate bytes_written count.""" - content = "Hello, World!" - - result = file_write_fn(file_path=str(tmp_path / "test.txt"), content=content) - - assert result["bytes_written"] == len(content.encode("utf-8")) - - def test_write_with_encoding(self, file_write_fn, tmp_path: Path): - """Writing with specific encoding works.""" - file_path = tmp_path / "latin.txt" - - result = file_write_fn(file_path=str(file_path), content="café", encoding="latin-1") - - assert "error" not in result - # Verify it was written with latin-1 encoding - assert file_path.read_bytes() == "café".encode("latin-1") diff --git a/config.yaml.example b/config.yaml.example deleted file mode 100644 index 6cbf990f..00000000 --- a/config.yaml.example +++ /dev/null @@ -1,118 +0,0 @@ -# Hive Configuration -# ====================== -# Copy this file to config.yaml and customize for your environment. -# Run `npm run setup` to generate .env files from this configuration. -# -# For detailed documentation, see: docs/configuration.md - -# ----------------------------------------------------------------------------- -# Application Settings -# ----------------------------------------------------------------------------- -app: - # Application name (displayed in UI and logs) - name: Hive - - # Environment: development, production, or test - environment: development - - # Log level: debug, info, warn, error - log_level: info - -# ----------------------------------------------------------------------------- -# Server Configuration -# ----------------------------------------------------------------------------- -server: - # Frontend settings - frontend: - # Port for the frontend application - port: 3000 - - # Backend (Hive) settings - backend: - # Port for the backend API - port: 4000 - - # Host to bind to (0.0.0.0 for all interfaces) - host: 0.0.0.0 - -# ----------------------------------------------------------------------------- -# TimescaleDB Configuration 
(Time-series metrics storage) -# ----------------------------------------------------------------------------- -timescaledb: - # Connection URL for TimescaleDB - # Format: postgresql://user:password@host:port/database - url: postgresql://postgres:postgres@localhost:5432/aden_tsdb - - # External port mapping (for docker-compose) - port: 5432 - -# ----------------------------------------------------------------------------- -# MongoDB Configuration (Policies, pricing, control config) -# ----------------------------------------------------------------------------- -mongodb: - # Connection URL for MongoDB - url: mongodb://localhost:27017 - - # Database name for main data - database: aden - - # Database name for ERP data - erp_database: erp - - # External port mapping (for docker-compose) - port: 27017 - -# ----------------------------------------------------------------------------- -# Redis Configuration (Caching and Socket.IO) -# ----------------------------------------------------------------------------- -redis: - # Connection URL for Redis - url: redis://localhost:6379 - - # External port mapping (for docker-compose) - port: 6379 - -# ----------------------------------------------------------------------------- -# Authentication & Security -# ----------------------------------------------------------------------------- -auth: - # JWT secret key - CHANGE THIS IN PRODUCTION! - # Generate with: openssl rand -base64 32 - jwt_secret: change-this-to-a-secure-random-string-min-32-chars - - # JWT token expiration (e.g., 1h, 7d, 30d) - jwt_expires_in: 7d - - # Passphrase for additional encryption - CHANGE THIS IN PRODUCTION! 
- passphrase: change-this-to-a-secure-passphrase - -# ----------------------------------------------------------------------------- -# NPM Configuration -# ----------------------------------------------------------------------------- -npm: - # NPM token for private package access (if needed) - token: "" - -# ----------------------------------------------------------------------------- -# CORS Configuration -# ----------------------------------------------------------------------------- -cors: - # Allowed origin for CORS requests - # In production, set this to your frontend URL - origin: http://localhost:3000 - -# ----------------------------------------------------------------------------- -# Feature Flags -# ----------------------------------------------------------------------------- -features: - # Enable user registration - registration: true - - # Enable API rate limiting - rate_limiting: false - - # Enable request logging - request_logging: true - - # Enable MCP (Model Context Protocol) server - mcp_server: true diff --git a/core/.mcp.json b/core/.mcp.json index 321d8274..b6e685de 100644 --- a/core/.mcp.json +++ b/core/.mcp.json @@ -5,10 +5,10 @@ "args": ["-m", "framework.mcp.agent_builder_server"], "cwd": "/home/timothy/oss/hive/core" }, - "aden-tools": { + "tools": { "command": "python", "args": ["-m", "aden_tools.mcp_server", "--stdio"], - "cwd": "/home/timothy/oss/hive/aden-tools" + "cwd": "/home/timothy/oss/hive/tools" } } } diff --git a/core/MCP_BUILDER_TOOLS_GUIDE.md b/core/MCP_BUILDER_TOOLS_GUIDE.md index b5a800cb..b445b13e 100644 --- a/core/MCP_BUILDER_TOOLS_GUIDE.md +++ b/core/MCP_BUILDER_TOOLS_GUIDE.md @@ -6,7 +6,7 @@ This guide explains how to use the new MCP integration tools in the agent builde The agent builder now supports registering external MCP servers as tool sources. This allows you to: -1. Register MCP servers (like aden-tools) during agent building +1. Register MCP servers (like tools) during agent building 2. 
Discover available tools from those servers 3. Use those tools in your agent nodes 4. Automatically generate `mcp_servers.json` configuration on export @@ -18,6 +18,7 @@ The agent builder now supports registering external MCP servers as tool sources. Register an MCP server as a tool source for your agent. **Parameters:** + - `name` (string, required): Unique name for the MCP server - `transport` (string, required): Transport type - "stdio" or "http" - `command` (string): Command to run (for stdio transport) @@ -29,21 +30,23 @@ Register an MCP server as a tool source for your agent. - `description` (string): Description of the MCP server **Example - STDIO:** + ```json { "name": "add_mcp_server", "arguments": { - "name": "aden-tools", + "name": "tools", "transport": "stdio", "command": "python", "args": "[\"mcp_server.py\", \"--stdio\"]", - "cwd": "../aden-tools", + "cwd": "../tools", "description": "Aden tools for web search and file operations" } } ``` **Example - HTTP:** + ```json { "name": "add_mcp_server", @@ -57,15 +60,16 @@ Register an MCP server as a tool source for your agent. ``` **Response:** + ```json { "success": true, "server": { - "name": "aden-tools", + "name": "tools", "transport": "stdio", "command": "python", "args": ["mcp_server.py", "--stdio"], - "cwd": "../aden-tools", + "cwd": "../tools", "description": "Aden tools..." }, "tools_discovered": 6, @@ -78,7 +82,7 @@ Register an MCP server as a tool source for your agent. "example_tool" ], "total_mcp_servers": 1, - "note": "MCP server 'aden-tools' registered with 6 tools. These tools can now be used in llm_tool_use nodes." + "note": "MCP server 'tools' registered with 6 tools. These tools can now be used in llm_tool_use nodes." } ``` @@ -89,15 +93,16 @@ List all registered MCP servers. 
**Parameters:** None **Response:** + ```json { "mcp_servers": [ { - "name": "aden-tools", + "name": "tools", "transport": "stdio", "command": "python", "args": ["mcp_server.py", "--stdio"], - "cwd": "../aden-tools", + "cwd": "../tools", "description": "Aden tools..." } ], @@ -110,24 +115,27 @@ List all registered MCP servers. List tools available from registered MCP servers. **Parameters:** + - `server_name` (string, optional): Name of specific server to list tools from. If omitted, lists tools from all servers. **Example:** + ```json { "name": "list_mcp_tools", "arguments": { - "server_name": "aden-tools" + "server_name": "tools" } } ``` **Response:** + ```json { "success": true, "tools_by_server": { - "aden-tools": [ + "tools": [ { "name": "web_search", "description": "Search the web for information using Brave Search API...", @@ -150,23 +158,26 @@ List tools available from registered MCP servers. Remove a registered MCP server. **Parameters:** + - `name` (string, required): Name of the MCP server to remove **Example:** + ```json { "name": "remove_mcp_server", "arguments": { - "name": "aden-tools" + "name": "tools" } } ``` **Response:** + ```json { "success": true, - "removed": "aden-tools", + "removed": "tools", "remaining_servers": 0 } ``` @@ -176,6 +187,7 @@ Remove a registered MCP server. Here's a complete workflow for building an agent with MCP tools: ### 1. Create Session + ```json { "name": "create_session", @@ -186,30 +198,33 @@ Here's a complete workflow for building an agent with MCP tools: ``` ### 2. Register MCP Server + ```json { "name": "add_mcp_server", "arguments": { - "name": "aden-tools", + "name": "tools", "transport": "stdio", "command": "python", "args": "[\"mcp_server.py\", \"--stdio\"]", - "cwd": "../aden-tools" + "cwd": "../tools" } } ``` ### 3. List Available Tools + ```json { "name": "list_mcp_tools", "arguments": { - "server_name": "aden-tools" + "server_name": "tools" } } ``` ### 4. 
Set Goal + ```json { "name": "set_goal", @@ -223,6 +238,7 @@ Here's a complete workflow for building an agent with MCP tools: ``` ### 5. Add Node with MCP Tool + ```json { "name": "add_node", @@ -239,9 +255,10 @@ Here's a complete workflow for building an agent with MCP tools: } ``` -Note: `web_search` is now available because we registered the aden-tools MCP server! +Note: `web_search` is now available because we registered the tools MCP server! ### 6. Export Agent + ```json { "name": "export_graph", @@ -250,6 +267,7 @@ Note: `web_search` is now available because we registered the aden-tools MCP ser ``` The export will create: + - `exports/web-research-agent/agent.json` - Agent specification - `exports/web-research-agent/README.md` - Documentation - `exports/web-research-agent/mcp_servers.json` - **MCP server configuration** ✨ @@ -262,11 +280,11 @@ When you export an agent with registered MCP servers, an `mcp_servers.json` file { "servers": [ { - "name": "aden-tools", + "name": "tools", "transport": "stdio", "command": "python", "args": ["mcp_server.py", "--stdio"], - "cwd": "../aden-tools", + "cwd": "../tools", "description": "Aden tools for web search and file operations" } ] @@ -288,7 +306,7 @@ runner = AgentRunner.load("exports/web-research-agent") # Run with input result = await runner.run({"query": "latest AI breakthroughs"}) -# The web_search tool from aden-tools is automatically available! +# The web_search tool from tools is automatically available! ``` ## Benefits @@ -301,14 +319,17 @@ result = await runner.run({"query": "latest AI breakthroughs"}) ## Common MCP Servers -### aden-tools +### tools + Provides: + - `web_search` - Brave Search API integration - `web_scrape` - Web page content extraction - `file_read` / `file_write` - File operations - `pdf_read` - PDF text extraction ### Custom MCP Servers + You can register any MCP server that follows the Model Context Protocol specification. 
## Troubleshooting @@ -372,19 +393,21 @@ If credentials are missing, you'll receive a response like: 1. Get the required API key from the URL in `help_url` 2. Add it to your environment: + ```bash # Option 1: Export directly export BRAVE_SEARCH_API_KEY=your-key-here - # Option 2: Add to aden-tools/.env - echo "BRAVE_SEARCH_API_KEY=your-key-here" >> aden-tools/.env + # Option 2: Add to tools/.env + echo "BRAVE_SEARCH_API_KEY=your-key-here" >> tools/.env ``` + 3. Retry the `add_node` command ### Required Credentials by Tool -| Tool | Credential | Get Key | -|------|------------|---------| +| Tool | Credential | Get Key | +| ------------ | ---------------------- | ----------------------------------------------------- | | `web_search` | `BRAVE_SEARCH_API_KEY` | [brave.com/search/api](https://brave.com/search/api/) | Note: The MCP server itself requires `ANTHROPIC_API_KEY` at startup for LLM operations. diff --git a/core/MCP_INTEGRATION_GUIDE.md b/core/MCP_INTEGRATION_GUIDE.md index bc7d59cc..159ca3b6 100644 --- a/core/MCP_INTEGRATION_GUIDE.md +++ b/core/MCP_INTEGRATION_GUIDE.md @@ -21,13 +21,13 @@ from framework.runner.runner import AgentRunner # Load your agent runner = AgentRunner.load("exports/my-agent") -# Register aden-tools MCP server +# Register tools MCP server runner.register_mcp_server( - name="aden-tools", + name="tools", transport="stdio", command="python", args=["-m", "aden_tools.mcp_server", "--stdio"], - cwd="/path/to/aden-tools" + cwd="/path/to/tools" ) # Tools are now available to your agent @@ -42,11 +42,11 @@ Create `mcp_servers.json` in your agent folder: { "servers": [ { - "name": "aden-tools", + "name": "tools", "transport": "stdio", "command": "python", "args": ["-m", "aden_tools.mcp_server", "--stdio"], - "cwd": "../aden-tools" + "cwd": "../tools" } ] } @@ -78,6 +78,7 @@ runner.register_mcp_server( ``` **Configuration:** + - `command`: Executable to run (e.g., "python", "node") - `args`: List of command-line arguments - `cwd`: Working directory 
for the process @@ -99,6 +100,7 @@ runner.register_mcp_server( ``` **Configuration:** + - `url`: Base URL of the MCP server - `headers`: HTTP headers to include (optional) @@ -119,7 +121,7 @@ builder.add_node( name="Web Researcher", node_type="llm_tool_use", system_prompt="Research the topic using web_search", - tools=["web_search"], # Tool from aden-tools MCP server + tools=["web_search"], # Tool from tools MCP server input_keys=["topic"], output_keys=["findings"] ) @@ -145,9 +147,9 @@ Tools from MCP servers can be referenced in your agent.json just like built-in t } ``` -## Available Tools from aden-tools +## Available Tools from tools -When you register the `aden-tools` MCP server, the following tools become available: +When you register the `tools` MCP server, the following tools become available: - **web_search**: Search the web using Brave Search API - **web_scrape**: Scrape content from a URL @@ -163,11 +165,11 @@ Some MCP tools require environment variables. You can pass them in the configura ```python runner.register_mcp_server( - name="aden-tools", + name="tools", transport="stdio", command="python", args=["-m", "aden_tools.mcp_server", "--stdio"], - cwd="../aden-tools", + cwd="../tools", env={ "BRAVE_SEARCH_API_KEY": os.environ["BRAVE_SEARCH_API_KEY"] } @@ -180,11 +182,11 @@ runner.register_mcp_server( { "servers": [ { - "name": "aden-tools", + "name": "tools", "transport": "stdio", "command": "python", "args": ["-m", "aden_tools.mcp_server", "--stdio"], - "cwd": "../aden-tools", + "cwd": "../tools", "env": { "BRAVE_SEARCH_API_KEY": "${BRAVE_SEARCH_API_KEY}" } @@ -203,11 +205,11 @@ You can register multiple MCP servers to access different sets of tools: { "servers": [ { - "name": "aden-tools", + "name": "tools", "transport": "stdio", "command": "python", "args": ["-m", "aden_tools.mcp_server", "--stdio"], - "cwd": "../aden-tools" + "cwd": "../tools" }, { "name": "database-tools", @@ -243,6 +245,7 @@ runner.register_mcp_server( ### 2. 
Use HTTP for Production HTTP transport is better for: + - Containerized deployments - Shared tools across multiple agents - Remote tool execution @@ -330,11 +333,11 @@ async def main(): # Register MCP server runner.register_mcp_server( - name="aden-tools", + name="tools", transport="stdio", command="python", args=["-m", "aden_tools.mcp_server", "--stdio"], - cwd="../aden-tools", + cwd="../tools", env={ "BRAVE_SEARCH_API_KEY": "your-api-key" } diff --git a/core/examples/mcp_integration_example.py b/core/examples/mcp_integration_example.py index 05132678..53acc5d5 100644 --- a/core/examples/mcp_integration_example.py +++ b/core/examples/mcp_integration_example.py @@ -21,16 +21,16 @@ async def example_1_programmatic_registration(): # Load an existing agent runner = AgentRunner.load("exports/task-planner") - # Register aden-tools MCP server via STDIO + # Register tools MCP server via STDIO num_tools = runner.register_mcp_server( - name="aden-tools", + name="tools", transport="stdio", command="python", args=["-m", "aden_tools.mcp_server", "--stdio"], - cwd="../aden-tools", + cwd="../tools", ) - print(f"Registered {num_tools} tools from aden-tools MCP server") + print(f"Registered {num_tools} tools from tools MCP server") # List all available tools tools = runner._tool_registry.get_tools() @@ -51,14 +51,14 @@ async def example_2_http_transport(): """Example 2: Connect to MCP server via HTTP""" print("\n=== Example 2: HTTP MCP Server Connection ===\n") - # First, start the aden-tools MCP server in HTTP mode: - # cd aden-tools && python mcp_server.py --port 4001 + # First, start the tools MCP server in HTTP mode: + # cd tools && python mcp_server.py --port 4001 runner = AgentRunner.load("exports/task-planner") - # Register aden-tools via HTTP + # Register tools via HTTP num_tools = runner.register_mcp_server( - name="aden-tools-http", + name="tools-http", transport="http", url="http://localhost:4001", ) @@ -130,7 +130,7 @@ async def example_4_custom_agent_with_mcp_tools(): 
description="Search the web for information", node_type="llm_tool_use", system_prompt="Search for {query} and return the top results. Use the web_search tool.", - tools=["web_search"], # This tool comes from aden-tools MCP server + tools=["web_search"], # This tool comes from tools MCP server input_keys=["query"], output_keys=["search_results"], ) @@ -160,11 +160,11 @@ async def example_4_custom_agent_with_mcp_tools(): # Load and register MCP server runner = AgentRunner.load(export_path) runner.register_mcp_server( - name="aden-tools", + name="tools", transport="stdio", command="python", args=["-m", "aden_tools.mcp_server", "--stdio"], - cwd="../aden-tools", + cwd="../tools", ) # Run the agent diff --git a/core/examples/mcp_servers.json b/core/examples/mcp_servers.json index 588e67f4..a886e7a8 100644 --- a/core/examples/mcp_servers.json +++ b/core/examples/mcp_servers.json @@ -1,18 +1,18 @@ { "servers": [ { - "name": "aden-tools", + "name": "tools", "description": "Aden tools including web search, file operations, and PDF reading", "transport": "stdio", "command": "python", "args": ["mcp_server.py", "--stdio"], - "cwd": "../aden-tools", + "cwd": "../tools", "env": { "BRAVE_SEARCH_API_KEY": "${BRAVE_SEARCH_API_KEY}" } }, { - "name": "aden-tools-http", + "name": "tools-http", "description": "Aden tools via HTTP (for Docker deployments)", "transport": "http", "url": "http://localhost:4001", diff --git a/core/framework/llm/__init__.py b/core/framework/llm/__init__.py index a2f01595..c17226c0 100644 --- a/core/framework/llm/__init__.py +++ b/core/framework/llm/__init__.py @@ -2,5 +2,6 @@ from framework.llm.provider import LLMProvider, LLMResponse from framework.llm.anthropic import AnthropicProvider +from framework.llm.litellm import LiteLLMProvider -__all__ = ["LLMProvider", "LLMResponse", "AnthropicProvider"] +__all__ = ["LLMProvider", "LLMResponse", "AnthropicProvider", "LiteLLMProvider"] diff --git a/core/framework/llm/anthropic.py b/core/framework/llm/anthropic.py 
index b305b3b9..c8b32eaf 100644 --- a/core/framework/llm/anthropic.py +++ b/core/framework/llm/anthropic.py @@ -1,11 +1,27 @@ -"""Anthropic Claude LLM provider.""" +"""Anthropic Claude LLM provider - backward compatible wrapper around LiteLLM.""" import os from typing import Any -import anthropic +from framework.llm.provider import LLMProvider, LLMResponse, Tool +from framework.llm.litellm import LiteLLMProvider -from framework.llm.provider import LLMProvider, LLMResponse, Tool, ToolUse, ToolResult + +def _get_api_key_from_credential_manager() -> str | None: + """Get API key from CredentialManager or environment. + + Priority: + 1. CredentialManager (supports .env hot-reload) + 2. os.environ fallback + """ + try: + from aden_tools.credentials import CredentialManager + + creds = CredentialManager() + if creds.is_available("anthropic"): + return creds.get("anthropic") + except ImportError: + pass + return os.environ.get("ANTHROPIC_API_KEY") def _get_api_key_from_credential_manager() -> str | None: @@ -30,7 +46,9 @@ class AnthropicProvider(LLMProvider): """ Anthropic Claude LLM provider. - Uses the Anthropic API to interact with Claude models. + This is a backward-compatible wrapper that internally uses LiteLLMProvider. + Existing code using AnthropicProvider will continue to work unchanged, + while benefiting from LiteLLM's unified interface and features. """ def __init__( @@ -46,6 +64,7 @@ class AnthropicProvider(LLMProvider): or ANTHROPIC_API_KEY env var. model: Model to use (default: claude-haiku-4-5-20251001) """ + # Delegate to LiteLLMProvider internally. 
self.api_key = api_key or _get_api_key_from_credential_manager() if not self.api_key: raise ValueError( @@ -53,7 +72,12 @@ ) self.model = model - self.client = anthropic.Anthropic(api_key=self.api_key) + + self._provider = LiteLLMProvider( + model=model, + api_key=self.api_key, + ) + def complete( self, @@ -62,34 +86,12 @@ tools: list[Tool] | None = None, max_tokens: int = 1024, ) -> LLMResponse: - """Generate a completion from Claude.""" - kwargs: dict[str, Any] = { - "model": self.model, - "max_tokens": max_tokens, - "messages": messages, - } - - if system: - kwargs["system"] = system - - if tools: - kwargs["tools"] = [self._tool_to_dict(t) for t in tools] - - response = self.client.messages.create(**kwargs) - - # Extract text content - content = "" - for block in response.content: - if block.type == "text": - content += block.text - - return LLMResponse( - content=content, - model=response.model, - input_tokens=response.usage.input_tokens, - output_tokens=response.usage.output_tokens, - stop_reason=response.stop_reason, - raw_response=response, + """Generate a completion from Claude (via LiteLLM).""" + return self._provider.complete( + messages=messages, + system=system, + tools=tools, + max_tokens=max_tokens, ) def complete_with_tools( @@ -186,15 +188,3 @@ class AnthropicProvider(LLMProvider): stop_reason="max_iterations", raw_response=None, ) - - def _tool_to_dict(self, tool: Tool) -> dict[str, Any]: - """Convert Tool to Anthropic API format.""" - return { - "name": tool.name, - "description": tool.description, - "input_schema": { - "type": "object", - "properties": tool.parameters.get("properties", {}), - "required": tool.parameters.get("required", []), - }, - } diff --git a/core/framework/llm/litellm.py b/core/framework/llm/litellm.py new file mode 100644 index 00000000..cfb2ef91 --- /dev/null +++ b/core/framework/llm/litellm.py @@ 
-0,0 +1,248 @@ +"""LiteLLM provider for pluggable multi-provider LLM support. + +LiteLLM provides a unified, OpenAI-compatible interface that supports +multiple LLM providers including OpenAI, Anthropic, Gemini, Mistral, +Groq, and local models. + +See: https://docs.litellm.ai/docs/providers +""" + +import json +from typing import Any + +import litellm + +from framework.llm.provider import LLMProvider, LLMResponse, Tool, ToolUse, ToolResult + + +class LiteLLMProvider(LLMProvider): + """ + LiteLLM-based LLM provider for multi-provider support. + + Supports any model that LiteLLM supports, including: + - OpenAI: gpt-4o, gpt-4o-mini, gpt-4-turbo, gpt-3.5-turbo + - Anthropic: claude-3-opus, claude-3-sonnet, claude-3-haiku + - Google: gemini-pro, gemini-1.5-pro, gemini-1.5-flash + - Mistral: mistral-large, mistral-medium, mistral-small + - Groq: llama3-70b, mixtral-8x7b + - Local: ollama/llama3, ollama/mistral + - And many more... + + Usage: + # OpenAI + provider = LiteLLMProvider(model="gpt-4o-mini") + + # Anthropic + provider = LiteLLMProvider(model="claude-3-haiku-20240307") + + # Google Gemini + provider = LiteLLMProvider(model="gemini/gemini-1.5-flash") + + # Local Ollama + provider = LiteLLMProvider(model="ollama/llama3") + + # With custom API base + provider = LiteLLMProvider( + model="gpt-4o-mini", + api_base="https://my-proxy.com/v1" + ) + """ + + def __init__( + self, + model: str = "gpt-4o-mini", + api_key: str | None = None, + api_base: str | None = None, + **kwargs: Any, + ): + """ + Initialize the LiteLLM provider. + + Args: + model: Model identifier (e.g., "gpt-4o-mini", "claude-3-haiku-20240307") + LiteLLM auto-detects the provider from the model name. + api_key: API key for the provider. If not provided, LiteLLM will + look for the appropriate env var (OPENAI_API_KEY, + ANTHROPIC_API_KEY, etc.) 
+ api_base: Custom API base URL (for proxies or local deployments) + **kwargs: Additional arguments passed to litellm.completion() + """ + self.model = model + self.api_key = api_key + self.api_base = api_base + self.extra_kwargs = kwargs + + def complete( + self, + messages: list[dict[str, Any]], + system: str = "", + tools: list[Tool] | None = None, + max_tokens: int = 1024, + ) -> LLMResponse: + """Generate a completion using LiteLLM.""" + # Prepare messages with system prompt + full_messages = [] + if system: + full_messages.append({"role": "system", "content": system}) + full_messages.extend(messages) + + # Build kwargs + kwargs: dict[str, Any] = { + "model": self.model, + "messages": full_messages, + "max_tokens": max_tokens, + **self.extra_kwargs, + } + + if self.api_key: + kwargs["api_key"] = self.api_key + if self.api_base: + kwargs["api_base"] = self.api_base + + # Add tools if provided + if tools: + kwargs["tools"] = [self._tool_to_openai_format(t) for t in tools] + + # Make the call + response = litellm.completion(**kwargs) + + # Extract content + content = response.choices[0].message.content or "" + + # Get usage info + usage = response.usage + input_tokens = usage.prompt_tokens if usage else 0 + output_tokens = usage.completion_tokens if usage else 0 + + return LLMResponse( + content=content, + model=response.model or self.model, + input_tokens=input_tokens, + output_tokens=output_tokens, + stop_reason=response.choices[0].finish_reason or "", + raw_response=response, + ) + + def complete_with_tools( + self, + messages: list[dict[str, Any]], + system: str, + tools: list[Tool], + tool_executor: callable, + max_iterations: int = 10, + ) -> LLMResponse: + """Run a tool-use loop until the LLM produces a final response.""" + # Prepare messages with system prompt + current_messages = [] + if system: + current_messages.append({"role": "system", "content": system}) + current_messages.extend(messages) + + total_input_tokens = 0 + total_output_tokens = 0 + + # 
Convert tools to OpenAI format + openai_tools = [self._tool_to_openai_format(t) for t in tools] + + for _ in range(max_iterations): + # Build kwargs + kwargs: dict[str, Any] = { + "model": self.model, + "messages": current_messages, + "max_tokens": 1024, + "tools": openai_tools, + **self.extra_kwargs, + } + + if self.api_key: + kwargs["api_key"] = self.api_key + if self.api_base: + kwargs["api_base"] = self.api_base + + response = litellm.completion(**kwargs) + + # Track tokens + usage = response.usage + if usage: + total_input_tokens += usage.prompt_tokens + total_output_tokens += usage.completion_tokens + + choice = response.choices[0] + message = choice.message + + # Check if we're done (no tool calls) + if choice.finish_reason == "stop" or not message.tool_calls: + return LLMResponse( + content=message.content or "", + model=response.model or self.model, + input_tokens=total_input_tokens, + output_tokens=total_output_tokens, + stop_reason=choice.finish_reason or "stop", + raw_response=response, + ) + + # Process tool calls. + # Add assistant message with tool calls. + current_messages.append({ + "role": "assistant", + "content": message.content, + "tool_calls": [ + { + "id": tc.id, + "type": "function", + "function": { + "name": tc.function.name, + "arguments": tc.function.arguments, + }, + } + for tc in message.tool_calls + ], + }) + + # Execute tools and add results. 
+ for tool_call in message.tool_calls: + # Parse arguments + try: + args = json.loads(tool_call.function.arguments) + except json.JSONDecodeError: + args = {} + + tool_use = ToolUse( + id=tool_call.id, + name=tool_call.function.name, + input=args, + ) + + result = tool_executor(tool_use) + + # Add tool result message + current_messages.append({ + "role": "tool", + "tool_call_id": result.tool_use_id, + "content": result.content, + }) + + # Max iterations reached + return LLMResponse( + content="Max tool iterations reached", + model=self.model, + input_tokens=total_input_tokens, + output_tokens=total_output_tokens, + stop_reason="max_iterations", + raw_response=None, + ) + + def _tool_to_openai_format(self, tool: Tool) -> dict[str, Any]: + """Convert Tool to OpenAI function calling format.""" + return { + "type": "function", + "function": { + "name": tool.name, + "description": tool.description, + "parameters": { + "type": "object", + "properties": tool.parameters.get("properties", {}), + "required": tool.parameters.get("required", []), + }, + }, + } diff --git a/core/framework/mcp/agent_builder_server.py b/core/framework/mcp/agent_builder_server.py index 20839858..6a4cbdbf 100644 --- a/core/framework/mcp/agent_builder_server.py +++ b/core/framework/mcp/agent_builder_server.py @@ -446,7 +446,7 @@ def _validate_tool_credentials(tools_list: list[str]) -> dict | None: "warnings": [ f"⚠️ Credential validation SKIPPED: aden_tools not available ({e}). " "Tools may fail at runtime if credentials are missing. " - "Add aden-tools/src to PYTHONPATH to enable validation." + "Add tools/src to PYTHONPATH to enable validation." 
], } @@ -1435,11 +1435,11 @@ def add_mcp_server( Example for stdio: add_mcp_server( - name="aden-tools", + name="tools", transport="stdio", command="python", args='["mcp_server.py", "--stdio"]', - cwd="../aden-tools" + cwd="../tools" ) Example for http: diff --git a/core/framework/runner/orchestrator.py b/core/framework/runner/orchestrator.py index 4b2c0142..31772368 100644 --- a/core/framework/runner/orchestrator.py +++ b/core/framework/runner/orchestrator.py @@ -4,7 +4,6 @@ from __future__ import annotations import asyncio import json -import os from dataclasses import dataclass, field from datetime import datetime from pathlib import Path @@ -71,10 +70,10 @@ class AgentOrchestrator: self._model = model self._message_log: list[AgentMessage] = [] - # Auto-create LLM if API key available - if self._llm is None and os.environ.get("ANTHROPIC_API_KEY"): - from framework.llm.anthropic import AnthropicProvider - self._llm = AnthropicProvider(model=model) + # Auto-create LLM - LiteLLM auto-detects provider and API key from model name + if self._llm is None: + from framework.llm.litellm import LiteLLMProvider + self._llm = LiteLLMProvider(model=self._model) def register( self, diff --git a/core/framework/runner/runner.py b/core/framework/runner/runner.py index b157066c..0973ad19 100644 --- a/core/framework/runner/runner.py +++ b/core/framework/runner/runner.py @@ -12,6 +12,7 @@ from framework.graph.edge import GraphSpec, EdgeSpec, EdgeCondition from framework.graph.node import NodeSpec from framework.graph.executor import GraphExecutor, ExecutionResult from framework.llm.provider import LLMProvider, Tool, ToolResult, ToolUse +from framework.llm.litellm import LiteLLMProvider from framework.runner.tool_registry import ToolRegistry from framework.runtime.core import Runtime @@ -183,7 +184,8 @@ class AgentRunner: goal: Loaded Goal object mock_mode: If True, use mock LLM responses storage_path: Path for runtime storage (defaults to temp) - model: Anthropic model to use + 
model: Model to use - any LiteLLM-compatible model name + (e.g., "claude-sonnet-4-20250514", "gpt-4o-mini", "gemini/gemini-pro") """ self.agent_path = agent_path self.graph = graph @@ -313,16 +315,16 @@ class AgentRunner: Example: # Register STDIO MCP server runner.register_mcp_server( - name="aden-tools", + name="tools", transport="stdio", command="python", args=["-m", "aden_tools.mcp_server", "--stdio"], - cwd="/path/to/aden-tools" + cwd="/path/to/tools" ) # Register HTTP MCP server runner.register_mcp_server( - name="aden-tools", + name="tools", transport="http", url="http://localhost:4001" ) diff --git a/core/pyproject.toml b/core/pyproject.toml index ea93fa79..daa840f4 100644 --- a/core/pyproject.toml +++ b/core/pyproject.toml @@ -10,6 +10,7 @@ dependencies = [ "pytest>=8.0", "pytest-asyncio>=0.23", "pytest-xdist>=3.0", + "litellm>=1.81.0", ] [project.optional-dependencies] diff --git a/core/requirements.txt b/core/requirements.txt index 45bd560d..c88ec74a 100644 --- a/core/requirements.txt +++ b/core/requirements.txt @@ -2,6 +2,7 @@ pydantic>=2.0 anthropic>=0.40.0 httpx>=0.27.0 +litellm>=1.81.0 # MCP server dependencies mcp diff --git a/core/tests/test_litellm_provider.py b/core/tests/test_litellm_provider.py new file mode 100644 index 00000000..cf6b369e --- /dev/null +++ b/core/tests/test_litellm_provider.py @@ -0,0 +1,332 @@ +"""Tests for LiteLLM provider. + +Run with: + cd core + pip install litellm pytest + pytest tests/test_litellm_provider.py -v + +For live tests (requires API keys): + OPENAI_API_KEY=sk-... 
pytest tests/test_litellm_provider.py -v -m live +""" + +import os +import pytest +from unittest.mock import Mock, patch, MagicMock + +from framework.llm.litellm import LiteLLMProvider +from framework.llm.anthropic import AnthropicProvider +from framework.llm.provider import LLMProvider, Tool, ToolUse, ToolResult + + +class TestLiteLLMProviderInit: + """Test LiteLLMProvider initialization.""" + + def test_init_with_defaults(self): + """Test initialization with default parameters.""" + with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}): + provider = LiteLLMProvider() + assert provider.model == "gpt-4o-mini" + assert provider.api_key is None + assert provider.api_base is None + + def test_init_with_custom_model(self): + """Test initialization with custom model.""" + with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}): + provider = LiteLLMProvider(model="claude-3-haiku-20240307") + assert provider.model == "claude-3-haiku-20240307" + + def test_init_with_api_key(self): + """Test initialization with explicit API key.""" + provider = LiteLLMProvider(model="gpt-4o-mini", api_key="my-api-key") + assert provider.api_key == "my-api-key" + + def test_init_with_api_base(self): + """Test initialization with custom API base.""" + provider = LiteLLMProvider( + model="gpt-4o-mini", + api_key="my-key", + api_base="https://my-proxy.com/v1" + ) + assert provider.api_base == "https://my-proxy.com/v1" + + def test_init_ollama_no_key_needed(self): + """Test that Ollama models don't require API key.""" + with patch.dict(os.environ, {}, clear=True): + # Should not raise. 
+ provider = LiteLLMProvider(model="ollama/llama3") + assert provider.model == "ollama/llama3" + + +class TestLiteLLMProviderComplete: + """Test LiteLLMProvider.complete() method.""" + + @patch("litellm.completion") + def test_complete_basic(self, mock_completion): + """Test basic completion call.""" + # Mock response + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message.content = "Hello! I'm an AI assistant." + mock_response.choices[0].finish_reason = "stop" + mock_response.model = "gpt-4o-mini" + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 20 + mock_completion.return_value = mock_response + + provider = LiteLLMProvider(model="gpt-4o-mini", api_key="test-key") + result = provider.complete( + messages=[{"role": "user", "content": "Hello"}] + ) + + assert result.content == "Hello! I'm an AI assistant." + assert result.model == "gpt-4o-mini" + assert result.input_tokens == 10 + assert result.output_tokens == 20 + assert result.stop_reason == "stop" + + # Verify litellm.completion was called correctly + mock_completion.assert_called_once() + call_kwargs = mock_completion.call_args[1] + assert call_kwargs["model"] == "gpt-4o-mini" + assert call_kwargs["api_key"] == "test-key" + + @patch("litellm.completion") + def test_complete_with_system_prompt(self, mock_completion): + """Test completion with system prompt.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message.content = "Response" + mock_response.choices[0].finish_reason = "stop" + mock_response.model = "gpt-4o-mini" + mock_response.usage.prompt_tokens = 15 + mock_response.usage.completion_tokens = 5 + mock_completion.return_value = mock_response + + provider = LiteLLMProvider(model="gpt-4o-mini", api_key="test-key") + provider.complete( + messages=[{"role": "user", "content": "Hello"}], + system="You are a helpful assistant." 
+ ) + + call_kwargs = mock_completion.call_args[1] + messages = call_kwargs["messages"] + assert messages[0]["role"] == "system" + assert messages[0]["content"] == "You are a helpful assistant." + + @patch("litellm.completion") + def test_complete_with_tools(self, mock_completion): + """Test completion with tools.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message.content = "Response" + mock_response.choices[0].finish_reason = "stop" + mock_response.model = "gpt-4o-mini" + mock_response.usage.prompt_tokens = 20 + mock_response.usage.completion_tokens = 10 + mock_completion.return_value = mock_response + + provider = LiteLLMProvider(model="gpt-4o-mini", api_key="test-key") + + tools = [ + Tool( + name="get_weather", + description="Get the weather for a location", + parameters={ + "properties": { + "location": {"type": "string", "description": "City name"} + }, + "required": ["location"] + } + ) + ] + + provider.complete( + messages=[{"role": "user", "content": "What's the weather?"}], + tools=tools + ) + + call_kwargs = mock_completion.call_args[1] + assert "tools" in call_kwargs + assert call_kwargs["tools"][0]["type"] == "function" + assert call_kwargs["tools"][0]["function"]["name"] == "get_weather" + + +class TestLiteLLMProviderToolUse: + """Test LiteLLMProvider.complete_with_tools() method.""" + + @patch("litellm.completion") + def test_complete_with_tools_single_iteration(self, mock_completion): + """Test tool use with single iteration.""" + # First response: tool call + tool_call_response = MagicMock() + tool_call_response.choices = [MagicMock()] + tool_call_response.choices[0].message.content = None + tool_call_response.choices[0].message.tool_calls = [MagicMock()] + tool_call_response.choices[0].message.tool_calls[0].id = "call_123" + tool_call_response.choices[0].message.tool_calls[0].function.name = "get_weather" + tool_call_response.choices[0].message.tool_calls[0].function.arguments = 
'{"location": "London"}' + tool_call_response.choices[0].finish_reason = "tool_calls" + tool_call_response.model = "gpt-4o-mini" + tool_call_response.usage.prompt_tokens = 20 + tool_call_response.usage.completion_tokens = 15 + + # Second response: final answer + final_response = MagicMock() + final_response.choices = [MagicMock()] + final_response.choices[0].message.content = "The weather in London is sunny." + final_response.choices[0].message.tool_calls = None + final_response.choices[0].finish_reason = "stop" + final_response.model = "gpt-4o-mini" + final_response.usage.prompt_tokens = 30 + final_response.usage.completion_tokens = 10 + + mock_completion.side_effect = [tool_call_response, final_response] + + provider = LiteLLMProvider(model="gpt-4o-mini", api_key="test-key") + + tools = [ + Tool( + name="get_weather", + description="Get the weather", + parameters={"properties": {"location": {"type": "string"}}, "required": ["location"]} + ) + ] + + def tool_executor(tool_use: ToolUse) -> ToolResult: + return ToolResult( + tool_use_id=tool_use.id, + content="Sunny, 22C", + is_error=False + ) + + result = provider.complete_with_tools( + messages=[{"role": "user", "content": "What's the weather in London?"}], + system="You are a weather assistant.", + tools=tools, + tool_executor=tool_executor + ) + + assert result.content == "The weather in London is sunny." 
+ assert result.input_tokens == 50 # 20 + 30 + assert result.output_tokens == 25 # 15 + 10 + assert mock_completion.call_count == 2 + + +class TestToolConversion: + """Test tool format conversion.""" + + def test_tool_to_openai_format(self): + """Test converting Tool to OpenAI format.""" + provider = LiteLLMProvider(model="gpt-4o-mini", api_key="test-key") + + tool = Tool( + name="search", + description="Search the web", + parameters={ + "properties": { + "query": {"type": "string", "description": "Search query"} + }, + "required": ["query"] + } + ) + + result = provider._tool_to_openai_format(tool) + + assert result["type"] == "function" + assert result["function"]["name"] == "search" + assert result["function"]["description"] == "Search the web" + assert result["function"]["parameters"]["properties"]["query"]["type"] == "string" + assert result["function"]["parameters"]["required"] == ["query"] + + +class TestAnthropicProviderBackwardCompatibility: + """Test AnthropicProvider backward compatibility with LiteLLM backend.""" + + def test_anthropic_provider_is_llm_provider(self): + """Test that AnthropicProvider implements LLMProvider interface.""" + provider = AnthropicProvider(api_key="test-key") + assert isinstance(provider, LLMProvider) + + def test_anthropic_provider_init_defaults(self): + """Test AnthropicProvider initialization with defaults.""" + provider = AnthropicProvider(api_key="test-key") + assert provider.model == "claude-sonnet-4-20250514" + assert provider.api_key == "test-key" + + def test_anthropic_provider_init_custom_model(self): + """Test AnthropicProvider initialization with custom model.""" + provider = AnthropicProvider(api_key="test-key", model="claude-3-haiku-20240307") + assert provider.model == "claude-3-haiku-20240307" + + def test_anthropic_provider_uses_litellm_internally(self): + """Test that AnthropicProvider delegates to LiteLLMProvider.""" + provider = AnthropicProvider(api_key="test-key", model="claude-3-haiku-20240307") + assert 
isinstance(provider._provider, LiteLLMProvider) + assert provider._provider.model == "claude-3-haiku-20240307" + assert provider._provider.api_key == "test-key" + + @patch("litellm.completion") + def test_anthropic_provider_complete(self, mock_completion): + """Test AnthropicProvider.complete() delegates to LiteLLM.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message.content = "Hello from Claude!" + mock_response.choices[0].finish_reason = "stop" + mock_response.model = "claude-3-haiku-20240307" + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 5 + mock_completion.return_value = mock_response + + provider = AnthropicProvider(api_key="test-key", model="claude-3-haiku-20240307") + result = provider.complete( + messages=[{"role": "user", "content": "Hello"}], + system="You are helpful.", + max_tokens=100 + ) + + assert result.content == "Hello from Claude!" + assert result.model == "claude-3-haiku-20240307" + assert result.input_tokens == 10 + assert result.output_tokens == 5 + + mock_completion.assert_called_once() + call_kwargs = mock_completion.call_args[1] + assert call_kwargs["model"] == "claude-3-haiku-20240307" + assert call_kwargs["api_key"] == "test-key" + + @patch("litellm.completion") + def test_anthropic_provider_complete_with_tools(self, mock_completion): + """Test AnthropicProvider.complete_with_tools() delegates to LiteLLM.""" + # Mock a simple response (no tool calls) + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message.content = "The time is 3:00 PM." 
+ mock_response.choices[0].message.tool_calls = None + mock_response.choices[0].finish_reason = "stop" + mock_response.model = "claude-3-haiku-20240307" + mock_response.usage.prompt_tokens = 20 + mock_response.usage.completion_tokens = 10 + mock_completion.return_value = mock_response + + provider = AnthropicProvider(api_key="test-key", model="claude-3-haiku-20240307") + + tools = [ + Tool( + name="get_time", + description="Get current time", + parameters={"properties": {}, "required": []} + ) + ] + + def tool_executor(tool_use: ToolUse) -> ToolResult: + return ToolResult(tool_use_id=tool_use.id, content="3:00 PM", is_error=False) + + result = provider.complete_with_tools( + messages=[{"role": "user", "content": "What time is it?"}], + system="You are a time assistant.", + tools=tools, + tool_executor=tool_executor + ) + + assert result.content == "The time is 3:00 PM." + mock_completion.assert_called_once() diff --git a/core/tests/test_orchestrator.py b/core/tests/test_orchestrator.py new file mode 100644 index 00000000..5b4ebcb7 --- /dev/null +++ b/core/tests/test_orchestrator.py @@ -0,0 +1,82 @@ +"""Tests for AgentOrchestrator LiteLLM integration. 
+ +Run with: + cd core + pytest tests/test_orchestrator.py -v +""" + +from unittest.mock import Mock, patch + +from framework.llm.provider import LLMProvider +from framework.llm.litellm import LiteLLMProvider +from framework.runner.orchestrator import AgentOrchestrator + + +class TestOrchestratorLLMInitialization: + """Test AgentOrchestrator LLM provider initialization.""" + + def test_auto_creates_litellm_provider_when_no_llm_passed(self): + """Test that LiteLLMProvider is auto-created when no llm is passed.""" + with patch.object(LiteLLMProvider, '__init__', return_value=None) as mock_init: + orchestrator = AgentOrchestrator() + + mock_init.assert_called_once_with(model="claude-sonnet-4-20250514") + assert orchestrator._llm is not None + + def test_uses_custom_model_parameter(self): + """Test that custom model parameter is passed to LiteLLMProvider.""" + with patch.object(LiteLLMProvider, '__init__', return_value=None) as mock_init: + orchestrator = AgentOrchestrator(model="gpt-4o") + + mock_init.assert_called_once_with(model="gpt-4o") + + def test_supports_openai_model_names(self): + """Test that OpenAI model names are supported.""" + with patch.object(LiteLLMProvider, '__init__', return_value=None) as mock_init: + orchestrator = AgentOrchestrator(model="gpt-4o-mini") + + mock_init.assert_called_once_with(model="gpt-4o-mini") + assert orchestrator._model == "gpt-4o-mini" + + def test_supports_anthropic_model_names(self): + """Test that Anthropic model names are supported.""" + with patch.object(LiteLLMProvider, '__init__', return_value=None) as mock_init: + orchestrator = AgentOrchestrator(model="claude-3-haiku-20240307") + + mock_init.assert_called_once_with(model="claude-3-haiku-20240307") + assert orchestrator._model == "claude-3-haiku-20240307" + + def test_skips_auto_creation_when_llm_passed(self): + """Test that auto-creation is skipped when llm is explicitly passed.""" + mock_llm = Mock(spec=LLMProvider) + + with patch.object(LiteLLMProvider, '__init__', 
return_value=None) as mock_init: + orchestrator = AgentOrchestrator(llm=mock_llm) + + mock_init.assert_not_called() + assert orchestrator._llm is mock_llm + + def test_model_attribute_stored_correctly(self): + """Test that _model attribute is stored correctly.""" + with patch.object(LiteLLMProvider, '__init__', return_value=None): + orchestrator = AgentOrchestrator(model="gemini/gemini-1.5-flash") + + assert orchestrator._model == "gemini/gemini-1.5-flash" + + +class TestOrchestratorLLMProviderType: + """Test that orchestrator uses correct LLM provider type.""" + + def test_llm_is_litellm_provider_instance(self): + """Test that auto-created _llm is a LiteLLMProvider instance.""" + orchestrator = AgentOrchestrator() + + assert isinstance(orchestrator._llm, LiteLLMProvider) + + def test_llm_implements_llm_provider_interface(self): + """Test that _llm implements LLMProvider interface.""" + orchestrator = AgentOrchestrator() + + assert isinstance(orchestrator._llm, LLMProvider) + assert hasattr(orchestrator._llm, 'complete') + assert hasattr(orchestrator._llm, 'complete_with_tools') diff --git a/docker-compose.override.yml.example b/docker-compose.override.yml.example deleted file mode 100644 index caea0365..00000000 --- a/docker-compose.override.yml.example +++ /dev/null @@ -1,37 +0,0 @@ -# Development overrides -# Copy this file to docker-compose.override.yml for local development -# -# Usage: -# cp docker-compose.override.yml.example docker-compose.override.yml -# docker compose up -# -# This enables: -# - Hot reload for both frontend and backend -# - Source code mounted as volumes -# - Debug ports exposed -# - Development environment settings - -services: - honeycomb: - build: - context: ./honeycomb - dockerfile: Dockerfile.dev - volumes: - - ./honeycomb/src:/app/src:ro - - ./honeycomb/public:/app/public:ro - - ./honeycomb/index.html:/app/index.html:ro - environment: - - VITE_API_URL=http://localhost:4000 - - hive: - build: - context: ./hive - dockerfile: 
Dockerfile.dev - volumes: - - ./hive/src:/app/src:ro - environment: - - NODE_ENV=development - - LOG_LEVEL=debug - # Uncomment to enable Node.js debugging - # ports: - # - "9229:9229" diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 135997cc..00000000 --- a/docker-compose.yml +++ /dev/null @@ -1,168 +0,0 @@ -services: - # Frontend - React application - honeycomb: - build: - context: ./honeycomb - target: production - args: - VITE_API_URL: ${VITE_API_URL:-http://localhost:4000} - container_name: honeycomb-frontend - ports: - - "${FRONTEND_PORT:-3000}:3000" - depends_on: - hive: - condition: service_healthy - restart: unless-stopped - networks: - - honeycomb-network - - # Backend - Hive API (LLM observability & control plane) - hive: - build: - context: ./hive - target: production - args: - NPM_TOKEN: ${NPM_TOKEN:-} - container_name: honeycomb-backend - ports: - - "${BACKEND_PORT:-4000}:4000" - environment: - - NODE_ENV=${NODE_ENV:-production} - - PORT=4000 - - LOG_LEVEL=${LOG_LEVEL:-info} - # PostgreSQL (TimescaleDB) - - TSDB_PG_URL=postgresql://postgres:postgres@timescaledb:5432/aden_tsdb - # MongoDB - - MONGODB_URL=mongodb://mongodb:27017 - - MONGODB_DBNAME=${MONGODB_DBNAME:-aden} - - MONGODB_ERP_DBNAME=${MONGODB_ERP_DBNAME:-erp} - # Redis - - REDIS_URL=redis://redis:6379 - # Authentication - - JWT_SECRET=${JWT_SECRET:-change-me-in-production-use-min-32-chars} - - PASSPHRASE=${PASSPHRASE:-change-me-in-production} - # Hive backend URL for SDK quickstart documents - - HIVE_HOST=${HIVE_HOST:-http://localhost:4000} - depends_on: - timescaledb: - condition: service_healthy - mongodb: - condition: service_healthy - redis: - condition: service_healthy - healthcheck: - test: - [ - "CMD", - "node", - "-e", - "fetch('http://localhost:4000/health').then(r => process.exit(r.ok ? 
0 : 1)).catch(() => process.exit(1))", - ] - interval: 10s - timeout: 5s - retries: 5 - start_period: 15s - restart: unless-stopped - networks: - - honeycomb-network - - # TimescaleDB - Time series database for LLM metrics - timescaledb: - image: timescale/timescaledb:latest-pg16 - container_name: honeycomb-timescaledb - ports: - - "${TSDB_PORT:-5432}:5432" - environment: - - POSTGRES_USER=postgres - - POSTGRES_PASSWORD=postgres - - POSTGRES_DB=aden_tsdb - command: ["postgres", "-c", "log_min_messages=warning", "-c", "log_statement=none"] - volumes: - - timescaledb_data:/var/lib/postgresql/data - # Auto-run schema files on first startup (alphabetical order) - - ./hive/src/services/tsdb/00-init-timescaledb.sql:/docker-entrypoint-initdb.d/00-init-timescaledb.sql:ro - - ./hive/src/services/tsdb/schema.sql:/docker-entrypoint-initdb.d/01-schema.sql:ro - - ./hive/src/services/tsdb/users_schema.sql:/docker-entrypoint-initdb.d/02-users.sql:ro - healthcheck: - test: ["CMD-SHELL", "pg_isready -U postgres -d aden_tsdb"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 10s - restart: unless-stopped - networks: - - honeycomb-network - - # MongoDB - Policies, pricing, and control configuration - mongodb: - image: mongo:7 - container_name: honeycomb-mongodb - ports: - - "${MONGODB_PORT:-27017}:27017" - command: ["mongod", "--quiet", "--logpath", "/dev/null"] - volumes: - - mongodb_data:/data/db - healthcheck: - test: ["CMD", "mongosh", "--quiet", "--eval", "db.adminCommand('ping')"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 10s - restart: unless-stopped - networks: - - honeycomb-network - - # Redis - Caching and Socket.IO adapter - redis: - image: redis:7-alpine - container_name: honeycomb-redis - ports: - - "${REDIS_PORT:-6379}:6379" - command: ["redis-server", "--loglevel", "warning"] - volumes: - - redis_data:/data - healthcheck: - test: ["CMD", "redis-cli", "ping"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 5s - restart: 
unless-stopped - networks: - - honeycomb-network - - # Aden Tools MCP Server - Python tools via Model Context Protocol - aden-tools-mcp: - build: - context: ./aden-tools - container_name: honeycomb-aden-tools-mcp - ports: - - "${ADEN_TOOLS_MCP_PORT:-4001}:4001" - environment: - - MCP_PORT=4001 - # Pass through tool-specific env vars - - BRAVE_SEARCH_API_KEY=${BRAVE_SEARCH_API_KEY:-} - volumes: - - .:/workspace:rw # Mount project root for file access - - aden_tools_workspaces:/app/workdir/workspaces # Persist file system tool workspaces - working_dir: /workspace # Set working directory so relative paths work - command: ["python", "/app/mcp_server.py"] # Use absolute path since working_dir changed - healthcheck: - test: ["CMD", "python", "-c", "import httpx; httpx.get('http://localhost:4001/health').raise_for_status()"] - interval: 30s - timeout: 5s - retries: 5 - start_period: 10s - restart: unless-stopped - networks: - - honeycomb-network - -networks: - honeycomb-network: - driver: bridge - -volumes: - timescaledb_data: - mongodb_data: - redis_data: - aden_tools_workspaces: diff --git a/docs/articles/Architecture Diagram (github readme) (2).jpg b/docs/articles/Architecture Diagram (github readme) (2).jpg new file mode 100644 index 00000000..f6955c9e Binary files /dev/null and b/docs/articles/Architecture Diagram (github readme) (2).jpg differ diff --git a/docs/assets/aden-architecture-diagram.jpg b/docs/assets/aden-architecture-diagram.jpg new file mode 100644 index 00000000..f6955c9e Binary files /dev/null and b/docs/assets/aden-architecture-diagram.jpg differ diff --git a/docs/getting-started.md b/docs/getting-started.md index b7fd3379..0cd6b637 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -1,163 +1,191 @@ # Getting Started -This guide will help you get Hive running on your local machine. +This guide will help you set up the Aden Agent Framework and build your first agent. 
## Prerequisites -- **Docker** (v20.10+) and **Docker Compose** (v2.0+) - for containerized deployment -- **Node.js** (v20+) - for local development without Docker +- **Python 3.11+** ([Download](https://www.python.org/downloads/)) - Python 3.12 or 3.13 recommended +- **pip** - Package installer for Python (comes with Python) +- **git** - Version control +- **Claude Code** ([Install](https://docs.anthropic.com/claude/docs/claude-code)) - Optional, for using building skills -## Quick Start with Docker +## Quick Start -The fastest way to get started is using Docker Compose: +The fastest way to get started: ```bash # 1. Clone the repository git clone https://github.com/adenhq/hive.git cd hive -# 2. Copy and configure -cp config.yaml.example config.yaml +# 2. Run automated Python setup +./scripts/setup-python.sh -# 3. Run setup -npm run setup - -# 4. Start services -docker compose up +# 3. Verify installation +python -c "import framework; import aden_tools; print('✓ Setup complete')" ``` -The application will be available at: -- **Frontend**: http://localhost:3000 -- **Backend API**: http://localhost:4000 -- **Health Check**: http://localhost:4000/health +## Building Your First Agent -## Development Setup - -For local development with hot reload: +### Option 1: Using Claude Code Skills (Recommended) ```bash -# 1. Clone and configure (same as above) -git clone https://github.com/adenhq/hive.git -cd hive -cp config.yaml.example config.yaml +# Install Claude Code skills (one-time) +./quickstart.sh -# 2. Install dependencies -npm install - -# 3. Generate environment files -npm run generate:env - -# 4. Start frontend (terminal 1) -cd honeycomb -npm run dev - -# 5. Start backend (terminal 2) -cd hive -npm run dev +# Start Claude Code and build an agent +claude> /building-agents ``` -### Using Docker for Development +Follow the interactive prompts to: +1. Define your agent's goal +2. Design the workflow (nodes and edges) +3. Generate the agent package +4. 
Test the agent -You can also use Docker with hot reload enabled: +### Option 2: From an Example ```bash -# Copy development overrides -cp docker-compose.override.yml.example docker-compose.override.yml +# Copy an example agent +cp -r exports/support_ticket_agent exports/my_agent -# Start with hot reload -docker compose up +# Customize the agent +cd exports/my_agent +# Edit agent.json, tools.py, README.md + +# Validate the agent +PYTHONPATH=core:exports python -m my_agent validate ``` ## Project Structure ``` hive/ -├── honeycomb/ # Frontend (React + TypeScript + Vite) -│ ├── src/ -│ │ ├── components/ # Reusable UI components -│ │ ├── pages/ # Page components -│ │ ├── hooks/ # Custom React hooks -│ │ ├── services/ # API client and services -│ │ ├── types/ # TypeScript type definitions -│ │ └── utils/ # Utility functions -│ └── public/ # Static assets +├── core/ # Core Framework +│ ├── framework/ # Agent runtime, graph executor +│ │ ├── runner/ # AgentRunner - loads and runs agents +│ │ ├── executor/ # GraphExecutor - executes node graphs +│ │ ├── protocols/ # Standard protocols (hooks, tracing) +│ │ ├── llm/ # LLM provider integrations +│ │ └── memory/ # Memory systems (STM, LTM/RLM) +│ └── pyproject.toml # Package metadata │ -├── hive/ # Backend (Node.js + TypeScript + Express) -│ └── src/ -│ ├── controllers/ # Request handlers -│ ├── middleware/ # Express middleware -│ ├── models/ # Data models -│ ├── routes/ # API routes -│ ├── services/ # Business logic -│ ├── types/ # TypeScript types -│ └── utils/ # Utility functions +├── tools/ # MCP Tools Package +│ └── src/aden_tools/ # 19 tools for agent capabilities +│ ├── tools/ # Individual tool implementations +│ │ ├── web_search_tool/ +│ │ ├── web_scrape_tool/ +│ │ └── file_system_toolkits/ +│ └── mcp_server.py # HTTP MCP server │ -├── docs/ # Documentation -├── scripts/ # Build and utility scripts -└── config.yaml # Application configuration +├── exports/ # Agent Packages +│ ├── support_ticket_agent/ +│ ├── 
market_research_agent/ +│ └── ... # Your agents go here +│ +├── .claude/ # Claude Code Skills +│ └── skills/ +│ ├── building-agents/ +│ └── testing-agent/ +│ +└── docs/ # Documentation ``` -## AI Agent Tools Setup (Optional) - -If you're using the AI agent framework with aden-tools: +## Running an Agent ```bash -# 1. Navigate to aden-tools -cd aden-tools +# Validate agent structure +PYTHONPATH=core:exports python -m my_agent validate -# 2. Copy environment template -cp .env.example .env +# Show agent information +PYTHONPATH=core:exports python -m my_agent info -# 3. Add your API keys to .env -# - ANTHROPIC_API_KEY: Required for LLM operations -# - BRAVE_SEARCH_API_KEY: Required for web search tool +# Run agent with input +PYTHONPATH=core:exports python -m my_agent run --input '{ + "task": "Your input here" +}' + +# Run in mock mode (no LLM calls) +PYTHONPATH=core:exports python -m my_agent run --mock --input '{...}' +``` + +## API Keys Setup + +For running agents with real LLMs: + +```bash +# Add to your shell profile (~/.bashrc, ~/.zshrc, etc.) +export ANTHROPIC_API_KEY="your-key-here" +export OPENAI_API_KEY="your-key-here" # Optional +export BRAVE_SEARCH_API_KEY="your-key-here" # Optional, for web search ``` Get your API keys: - **Anthropic**: [console.anthropic.com](https://console.anthropic.com/) +- **OpenAI**: [platform.openai.com](https://platform.openai.com/) - **Brave Search**: [brave.com/search/api](https://brave.com/search/api/) +## Testing Your Agent + +```bash +# Using Claude Code +claude> /testing-agent + +# Or manually +PYTHONPATH=core:exports python -m my_agent test + +# Run with specific test type +PYTHONPATH=core:exports python -m my_agent test --type constraint +PYTHONPATH=core:exports python -m my_agent test --type success +``` + ## Next Steps -1. **Configure the Application**: See [Configuration Guide](configuration.md) -2. **Understand the Architecture**: See [Architecture Overview](architecture.md) -3. 
**Start Building**: Add your own components and API endpoints +1. **Detailed Setup**: See [ENVIRONMENT_SETUP.md](../ENVIRONMENT_SETUP.md) +2. **Developer Guide**: See [DEVELOPER.md](../DEVELOPER.md) +3. **Agent Patterns**: Explore examples in `/exports` +4. **Custom Tools**: Learn to integrate MCP servers +5. **Join Community**: [Discord](https://discord.com/invite/MXE49hrKDk) ## Troubleshooting -### Port Already in Use - -If ports 3000 or 4000 are in use, update `config.yaml`: - -```yaml -server: - frontend: - port: 3001 # Change to available port - backend: - port: 4001 -``` - -Then regenerate environment files: +### ModuleNotFoundError: No module named 'framework' ```bash -npm run generate:env +# Reinstall framework package +cd core +pip install -e . ``` -### Docker Build Fails - -Clear Docker cache and rebuild: +### ModuleNotFoundError: No module named 'aden_tools' ```bash -docker compose down -docker compose build --no-cache -docker compose up +# Reinstall tools package +cd tools +pip install -e . 
``` -### Dependencies Issues - -Clear node_modules and reinstall: +### LLM API Errors ```bash -npm run clean -npm install +# Verify API key is set +echo $ANTHROPIC_API_KEY + +# Run in mock mode to test without API +PYTHONPATH=core:exports python -m my_agent run --mock --input '{...}' ``` + +### Package Installation Issues + +```bash +# Remove and reinstall +pip uninstall -y framework aden-tools +./scripts/setup-python.sh +``` + +## Getting Help + +- **Documentation**: Check the `/docs` folder +- **Issues**: [github.com/adenhq/hive/issues](https://github.com/adenhq/hive/issues) +- **Discord**: [discord.com/invite/MXE49hrKDk](https://discord.com/invite/MXE49hrKDk) +- **Examples**: Explore `/exports` for working agents diff --git a/docs/quizzes/00-job-post.md b/docs/quizzes/00-job-post.md new file mode 100644 index 00000000..35a4fbce --- /dev/null +++ b/docs/quizzes/00-job-post.md @@ -0,0 +1,157 @@ +# 🚀 Software Development Engineer + +**Location:** San Francisco, CA (Hybrid) or Remote +**Type:** Full-time +**Team:** Engineering + +--- + +## About Aden + +We're building the future of AI agents. Aden is an open-source framework for creating self-improving, production-ready AI agents with built-in cost controls, human-in-the-loop capabilities, and comprehensive observability. + +Our mission: Make AI agents reliable enough for real-world production use. + +--- + +## The Role + +We're looking for a Software Development Engineer to help build and scale our AI agent platform. You'll work across the full stack, from our React dashboard to our Node.js backend, contributing to core infrastructure that powers autonomous AI systems. + +This is an opportunity to work on cutting-edge AI infrastructure alongside a small, experienced team passionate about shipping great software. 
+ +--- + +## What You'll Do + +- Build and maintain features across our full-stack TypeScript codebase +- Design and implement APIs for agent management, monitoring, and control +- Work with real-time systems (WebSockets, event streaming) +- Optimize database performance (TimescaleDB, MongoDB, Redis) +- Contribute to our Model Context Protocol (MCP) server and tooling +- Collaborate on architecture decisions for scalability and reliability +- Write clean, tested, well-documented code +- Participate in code reviews and help maintain code quality + +--- + +## Tech Stack + +**Frontend (Honeycomb Dashboard)** +- React 18 + TypeScript +- Vite +- Tailwind CSS + Radix UI +- Zustand (state management) +- TanStack Query +- Recharts + Vega (data visualization) +- Socket.io (real-time updates) + +**Backend (Hive)** +- Node.js + Express + TypeScript +- Socket.io (WebSocket) +- Model Context Protocol (MCP) +- Zod (validation) +- Passport + JWT (authentication) + +**Data Layer** +- TimescaleDB (time-series metrics) +- MongoDB (policies, configuration) +- Redis (caching, pub/sub) + +**Infrastructure** +- Docker + Docker Compose +- Kubernetes + Kustomize +- GitHub Actions (CI/CD) +- Nginx + +--- + +## What We're Looking For + +**Required:** +- 2+ years of professional software development experience +- Strong proficiency in TypeScript and Node.js +- Experience with React and modern frontend development +- Familiarity with SQL and NoSQL databases +- Understanding of RESTful APIs and WebSocket communication +- Comfortable with Git and collaborative development workflows +- Strong problem-solving skills and attention to detail + +**Nice to Have:** +- Experience with AI/LLM applications or agent frameworks +- Knowledge of time-series databases (TimescaleDB, InfluxDB) +- Kubernetes and container orchestration experience +- Experience with real-time systems at scale +- Contributions to open-source projects +- Familiarity with Model Context Protocol (MCP) + +--- + +## What We Offer + +- 
Competitive salary + equity +- Health, dental, and vision insurance +- Flexible work arrangements (hybrid/remote) +- Learning & development budget +- Home office setup stipend +- Opportunity to work on open-source AI infrastructure +- Small team, big impact + +--- + +## How to Apply + +**Show us what you can do by contributing to our open-source project:** + +1. **Solve an existing issue** + - Browse our [GitHub Issues](https://github.com/adenhq/hive/issues) + - Look for issues labeled `good first issue` or `help wanted` + - Comment on the issue to claim it + - Submit a Pull Request with your solution + +2. **Create new issues** + - Found a bug? Report it with clear reproduction steps + - Have an idea? Open a feature request with your proposal + - Spotted documentation gaps? Suggest improvements + - Quality issues that show you understand the codebase stand out + +3. **Submit Pull Requests** + - Fix bugs, add features, or improve documentation + - Follow our contribution guidelines + - Write clear PR descriptions explaining your changes + - Respond to code review feedback + +4. **Submit your application:** + - Email: `contact@adenhq.com` + - Subject: `[SDE] Your Name` + - Include: + - Resume/CV + - GitHub profile + - Links to your Issues and/or PRs on our repo + - Brief intro about yourself + +5. **What happens next:** + - We review your contributions (1-2 weeks) + - Technical interview (60 min) + - Team interview (45 min) + - Offer 🎉 + +--- + +## Why Join Us? + +- **Impact:** Your code will power AI agents used by developers worldwide +- **Open Source:** Everything we build is open source +- **Learning:** Work with cutting-edge AI and distributed systems +- **Culture:** Small team, low ego, high trust, ship fast +- **Growth:** Early-stage company with room to grow + +--- + +*Aden is an equal opportunity employer. 
We celebrate diversity and are committed to creating an inclusive environment for all employees.* + +--- + +**Questions?** Email us at `contact@adenhq.com` or open an issue on [GitHub](https://github.com/adenhq/hive). + +Made with 🔥 Passion in San Francisco diff --git a/docs/quizzes/README.md b/docs/quizzes/README.md index 030b4387..1ec206e1 100644 --- a/docs/quizzes/README.md +++ b/docs/quizzes/README.md @@ -2,6 +2,14 @@ Welcome to the Aden Engineering Challenges! These quizzes are designed for students and applicants who want to join the Aden team or contribute to our open-source projects. +--- + +## 💼 We're Hiring! + +**[Software Development Engineer](./00-job-post.md)** - Full-stack TypeScript, React, Node.js, AI agents + +--- + ## How It Works 1. **Choose your track** based on your interests and skill level diff --git a/hive/.env.example b/hive/.env.example deleted file mode 100644 index 330b93ba..00000000 --- a/hive/.env.example +++ /dev/null @@ -1,28 +0,0 @@ -# Server Configuration -PORT=4000 -NODE_ENV=development - -# TSDB PostgreSQL (TimescaleDB) -TSDB_PG_URL=postgresql://user:password@localhost:5432/aden_tsdb - -# User Database (MySQL - read-only access) -MYSQL_HOST=localhost -MYSQL_PORT=3306 -MYSQL_USER=aden_reader -MYSQL_PASSWORD= -MYSQL_DATABASE=aden - -# MongoDB (policies and pricing data) -MONGODB_URL=mongodb://localhost:27017 -MONGODB_DBNAME=aden -MONGODB_ERP_DBNAME=erp - -# Redis (caching and socket.io adapter) -REDIS_URL=redis://localhost:6379 - -# JWT Authentication -JWT_SECRET=your-jwt-secret -PASSPHRASE=your-passphrase - -# Logging -LOG_LEVEL=info diff --git a/hive/.eslintrc.cjs b/hive/.eslintrc.cjs deleted file mode 100644 index 4c5b5d4d..00000000 --- a/hive/.eslintrc.cjs +++ /dev/null @@ -1,24 +0,0 @@ -module.exports = { - root: true, - env: { node: true, es2020: true }, - extends: [ - 'eslint:recommended', - 'plugin:@typescript-eslint/recommended', - ], - ignorePatterns: ['dist', '.eslintrc.cjs'], - parser: '@typescript-eslint/parser', - 
parserOptions: { - ecmaVersion: 'latest', - sourceType: 'module', - }, - rules: { - // Allow unused vars that start with underscore - '@typescript-eslint/no-unused-vars': ['error', { - argsIgnorePattern: '^_', - varsIgnorePattern: '^_', - destructuredArrayIgnorePattern: '^_', - }], - // Allow any types (common in API/external data handling) - '@typescript-eslint/no-explicit-any': 'off', - }, -} diff --git a/hive/Dockerfile b/hive/Dockerfile deleted file mode 100644 index 89819395..00000000 --- a/hive/Dockerfile +++ /dev/null @@ -1,66 +0,0 @@ -# Build stage -FROM node:20-alpine AS builder - -ARG NPM_TOKEN - -WORKDIR /app - -# Configure npm for private packages (@acho-inc/administration) -RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc - -# Copy package files -COPY package*.json ./ -COPY tsconfig.json ./ - -# Install all dependencies (including dev for TypeScript build) -RUN npm install - -# Copy source code -COPY src ./src - -# Copy docs for quickstart templates -COPY docs ./docs - -# Build TypeScript -RUN npm run build - -# Remove npmrc after build -RUN rm -f .npmrc - -# Production stage -FROM node:20-alpine AS production - -WORKDIR /app - -# Create non-root user -RUN addgroup -g 1001 -S nodejs && \ - adduser -S nodejs -u 1001 - -# Copy package files for production deps -COPY package*.json ./ - -# Configure npm for private packages (needed for production install) -ARG NPM_TOKEN -RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc && \ - npm install --omit=dev && \ - rm -f .npmrc && \ - npm cache clean --force - -# Copy compiled JavaScript from builder -COPY --from=builder --chown=nodejs:nodejs /app/dist ./dist - -# Copy docs directory for quickstart templates -COPY --from=builder --chown=nodejs:nodejs /app/docs ./docs - -USER nodejs - -# Default port (can be overridden via PORT env var) -EXPOSE 4000 - -ENV NODE_ENV=production - -# Health check -HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \ - CMD node -e 
"fetch('http://localhost:' + (process.env.PORT || 4000) + '/health').then(r => r.ok ? process.exit(0) : process.exit(1)).catch(() => process.exit(1))" - -CMD ["node", "dist/index.js"] diff --git a/hive/Dockerfile.dev b/hive/Dockerfile.dev deleted file mode 100644 index 0c77b34e..00000000 --- a/hive/Dockerfile.dev +++ /dev/null @@ -1,24 +0,0 @@ -# Development Dockerfile with hot reload -FROM node:20-alpine - -ARG NPM_TOKEN - -WORKDIR /app - -# Configure npm for private packages (@acho-inc/administration) -RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc - -# Copy package files -COPY package*.json ./ - -# Install dependencies -RUN npm install && rm -f .npmrc - -# Copy source code -COPY . . - -# Expose ports (app + debug) -EXPOSE 4000 9229 - -# Start development server with hot reload -CMD ["npm", "run", "dev"] diff --git a/hive/docs/aden-sdk-documents/config/agent-frameworks.json b/hive/docs/aden-sdk-documents/config/agent-frameworks.json deleted file mode 100644 index 7b9fbbdb..00000000 --- a/hive/docs/aden-sdk-documents/config/agent-frameworks.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "generic": { - "name": "Generic", - "description": "Generic agent integration", - "pythonSupport": true, - "typescriptSupport": true, - "templateFile": "generic" - }, - "langgraph": { - "name": "LangGraph", - "description": "LangGraph agent integration", - "pythonSupport": true, - "typescriptSupport": true, - "templateFile": "langgraph" - }, - "livekit": { - "name": "LiveKit", - "description": "LiveKit voice agent integration", - "pythonSupport": true, - "typescriptSupport": false, - "adenPythonExtra": "livekit", - "templateFile": "livekit" - } -} diff --git a/hive/docs/aden-sdk-documents/config/llm-vendors.json b/hive/docs/aden-sdk-documents/config/llm-vendors.json deleted file mode 100644 index d9cb26d5..00000000 --- a/hive/docs/aden-sdk-documents/config/llm-vendors.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "openai": { - "name": "OpenAI", - "envVarComment": "# or 
ANTHROPIC_API_KEY, GOOGLE_API_KEY" - }, - "anthropic": { - "name": "Anthropic", - "envVarComment": "# or OPENAI_API_KEY, GOOGLE_API_KEY" - }, - "google": { - "name": "Google", - "envVarComment": "# or OPENAI_API_KEY, ANTHROPIC_API_KEY" - } -} diff --git a/hive/docs/aden-sdk-documents/config/sdk-languages.json b/hive/docs/aden-sdk-documents/config/sdk-languages.json deleted file mode 100644 index 7efad1fc..00000000 --- a/hive/docs/aden-sdk-documents/config/sdk-languages.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "python": { - "name": "Python", - "adenPackage": "aden-py" - }, - "javascript": { - "name": "JavaScript/TypeScript", - "adenPackage": "aden-ts" - } -} diff --git a/hive/docs/aden-sdk-documents/python/quickstart-langflow.md b/hive/docs/aden-sdk-documents/python/quickstart-langflow.md deleted file mode 100644 index e3bd9292..00000000 --- a/hive/docs/aden-sdk-documents/python/quickstart-langflow.md +++ /dev/null @@ -1,191 +0,0 @@ -Quick reference for integrating Aden LLM observability & cost control into LangFlow applications. - -## Prerequisites - -`.env` file should contain: - -``` -OPENAI_API_KEY=sk-xxx # or ANTHROPIC_API_KEY, GOOGLE_API_KEY -ADEN_API_URL=https://hive.adenhq.com -ADEN_API_KEY=your-aden-api-key - -``` - -## Installation - -```bash -pip install aden-py langflow python-dotenv - -``` - -## Basic Setup (3 Steps) - -### 1. Import and Load Environment - -```python -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, - uninstrument, - MeterOptions, - create_console_emitter, - BeforeRequestResult, - RequestCancelledError, -) - -``` - -### 2. 
Define Budget Check Callback - -```python -def budget_check(params, context): - """Enforce budget limits before each LLM request.""" - budget_info = getattr(context, 'budget', None) - - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - - return BeforeRequestResult.proceed() - -``` - -### 3. Initialize Aden (at startup) - -```python -instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, -)) - -``` - -### 4. Use LangFlow Components - -```python -from langflow.components.models import LanguageModelComponent - -comp = LanguageModelComponent() -comp.set_attributes({ - "provider": "Google", # or "OpenAI" - "model_name": "gemini-2.0-flash", - "api_key": os.getenv("GOOGLE_API_KEY"), - "stream": False, -}) - -model = comp.build_model() - -try: - response = model.invoke("Hello!") - print(response.content) -except RequestCancelledError as e: - print(f"Budget exceeded: {e}") - -``` - -### 5. 
Cleanup (on exit) - -```python -uninstrument() - -``` - -## Complete Template - -```python -"""LangFlow with Aden instrumentation""" -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, uninstrument, MeterOptions, - create_console_emitter, BeforeRequestResult, RequestCancelledError, -) - -# Budget enforcement callback -def budget_check(params, context): - budget_info = getattr(context, 'budget', None) - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - return BeforeRequestResult.proceed() - -# Initialize Aden -instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, -)) - -# === YOUR LANGFLOW CODE HERE === - -from langflow.components.models import LanguageModelComponent - -def run_model(user_input: str): - try: - comp = LanguageModelComponent() - comp.set_attributes({ - "provider": "Google", - "model_name": "gemini-2.0-flash", - "api_key": os.getenv("GOOGLE_API_KEY"), - "stream": False, - }) - model = comp.build_model() - return model.invoke(user_input).content - except RequestCancelledError as e: - return f"Sorry, you have used up your allowance. 
{e}" - -if __name__ == "__main__": - try: - print(run_model("Say hello!")) - finally: - uninstrument() - -``` - -## Supported Providers - -| Provider | Model Example | Notes | -| --------- | ------------------- | -------------------------------- | -| OpenAI | gpt-4o, gpt-4o-mini | Direct SDK instrumentation | -| Google | gemini-2.0-flash | Uses gRPC client instrumentation | -| Anthropic | claude-3-opus | Direct SDK instrumentation | - -## Budget Actions Reference - -| Action | When | Behavior | -| ----------------------------------------------- | ----------------- | ------------------------------ | -| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | -| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | -| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit | Delays request by N ms | -| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit | Switches to cheaper model | - -## Key Points - -- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev -- `before_request` callback enables budget enforcement -- Always wrap model calls in `try/except RequestCancelledError` -- Call `uninstrument()` on exit to flush remaining metrics -- Control agent connects automatically when `api_key` + `server_url` are provided -- Google Gemini support works automatically via gRPC client instrumentation - -## Documentation - -Full docs: [https://pypi.org/project/aden-py](https://pypi.org/project/aden-py/) diff --git a/hive/docs/aden-sdk-documents/python/quickstart-langgraph.md b/hive/docs/aden-sdk-documents/python/quickstart-langgraph.md deleted file mode 100644 index 7a413234..00000000 --- a/hive/docs/aden-sdk-documents/python/quickstart-langgraph.md +++ /dev/null @@ -1,164 +0,0 @@ -Quick reference for integrating Aden LLM observability & cost control into Python agents. 
- -## Prerequisites - -`.env` file should contain: - -``` -OPENAI_API_KEY=sk-xxx # or ANTHROPIC_API_KEY, GOOGLE_API_KEY -ADEN_API_URL=https://hive.adenhq.com -ADEN_API_KEY=your-aden-api-key - -``` - -## Installation - -```bash -pip install aden-py python-dotenv - -``` - -## Basic Setup (3 Steps) - -### 1. Import and Load Environment - -```python -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, - uninstrument, - MeterOptions, - create_console_emitter, - BeforeRequestResult, - RequestCancelledError, -) - -``` - -### 2. Define Budget Check Callback - -```python -def budget_check(params, context): - """Enforce budget limits before each LLM request.""" - budget_info = getattr(context, 'budget', None) - - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - - return BeforeRequestResult.proceed() - -``` - -### 3. Initialize Aden (at startup) - -```python -instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, -)) - -``` - -### 4. Handle Budget Errors in Your Agent - -```python -def run_agent(user_input: str): - try: - # Your agent logic here - result = graph.invoke({"messages": [{"role": "user", "content": user_input}]}) - return result["messages"][-1].content - except RequestCancelledError as e: - return f"Sorry, you have used up your allowance. {e}" - -``` - -### 5. 
Cleanup (on exit) - -```python -uninstrument() - -``` - -## Complete Template - -```python -"""Agent with Aden instrumentation""" -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, uninstrument, MeterOptions, - create_console_emitter, BeforeRequestResult, RequestCancelledError, -) - -# Budget enforcement callback -def budget_check(params, context): - budget_info = getattr(context, 'budget', None) - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - return BeforeRequestResult.proceed() - -# Initialize Aden -instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, -)) - -# === YOUR AGENT CODE HERE === - -def run_agent(user_input: str): - try: - # Your LLM calls here - pass - except RequestCancelledError as e: - return f"Sorry, you have used up your allowance. 
{e}" - -if __name__ == "__main__": - try: - # Your main loop - pass - finally: - uninstrument() - -``` - -## Budget Actions Reference - -| Action | When | Behavior | -| ----------------------------------------------- | ----------------- | ------------------------------ | -| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | -| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | -| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit | Delays request by N ms | -| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit | Switches to cheaper model | - -## Key Points - -- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev -- `before_request` callback enables budget enforcement -- Always wrap agent calls in `try/except RequestCancelledError` -- Call `uninstrument()` on exit to flush remaining metrics -- Control agent connects automatically when `api_key` + `server_url` are provided - -## Documentation - -Full docs: [https://pypi.org/project/aden-py](https://pypi.org/project/aden-py/json) diff --git a/hive/docs/aden-sdk-documents/python/quickstart-livekit.md b/hive/docs/aden-sdk-documents/python/quickstart-livekit.md deleted file mode 100644 index 35c0f9ab..00000000 --- a/hive/docs/aden-sdk-documents/python/quickstart-livekit.md +++ /dev/null @@ -1,165 +0,0 @@ -# Aden-py LiveKit Integration Guide - -Quick reference for integrating Aden LLM observability & cost control into LiveKit voice agents. - -## Prerequisites - -`.env` file should contain: - -``` -OPENAI_API_KEY=sk-xxx -ADEN_API_URL=https://hive.adenhq.com -ADEN_API_KEY=your-aden-api-key -``` - -## Installation - -```bash -pip install 'aden-py[livekit]' python-dotenv -``` - -## Setup (4 Steps) - -### 1. 
Import and Load Environment - -```python -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, - MeterOptions, - create_console_emitter, - BeforeRequestResult, - RequestCancelledError, -) -``` - -### 2. Define Budget Check Callback - -```python -def budget_check(params, context): - """Enforce budget limits before each LLM request.""" - budget_info = getattr(context, 'budget', None) - - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - - return BeforeRequestResult.proceed() -``` - -### 3. Create Worker Prewarm Function - -**IMPORTANT:** LiveKit uses multiprocessing. Instrumentation must happen in each worker process, not the main process. - -```python -def initialize_aden_in_worker(proc): - """Initialize Aden instrumentation in each worker process.""" - instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, - )) -``` - -### 4. Pass Prewarm Function to WorkerOptions - -```python -if __name__ == "__main__": - agents.cli.run_app(agents.WorkerOptions( - entrypoint_fnc=entrypoint, - agent_name="my-agent", - prewarm_fnc=initialize_aden_in_worker, # <-- This is the key! 
- )) -``` - -## Complete Template - -```python -"""LiveKit Voice Agent with Aden instrumentation""" -import os -from dotenv import load_dotenv -load_dotenv() - -from livekit import agents -from livekit.plugins import openai - -from aden import ( - instrument, MeterOptions, create_console_emitter, - BeforeRequestResult, RequestCancelledError, -) - -# Budget enforcement callback -def budget_check(params, context): - budget_info = getattr(context, 'budget', None) - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - return BeforeRequestResult.proceed() - -# Worker initialization - runs in each spawned process -def initialize_aden_in_worker(proc): - instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, - )) - -async def entrypoint(ctx: agents.JobContext): - # Your agent logic here - session = agents.AgentSession( - llm=openai.LLM(model="gpt-4o-mini"), - # ... 
- ) - await session.start(ctx.room) - -if __name__ == "__main__": - agents.cli.run_app(agents.WorkerOptions( - entrypoint_fnc=entrypoint, - agent_name="my-agent", - prewarm_fnc=initialize_aden_in_worker, - )) -``` - -## Budget Actions Reference - -| Action | When | Behavior | -| ----------------------------------------------- | ------------------------ | ------------------------------ | -| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | -| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | -| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit (95%+) | Delays request by N ms | -| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit (80%+) | Switches to cheaper model | - -## Key Points - -- **Use `prewarm_fnc`** - LiveKit spawns worker processes; instrumentation must happen in each worker -- **Don't instrument in main process** - It won't affect the worker processes where LLM calls happen -- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev -- Control agent connects automatically when `api_key` + `server_url` are provided - -## Troubleshooting - -**No metrics showing?** - -- Ensure `prewarm_fnc` is set in `WorkerOptions` -- Check that `ADEN_API_KEY` and `ADEN_API_URL` are in your `.env` -- Verify you're using `aden-py[livekit]` (with the livekit extra) - -**Metrics in test but not in agent?** - -- LiveKit uses multiprocessing - the main process instrumentation doesn't carry over -- The `prewarm_fnc` runs in each worker before your `entrypoint` is called diff --git a/hive/docs/aden-sdk-documents/templates/javascript/generic.md b/hive/docs/aden-sdk-documents/templates/javascript/generic.md deleted file mode 100644 index fe8dca0c..00000000 --- a/hive/docs/aden-sdk-documents/templates/javascript/generic.md +++ /dev/null @@ -1,194 +0,0 @@ -Quick reference for integrating Aden LLM observability & cost control into TypeScript/JavaScript agents. 
- -## Prerequisites - -`.env` file should contain: - -``` -OPENAI_API_KEY=sk-xxx {{envVarComment}} -ADEN_API_URL={{serverUrl}} -ADEN_API_KEY={{apiKey}} -``` - -## Installation - -```bash -npm install aden-ts dotenv - -# Install the LLM SDKs you use -npm install openai # For OpenAI -npm install @anthropic-ai/sdk # For Anthropic -npm install @google/generative-ai # For Google Gemini -``` - -## Basic Setup - -### 1. Import Aden and SDK (at top of file) - -```typescript -import "dotenv/config"; -import OpenAI from "openai"; -import { - instrument, - uninstrument, - createConsoleEmitter, - RequestCancelledError, -} from "aden-ts"; -import type { BeforeRequestContext, BeforeRequestResult } from "aden-ts"; -``` - -### 2. Define Before Request Callback (optional) - -```typescript -// Custom logic before each LLM request -// Budget enforcement is handled server-side by the control agent -function beforeRequest( - _params: Record, - context: BeforeRequestContext -): BeforeRequestResult { - console.log(`[Aden] Request to model: ${context.model}`); - return { action: "proceed" }; -} -``` - -### 3. Initialize Aden (at startup, BEFORE using SDK) - -```typescript -await instrument({ - apiKey: process.env.ADEN_API_KEY, - serverUrl: process.env.ADEN_API_URL, - emitMetric: createConsoleEmitter({ pretty: true }), - onAlert: (alert: { level: string; message: string }) => - console.log(`[Aden ${alert.level}] ${alert.message}`), - beforeRequest, - sdks: { OpenAI }, -}); -``` - -### 4. Handle Budget Errors in Your Agent - -```typescript -async function runAgent(userInput: string): Promise { - try { - const openai = new OpenAI(); - const response = await openai.chat.completions.create({ - model: "gpt-4o", - messages: [{ role: "user", content: userInput }], - }); - return response.choices[0]?.message?.content ?? ""; - } catch (e) { - if (e instanceof RequestCancelledError) { - return `Sorry, your budget has been exhausted. ${e.message}`; - } - throw e; - } -} -``` - -### 5. 
Cleanup (on exit) - -```typescript -await uninstrument(); -``` - -## Complete Template - -```typescript -/** - * Agent with Aden instrumentation - */ -import "dotenv/config"; -import OpenAI from "openai"; -import { - instrument, - uninstrument, - createConsoleEmitter, - RequestCancelledError, -} from "aden-ts"; -import type { BeforeRequestContext, BeforeRequestResult } from "aden-ts"; - -// Before request callback (optional) -function beforeRequest( - _params: Record, - context: BeforeRequestContext -): BeforeRequestResult { - console.log(`[Aden] Request to model: ${context.model}`); - return { action: "proceed" }; -} - -// Initialize Aden FIRST -await instrument({ - apiKey: process.env.ADEN_API_KEY, - serverUrl: process.env.ADEN_API_URL, - emitMetric: createConsoleEmitter({ pretty: true }), - onAlert: (alert: { level: string; message: string }) => - console.log(`[Aden ${alert.level}] ${alert.message}`), - beforeRequest, - sdks: { OpenAI }, -}); - -// === YOUR AGENT CODE HERE === - -async function runAgent(userInput: string): Promise { - try { - const openai = new OpenAI(); - const response = await openai.chat.completions.create({ - model: "gpt-4o", - messages: [{ role: "user", content: userInput }], - }); - return response.choices[0]?.message?.content ?? ""; - } catch (e) { - if (e instanceof RequestCancelledError) { - return `Sorry, your budget has been exhausted. 
${e.message}`; - } - throw e; - } -} - -// Main entry point -async function main() { - try { - const result = await runAgent("Hello, world!"); - console.log(result); - } finally { - await uninstrument(); - } -} - -main(); -``` - -## BeforeRequestContext Reference - -The `context` parameter in `beforeRequest` contains: - -| Field | Type | Description | -| --- | --- | --- | -| `model` | string | Model being used for this request | -| `stream` | boolean | Whether this is a streaming request | -| `spanId` | string | Generated span ID (OTel standard) | -| `traceId` | string | Trace ID grouping related operations | -| `timestamp` | Date | When the request was initiated | -| `metadata` | Record | Custom metadata (optional) | - -## BeforeRequestResult Actions - -| Action | Usage | Behavior | -| --- | --- | --- | -| `{ action: "proceed" }` | Allow request | Request continues normally | -| `{ action: "cancel", reason: "..." }` | Block request | Throws `RequestCancelledError` | -| `{ action: "throttle", delayMs: N }` | Rate limit | Delays request by N ms | -| `{ action: "degrade", toModel: "...", reason: "..." 
}` | Downgrade | Switches to specified model | - -## Key Points - -- Module name is `aden-ts` (not `aden`) -- `emitMetric` is **required** - use `createConsoleEmitter({ pretty: true })` for dev -- Budget enforcement is handled **server-side** by the control agent -- Always wrap agent calls in `try/catch` for `RequestCancelledError` -- Call `await uninstrument()` on exit to flush remaining metrics -- Control agent connects automatically when `apiKey` + `serverUrl` are provided - -## Documentation - -Full docs: [https://www.npmjs.com/package/aden-ts](https://www.npmjs.com/package/aden-ts) diff --git a/hive/docs/aden-sdk-documents/templates/javascript/langgraph.md b/hive/docs/aden-sdk-documents/templates/javascript/langgraph.md deleted file mode 100644 index a911a7bd..00000000 --- a/hive/docs/aden-sdk-documents/templates/javascript/langgraph.md +++ /dev/null @@ -1,297 +0,0 @@ -Quick reference for integrating Aden LLM observability & cost control into TypeScript/JavaScript agents. - -## Prerequisites - -`.env` file should contain: - -``` -OPENAI_API_KEY=sk-xxx {{envVarComment}} -ADEN_API_URL={{serverUrl}} -ADEN_API_KEY={{apiKey}} -``` - -## Installation - -```bash -npm install aden-ts dotenv - -# Install the LLM SDKs you use -npm install openai # For OpenAI -npm install @anthropic-ai/sdk # For Anthropic -npm install @google/generative-ai # For Google Gemini -``` - -## Basic Setup - -### 1. Import Aden and SDK (at top of file) - -```typescript -import "dotenv/config"; -import OpenAI from "openai"; -import { - instrument, - uninstrument, - createConsoleEmitter, - RequestCancelledError, -} from "aden-ts"; -import type { BeforeRequestContext, BeforeRequestResult } from "aden-ts"; -``` - -### 2. 
Define Before Request Callback (optional) - -```typescript -// Custom logic before each LLM request -// Budget enforcement is handled server-side by the control agent -function beforeRequest( - _params: Record, - context: BeforeRequestContext -): BeforeRequestResult { - console.log(`[Aden] Request to model: ${context.model}`); - return { action: "proceed" }; -} -``` - -### 3. Initialize Aden (at startup, BEFORE using SDK) - -```typescript -await instrument({ - apiKey: process.env.ADEN_API_KEY, - serverUrl: process.env.ADEN_API_URL, - emitMetric: createConsoleEmitter({ pretty: true }), - onAlert: (alert: { level: string; message: string }) => - console.log(`[Aden ${alert.level}] ${alert.message}`), - beforeRequest, - sdks: { OpenAI }, -}); -``` - -### 4. Handle Budget Errors in Your Agent - -```typescript -async function runAgent(userInput: string): Promise { - try { - const openai = new OpenAI(); - const response = await openai.chat.completions.create({ - model: "gpt-4o", - messages: [{ role: "user", content: userInput }], - }); - return response.choices[0]?.message?.content ?? ""; - } catch (e) { - if (e instanceof RequestCancelledError) { - return `Sorry, your budget has been exhausted. ${e.message}`; - } - throw e; - } -} -``` - -### 5. 
Cleanup (on exit) - -```typescript -await uninstrument(); -``` - -## Complete Template (Direct SDK Usage) - -```typescript -/** - * Agent with Aden instrumentation - Direct SDK usage - */ -import "dotenv/config"; -import OpenAI from "openai"; -import { - instrument, - uninstrument, - createConsoleEmitter, - RequestCancelledError, -} from "aden-ts"; -import type { BeforeRequestContext, BeforeRequestResult } from "aden-ts"; - -// Before request callback (optional) -function beforeRequest( - _params: Record, - context: BeforeRequestContext -): BeforeRequestResult { - console.log(`[Aden] Request to model: ${context.model}`); - return { action: "proceed" }; -} - -// Initialize Aden FIRST -await instrument({ - apiKey: process.env.ADEN_API_KEY, - serverUrl: process.env.ADEN_API_URL, - emitMetric: createConsoleEmitter({ pretty: true }), - onAlert: (alert: { level: string; message: string }) => - console.log(`[Aden ${alert.level}] ${alert.message}`), - beforeRequest, - sdks: { OpenAI }, -}); - -// === YOUR AGENT CODE HERE === - -async function runAgent(userInput: string): Promise { - try { - const openai = new OpenAI(); - const response = await openai.chat.completions.create({ - model: "gpt-4o", - messages: [{ role: "user", content: userInput }], - }); - return response.choices[0]?.message?.content ?? ""; - } catch (e) { - if (e instanceof RequestCancelledError) { - return `Sorry, your budget has been exhausted. ${e.message}`; - } - throw e; - } -} - -// Main entry point -async function main() { - try { - const result = await runAgent("Hello, world!"); - console.log(result); - } finally { - await uninstrument(); - } -} - -main(); -``` - -## LangChain / LangGraph Integration - -When using LangChain or LangGraph, you **MUST** use dynamic imports to ensure instrumentation is applied before LangChain loads the SDK. - -### Critical: SDK Version Matching - -LangChain bundles its own SDK dependencies. 
To ensure instrumentation works, your SDK version must match LangChain's: - -```bash -# Check what version LangChain uses -cat node_modules/@langchain/anthropic/node_modules/@anthropic-ai/sdk/package.json | grep version - -# Update your package.json to match that version -# e.g., "@anthropic-ai/sdk": "^0.65.0" - -# Reinstall to dedupe -rm -rf node_modules package-lock.json && npm install - -# Verify no nested SDK (should show "No such file") -ls node_modules/@langchain/anthropic/node_modules 2>/dev/null || echo "OK: SDK is shared" -``` - -### LangChain Template - -```typescript -/** - * LangGraph Agent with Aden instrumentation - * Key: Use dynamic imports AFTER instrument() - */ -import "dotenv/config"; -import Anthropic from "@anthropic-ai/sdk"; -import { - instrument, - uninstrument, - createConsoleEmitter, - RequestCancelledError, -} from "aden-ts"; -import type { BeforeRequestContext, BeforeRequestResult } from "aden-ts"; - -function beforeRequest( - _params: Record, - context: BeforeRequestContext -): BeforeRequestResult { - console.log(`[Aden] Request to model: ${context.model}`); - return { action: "proceed" }; -} - -async function main() { - // 1. Initialize Aden FIRST (before any LangChain imports) - await instrument({ - apiKey: process.env.ADEN_API_KEY, - serverUrl: process.env.ADEN_API_URL, - emitMetric: createConsoleEmitter({ pretty: true }), - onAlert: (alert: { level: string; message: string }) => - console.log(`[Aden ${alert.level}] ${alert.message}`), - beforeRequest, - sdks: { Anthropic }, - }); - - // 2. Dynamic imports AFTER instrumentation - const { ChatAnthropic } = await import("@langchain/anthropic"); - const { HumanMessage } = await import("@langchain/core/messages"); - // ... other LangChain imports - - // 3. 
Now create your LangChain components - const model = new ChatAnthropic({ - model: "claude-sonnet-4-20250514", - temperature: 0, - }); - - try { - // Your agent logic here - const response = await model.invoke([new HumanMessage("Hello!")]); - console.log(response.content); - } catch (error) { - if (error instanceof RequestCancelledError) { - console.log(`Budget exhausted: ${error.message}`); - } else { - throw error; - } - } finally { - await uninstrument(); - } -} - -main(); -``` - -## BeforeRequestContext Reference - -The `context` parameter in `beforeRequest` contains: - -| Field | Type | Description | -| --- | --- | --- | -| `model` | string | Model being used for this request | -| `stream` | boolean | Whether this is a streaming request | -| `spanId` | string | Generated span ID (OTel standard) | -| `traceId` | string | Trace ID grouping related operations | -| `timestamp` | Date | When the request was initiated | -| `metadata` | Record | Custom metadata (optional) | - -## BeforeRequestResult Actions - -| Action | Usage | Behavior | -| --- | --- | --- | -| `{ action: "proceed" }` | Allow request | Request continues normally | -| `{ action: "cancel", reason: "..." }` | Block request | Throws `RequestCancelledError` | -| `{ action: "throttle", delayMs: N }` | Rate limit | Delays request by N ms | -| `{ action: "degrade", toModel: "...", reason: "..." }` | Downgrade | Switches to specified model | - -## Key Points - -- Module name is `aden-ts` (not `aden`) -- `emitMetric` is **required** - use `createConsoleEmitter({ pretty: true })` for dev -- Budget enforcement is handled **server-side** by the control agent -- Always wrap agent calls in `try/catch` for `RequestCancelledError` -- Call `await uninstrument()` on exit to flush remaining metrics -- Control agent connects automatically when `apiKey` + `serverUrl` are provided -- **LangChain users**: Must use dynamic imports and match SDK versions - -## Troubleshooting - -### No metrics being captured - -1. 
**Check SDK version match**: Run `npm ls @anthropic-ai/sdk` - should show only ONE version -2. **Use dynamic imports**: Import LangChain modules AFTER `instrument()` is called -3. **Verify instrumentation**: Look for `[aden] Instrumented: anthropic + control agent` at startup - -### RequestCancelledError not thrown - -Budget enforcement is server-side. Ensure: -- `ADEN_API_KEY` and `ADEN_API_URL` are set correctly -- Control agent connection is established (check startup logs) - -## Documentation - -Full docs: [https://www.npmjs.com/package/aden-ts](https://www.npmjs.com/package/aden-ts) diff --git a/hive/docs/aden-sdk-documents/templates/python/generic.md b/hive/docs/aden-sdk-documents/templates/python/generic.md deleted file mode 100644 index d5e6fe79..00000000 --- a/hive/docs/aden-sdk-documents/templates/python/generic.md +++ /dev/null @@ -1,164 +0,0 @@ -Quick reference for integrating Aden LLM observability & cost control into Python agents. - -## Prerequisites - -`.env` file should contain: - -``` -OPENAI_API_KEY=sk-xxx {{envVarComment}} -ADEN_API_URL={{serverUrl}} -ADEN_API_KEY={{apiKey}} - -``` - -## Installation - -```bash -pip install aden-py python-dotenv - -``` - -## Basic Setup (3 Steps) - -### 1. Import and Load Environment - -```python -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, - uninstrument, - MeterOptions, - create_console_emitter, - BeforeRequestResult, - RequestCancelledError, -) - -``` - -### 2. 
Define Budget Check Callback - -```python -def budget_check(params, context): - """Enforce budget limits before each LLM request.""" - budget_info = getattr(context, 'budget', None) - - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - - return BeforeRequestResult.proceed() - -``` - -### 3. Initialize Aden (at startup) - -```python -instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, -)) - -``` - -### 4. Handle Budget Errors in Your Agent - -```python -def run_agent(user_input: str): - try: - # Your agent logic here - result = graph.invoke({"messages": [{"role": "user", "content": user_input}]}) - return result["messages"][-1].content - except RequestCancelledError as e: - return f"Sorry, you have used up your allowance. {e}" - -``` - -### 5. 
Cleanup (on exit) - -```python -uninstrument() - -``` - -## Complete Template - -```python -"""Agent with Aden instrumentation""" -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, uninstrument, MeterOptions, - create_console_emitter, BeforeRequestResult, RequestCancelledError, -) - -# Budget enforcement callback -def budget_check(params, context): - budget_info = getattr(context, 'budget', None) - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - return BeforeRequestResult.proceed() - -# Initialize Aden -instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, -)) - -# === YOUR AGENT CODE HERE === - -def run_agent(user_input: str): - try: - # Your LLM calls here - pass - except RequestCancelledError as e: - return f"Sorry, you have used up your allowance. 
{e}" - -if __name__ == "__main__": - try: - # Your main loop - pass - finally: - uninstrument() - -``` - -## Budget Actions Reference - -| Action | When | Behavior | -| --- | --- | --- | -| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | -| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | -| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit | Delays request by N ms | -| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit | Switches to cheaper model | - -## Key Points - -- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev -- `before_request` callback enables budget enforcement -- Always wrap agent calls in `try/except RequestCancelledError` -- Call `uninstrument()` on exit to flush remaining metrics -- Control agent connects automatically when `api_key` + `server_url` are provided - -## Documentation - -Full docs: [https://pypi.org/project/aden-py](https://pypi.org/project/aden-py/) diff --git a/hive/docs/aden-sdk-documents/templates/python/langflow.md b/hive/docs/aden-sdk-documents/templates/python/langflow.md deleted file mode 100644 index 12e52166..00000000 --- a/hive/docs/aden-sdk-documents/templates/python/langflow.md +++ /dev/null @@ -1,191 +0,0 @@ -Quick reference for integrating Aden LLM observability & cost control into LangFlow applications. - -## Prerequisites - -`.env` file should contain: - -``` -OPENAI_API_KEY=sk-xxx {{envVarComment}} -ADEN_API_URL={{serverUrl}} -ADEN_API_KEY={{apiKey}} - -``` - -## Installation - -```bash -pip install aden-py langflow python-dotenv - -``` - -## Basic Setup (3 Steps) - -### 1. Import and Load Environment - -```python -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, - uninstrument, - MeterOptions, - create_console_emitter, - BeforeRequestResult, - RequestCancelledError, -) - -``` - -### 2. 
Define Budget Check Callback - -```python -def budget_check(params, context): - """Enforce budget limits before each LLM request.""" - budget_info = getattr(context, 'budget', None) - - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - - return BeforeRequestResult.proceed() - -``` - -### 3. Initialize Aden (at startup) - -```python -instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, -)) - -``` - -### 4. Use LangFlow Components - -```python -from langflow.components.models import LanguageModelComponent - -comp = LanguageModelComponent() -comp.set_attributes({ - "provider": "Google", # or "OpenAI" - "model_name": "gemini-2.0-flash", - "api_key": os.getenv("GOOGLE_API_KEY"), - "stream": False, -}) - -model = comp.build_model() - -try: - response = model.invoke("Hello!") - print(response.content) -except RequestCancelledError as e: - print(f"Budget exceeded: {e}") - -``` - -### 5. 
Cleanup (on exit) - -```python -uninstrument() - -``` - -## Complete Template - -```python -"""LangFlow with Aden instrumentation""" -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, uninstrument, MeterOptions, - create_console_emitter, BeforeRequestResult, RequestCancelledError, -) - -# Budget enforcement callback -def budget_check(params, context): - budget_info = getattr(context, 'budget', None) - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - return BeforeRequestResult.proceed() - -# Initialize Aden -instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, -)) - -# === YOUR LANGFLOW CODE HERE === - -from langflow.components.models import LanguageModelComponent - -def run_model(user_input: str): - try: - comp = LanguageModelComponent() - comp.set_attributes({ - "provider": "Google", - "model_name": "gemini-2.0-flash", - "api_key": os.getenv("GOOGLE_API_KEY"), - "stream": False, - }) - model = comp.build_model() - return model.invoke(user_input).content - except RequestCancelledError as e: - return f"Sorry, you have used up your allowance. 
{e}" - -if __name__ == "__main__": - try: - print(run_model("Say hello!")) - finally: - uninstrument() - -``` - -## Supported Providers - -| Provider | Model Example | Notes | -| --- | --- | --- | -| OpenAI | gpt-4o, gpt-4o-mini | Direct SDK instrumentation | -| Google | gemini-2.0-flash | Uses gRPC client instrumentation | -| Anthropic | claude-3-opus | Direct SDK instrumentation | - -## Budget Actions Reference - -| Action | When | Behavior | -| --- | --- | --- | -| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | -| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | -| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit | Delays request by N ms | -| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit | Switches to cheaper model | - -## Key Points - -- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev -- `before_request` callback enables budget enforcement -- Always wrap model calls in `try/except RequestCancelledError` -- Call `uninstrument()` on exit to flush remaining metrics -- Control agent connects automatically when `api_key` + `server_url` are provided -- Google Gemini support works automatically via gRPC client instrumentation - -## Documentation - -Full docs: [https://pypi.org/project/aden-py](https://pypi.org/project/aden-py/) diff --git a/hive/docs/aden-sdk-documents/templates/python/langgraph.md b/hive/docs/aden-sdk-documents/templates/python/langgraph.md deleted file mode 100644 index d5e6fe79..00000000 --- a/hive/docs/aden-sdk-documents/templates/python/langgraph.md +++ /dev/null @@ -1,164 +0,0 @@ -Quick reference for integrating Aden LLM observability & cost control into Python agents. 
- -## Prerequisites - -`.env` file should contain: - -``` -OPENAI_API_KEY=sk-xxx {{envVarComment}} -ADEN_API_URL={{serverUrl}} -ADEN_API_KEY={{apiKey}} - -``` - -## Installation - -```bash -pip install aden-py python-dotenv - -``` - -## Basic Setup (5 Steps) - -### 1. Import and Load Environment - -```python -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, - uninstrument, - MeterOptions, - create_console_emitter, - BeforeRequestResult, - RequestCancelledError, -) - -``` - -### 2. Define Budget Check Callback - -```python -def budget_check(params, context): - """Enforce budget limits before each LLM request.""" - budget_info = getattr(context, 'budget', None) - - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - - return BeforeRequestResult.proceed() - -``` - -### 3. Initialize Aden (at startup) - -```python -instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, -)) - -``` - -### 4. Handle Budget Errors in Your Agent - -```python -def run_agent(user_input: str): - try: - # Your agent logic here - result = graph.invoke({"messages": [{"role": "user", "content": user_input}]}) - return result["messages"][-1].content - except RequestCancelledError as e: - return f"Sorry, you have used up your allowance. {e}" - -``` - -### 5. 
Cleanup (on exit) - -```python -uninstrument() - -``` - -## Complete Template - -```python -"""Agent with Aden instrumentation""" -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, uninstrument, MeterOptions, - create_console_emitter, BeforeRequestResult, RequestCancelledError, -) - -# Budget enforcement callback -def budget_check(params, context): - budget_info = getattr(context, 'budget', None) - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - return BeforeRequestResult.proceed() - -# Initialize Aden -instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, -)) - -# === YOUR AGENT CODE HERE === - -def run_agent(user_input: str): - try: - # Your LLM calls here - pass - except RequestCancelledError as e: - return f"Sorry, you have used up your allowance. 
{e}" - -if __name__ == "__main__": - try: - # Your main loop - pass - finally: - uninstrument() - -``` - -## Budget Actions Reference - -| Action | When | Behavior | -| --- | --- | --- | -| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | -| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | -| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit | Delays request by N ms | -| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit | Switches to cheaper model | - -## Key Points - -- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev -- `before_request` callback enables budget enforcement -- Always wrap agent calls in `try/except RequestCancelledError` -- Call `uninstrument()` on exit to flush remaining metrics -- Control agent connects automatically when `api_key` + `server_url` are provided - -## Documentation - -Full docs: [https://pypi.org/project/aden-py](https://pypi.org/project/aden-py/) diff --git a/hive/docs/aden-sdk-documents/templates/python/livekit.md b/hive/docs/aden-sdk-documents/templates/python/livekit.md deleted file mode 100644 index e0cc13f5..00000000 --- a/hive/docs/aden-sdk-documents/templates/python/livekit.md +++ /dev/null @@ -1,162 +0,0 @@ -# Aden-py LiveKit Integration Guide - -Quick reference for integrating Aden LLM observability & cost control into LiveKit voice agents. - -## Prerequisites - -`.env` file should contain: -``` -OPENAI_API_KEY=sk-xxx -ADEN_API_URL={{serverUrl}} -ADEN_API_KEY={{apiKey}} -``` - -## Installation - -```bash -pip install 'aden-py[livekit]' python-dotenv -``` - -## Setup (4 Steps) - -### 1. Import and Load Environment - -```python -import os -from dotenv import load_dotenv -load_dotenv() - -from aden import ( - instrument, - MeterOptions, - create_console_emitter, - BeforeRequestResult, - RequestCancelledError, -) -``` - -### 2. 
Define Budget Check Callback - -```python -def budget_check(params, context): - """Enforce budget limits before each LLM request.""" - budget_info = getattr(context, 'budget', None) - - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - - return BeforeRequestResult.proceed() -``` - -### 3. Create Worker Prewarm Function - -**IMPORTANT:** LiveKit uses multiprocessing. Instrumentation must happen in each worker process, not the main process. - -```python -def initialize_aden_in_worker(proc): - """Initialize Aden instrumentation in each worker process.""" - instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, - )) -``` - -### 4. Pass Prewarm Function to WorkerOptions - -```python -if __name__ == "__main__": - agents.cli.run_app(agents.WorkerOptions( - entrypoint_fnc=entrypoint, - agent_name="my-agent", - prewarm_fnc=initialize_aden_in_worker, # <-- This is the key! 
- )) -``` - -## Complete Template - -```python -"""LiveKit Voice Agent with Aden instrumentation""" -import os -from dotenv import load_dotenv -load_dotenv() - -from livekit import agents -from livekit.plugins import openai - -from aden import ( - instrument, MeterOptions, create_console_emitter, - BeforeRequestResult, RequestCancelledError, -) - -# Budget enforcement callback -def budget_check(params, context): - budget_info = getattr(context, 'budget', None) - if budget_info and budget_info.get('exhausted', False): - return BeforeRequestResult.cancel("Budget exhausted") - if budget_info and budget_info.get('percent_used', 0) >= 95: - return BeforeRequestResult.throttle(delay_ms=2000) - if budget_info and budget_info.get('percent_used', 0) >= 80: - return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") - return BeforeRequestResult.proceed() - -# Worker initialization - runs in each spawned process -def initialize_aden_in_worker(proc): - instrument(MeterOptions( - api_key=os.environ.get("ADEN_API_KEY"), - server_url=os.environ.get("ADEN_API_URL"), - emit_metric=create_console_emitter(pretty=True), - on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), - before_request=budget_check, - )) - -async def entrypoint(ctx: agents.JobContext): - # Your agent logic here - session = agents.AgentSession( - llm=openai.LLM(model="gpt-4o-mini"), - # ... 
- ) - await session.start(ctx.room) - -if __name__ == "__main__": - agents.cli.run_app(agents.WorkerOptions( - entrypoint_fnc=entrypoint, - agent_name="my-agent", - prewarm_fnc=initialize_aden_in_worker, - )) -``` - -## Budget Actions Reference - -| Action | When | Behavior | -|--------|------|----------| -| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | -| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | -| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit (95%+) | Delays request by N ms | -| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit (80%+) | Switches to cheaper model | - -## Key Points - -- **Use `prewarm_fnc`** - LiveKit spawns worker processes; instrumentation must happen in each worker -- **Don't instrument in main process** - It won't affect the worker processes where LLM calls happen -- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev -- Control agent connects automatically when `api_key` + `server_url` are provided - -## Troubleshooting - -**No metrics showing?** -- Ensure `prewarm_fnc` is set in `WorkerOptions` -- Check that `ADEN_API_KEY` and `ADEN_API_URL` are in your `.env` -- Verify you're using `aden-py[livekit]` (with the livekit extra) - -**Metrics in test but not in agent?** -- LiveKit uses multiprocessing - the main process instrumentation doesn't carry over -- The `prewarm_fnc` runs in each worker before your `entrypoint` is called diff --git a/hive/docs/api/user-authentication.md b/hive/docs/api/user-authentication.md deleted file mode 100644 index 7d19ff31..00000000 --- a/hive/docs/api/user-authentication.md +++ /dev/null @@ -1,247 +0,0 @@ -# User Authentication API - -This document describes the user authentication endpoints available in the Hive backend. 
- -## Base URL - -``` -http://localhost:4000 -``` - -## Endpoints - -### Register a New User - -Create a new user account and receive an authentication token. - -``` -POST /user/register -``` - -#### Request Headers - -| Header | Value | Required | -| ------------ | ---------------- | -------- | -| Content-Type | application/json | Yes | - -#### Request Body - -| Field | Type | Required | Description | -| --------- | ------ | -------- | ------------------------------- | -| email | string | Yes | User's email address | -| password | string | Yes | Password (minimum 8 characters) | -| name | string | No | Display name | -| firstname | string | No | First name | -| lastname | string | No | Last name | - -#### Example Request - -```bash -curl -X POST http://localhost:4000/user/register \ - -H "Content-Type: application/json" \ - -d '{ - "email": "user@example.com", - "password": "securepassword123", - "firstname": "John", - "lastname": "Doe" - }' -``` - -#### Success Response (201 Created) - -```json -{ - "success": true, - "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", - "email": "user@example.com", - "name": "John Doe", - "firstname": "John", - "lastname": "Doe", - "current_team_id": 1, - "create_time": "2026-01-13T01:52:56.604Z" -} -``` - -#### Error Responses - -| Status | Code | Message | -| ------ | --------------------- | -------------------------------------- | -| 400 | Bad Request | Email and password are required | -| 400 | Bad Request | Please enter a valid email | -| 400 | Bad Request | Password must be at least 8 characters | -| 409 | Conflict | Email already registered | -| 500 | Internal Server Error | Registration failed. Please try again. | - ---- - -### Login - -Authenticate an existing user and receive an authentication token. 
- -``` -POST /user/login-v2 -``` - -#### Request Headers - -| Header | Value | Required | -| ------------ | ---------------- | -------- | -| Content-Type | application/json | Yes | - -#### Request Body - -| Field | Type | Required | Description | -| -------- | ------ | -------- | -------------------- | -| email | string | Yes | User's email address | -| password | string | Yes | User's password | - -#### Example Request - -```bash -curl -X POST http://localhost:4000/user/login-v2 \ - -H "Content-Type: application/json" \ - -d '{ - "email": "user@example.com", - "password": "securepassword123" - }' -``` - -#### Success Response (200 OK) - -```json -{ - "success": true, - "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", - "email": "user@example.com", - "firstname": "John", - "lastname": "Doe", - "name": "John Doe", - "current_team_id": 1, - "create_time": "2026-01-13T01:52:56.594Z" -} -``` - -#### Error Responses - -| Status | Code | Message | -| ------ | --------------------- | -------------------------------------- | -| 400 | Bad Request | Email and password are required | -| 400 | Bad Request | Please enter a valid email | -| 400 | Bad Request | Password must be at least 6 characters | -| 400 | Bad Request | Please sign in with OAuth | -| 401 | Unauthorized | Invalid email or password | -| 403 | Forbidden | Your account has been disabled | -| 500 | Internal Server Error | Login failed. Please try again. | - ---- - -### Get Current User - -Retrieve information about the currently authenticated user. - -``` -GET /user/me -``` - -#### Request Headers - -| Header | Value | Required | -| ------------- | ------- | -------- | -| Authorization | {token} | Yes | - -#### Example Request - -```bash -curl -X GET http://localhost:4000/user/me \ - -H "Authorization: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." 
-``` - -#### Success Response (200 OK) - -```json -{ - "success": true, - "user": { - "id": 1, - "email": "user@example.com", - "name": "John Doe", - "firstname": "John", - "lastname": "Doe", - "current_team_id": 1, - "avatar_url": null - } -} -``` - -#### Error Responses - -| Status | Code | Message | -| ------ | --------------------- | ----------------------- | -| 401 | Unauthorized | No token provided | -| 401 | Unauthorized | Invalid token | -| 500 | Internal Server Error | Failed to get user info | - ---- - -## Authentication - -After successful login or registration, the API returns a JWT token. Include this token in the `Authorization` header for authenticated requests: - -``` -Authorization: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... -``` - -### Token Structure - -The JWT token contains the following claims: - -| Claim | Description | -| --------------- | -------------------------------- | -| id | User ID | -| email | User email | -| firstname | User first name | -| lastname | User last name | -| current_team_id | User's current team ID | -| salt | Random salt for token validation | -| iat | Issued at timestamp | -| exp | Expiration timestamp | - -### Token Expiration - -By default, tokens expire after 7 days. This can be configured via the `JWT_EXPIRES_IN` environment variable. - ---- - -## Development Credentials - -For local development, the following default user is available: - -| Field | Value | -| -------- | ------------------- | -| Email | dev@honeycomb.local | -| Password | honeycomb123 | - ---- - -## Error Response Format - -All error responses follow this format: - -```json -{ - "success": false, - "msg": "Error message describing what went wrong" -} -``` - ---- - -## Rate Limiting - -Currently, rate limiting is not enabled by default. It can be enabled via the `features.rate_limiting` config option. - ---- - -## CORS - -The API supports CORS. Configure the allowed origin via the `cors.origin` config option (default: `http://localhost:3000`). 
diff --git a/hive/docs/sdk-event-specification.md b/hive/docs/sdk-event-specification.md deleted file mode 100644 index 78ccf07b..00000000 --- a/hive/docs/sdk-event-specification.md +++ /dev/null @@ -1,703 +0,0 @@ -# Aden SDK Trace Event Specification - -**Version:** 2.0.0 -**Last Updated:** 2026-01-08 - -This document defines the authoritative specification for all events transmitted between the Aden SDK and the Aden Hive control server. - ---- - -## Table of Contents - -1. [Overview](#overview) -2. [Event Types](#event-types) -3. [MetricEvent](#metricevent) -4. [ContentCapture (Layer 0)](#contentcapture-layer-0) -5. [ToolCallCapture (Layer 6)](#toolcallcapture-layer-6) -6. [ControlEvent](#controlevent) -7. [HeartbeatEvent](#heartbeatevent) -8. [ErrorEvent](#errorevent) -9. [API Endpoints](#api-endpoints) -10. [Storage Architecture](#storage-architecture) - ---- - -## Overview - -The Aden SDK captures telemetry from LLM API calls and transmits events to the Aden Hive server for: -- **Observability**: Token usage, latency, cost tracking -- **Governance**: Content capture, tool call validation -- **Control**: Budget enforcement, rate limiting, model degradation - -### Providers Supported - -| Provider | Value | -|----------|-------| -| OpenAI | `openai` | -| Anthropic | `anthropic` | -| Google Gemini | `gemini` | - -### Transport - -Events are sent via: -- **HTTP POST** to `/v1/control/events` (batch) -- **WebSocket** for real-time policy sync - ---- - -## Event Types - -| Event Type | Description | Direction | -|------------|-------------|-----------| -| `metric` | LLM call telemetry | SDK → Server | -| `control` | Control action taken | SDK → Server | -| `heartbeat` | Health status | SDK → Server | -| `error` | Error report | SDK → Server | - ---- - -## MetricEvent - -The primary event emitted after each LLM API call. Contains flat fields for consistent cross-provider analytics. 
- -### Envelope Structure - -```json -{ - "event_type": "metric", - "timestamp": "2026-01-08T12:00:00.000Z", - "sdk_instance_id": "uuid-v4", - "data": { /* MetricEvent fields */ } -} -``` - -### MetricEvent Fields - -#### Identity (OpenTelemetry-compatible) - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `trace_id` | string | **Yes** | Trace ID grouping related operations | -| `span_id` | string | Yes | Unique span ID for this operation | -| `parent_span_id` | string | No | Parent span for nested calls | -| `request_id` | string | No | Provider-specific request ID | -| `call_sequence` | integer | Yes | Sequence number within the trace | - -#### Provider & Model - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `provider` | string | **Yes** | `openai`, `anthropic`, `gemini` | -| `model` | string | **Yes** | Model identifier (e.g., `gpt-4o`, `claude-3-opus`) | -| `stream` | boolean | Yes | Whether streaming was enabled | -| `timestamp` | string | **Yes** | ISO 8601 timestamp of request start | - -#### Performance - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `latency_ms` | float | Yes | Request latency in milliseconds | -| `status_code` | integer | No | HTTP status code | -| `error` | string | No | Error message if request failed | - -#### Token Usage - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `input_tokens` | integer | Yes | Input/prompt tokens consumed | -| `output_tokens` | integer | Yes | Output/completion tokens consumed | -| `total_tokens` | integer | Yes | Total tokens (input + output) | -| `cached_tokens` | integer | No | Tokens served from cache | -| `reasoning_tokens` | integer | No | Reasoning tokens (o1/o3 models) | - -#### Rate Limits - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `rate_limit_remaining_requests` | integer | No | 
Remaining requests in window | -| `rate_limit_remaining_tokens` | integer | No | Remaining tokens in window | -| `rate_limit_reset_requests` | float | No | Seconds until request limit resets | -| `rate_limit_reset_tokens` | float | No | Seconds until token limit resets | - -#### Call Context - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `agent_stack` | string[] | No | Stack of agent names leading to this call | -| `call_site_file` | string | No | File path of immediate caller | -| `call_site_line` | integer | No | Line number | -| `call_site_column` | integer | No | Column number | -| `call_site_function` | string | No | Function name | -| `call_stack` | string[] | No | Full call stack (file:line:function) | - -#### Tool Usage (Summary) - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `tool_call_count` | integer | No | Number of tool calls made | -| `tool_names` | string | No | Tool names (comma-separated) | - -#### Provider-specific - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `service_tier` | string | No | Service tier (auto, default, flex, priority) | -| `metadata` | object | No | Custom metadata attached to request | - -#### Layer 0: Content Capture - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `content_capture` | ContentCapture | No | Full content capture (see below) | - -#### Layer 6: Tool Call Deep Inspection - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `tool_calls_captured` | ToolCallCapture[] | No | Detailed tool call captures | -| `tool_validation_errors_count` | integer | No | Count of validation errors | - -### Example MetricEvent - -```json -{ - "event_type": "metric", - "timestamp": "2026-01-08T12:00:00.000Z", - "sdk_instance_id": "abc123", - "data": { - "trace_id": "tr_abc123", - "span_id": "sp_def456", - "call_sequence": 1, - 
"provider": "openai", - "model": "gpt-4o", - "stream": false, - "latency_ms": 1234.5, - "input_tokens": 150, - "output_tokens": 50, - "total_tokens": 200, - "cached_tokens": 0, - "agent_stack": ["main_agent", "sub_agent"], - "tool_call_count": 2, - "tool_names": "search,calculate", - "metadata": { - "user_id": "user_123", - "session_id": "sess_456" - }, - "content_capture": { - "system_prompt": "You are a helpful assistant.", - "messages": [...], - "response_content": "Here is my response...", - "finish_reason": "stop" - } - } -} -``` - ---- - -## ContentCapture (Layer 0) - -Full content capture for request and response. Enables governance, debugging, and compliance. - -### Fields - -| Field | Type | Description | -|-------|------|-------------| -| `system_prompt` | string \| ContentReference | System prompt | -| `messages` | MessageCapture[] \| ContentReference | Message history | -| `tools` | ToolSchemaCapture[] \| ContentReference | Tools schema | -| `params` | RequestParamsCapture | Request parameters | -| `response_content` | string \| ContentReference | Response text | -| `finish_reason` | string | Why response ended: `stop`, `length`, `tool_calls`, `content_filter` | -| `choice_count` | integer | Number of choices (for n > 1) | -| `has_images` | boolean | Whether request contained images | -| `image_urls` | string[] | Image URLs (never base64) | - -### ContentReference - -When content exceeds `max_content_bytes`, it's stored separately and referenced: - -```json -{ - "content_id": "uuid-v4", - "content_hash": "sha256-hex", - "byte_size": 12345, - "truncated_preview": "First 100 chars..." 
-} -``` - -### MessageCapture - -```json -{ - "role": "user|assistant|system|tool", - "content": "string or ContentReference", - "name": "optional name", - "tool_call_id": "for tool results" -} -``` - -### ToolSchemaCapture - -```json -{ - "name": "function_name", - "description": "Tool description", - "parameters_schema": { /* JSON Schema */ } -} -``` - -### RequestParamsCapture - -```json -{ - "temperature": 0.7, - "max_tokens": 1000, - "top_p": 1.0, - "frequency_penalty": 0, - "presence_penalty": 0, - "stop": ["STOP"], - "seed": 12345, - "top_k": 40 -} -``` - ---- - -## ToolCallCapture (Layer 6) - -Detailed tool call capture with validation results. - -### Fields - -| Field | Type | Description | -|-------|------|-------------| -| `id` | string | Tool call ID for correlation | -| `name` | string | Tool/function name | -| `arguments` | object \| ContentReference | Parsed arguments | -| `arguments_raw` | string \| ContentReference | Raw JSON string | -| `validation_errors` | ValidationError[] | Schema validation errors | -| `is_valid` | boolean | Whether arguments passed validation | -| `index` | integer | Position in tool_calls array | - -### ValidationError - -```json -{ - "path": "properties.name", - "message": "Required property missing", - "expected_type": "string", - "actual_type": "undefined" -} -``` - ---- - -## ControlEvent - -Emitted when a control action is taken on a request. 
- -### Fields - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `event_type` | string | Yes | Always `"control"` | -| `timestamp` | string | Yes | ISO 8601 timestamp | -| `sdk_instance_id` | string | Yes | SDK instance identifier | -| `trace_id` | string | Yes | Associated trace ID | -| `span_id` | string | Yes | Associated span ID | -| `provider` | string | Yes | Provider name | -| `original_model` | string | Yes | Originally requested model | -| `action` | string | Yes | Action taken (see below) | -| `reason` | string | No | Human-readable reason | -| `degraded_to` | string | No | Model switched to (if degraded) | -| `throttle_delay_ms` | integer | No | Delay applied (if throttled) | -| `estimated_cost` | float | No | Estimated cost that triggered decision | -| `policy_id` | string | Yes | Policy ID (default: `"default"`) | -| `budget_id` | string | No | Budget that triggered action | -| `context_id` | string | No | Context ID (user, session, etc.) | - -### Control Actions - -| Action | Description | -|--------|-------------| -| `allow` | Request proceeds normally | -| `block` | Request is rejected | -| `throttle` | Request is delayed before proceeding | -| `degrade` | Request uses a cheaper/fallback model | -| `alert` | Request proceeds but triggers alert | - -### Example ControlEvent - -```json -{ - "event_type": "control", - "timestamp": "2026-01-08T12:00:00.000Z", - "sdk_instance_id": "abc123", - "trace_id": "tr_abc123", - "span_id": "sp_def456", - "provider": "openai", - "original_model": "gpt-4o", - "action": "degrade", - "reason": "Budget limit exceeded", - "degraded_to": "gpt-4o-mini", - "estimated_cost": 0.05, - "policy_id": "default", - "budget_id": "budget_monthly" -} -``` - ---- - -## HeartbeatEvent - -Periodic health check sent by the SDK. 
- -### Fields - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `event_type` | string | Yes | Always `"heartbeat"` | -| `timestamp` | string | Yes | ISO 8601 timestamp | -| `sdk_instance_id` | string | Yes | SDK instance identifier | -| `status` | string | Yes | `healthy`, `degraded`, `reconnecting` | -| `requests_since_last` | integer | Yes | Requests since last heartbeat | -| `errors_since_last` | integer | Yes | Errors since last heartbeat | -| `policy_cache_age_seconds` | integer | Yes | Policy cache age | -| `websocket_connected` | boolean | Yes | WebSocket connection status | -| `sdk_version` | string | Yes | SDK version | - ---- - -## ErrorEvent - -Emitted when an error occurs in the SDK. - -### Fields - -| Field | Type | Required | Description | -|-------|------|----------|-------------| -| `event_type` | string | Yes | Always `"error"` | -| `timestamp` | string | Yes | ISO 8601 timestamp | -| `sdk_instance_id` | string | Yes | SDK instance identifier | -| `message` | string | Yes | Error message | -| `code` | string | No | Error code | -| `stack` | string | No | Stack trace | -| `trace_id` | string | No | Related trace ID | - ---- - -## API Endpoints - -### POST /v1/control/events - -Submit events batch. - -**Request:** -```json -{ - "events": [ - { "event_type": "metric", "timestamp": "...", "data": {...} }, - { "event_type": "control", "timestamp": "...", ... } - ] -} -``` - -**Response:** -```json -{ - "success": true, - "processed": 2 -} -``` - -### POST /v1/control/content - -Store large content items (MongoDB - for SDK content references). - -**Request:** -```json -{ - "items": [ - { - "content_id": "uuid", - "content_hash": "sha256-hex", - "content": "full content string", - "byte_size": 12345 - } - ] -} -``` - -**Response:** -```json -{ - "success": true, - "stored": 1 -} -``` - -### GET /v1/control/content/:contentId - -Retrieve stored content by ID (MongoDB). 
- -**Response:** -```json -{ - "content_id": "uuid", - "content_hash": "sha256-hex", - "content": "full content string", - "byte_size": 12345 -} -``` - -### GET /v1/control/events/:traceId/:callSequence/content - -Retrieve content for a specific event from TSDB warm/cold storage. - -**Response:** -```json -{ - "trace_id": "tr_abc123", - "call_sequence": 1, - "content_items": [ - { - "content_type": "system_prompt", - "content_hash": "sha256-hex", - "byte_size": 256, - "truncated_preview": "You are a helpful...", - "content": "You are a helpful assistant..." - }, - { - "content_type": "messages", - "content_hash": "sha256-hex", - "byte_size": 4096, - "message_count": 5, - "truncated_preview": "[{\"role\":\"user\"...", - "content": "[{\"role\":\"user\",\"content\":\"Hello\"}...]" - }, - { - "content_type": "response", - "content_hash": "sha256-hex", - "byte_size": 512, - "truncated_preview": "Here is my response...", - "content": "Here is my response to your question..." - } - ], - "count": 3 -} -``` - -### GET /v1/control/content/hash/:contentHash - -Retrieve content from cold storage by SHA-256 hash. - -**Response:** -```json -{ - "content_hash": "sha256-hex", - "content": "full content string", - "byte_size": 12345 -} -``` - -### GET /v1/control/policy - -Fetch current control policy. - -### POST /v1/control/budget/validate - -Server-side budget validation (hybrid enforcement). - ---- - -## Storage Architecture - -The storage system uses a **hot/warm/cold** architecture optimized for time-series analytics with content deduplication. 
- -``` -┌─────────────────────────────────────────────────────────────────────┐ -│ SDK Event Ingestion │ -└─────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────┐ -│ Event Normalization & Content Extraction │ -│ │ -│ • Extract content_capture fields │ -│ • Hash content with SHA-256 │ -│ • Create lightweight content flags for hot table │ -└─────────────────────────────────────────────────────────────────────┘ - │ - ┌───────────────┼───────────────┐ - ▼ ▼ ▼ - ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ - │ HOT TABLE │ │ WARM TABLE │ │ COLD TABLE │ - │ llm_events │ │llm_event_ │ │llm_content_ │ - │ │ │ content │ │ store │ - │ Metrics only │ │Content refs │ │ Deduplicated │ - │ Fast queries │ │ per event │ │ content │ - └──────────────┘ └──────────────┘ └──────────────┘ -``` - -### Design Principles - -1. **Hot/Cold Separation**: Metrics stay in the hot table for fast time-series queries; content is stored separately -2. **Content Deduplication**: Identical content (same SHA-256 hash) is stored once, regardless of how many events reference it -3. **Reference Counting**: Cold storage tracks how many events reference each piece of content -4. **Preview Without Fetch**: Warm table stores truncated previews for quick scanning without fetching full content - -### TSDB Hot Table: `llm_events` - -Stores metric events for fast time-series analytics. **Content is NOT stored here** (only lightweight flags). 
- -| Column | Type | Description | -|--------|------|-------------| -| `timestamp` | timestamptz | Event timestamp (partition key) | -| `ingest_date` | date | Ingestion date | -| `team_id` | text | Team identifier | -| `user_id` | text | User identifier | -| `trace_id` | text | Trace ID | -| `span_id` | text | Span ID | -| `parent_span_id` | text | Parent span ID | -| `request_id` | text | Provider request ID | -| `provider` | text | Provider name | -| `call_sequence` | integer | Sequence within trace | -| `model` | text | Model identifier | -| `stream` | boolean | Streaming flag | -| `agent` | text | Primary agent name | -| `agent_stack` | jsonb | Full agent stack | -| `latency_ms` | double precision | Latency in ms | -| `usage_input_tokens` | double precision | Input tokens | -| `usage_output_tokens` | double precision | Output tokens | -| `usage_total_tokens` | double precision | Total tokens | -| `usage_cached_tokens` | double precision | Cached tokens | -| `usage_reasoning_tokens` | double precision | Reasoning tokens | -| `cost_total` | numeric | Calculated cost | -| `metadata` | jsonb | Custom metadata | -| `call_site` | jsonb | Call site info | -| `has_content` | boolean | Whether content was captured | -| `finish_reason` | text | Response finish reason | -| `tool_call_count` | integer | Number of tool calls | -| `created_at` | timestamptz | Record creation time | - -**Primary Key:** `(timestamp, trace_id, call_sequence)` - -**Indexes:** -- `idx_llm_events_ts` - timestamp DESC -- `idx_llm_events_team_ts` - team_id, timestamp DESC -- `idx_llm_events_model` - model -- `idx_llm_events_agent` - agent -- `idx_llm_events_trace` - trace_id - -### TSDB Warm Table: `llm_event_content` - -Links events to deduplicated content in cold storage. One row per content type per event. 
- -| Column | Type | Description | -|--------|------|-------------| -| `id` | bigserial | Auto-increment ID | -| `timestamp` | timestamptz | Event timestamp | -| `trace_id` | text | Trace ID | -| `call_sequence` | integer | Sequence within trace | -| `team_id` | text | Team identifier | -| `content_type` | text | Type: `system_prompt`, `messages`, `response`, `tools`, `params` | -| `content_hash` | text | SHA-256 hash (FK to cold store) | -| `byte_size` | integer | Content size in bytes | -| `message_count` | integer | Number of messages (for `messages` type) | -| `truncated_preview` | text | First 200 chars for quick preview | -| `created_at` | timestamptz | Record creation time | - -**Primary Key:** `(id)` - -**Indexes:** -- `idx_llm_event_content_event` - trace_id, call_sequence, timestamp -- `idx_llm_event_content_type` - team_id, content_type, timestamp DESC -- `idx_llm_event_content_hash` - content_hash - -### TSDB Cold Table: `llm_content_store` - -Content-addressable storage with SHA-256 hashes. Deduplicated across all events. - -| Column | Type | Description | -|--------|------|-------------| -| `content_hash` | text | SHA-256 hash of content (PK) | -| `team_id` | text | Team identifier (PK) | -| `content` | text | Full content string | -| `byte_size` | integer | Content size in bytes | -| `ref_count` | integer | Number of events referencing this content | -| `first_seen_at` | timestamptz | When content was first stored | -| `last_seen_at` | timestamptz | When content was last referenced | - -**Primary Key:** `(content_hash, team_id)` - -**Indexes:** -- `idx_llm_content_store_refs` - team_id, ref_count, last_seen_at (for cleanup) - -### MongoDB: `aden_control_content` - -Stores large content items from SDK's content reference system (separate from TSDB storage). 
- -| Field | Type | Description | -|-------|------|-------------| -| `content_id` | string | Unique content identifier | -| `team_id` | string | Team identifier | -| `content_hash` | string | SHA-256 hash | -| `content` | string | Full content | -| `byte_size` | number | Content size in bytes | -| `created_at` | string | Creation timestamp | -| `updated_at` | string | Last update timestamp | - -**Index:** `{ content_id: 1, team_id: 1 }` (unique) - -### MongoDB: `aden_control_policies` - -Stores control policies for teams. - ---- - -## Content Types - -The warm table stores references to different content types: - -| Type | Description | Example | -|------|-------------|---------| -| `system_prompt` | System/developer message | "You are a helpful assistant..." | -| `messages` | Full conversation history | JSON array of messages | -| `response` | Model's response content | "Here is my response..." | -| `tools` | Tool/function schemas | JSON array of tool definitions | -| `params` | Request parameters | `{"temperature": 0.7, "max_tokens": 1000}` | - ---- - -## Deduplication Example - -When the same system prompt is used across multiple requests: - -``` -Request 1: system_prompt = "You are a helpful assistant." - → Hash: abc123... - → Cold store: INSERT (ref_count = 1) - → Warm store: INSERT reference for event 1 - -Request 2: system_prompt = "You are a helpful assistant." (same) - → Hash: abc123... (same hash) - → Cold store: UPDATE ref_count = 2 - → Warm store: INSERT reference for event 2 - -Request 3: system_prompt = "You are a code reviewer." - → Hash: def456... (different) - → Cold store: INSERT (ref_count = 1) - → Warm store: INSERT reference for event 3 -``` - -This means the first system prompt is stored **once** but referenced by two events. 
- ---- - -## Version History - -| Version | Date | Changes | -|---------|------|---------| -| 2.0.0 | 2026-01-08 | Hot/warm/cold storage architecture; content deduplication | -| 1.0.0 | 2026-01-08 | Initial specification | diff --git a/hive/k8s/base/deployment.yaml b/hive/k8s/base/deployment.yaml deleted file mode 100644 index 6023d634..00000000 --- a/hive/k8s/base/deployment.yaml +++ /dev/null @@ -1,101 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: aden-hive - labels: - app: aden-hive - app.kubernetes.io/name: aden-hive -spec: - replicas: 1 - selector: - matchLabels: - app: aden-hive - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 25% - maxUnavailable: 25% - template: - metadata: - labels: - app: aden-hive - app.kubernetes.io/name: aden-hive - spec: - containers: - - name: aden-hive - image: aden-hive - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 3001 - protocol: TCP - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MYSQL_SSL_CA - value: /mnt/certs/mysql/server-ca.pem - - name: MYSQL_SSL_KEY - value: /mnt/certs/mysql/client-key.pem - - name: MYSQL_SSL_CERT - value: /mnt/certs/mysql/client-cert.pem - volumeMounts: - - name: mysql-ssl-certs - mountPath: /mnt/certs/mysql - readOnly: true - envFrom: - - configMapRef: - name: aden-hive-config - - secretRef: - name: aden-hive-secrets - resources: - requests: - cpu: 250m - memory: 256Mi - limits: - cpu: 1000m - memory: 512Mi - livenessProbe: - httpGet: - path: /health - port: 3001 - initialDelaySeconds: 60 - periodSeconds: 60 - timeoutSeconds: 15 - failureThreshold: 5 - readinessProbe: - httpGet: - path: /health - port: 3001 - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 10 - failureThreshold: 5 - securityContext: - allowPrivilegeEscalation: false - 
runAsNonRoot: true - runAsUser: 1001 - capabilities: - drop: - - ALL - volumes: - - name: mysql-ssl-certs - secret: - secretName: mysql-ssl-certs - defaultMode: 0444 - items: - - key: server-ca.pem - path: server-ca.pem - - key: client-key.pem - path: client-key.pem - - key: client-cert.pem - path: client-cert.pem diff --git a/hive/k8s/base/kustomization.yaml b/hive/k8s/base/kustomization.yaml deleted file mode 100644 index 5b98e944..00000000 --- a/hive/k8s/base/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - deployment.yaml - - service.yaml diff --git a/hive/k8s/base/service.yaml b/hive/k8s/base/service.yaml deleted file mode 100644 index 3df25259..00000000 --- a/hive/k8s/base/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: aden-hive - labels: - app: aden-hive -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: 3001 - protocol: TCP - name: http - selector: - app: aden-hive diff --git a/hive/k8s/overlays/production/kustomization.yaml b/hive/k8s/overlays/production/kustomization.yaml deleted file mode 100644 index b426fcc9..00000000 --- a/hive/k8s/overlays/production/kustomization.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: production - -resources: - - ../../base - - namespace.yaml - -namePrefix: prod- - -commonLabels: - environment: production - -images: - - name: aden-hive - newName: gcr.io/tool-for-analyst/aden-hive - newTag: latest - -patches: - - path: patches/deployment.yaml diff --git a/hive/k8s/overlays/production/namespace.yaml b/hive/k8s/overlays/production/namespace.yaml deleted file mode 100644 index 43df7d33..00000000 --- a/hive/k8s/overlays/production/namespace.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: production - labels: - environment: production diff --git 
a/hive/k8s/overlays/production/patches/deployment.yaml b/hive/k8s/overlays/production/patches/deployment.yaml deleted file mode 100644 index f1f2423c..00000000 --- a/hive/k8s/overlays/production/patches/deployment.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: aden-hive -spec: - replicas: 2 - template: - spec: - containers: - - name: aden-hive - env: - - name: NODE_ENV - value: production - envFrom: - - configMapRef: - name: aden-api-server-config - - secretRef: - name: aden-api-server-secrets - - secretRef: - name: database-secrets - resources: - requests: - cpu: 500m - memory: 512Mi - limits: - cpu: 1000m - memory: 1Gi diff --git a/hive/k8s/overlays/staging/kustomization.yaml b/hive/k8s/overlays/staging/kustomization.yaml deleted file mode 100644 index 4f83faf1..00000000 --- a/hive/k8s/overlays/staging/kustomization.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: staging - -resources: - - ../../base - - namespace.yaml - -namePrefix: staging- - -commonLabels: - environment: staging - -images: - - name: aden-hive - newName: gcr.io/acho-alpha-project/aden-hive - newTag: latest - -patches: - - path: patches/deployment.yaml diff --git a/hive/k8s/overlays/staging/namespace.yaml b/hive/k8s/overlays/staging/namespace.yaml deleted file mode 100644 index 11eb8621..00000000 --- a/hive/k8s/overlays/staging/namespace.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: staging - labels: - environment: staging diff --git a/hive/k8s/overlays/staging/patches/deployment.yaml b/hive/k8s/overlays/staging/patches/deployment.yaml deleted file mode 100644 index 962acbf8..00000000 --- a/hive/k8s/overlays/staging/patches/deployment.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: aden-hive -spec: - replicas: 1 - template: - spec: - containers: - - name: aden-hive - env: - - name: NODE_ENV - 
value: staging - envFrom: - - configMapRef: - name: aden-api-server-config - - secretRef: - name: aden-api-server-secrets - - secretRef: - name: database-secrets - resources: - requests: - cpu: 250m - memory: 256Mi - limits: - cpu: 500m - memory: 512Mi diff --git a/hive/package.json b/hive/package.json deleted file mode 100644 index 0bc4e12b..00000000 --- a/hive/package.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "name": "hive", - "version": "1.0.0", - "description": "Aden Hive - LLM observability and control plane backend", - "private": true, - "main": "dist/index.js", - "scripts": { - "dev": "ts-node-dev --respawn --transpile-only src/index.ts", - "build": "tsc && npm run build:copy-sql", - "build:copy-sql": "find src -name '*.sql' -exec sh -c 'mkdir -p dist/$(dirname ${1#src/}) && cp \"$1\" dist/${1#src/}' _ {} \\;", - "start": "node dist/index.js", - "test": "jest --passWithNoTests", - "test:mcp": "ts-node --transpile-only scripts/test-mcp.ts", - "test:mcp:quick": "./scripts/test-mcp-curl.sh", - "lint": "eslint src/", - "typecheck": "tsc --noEmit", - "clean": "rm -rf dist node_modules" - }, - "dependencies": { - "@acho-inc/administration": "^1.0.7", - "@modelcontextprotocol/sdk": "^1.25.2", - "@socket.io/redis-adapter": "^8.2.1", - "@socket.io/redis-emitter": "^5.1.0", - "compression": "^1.7.4", - "cors": "^2.8.5", - "dotenv": "^16.3.1", - "express": "^4.18.2", - "helmet": "^7.1.0", - "http-errors": "^2.0.0", - "ioredis": "^5.3.2", - "jsonwebtoken": "^9.0.2", - "mongodb": "^6.3.0", - "morgan": "^1.10.0", - "passport": "^0.7.0", - "passport-jwt": "^4.0.1", - "pg": "^8.11.3", - "socket.io": "^4.6.1", - "zod": "^4.3.5" - }, - "devDependencies": { - "@types/compression": "^1.7.5", - "@types/cors": "^2.8.17", - "@types/express": "^4.17.21", - "@types/jsonwebtoken": "^9.0.5", - "@types/morgan": "^1.9.9", - "@types/node": "^20.10.0", - "@types/passport": "^1.0.16", - "@types/passport-jwt": "^4.0.1", - "@types/pg": "^8.10.9", - "@typescript-eslint/eslint-plugin": 
"^6.14.0", - "@typescript-eslint/parser": "^6.14.0", - "eslint": "^8.56.0", - "jest": "^29.7.0", - "ts-node": "^10.9.2", - "ts-node-dev": "^2.0.0", - "typescript": "^5.3.0" - }, - "engines": { - "node": ">=18.0.0" - } -} diff --git a/hive/scripts/migrate-add-agent-name.ts b/hive/scripts/migrate-add-agent-name.ts deleted file mode 100644 index f9d8f945..00000000 --- a/hive/scripts/migrate-add-agent-name.ts +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Migration: Add agent_name column to llm_events table - * - * This script adds the `agent_name` column to all existing team schemas. - * Run with: npx ts-node scripts/migrate-add-agent-name.ts - * - * Environment variables required: - * - PGHOST, PGUSER, PGPASSWORD, PGDATABASE, PGPORT (or PG_CONNECTION_STRING) - */ - -import { Pool } from "pg"; - -const getPool = (): Pool => { - // Support multiple env var names - const connectionString = - process.env.TSDB_PG_URL || - process.env.PG_CONNECTION_STRING || - process.env.DATABASE_URL; - - if (connectionString) { - return new Pool({ connectionString }); - } - - return new Pool({ - host: process.env.PGHOST || "localhost", - user: process.env.PGUSER || "postgres", - password: process.env.PGPASSWORD || "postgres", - database: process.env.PGDATABASE || "aden", - port: parseInt(process.env.PGPORT || "5432", 10), - }); -}; - -async function migrate() { - const pool = getPool(); - - try { - console.log("[Migration] Starting agent_name column migration..."); - - // Find all team schemas (schemas starting with 'team_') - const schemasResult = await pool.query(` - SELECT schema_name - FROM information_schema.schemata - WHERE schema_name LIKE 'team_%' - ORDER BY schema_name - `); - - const schemas = schemasResult.rows.map((r) => r.schema_name as string); - console.log(`[Migration] Found ${schemas.length} team schemas`); - - if (schemas.length === 0) { - console.log("[Migration] No team schemas found. 
Nothing to migrate."); - return; - } - - let successCount = 0; - let skipCount = 0; - let errorCount = 0; - - for (const schema of schemas) { - try { - // Check if llm_events table exists in this schema - const tableExists = await pool.query( - ` - SELECT 1 - FROM information_schema.tables - WHERE table_schema = $1 AND table_name = 'llm_events' - `, - [schema] - ); - - if (tableExists.rows.length === 0) { - console.log(`[Migration] ${schema}: No llm_events table, skipping`); - skipCount++; - continue; - } - - // Check if agent_name column already exists - const columnExists = await pool.query( - ` - SELECT 1 - FROM information_schema.columns - WHERE table_schema = $1 - AND table_name = 'llm_events' - AND column_name = 'agent_name' - `, - [schema] - ); - - if (columnExists.rows.length > 0) { - console.log(`[Migration] ${schema}: agent_name column already exists, skipping`); - skipCount++; - continue; - } - - // Add the agent_name column after agent column - await pool.query(` - ALTER TABLE ${schema}.llm_events - ADD COLUMN agent_name text - `); - - console.log(`[Migration] ${schema}: Added agent_name column`); - successCount++; - } catch (err) { - console.error(`[Migration] ${schema}: Error - ${(err as Error).message}`); - errorCount++; - } - } - - console.log("\n[Migration] Summary:"); - console.log(` - Schemas processed: ${schemas.length}`); - console.log(` - Successfully migrated: ${successCount}`); - console.log(` - Skipped (already migrated or no table): ${skipCount}`); - console.log(` - Errors: ${errorCount}`); - - if (errorCount === 0) { - console.log("\n[Migration] Completed successfully!"); - } else { - console.log("\n[Migration] Completed with errors. 
Please review above."); - process.exit(1); - } - } catch (err) { - console.error("[Migration] Fatal error:", (err as Error).message); - process.exit(1); - } finally { - await pool.end(); - } -} - -migrate(); diff --git a/hive/scripts/test-mcp-curl.sh b/hive/scripts/test-mcp-curl.sh deleted file mode 100755 index ed4d1f28..00000000 --- a/hive/scripts/test-mcp-curl.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# -# Quick MCP Server Test using curl -# -# Usage: -# ADEN_AUTH_TOKEN=your-jwt-token ./scripts/test-mcp-curl.sh -# -# The script tests basic connectivity and endpoints. - -set -e - -API_URL="${ADEN_API_URL:-http://localhost:3000}" -TOKEN="${ADEN_AUTH_TOKEN}" - -if [ -z "$TOKEN" ]; then - echo "Error: ADEN_AUTH_TOKEN environment variable is required" - echo "Usage: ADEN_AUTH_TOKEN=your-jwt-token ./scripts/test-mcp-curl.sh" - exit 1 -fi - -echo "============================================================" -echo "MCP Server Quick Test" -echo "============================================================" -echo "API URL: $API_URL" -echo "" - -# Test 1: Health check -echo "1. Health Check (GET /mcp/health)" -curl -s "$API_URL/mcp/health" | jq . -echo "" - -# Test 2: List sessions (should be empty or show existing) -echo "2. List Sessions (GET /mcp/sessions)" -curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/mcp/sessions" | jq . -echo "" - -# Test 3: Start SSE connection and capture session ID -echo "3. Testing SSE Connection (GET /mcp)" -echo " Starting connection (will timeout after 2s)..." - -# Use timeout to limit the SSE connection -SESSION_ID=$(timeout 2s curl -s -N \ - -H "Authorization: Bearer $TOKEN" \ - -H "Accept: text/event-stream" \ - "$API_URL/mcp" 2>&1 | head -5 || true) - -echo " Response (first 5 lines):" -echo "$SESSION_ID" | head -5 -echo "" - -# Test 4: Check sessions again -echo "4. Sessions After Connection (GET /mcp/sessions)" -curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/mcp/sessions" | jq . 
-echo "" - -echo "============================================================" -echo "Quick test completed!" -echo "" -echo "For full tool testing, use the TypeScript test client:" -echo " ADEN_AUTH_TOKEN=\$TOKEN npx ts-node scripts/test-mcp.ts" -echo "============================================================" diff --git a/hive/scripts/test-mcp.ts b/hive/scripts/test-mcp.ts deleted file mode 100644 index bb0be516..00000000 --- a/hive/scripts/test-mcp.ts +++ /dev/null @@ -1,176 +0,0 @@ -/** - * MCP Server Test Script - * - * Tests the MCP server by connecting via HTTP/SSE and invoking tools. - * - * Usage: - * npx ts-node scripts/test-mcp.ts - * - * Environment: - * ADEN_API_URL - Base URL (default: http://localhost:3000) - * ADEN_AUTH_TOKEN - JWT token for authentication - */ - -import { Client } from "@modelcontextprotocol/sdk/client/index.js"; -import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js"; - -const API_URL = process.env.ADEN_API_URL || "http://localhost:3000"; -const AUTH_TOKEN = process.env.ADEN_AUTH_TOKEN; - -if (!AUTH_TOKEN) { - console.error("Error: ADEN_AUTH_TOKEN environment variable is required"); - console.error("Usage: ADEN_AUTH_TOKEN=your-jwt-token npx ts-node scripts/test-mcp.ts"); - process.exit(1); -} - -async function main() { - console.log("=".repeat(60)); - console.log("MCP Server Test"); - console.log("=".repeat(60)); - console.log(`API URL: ${API_URL}`); - console.log(""); - - // Create MCP client - const client = new Client({ - name: "mcp-test-client", - version: "1.0.0", - }); - - // Create SSE transport with auth headers - const transport = new SSEClientTransport(new URL(`${API_URL}/mcp`), { - requestInit: { - headers: { - Authorization: `Bearer ${AUTH_TOKEN}`, - }, - }, - }); - - try { - // Connect to MCP server - console.log("Connecting to MCP server..."); - await client.connect(transport); - console.log("✓ Connected successfully\n"); - - // List available tools - console.log("Listing available 
tools..."); - const tools = await client.listTools(); - console.log(`✓ Found ${tools.tools.length} tools:\n`); - - // Group tools by category - const categories: Record = { - budget: [], - agents: [], - analytics: [], - policies: [], - }; - - for (const tool of tools.tools) { - if (tool.name.includes("budget")) { - categories.budget.push(tool.name); - } else if (tool.name.includes("agent")) { - categories.agents.push(tool.name); - } else if ( - tool.name.includes("analytics") || - tool.name.includes("insights") || - tool.name.includes("metrics") || - tool.name.includes("logs") - ) { - categories.analytics.push(tool.name); - } else if (tool.name.includes("polic")) { - categories.policies.push(tool.name); - } - } - - for (const [category, toolNames] of Object.entries(categories)) { - console.log(` ${category.toUpperCase()} (${toolNames.length}):`); - for (const name of toolNames) { - console.log(` - ${name}`); - } - } - console.log(""); - - // Run test scenarios - console.log("=".repeat(60)); - console.log("Running Test Scenarios"); - console.log("=".repeat(60)); - console.log(""); - - // Test 1: Get policy - await runTest(client, "hive_policy_get", { policyId: "default" }, "Get default policy"); - - // Test 2: List agents - await runTest(client, "hive_agents_summary", {}, "Get agent fleet summary"); - - // Test 3: Get insights - await runTest(client, "hive_insights", { days: 7 }, "Get 7-day insights"); - - // Test 4: Get metrics - await runTest(client, "hive_metrics", { days: 30 }, "Get 30-day metrics"); - - // Test 5: Budget validation (dry run) - await runTest( - client, - "hive_budget_validate", - { - estimatedCost: 0.01, - context: { agent: "test-agent" }, - }, - "Validate budget (dry run)" - ); - - console.log("=".repeat(60)); - console.log("All tests completed!"); - console.log("=".repeat(60)); - } catch (error) { - console.error("Error:", error); - process.exit(1); - } finally { - await client.close(); - } -} - -async function runTest( - client: Client, - 
toolName: string, - args: Record, - description: string -) { - console.log(`Test: ${description}`); - console.log(` Tool: ${toolName}`); - console.log(` Args: ${JSON.stringify(args)}`); - - try { - const startTime = Date.now(); - const result = await client.callTool({ name: toolName, arguments: args }); - const duration = Date.now() - startTime; - - console.log(` Status: ✓ Success (${duration}ms)`); - - // Parse and display result - if (result.content && result.content.length > 0) { - const textContent = result.content.find((c) => c.type === "text"); - if (textContent && "text" in textContent) { - try { - const parsed = JSON.parse(textContent.text); - console.log(` Result: ${JSON.stringify(parsed, null, 2).split("\n").slice(0, 10).join("\n")}`); - if (JSON.stringify(parsed, null, 2).split("\n").length > 10) { - console.log(" ... (truncated)"); - } - } catch { - console.log(` Result: ${textContent.text.slice(0, 200)}...`); - } - } - } - - if (result.isError) { - console.log(` Warning: Tool returned isError=true`); - } - } catch (error) { - console.log(` Status: ✗ Failed`); - console.log(` Error: ${error instanceof Error ? error.message : error}`); - } - - console.log(""); -} - -main().catch(console.error); diff --git a/hive/src/app.ts b/hive/src/app.ts deleted file mode 100644 index 197dc493..00000000 --- a/hive/src/app.ts +++ /dev/null @@ -1,150 +0,0 @@ -/** - * Express App Configuration - * - * Sets up Express with middleware and routes. - * No global state - uses dependency injection. - * Supports both MySQL (production) and PostgreSQL (local development) for user auth. 
- */ - -import express, { Request, Response } from 'express'; -import compression from 'compression'; -import cors from 'cors'; -import passport from 'passport'; -import { Pool } from 'pg'; - -import { auth, database, models } from '@acho-inc/administration'; -import config from './config'; -import routes from './routes'; -import { errorHandler } from './middleware/error-handler.middleware'; -import { createMcpRouter } from './mcp'; - -// Initialize Express app -const app = express(); - -// ============================================================================= -// Middleware -// ============================================================================= - -app.use(compression({ - filter: (req, res) => { - // Don't compress SSE responses - compression breaks streaming - if (req.headers.accept === 'text/event-stream' || - req.path.endsWith('/stream')) { - return false; - } - return compression.filter(req, res); - } -})); -app.use(cors()); - -// Skip body parsing for MCP message route (SDK's handlePostMessage reads raw body stream) -app.use((req, res, next) => { - if (req.path === '/mcp/message') { - return next(); - } - express.json({ limit: '10mb' })(req, res, next); -}); -app.use((req, res, next) => { - if (req.path === '/mcp/message') { - return next(); - } - express.urlencoded({ extended: true })(req, res, next); -}); - -// Disable x-powered-by header -app.disable('x-powered-by'); - -// ============================================================================= -// Database Connections -// ============================================================================= - -let userDbService: ReturnType; - -if (config.userDbType === 'postgres') { - // PostgreSQL for local development - console.log('[App] Using PostgreSQL for user authentication'); - - const pgPool = new Pool({ - connectionString: config.userDb.url, - }); - - userDbService = models.createUserDbService({ - pgPool, - dbType: 'postgres', - tables: { - USER: 'users', - DEVELOPERS: 'developers', 
- }, - }); - - app.locals.pgPool = pgPool; -} else { - // MySQL for production - console.log('[App] Using MySQL for user authentication'); - - const mysqlPool = database.createMySQLPool(config.mysql); - - userDbService = models.createUserDbService({ - mysqlPool, - tables: { - USER: 'user', - DEVELOPERS: 'developers', - }, - }); - - app.locals.mysqlPool = mysqlPool; -} - -// Store user service in app.locals for access in routes -app.locals.userDbService = userDbService; - -// ============================================================================= -// Passport Authentication -// ============================================================================= - -const passportStrategy = auth.createPassportStrategy({ - findSaltByToken: userDbService.findSaltByToken, - jwtSecret: config.jwt.secret, -}); - -passport.use(passportStrategy); -app.use(passport.initialize()); - -// ============================================================================= -// Routes -// ============================================================================= - -// Health check (unauthenticated) -app.get('/health', (req: Request, res: Response) => { - res.json({ - status: 'ok', - service: 'aden-hive', - timestamp: new Date().toISOString(), - userDbType: config.userDbType, - }); -}); - -// API routes -app.use('/', routes); - -// MCP Server routes (Model Context Protocol) -// The controlEmitter is set in index.ts after WebSocket initialization -const mcpRouter = createMcpRouter(() => app.locals.controlEmitter); -app.use('/mcp', mcpRouter); - -// ============================================================================= -// Error Handling -// ============================================================================= - -// 404 handler -app.use((req: Request, res: Response) => { - res.status(404).json({ - error: 'not_found', - message: `Route ${req.method} ${req.path} not found`, - }); -}); - -// Global error handler -app.use(errorHandler); - -export default app; diff --git 
a/hive/src/config/index.ts b/hive/src/config/index.ts deleted file mode 100644 index 5568d1d7..00000000 --- a/hive/src/config/index.ts +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Configuration Module - * - * Centralizes all configuration loading and validation. - * Supports both MySQL (production) and PostgreSQL (local development) for user database. - */ - -import fs from 'fs'; - -/** - * Helper function to safely read SSL certificates - * @param {string} envKey - Environment variable containing cert path - * @param {string} fallbackPath - Fallback path if env var not set - * @returns {Buffer|null} Certificate content or null - */ -function readCertificate(envKey: string, fallbackPath: string): Buffer | null { - const certPath = process.env[envKey]; - if (certPath && fs.existsSync(certPath)) { - return fs.readFileSync(certPath); - } - if (fallbackPath && fs.existsSync(fallbackPath)) { - return fs.readFileSync(fallbackPath); - } - return null; -} - -/** - * Load MySQL SSL certificates from environment or default paths - * @returns {Object|null} SSL config object or null if certs not found - */ -function loadMySQLSSL(): { ca: Buffer; key: Buffer; cert: Buffer } | null { - const ca = readCertificate('MYSQL_SSL_CA', '/mnt/certs/mysql/server-ca.pem'); - const key = readCertificate('MYSQL_SSL_KEY', '/mnt/certs/mysql/client-key.pem'); - const cert = readCertificate('MYSQL_SSL_CERT', '/mnt/certs/mysql/client-cert.pem'); - - return ca && key && cert ? { ca, key, cert } : null; -} - -/** - * Determine which database type to use for user authentication - * Priority: USER_DB_TYPE env var > MySQL if configured > PostgreSQL fallback - */ -function getUserDbType(): 'mysql' | 'postgres' { - const explicit = process.env.USER_DB_TYPE?.toLowerCase(); - if (explicit === 'mysql' || explicit === 'postgres') { - return explicit; - } - // Default to MySQL if MySQL host is configured, otherwise use PostgreSQL - return process.env.MYSQL_HOST ? 
'mysql' : 'postgres'; -} - -const config = { - // Server - port: parseInt(process.env.PORT as string, 10) || 4000, - nodeEnv: process.env.NODE_ENV || 'development', - - // TSDB PostgreSQL (metrics storage) - tsdb: { - url: process.env.TSDB_PG_URL, - }, - - // User Database Type ('mysql' or 'postgres') - userDbType: getUserDbType(), - - // User Database (MySQL) - for production - mysql: { - host: process.env.MYSQL_HOST, - port: parseInt(process.env.MYSQL_PORT as string, 10) || 3306, - user: process.env.MYSQL_USER, - password: process.env.MYSQL_PASSWORD, - database: process.env.MYSQL_DATABASE, - ssl: loadMySQLSSL(), - }, - - // User Database (PostgreSQL) - for local development - // Defaults to same DB as TSDB if not specified - userDb: { - url: process.env.USER_DB_PG_URL || process.env.TSDB_PG_URL, - }, - - // MongoDB - mongodb: { - url: process.env.MONGODB_URL, - dbName: process.env.MONGODB_DBNAME || 'aden', - erpDbName: process.env.MONGODB_ERP_DBNAME || 'erp', - }, - - // Redis - redis: { - url: process.env.REDIS_URL, - }, - - // JWT - jwt: { - secret: process.env.JWT_SECRET || 'dev-secret-change-in-production', - expiresIn: process.env.JWT_EXPIRES_IN || '7d', - passphrase: process.env.PASSPHRASE, - }, -}; - -/** - * Validates required configuration - * @throws {Error} If required config is missing - */ -function validateConfig(): void { - const required: [string, string | undefined][] = [ - ['TSDB_PG_URL', config.tsdb.url], - ]; - - // Add database-specific requirements - if (config.userDbType === 'mysql') { - required.push( - ['MYSQL_HOST', config.mysql.host], - ['MYSQL_USER', config.mysql.user], - ['MYSQL_DATABASE', config.mysql.database], - ); - } else { - required.push(['USER_DB_PG_URL or TSDB_PG_URL', config.userDb.url]); - } - - const missing = required.filter(([, value]) => !value); - - if (missing.length > 0) { - const names = missing.map(([name]) => name).join(', '); - console.warn(`[Config] Warning: Missing environment variables: ${names}`); - } - - 
console.log(`[Config] User database type: ${config.userDbType}`); -} - -// Validate on load -validateConfig(); - -export default config; diff --git a/hive/src/controllers/control.controller.ts b/hive/src/controllers/control.controller.ts deleted file mode 100644 index 6c5b85a9..00000000 --- a/hive/src/controllers/control.controller.ts +++ /dev/null @@ -1,1885 +0,0 @@ -/** - * Aden Control Controller - * - * HTTP endpoints for Aden SDK control plane: - * - GET /v1/control/policy - Get current policy - * - POST /v1/control/events - Submit events (batch) - * - POST /v1/control/content - Store large content items - * - GET /v1/control/events - Get events (dashboard) - * - PUT /v1/control/policy - Update policy (dashboard) - */ - -import express, { Request, Response, NextFunction } from "express"; -import createError from "http-errors"; -import passport from "passport"; - -import controlService from "../services/control/control_service"; -import pricingService from "../services/tsdb/pricing_service"; -import * as tsdbService from "../services/tsdb/tsdb_service"; -import { getTeamPool, buildSchemaName } from "../services/tsdb/team_context"; - -const router = express.Router(); - -// Passport is initialized in app.js - -interface UserPayload { - id: string; - current_team_id: string; - [key: string]: unknown; -} - -interface UserContext { - user_id: string; - team_id: string; -} - -interface AuthenticatedRequest extends Request { - user?: UserPayload; -} - -interface BudgetAlert { - threshold: number; - enabled: boolean; -} - -interface BudgetNotifications { - inApp: boolean; - email: boolean; - emailRecipients: string[]; - webhook: boolean; -} - -interface BudgetRule { - id: string; - name: string; - type: string; - tags?: string[]; - limit: number; - spent: number; - limitAction: string; - degradeToModel?: string; - degradeToProvider?: string; - alerts: BudgetAlert[]; - notifications: BudgetNotifications; -} - -interface ValidationContext { - agent?: string; - 
tenant_id?: string; - customer_id?: string; - feature?: string; - tags?: string[]; -} - -declare const global: { - _ADEN_CONTROL_EMITTER?: { - emitPolicyUpdate: ( - teamId: string, - policyId: string | null, - policy: unknown - ) => void; - }; -}; - -/** - * Extract user context from JWT payload for audit/scoping - * @param req - Express request with req.user from passport - * @returns User context { user_id, team_id } - */ -function getUserContext(req: AuthenticatedRequest): UserContext | null { - if (!req.user) return null; - return { - user_id: req.user.id, - team_id: req.user.current_team_id, - }; -} - -/** - * Get policy ID from request (header or query param) - * Returns null if not specified (will use default policy) - */ -function getPolicyId(req: Request): string | null { - return ( - (req.headers["x-policy-id"] as string) || - (req.query.policy_id as string) || - null - ); -} - -/** - * Resolve policy ID - handles "default" as special value - * Returns null for "default" which tells service to use team's default policy - */ -function resolvePolicyId(policyId: string): string | null { - if (!policyId || policyId === "default") { - return null; - } - return policyId; -} - -// ============================================================================= -// SDK Endpoints (used by Aden SDK) -// ============================================================================= - -/** - * GET /v1/control/policy - * Get the current control policy for the SDK - * Optional X-Policy-ID header to specify policy (uses default if not specified) - */ -router.get( - "/policy", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const policyId = getPolicyId(req); - const policy = await controlService.getPolicy( - userContext.team_id, - policyId - ); - 
res.json(policy); - } catch (error) { - console.error("[Aden Control] Error getting policy:", error); - next(createError(500, "Failed to get policy")); - } - } -); - -/** - * POST /v1/control/events - * Submit events from the SDK (batch) - * Optional X-Policy-ID header to specify policy (uses default if not specified) - */ -router.post( - "/events", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { events } = req.body; - - if (!events || !Array.isArray(events)) { - return next(createError(400, "events array required")); - } - - const policyId = getPolicyId(req); - await controlService.processEvents( - userContext.team_id, - policyId, - events, - userContext - ); - - res.json({ success: true, processed: events.length }); - } catch (error) { - console.error("[Aden Control] Error processing events:", error); - next(createError(500, "Failed to process events")); - } - } -); - -/** - * POST /v1/control/content - * Store large content items from the SDK (Layer 0 content capture) - * Used for content that exceeds max_content_bytes threshold - * - * Body: { items: Array<{ content_id, content_hash, content, byte_size }> } - */ -router.post( - "/content", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - console.log("[Aden Control] Received content storage request"); - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { items } = req.body; - - if (!items || !Array.isArray(items)) { - return next(createError(400, "items array required")); - } - - // Validate each item has required fields - for (let i = 0; i < items.length; i++) { - const item = items[i]; - if 
(!item.content_id || typeof item.content_id !== "string") { - return next( - createError(400, `items[${i}].content_id (string) is required`) - ); - } - if (!item.content_hash || typeof item.content_hash !== "string") { - return next( - createError(400, `items[${i}].content_hash (string) is required`) - ); - } - if (item.content === undefined || item.content === null) { - return next(createError(400, `items[${i}].content is required`)); - } - if (typeof item.byte_size !== "number" || item.byte_size < 0) { - return next( - createError( - 400, - `items[${i}].byte_size must be a non-negative number` - ) - ); - } - } - - const result = await controlService.storeContent( - userContext.team_id, - items - ); - - res.json({ success: true, stored: result.stored }); - } catch (error) { - console.error("[Aden Control] Error storing content:", error); - next(createError(500, "Failed to store content")); - } - } -); - -/** - * GET /v1/control/content/:contentId - * Retrieve content by ID - */ -router.get( - "/content/:contentId", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { contentId } = req.params; - const content = await controlService.getContent( - userContext.team_id, - contentId - ); - - if (!content) { - return next(createError(404, "Content not found")); - } - - res.json(content); - } catch (error) { - console.error("[Aden Control] Error getting content:", error); - next(createError(500, "Failed to get content")); - } - } -); - -// ============================================================================= -// TSDB Content Retrieval Endpoints (warm/cold storage) -// ============================================================================= - -/** - * GET /v1/control/events/:traceId/:callSequence/content - * Get all content for a 
specific event from warm/cold storage - * Returns content references with full content from cold store - */ -router.get( - "/events/:traceId/:callSequence/content", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { traceId, callSequence } = req.params; - const callSeq = parseInt(callSequence); - - if (!traceId || isNaN(callSeq)) { - return next(createError(400, "Valid traceId and callSequence required")); - } - - // Get team pool and set schema - const pool = await getTeamPool(userContext.team_id); - const schema = buildSchemaName(userContext.team_id); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - const content = await tsdbService.getEventContent( - userContext.team_id, - traceId, - callSeq, - client - ); - - res.json({ - trace_id: traceId, - call_sequence: callSeq, - content_items: content, - count: content.length, - }); - } finally { - client.release(); - } - } catch (error) { - console.error("[Aden Control] Error getting event content:", error); - next(createError(500, "Failed to get event content")); - } - } -); - -/** - * GET /v1/control/content/hash/:contentHash - * Get content from cold storage by hash - * Useful for fetching deduplicated content directly - */ -router.get( - "/content/hash/:contentHash", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { contentHash } = req.params; - - if (!contentHash || contentHash.length !== 64) { - return next(createError(400, "Valid SHA-256 content hash 
required")); - } - - // Get team pool and set schema - const pool = await getTeamPool(userContext.team_id); - const schema = buildSchemaName(userContext.team_id); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - const content = await tsdbService.getContentByHash( - userContext.team_id, - contentHash, - client - ); - - if (!content) { - return next(createError(404, "Content not found")); - } - - res.json({ - content_hash: contentHash, - content, - byte_size: Buffer.byteLength(content, "utf8"), - }); - } finally { - client.release(); - } - } catch (error) { - console.error("[Aden Control] Error getting content by hash:", error); - next(createError(500, "Failed to get content")); - } - } -); - -// ============================================================================= -// Dashboard Endpoints (used by Aden Dashboard) -// ============================================================================= - -/** - * GET /v1/control/events - * Get events for the dashboard (queries TSDB) - * Query params: limit, offset, start_date, end_date, policy_id - */ -router.get( - "/events", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const { limit, offset, start_date, end_date, policy_id } = req.query; - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const events = await controlService.getEvents( - userContext.team_id, - (policy_id as string) || null, - { - limit: parseInt(limit as string) || 100, - offset: parseInt(offset as string) || 0, - start_date: start_date as string | undefined, - end_date: end_date as string | undefined, - } - ); - - res.json({ events, count: events.length }); - } catch (error) { - console.error("[Aden Control] Error getting events:", error); - next(createError(500, 
"Failed to get events")); - } - } -); - -/** - * PUT /v1/control/policies/:policyId - * Update the control policy (from dashboard) - * Use "default" as policyId to update the team's default policy - */ -router.put( - "/policies/:policyId", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const policyId = resolvePolicyId(req.params.policyId); - const policyUpdate = req.body; - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - // Validate policy structure - const validKeys = [ - "name", - "budgets", - "throttles", - "blocks", - "degradations", - "alerts", - ]; - const invalidKeys = Object.keys(policyUpdate).filter( - (k) => !validKeys.includes(k) - ); - if (invalidKeys.length > 0) { - return next( - createError(400, `Invalid policy keys: ${invalidKeys.join(", ")}`) - ); - } - - const policy = await controlService.updatePolicy( - userContext.team_id, - policyId, - policyUpdate, - userContext - ); - - // Notify connected SDK instances via WebSocket - if (global._ADEN_CONTROL_EMITTER) { - global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( - userContext.team_id, - policyId, - policy - ); - } - - res.json(policy); - } catch (error) { - console.error("[Aden Control] Error updating policy:", error); - next(createError(500, "Failed to update policy")); - } - } -); - -/** - * DELETE /v1/control/policies/:policyId/rules - * Clear all rules from the policy (keeps the policy itself) - * Use "default" as policyId to clear the team's default policy - */ -router.delete( - "/policies/:policyId/rules", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const policyId = resolvePolicyId(req.params.policyId); - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context 
required")); - } - - const policy = await controlService.clearPolicy( - userContext.team_id, - policyId, - userContext - ); - - // Notify connected SDK instances via WebSocket - if (global._ADEN_CONTROL_EMITTER) { - global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( - userContext.team_id, - policyId, - policy - ); - } - - res.json(policy); - } catch (error) { - console.error("[Aden Control] Error clearing policy:", error); - next(createError(500, "Failed to clear policy")); - } - } -); - -// ============================================================================= -// Rule Management Endpoints -// ============================================================================= - -/** - * Valid budget types matching frontend BudgetType enum - */ -const VALID_BUDGET_TYPES = [ - "global", - "agent", - "tenant", - "customer", - "feature", - "tag", -]; - -/** - * Valid limit actions matching frontend LimitAction enum - */ -const VALID_LIMIT_ACTIONS = ["kill", "throttle", "degrade"]; - -/** - * Validate BudgetAlert structure - */ -function isValidBudgetAlert(alert: unknown): alert is BudgetAlert { - return ( - alert !== null && - typeof alert === "object" && - typeof (alert as BudgetAlert).threshold === "number" && - (alert as BudgetAlert).threshold >= 0 && - (alert as BudgetAlert).threshold <= 100 && - typeof (alert as BudgetAlert).enabled === "boolean" - ); -} - -/** - * Validate BudgetNotifications structure - */ -function isValidBudgetNotifications( - notifications: unknown -): notifications is BudgetNotifications { - if (!notifications || typeof notifications !== "object") return false; - const n = notifications as BudgetNotifications; - if (typeof n.inApp !== "boolean") return false; - if (typeof n.email !== "boolean") return false; - if (!Array.isArray(n.emailRecipients)) return false; - if (typeof n.webhook !== "boolean") return false; - return true; -} - -/** - * POST /v1/control/policies/:policyId/budgets - * Add a budget rule - * - * Expected body (BudgetConfig): 
- * { - * id: string, - * name: string, - * type: 'global' | 'agent' | 'tenant' | 'customer' | 'feature' | 'tag', - * tagCategory?: string, - * limit: number, - * spent: number, - * limitAction: 'kill' | 'throttle' | 'degrade', - * degradeToModel?: string, // required when limitAction is 'degrade' - * degradeToProvider?: string, // required when limitAction is 'degrade' - * alerts: Array<{ threshold: number, enabled: boolean }>, - * notifications: { inApp: boolean, email: boolean, emailRecipients: string[], webhook: boolean } - * } - */ -router.post( - "/policies/:policyId/budgets", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const policyId = resolvePolicyId(req.params.policyId); - const rule = req.body as BudgetRule; - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - // Validate required fields - if (!rule.id || typeof rule.id !== "string") { - return next(createError(400, "id (string) is required")); - } - - if (!rule.name || typeof rule.name !== "string") { - return next(createError(400, "name (string) is required")); - } - - if (!rule.type || !VALID_BUDGET_TYPES.includes(rule.type)) { - return next( - createError( - 400, - `type must be one of: ${VALID_BUDGET_TYPES.join(", ")}` - ) - ); - } - - // tags array is required when type is 'tag' - if (rule.type === "tag") { - if (!Array.isArray(rule.tags) || rule.tags.length === 0) { - return next( - createError( - 400, - "tags (non-empty array) is required when type is 'tag'" - ) - ); - } - // Validate each tag is a string - for (let i = 0; i < rule.tags.length; i++) { - if (typeof rule.tags[i] !== "string") { - return next(createError(400, `tags[${i}] must be a string`)); - } - } - } - - if (typeof rule.limit !== "number" || rule.limit < 0) { - return next(createError(400, "limit must be a non-negative number")); - } - - if (typeof 
rule.spent !== "number" || rule.spent < 0) { - return next(createError(400, "spent must be a non-negative number")); - } - - if ( - !rule.limitAction || - !VALID_LIMIT_ACTIONS.includes(rule.limitAction) - ) { - return next( - createError( - 400, - `limitAction must be one of: ${VALID_LIMIT_ACTIONS.join(", ")}` - ) - ); - } - - // degradeToModel and degradeToProvider are required when limitAction is 'degrade' - if (rule.limitAction === "degrade") { - if (!rule.degradeToModel || typeof rule.degradeToModel !== "string") { - return next( - createError( - 400, - "degradeToModel is required when limitAction is 'degrade'" - ) - ); - } - if ( - !rule.degradeToProvider || - typeof rule.degradeToProvider !== "string" - ) { - return next( - createError( - 400, - "degradeToProvider is required when limitAction is 'degrade'" - ) - ); - } - - // Validate model belongs to the specified provider - const targets = await pricingService.getDegradationTargets(); - const providerModels = targets.models[rule.degradeToProvider]; - - if (!providerModels) { - return next( - createError(400, `Unknown provider: ${rule.degradeToProvider}`) - ); - } - - const validModelNames = providerModels.map( - (m: { model: string }) => m.model - ); - if (!validModelNames.includes(rule.degradeToModel)) { - return next( - createError( - 400, - `degradeToModel "${rule.degradeToModel}" does not belong to provider "${rule.degradeToProvider}"` - ) - ); - } - } - - if (!Array.isArray(rule.alerts)) { - return next(createError(400, "alerts must be an array")); - } - for (let i = 0; i < rule.alerts.length; i++) { - if (!isValidBudgetAlert(rule.alerts[i])) { - return next( - createError( - 400, - `alerts[${i}] must have threshold (0-100) and enabled (boolean)` - ) - ); - } - } - - if (!isValidBudgetNotifications(rule.notifications)) { - return next( - createError( - 400, - "notifications must have inApp, email, emailRecipients[], and webhook fields" - ) - ); - } - - const policy = await controlService.addBudgetRule( 
- userContext.team_id, - policyId, - rule, - userContext - ); - - if (global._ADEN_CONTROL_EMITTER) { - global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( - userContext.team_id, - policyId, - policy - ); - } - - res.json(policy); - } catch (error) { - console.error("[Aden Control] Error adding budget rule:", error); - next(createError(500, "Failed to add budget rule")); - } - } -); - -/** - * POST /v1/control/policies/:policyId/throttles - * Add a throttle rule - */ -router.post( - "/policies/:policyId/throttles", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const policyId = resolvePolicyId(req.params.policyId); - const rule = req.body; - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - if (!rule.requests_per_minute && !rule.delay_ms) { - return next( - createError(400, "requests_per_minute or delay_ms required") - ); - } - - const policy = await controlService.addThrottleRule( - userContext.team_id, - policyId, - rule, - userContext - ); - - if (global._ADEN_CONTROL_EMITTER) { - global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( - userContext.team_id, - policyId, - policy - ); - } - - res.json(policy); - } catch (error) { - console.error("[Aden Control] Error adding throttle rule:", error); - next(createError(500, "Failed to add throttle rule")); - } - } -); - -/** - * POST /v1/control/policies/:policyId/blocks - * Add a block rule - */ -router.post( - "/policies/:policyId/blocks", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const policyId = resolvePolicyId(req.params.policyId); - const rule = req.body; - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - if (!rule.reason) { - return next(createError(400, 
"reason required")); - } - - const policy = await controlService.addBlockRule( - userContext.team_id, - policyId, - rule, - userContext - ); - - if (global._ADEN_CONTROL_EMITTER) { - global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( - userContext.team_id, - policyId, - policy - ); - } - - res.json(policy); - } catch (error) { - console.error("[Aden Control] Error adding block rule:", error); - next(createError(500, "Failed to add block rule")); - } - } -); - -/** - * POST /v1/control/policies/:policyId/degradations - * Add a degradation rule (within same provider only - no cross-vendor degradation) - * - * Body: - * { - * provider: string, // e.g., "openai", "anthropic" - * from_model: string, // Model to degrade from, e.g., "gpt-4o" - * to_model: string, // Model to degrade to, e.g., "gpt-4o-mini" - * trigger: string // When to trigger: "budget_exceeded", "rate_limit", etc. - * } - */ -router.post( - "/policies/:policyId/degradations", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const policyId = resolvePolicyId(req.params.policyId); - const rule = req.body; - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - if (!rule.provider || typeof rule.provider !== "string") { - return next(createError(400, "provider (string) is required")); - } - - if (!rule.from_model || typeof rule.from_model !== "string") { - return next(createError(400, "from_model (string) is required")); - } - - if (!rule.to_model || typeof rule.to_model !== "string") { - return next(createError(400, "to_model (string) is required")); - } - - if (!rule.trigger || typeof rule.trigger !== "string") { - return next(createError(400, "trigger (string) is required")); - } - - // Validate models belong to the specified provider - const targets = await pricingService.getDegradationTargets(); - const providerModels = 
targets.models[rule.provider]; - - if (!providerModels) { - return next(createError(400, `Unknown provider: ${rule.provider}`)); - } - - const validModelNames = providerModels.map( - (m: { model: string }) => m.model - ); - - if (!validModelNames.includes(rule.from_model)) { - return next( - createError( - 400, - `from_model "${rule.from_model}" does not belong to provider "${rule.provider}"` - ) - ); - } - - if (!validModelNames.includes(rule.to_model)) { - return next( - createError( - 400, - `to_model "${rule.to_model}" does not belong to provider "${rule.provider}"` - ) - ); - } - - const policy = await controlService.addDegradeRule( - userContext.team_id, - policyId, - rule, - userContext - ); - - if (global._ADEN_CONTROL_EMITTER) { - global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( - userContext.team_id, - policyId, - policy - ); - } - - res.json(policy); - } catch (error) { - console.error("[Aden Control] Error adding degradation rule:", error); - next(createError(500, "Failed to add degradation rule")); - } - } -); - -/** - * POST /v1/control/policies/:policyId/alerts - * Add an alert rule - */ -router.post( - "/policies/:policyId/alerts", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const policyId = resolvePolicyId(req.params.policyId); - const rule = req.body; - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - if (!rule.trigger || !rule.level || !rule.message) { - return next(createError(400, "trigger, level, and message required")); - } - - // Validate level - if (!["info", "warning", "critical"].includes(rule.level)) { - return next( - createError(400, "level must be one of: info, warning, critical") - ); - } - - // Validate trigger - if ( - !["budget_threshold", "model_usage", "always"].includes(rule.trigger) - ) { - return next( - createError( - 400, - "trigger must be one 
of: budget_threshold, model_usage, always" - ) - ); - } - - const policy = await controlService.addAlertRule( - userContext.team_id, - policyId, - rule, - userContext - ); - - if (global._ADEN_CONTROL_EMITTER) { - global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( - userContext.team_id, - policyId, - policy - ); - } - - res.json(policy); - } catch (error) { - console.error("[Aden Control] Error adding alert rule:", error); - next(createError(500, "Failed to add alert rule")); - } - } -); - -// ============================================================================= -// Budget Management Endpoints -// ============================================================================= - -/** - * GET /v1/control/budget/:budgetId - * Get budget status for a budget ID - */ -router.get( - "/budget/:budgetId", - passport.authenticate("jwt", { session: false }), - async (req: Request, res: Response, next: NextFunction) => { - try { - const { budgetId } = req.params; - const status = await controlService.getBudgetStatus(budgetId); - res.json(status); - } catch (error) { - console.error("[Aden Control] Error getting budget status:", error); - next(createError(500, "Failed to get budget status")); - } - } -); - -/** - * POST /v1/control/budget/:budgetId/reset - * Reset budget for a budget ID - */ -router.post( - "/budget/:budgetId/reset", - passport.authenticate("jwt", { session: false }), - async (req: Request, res: Response, next: NextFunction) => { - try { - const { budgetId } = req.params; - await controlService.resetBudget(budgetId); - res.json({ success: true, id: budgetId }); - } catch (error) { - console.error("[Aden Control] Error resetting budget:", error); - next(createError(500, "Failed to reset budget")); - } - } -); - -// ============================================================================= -// Team Policies & Metrics Endpoints -// ============================================================================= - -/** - * GET /v1/control/policies - * Get all 
policies for the current team (dashboard) - */ -router.get( - "/policies", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { limit, offset } = req.query; - const policies = await controlService.getPoliciesByTeam( - userContext.team_id, - { - limit: parseInt(limit as string) || 100, - offset: parseInt(offset as string) || 0, - } - ); - - res.json({ policies, count: policies.length }); - } catch (error) { - console.error("[Aden Control] Error getting team policies:", error); - next(createError(500, "Failed to get team policies")); - } - } -); - -/** - * POST /v1/control/policies - * Create a new policy for the team - */ -router.post( - "/policies", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { name } = req.body; - if (!name || typeof name !== "string") { - return next(createError(400, "name (string) is required")); - } - - // Create a new policy with the given name - const policy = await controlService.updatePolicy( - userContext.team_id, - null, // Will generate a new policy ID - { name }, - userContext - ); - - res.status(201).json(policy); - } catch (error) { - console.error("[Aden Control] Error creating policy:", error); - next(createError(500, "Failed to create policy")); - } - } -); - -/** - * GET /v1/control/policies/:policyId - * Get a specific policy by ID - * Use "default" as policyId to get the team's default policy - */ -router.get( - "/policies/:policyId", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { 
- const policyId = resolvePolicyId(req.params.policyId); - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const policy = await controlService.getPolicy( - userContext.team_id, - policyId, - userContext - ); - - if (!policy) { - return next(createError(404, "Policy not found")); - } - - res.json(policy); - } catch (error) { - console.error("[Aden Control] Error getting policy:", error); - next(createError(500, "Failed to get policy")); - } - } -); - -/** - * DELETE /v1/control/policies/:policyId - * Delete a policy - * Note: "default" is NOT allowed here - must specify actual policy ID - */ -router.delete( - "/policies/:policyId", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const { policyId } = req.params; - - // Don't allow deleting "default" - must specify actual policy ID - if (policyId === "default") { - return next( - createError(400, "Cannot delete 'default' - specify actual policy ID") - ); - } - - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - await controlService.deletePolicy( - userContext.team_id, - policyId, - userContext - ); - - res.json({ success: true, id: policyId }); - } catch (error) { - console.error("[Aden Control] Error deleting policy:", error); - next(createError(500, "Failed to delete policy")); - } - } -); - -/** - * GET /v1/control/metrics - * Get metrics summary for the current team (dashboard analytics) - */ -router.get( - "/metrics", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { start_date, end_date } = req.query; - const 
summary = await controlService.getMetricsSummary( - userContext.team_id, - { - start_date: start_date as string | undefined, - end_date: end_date as string | undefined, - } - ); - - res.json(summary); - } catch (error) { - console.error("[Aden Control] Error getting metrics summary:", error); - next(createError(500, "Failed to get metrics summary")); - } - } -); - -// ============================================================================= -// Usage & Rate Analytics Endpoints -// ============================================================================= - -/** - * GET /v1/control/metrics/usage - * Get usage breakdown (daily, by model, by feature) - */ -router.get( - "/metrics/usage", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { days, context_id } = req.query; - const breakdown = await controlService.getUsageBreakdown( - userContext.team_id, - { - days: days ? parseInt(days as string) : 7, - context_id: context_id as string | undefined, - } - ); - - res.json(breakdown); - } catch (error) { - console.error("[Aden Control] Error getting usage breakdown:", error); - next(createError(500, "Failed to get usage breakdown")); - } - } -); - -/** - * GET /v1/control/metrics/rates - * Get rate metrics (peak, p95, avg, min, burst) - */ -router.get( - "/metrics/rates", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { days, context_id } = req.query; - const rates = await controlService.getRateMetrics(userContext.team_id, { - days: days ? 
parseInt(days as string) : 30, - context_id: context_id as string | undefined, - }); - - res.json(rates); - } catch (error) { - console.error("[Aden Control] Error getting rate metrics:", error); - next(createError(500, "Failed to get rate metrics")); - } - } -); - -/** - * GET /v1/control/policies/:policyId/budgets/:budgetId - * Get detailed budget info including usage stats - */ -router.get( - "/policies/:policyId/budgets/:budgetId", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const policyId = resolvePolicyId(req.params.policyId); - const { budgetId } = req.params; - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const budget = await controlService.getBudgetDetails( - userContext.team_id, - policyId, - budgetId - ); - - if (!budget) { - return next(createError(404, "Budget not found")); - } - - res.json(budget); - } catch (error) { - console.error("[Aden Control] Error getting budget details:", error); - next(createError(500, "Failed to get budget details")); - } - } -); - -/** - * GET /v1/control/policies/:policyId/budgets/:budgetId/usage - * Get usage breakdown for a specific budget - * Returns: { daily, by_model, by_feature } - */ -router.get( - "/policies/:policyId/budgets/:budgetId/usage", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const policyId = resolvePolicyId(req.params.policyId); - const { budgetId } = req.params; - const { days } = req.query; - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - // Get budget details for filtering - const budget = await controlService.getBudgetDetails( - userContext.team_id, - policyId, - budgetId - ); - - if (!budget) { - return next(createError(404, 
"Budget not found")); - } - - // Pass the budget object for type-aware filtering - const breakdown = await controlService.getUsageBreakdown( - userContext.team_id, - { - days: days ? parseInt(days as string) : 7, - budget, - } - ); - - res.json(breakdown); - } catch (error) { - console.error("[Aden Control] Error getting budget usage:", error); - next(createError(500, "Failed to get budget usage")); - } - } -); - -/** - * GET /v1/control/policies/:policyId/budgets/:budgetId/rates - * Get rate metrics for a specific budget - * Returns: { peak_rate, p95_rate, avg_rate, min_rate, max_burst } - */ -router.get( - "/policies/:policyId/budgets/:budgetId/rates", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const policyId = resolvePolicyId(req.params.policyId); - const { budgetId } = req.params; - const { days } = req.query; - const userContext = getUserContext(req); - - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - // Get budget details for filtering - const budget = await controlService.getBudgetDetails( - userContext.team_id, - policyId, - budgetId - ); - - if (!budget) { - return next(createError(404, "Budget not found")); - } - - // Pass the budget object for type-aware filtering - const rates = await controlService.getRateMetrics(userContext.team_id, { - days: days ? parseInt(days as string) : 30, - budget, - }); - - res.json(rates); - } catch (error) { - console.error("[Aden Control] Error getting budget rates:", error); - next(createError(500, "Failed to get budget rates")); - } - } -); - -// ============================================================================= -// Budget Validation Endpoint (for Hybrid Enforcement) -// ============================================================================= - -/** - * POST /v1/control/budget/validate - * Server-side budget validation for hybrid enforcement. 
- * - * Called by SDK when local budget usage approaches threshold (e.g., 80%). - * Returns authoritative spend from TSDB and enforcement decision. - */ -router.post( - "/budget/validate", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const userContext = getUserContext(req); - if (!userContext?.team_id) { - return next(createError(400, "Team context required")); - } - - const { budget_id, estimated_cost, context, local_spend } = req.body as { - budget_id?: string; - estimated_cost: number; - context?: ValidationContext; - local_spend?: number; - }; - const policyId = getPolicyId(req); - - if (typeof estimated_cost !== "number" || estimated_cost < 0) { - return next( - createError(400, "estimated_cost must be a non-negative number") - ); - } - - // Get the policy with authoritative budget data from TSDB - const policy = await controlService.getPolicy( - userContext.team_id, - policyId, - userContext - ); - - if (!policy) { - return next(createError(404, "Policy not found")); - } - - // MULTI-BUDGET MODE: Use context to find all matching budgets - if (context && typeof context === "object") { - const matchingBudgets = controlService.findMatchingBudgetsForContext( - policy.budgets || [], - context - ); - - if (matchingBudgets.length === 0) { - // No budgets match this context - allow by default - return res.json({ - allowed: true, - action: "allow", - reason: "No budgets match the provided context", - authoritative_spend: 0, - budget_limit: 0, - usage_percent: 0, - projected_percent: 0, - policy_version: policy.version, - budgets_checked: [], - }); - } - - // Validate all matching budgets and get most restrictive result - const result = controlService.validateMultipleBudgets( - matchingBudgets, - estimated_cost, - local_spend - ); - - // Log the validation for audit - console.log( - `[Aden Control] Multi-budget validation: ` + - `checked ${result.budgets_checked.length} 
budgets, ` + - `action: ${result.action}` + - (result.restricting_budget_name - ? `, restricting: ${result.restricting_budget_name}` - : "") - ); - - return res.json({ - ...result, - policy_version: policy.version, - }); - } - - // SINGLE-BUDGET MODE (backward compatible): Use budget_id - if (!budget_id) { - return next(createError(400, "budget_id or context is required")); - } - - // Find the budget by ID - const budget = policy.budgets?.find( - (b: { id: string }) => b.id === budget_id - ); - if (!budget) { - // Budget not found - allow by default (budget may have been removed) - return res.json({ - allowed: true, - action: "allow", - reason: "Budget not found in policy", - authoritative_spend: 0, - budget_limit: 0, - usage_percent: 0, - projected_percent: 0, - policy_version: policy.version, - budgets_checked: [], - }); - } - - // Use the multi-budget validator for consistency (with single budget) - const result = controlService.validateMultipleBudgets( - [budget], - estimated_cost, - local_spend - ); - - // Log the validation for audit - console.log( - `[Aden Control] Budget validation: ${budget_id} - ` + - `spend: $${result.authoritative_spend.toFixed(4)}, ` + - `limit: $${budget.limit}, ` + - `action: ${result.action}` - ); - - res.json({ - ...result, - policy_version: policy.version, - // Keep backward-compatible fields - updated_spend: result.authoritative_spend, - }); - } catch (error) { - console.error("[Aden Control] Error validating budget:", error); - next(createError(500, "Failed to validate budget")); - } - } -); - -// ============================================================================= -// Model Options for Degradation -// ============================================================================= - -/** - * GET /v1/control/degradation-targets - * Get available target models for budget degradation mode, grouped by provider - * Models are sorted by cost (cheapest first) - * - * Query params: - * provider (optional) - Filter to specific 
provider (e.g., "openai", "anthropic") - * - * Response (no filter): - * { providers: [...], models: { openai: [...], anthropic: [...] } } - * - * Response (with provider filter): - * { provider: "openai", models: [...] } - */ -router.get( - "/degradation-targets", - passport.authenticate("jwt", { session: false }), - async (req: Request, res: Response, next: NextFunction) => { - try { - const { provider } = req.query; - const targets = await pricingService.getDegradationTargets(); - - // If provider specified, filter to that provider only - if (provider) { - const providerModels = targets.models[provider as string]; - if (!providerModels) { - return next(createError(400, `Unknown provider: ${provider}`)); - } - return res.json({ - provider, - models: providerModels, - }); - } - - res.json(targets); - } catch (error) { - console.error("[Aden Control] Error getting degradation targets:", error); - next(createError(500, "Failed to get degradation targets")); - } - } -); - -// ============================================================================= -// SSE - Real-time Agent Status Stream -// ============================================================================= - -interface ControlEmitter { - getConnectedCount: (teamId: string | number) => number; - getConnectedInstances: (teamId: string | number) => Array<{ - instance_id: string; - policy_id: string | null; - agent_name: string | null; - connected_at: string; - last_heartbeat: string; - connection_type: "websocket" | "http"; - status?: string; - }>; - getTotalConnectedCount: () => number; -} - -/** - * GET /v1/control/agent-status/stream - * SSE endpoint for real-time agent connection status - * - * Streams updates every 2 seconds with: - * - active: boolean indicating if any agents are connected - * - count: number of connected agents - * - instances: array of connected agent details - */ -router.get( - "/agent-status/stream", - passport.authenticate("jwt", { session: false }), - (req: 
AuthenticatedRequest, res: Response) => { - const teamId = req.user?.current_team_id; - - if (!teamId) { - res.status(401).json({ error: "Team ID required" }); - return; - } - - // Set SSE headers - res.setHeader("Content-Type", "text/event-stream"); - res.setHeader("Cache-Control", "no-cache"); - res.setHeader("Connection", "keep-alive"); - res.setHeader("X-Accel-Buffering", "no"); // Disable nginx buffering - res.flushHeaders(); - - const controlEmitter = req.app.locals.controlEmitter as - | ControlEmitter - | undefined; - - // Send initial status immediately - const sendStatus = () => { - if (!controlEmitter) { - const data = { - active: false, - count: 0, - instances: [], - timestamp: new Date().toISOString(), - error: "WebSocket not initialized", - }; - res.write(`data: ${JSON.stringify(data)}\n\n`); - return; - } - - const count = controlEmitter.getConnectedCount(teamId); - const instances = controlEmitter.getConnectedInstances(teamId); - - const data = { - active: count > 0, - count, - instances, - timestamp: new Date().toISOString(), - }; - - res.write(`data: ${JSON.stringify(data)}\n\n`); - }; - - // Send immediately - sendStatus(); - - // Send updates every 2 seconds - const intervalId = setInterval(sendStatus, 2000); - - // Cleanup on client disconnect - req.on("close", () => { - clearInterval(intervalId); - }); - } -); - -/** - * GET /v1/control/agent-status - * Get current agent connection status (non-streaming) - */ -router.get( - "/agent-status", - passport.authenticate("jwt", { session: false }), - (req: AuthenticatedRequest, res: Response) => { - const teamId = req.user?.current_team_id; - - if (!teamId) { - res.status(401).json({ error: "Team ID required" }); - return; - } - - const controlEmitter = req.app.locals.controlEmitter as - | ControlEmitter - | undefined; - - if (!controlEmitter) { - res.json({ - active: false, - count: 0, - instances: [], - timestamp: new Date().toISOString(), - error: "WebSocket not initialized", - }); - return; - } - 
- const count = controlEmitter.getConnectedCount(teamId); - const instances = controlEmitter.getConnectedInstances(teamId); - - res.json({ - active: count > 0, - count, - instances, - timestamp: new Date().toISOString(), - }); - } -); - -// ============================================================================= -// Agent Discovery - Historical agents with availability -// ============================================================================= - -/** - * GET /v1/control/agents - * Get all agents from past events with their current availability status - * - * Query params: - * - since: ISO date string to filter events from (optional) - * - limit: Max number of agents to return (default: 100) - * - * Returns agents sorted by last_seen descending with: - * - agent: unique agent identifier - * - agent_name: human-readable name (if available) - * - status: "connected" | "disconnected" - * - connection_type: "websocket" | "http" | null (null if disconnected) - * - first_seen: when agent first appeared in events - * - last_seen: when agent last appeared in events - * - total_requests: total LLM requests made by this agent - * - total_cost: total cost incurred by this agent - */ -router.get( - "/agents", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const teamId = req.user?.current_team_id; - if (!teamId) { - throw createError(401, "Team ID required"); - } - - // Parse query params - const since = req.query.since ? new Date(req.query.since as string) : undefined; - const limit = req.query.limit ? 
parseInt(req.query.limit as string, 10) : 100; - - // Get team-specific pool/schema - const teamPool = await getTeamPool(teamId); - const schemaName = buildSchemaName(teamId); - const client = await teamPool.connect(); - - let historicalAgents; - try { - await client.query(`SET search_path TO ${schemaName}`); - await tsdbService.ensureSchema(client); - - // Get all distinct agents from TSDB - historicalAgents = await tsdbService.getDistinctAgents( - teamId, - { since, limit }, - client - ); - } finally { - client.release(); - } - - // Get currently connected instances - const controlEmitter = req.app.locals.controlEmitter as ControlEmitter | undefined; - const connectedInstances = controlEmitter?.getConnectedInstances(teamId) || []; - - // Build a map of connected agents (by instance_id and agent_name) - const connectedByInstanceId = new Map(); - const connectedByAgentName = new Map(); - - for (const instance of connectedInstances) { - connectedByInstanceId.set(instance.instance_id, instance); - if (instance.agent_name) { - connectedByAgentName.set(instance.agent_name, instance); - } - } - - // Merge historical agents with connection status - const agents = historicalAgents.map((agent) => { - // Try to match by agent ID (instance_id) or agent_name - const connectedInstance = - connectedByInstanceId.get(agent.agent) || - connectedByAgentName.get(agent.agent) || - (agent.agent_name ? connectedByAgentName.get(agent.agent_name) : null); - - return { - agent: agent.agent, - agent_name: agent.agent_name || connectedInstance?.agent_name || null, - status: connectedInstance ? 
"connected" : "disconnected", - connection_type: connectedInstance?.connection_type || null, - instance_id: connectedInstance?.instance_id || null, - first_seen: agent.first_seen.toISOString(), - last_seen: agent.last_seen.toISOString(), - total_requests: agent.total_requests, - total_cost: agent.total_cost, - }; - }); - - // Also add any connected agents that don't have historical events yet - const historicalAgentIds = new Set(historicalAgents.map((a) => a.agent)); - const historicalAgentNames = new Set( - historicalAgents.map((a) => a.agent_name).filter(Boolean) - ); - - for (const instance of connectedInstances) { - const isInHistory = - historicalAgentIds.has(instance.instance_id) || - (instance.agent_name && historicalAgentNames.has(instance.agent_name)); - - if (!isInHistory) { - agents.push({ - agent: instance.instance_id, - agent_name: instance.agent_name, - status: "connected", - connection_type: instance.connection_type, - instance_id: instance.instance_id, - first_seen: instance.connected_at, - last_seen: instance.last_heartbeat, - total_requests: 0, - total_cost: 0, - }); - } - } - - res.json({ - agents, - total: agents.length, - connected_count: agents.filter((a) => a.status === "connected").length, - timestamp: new Date().toISOString(), - }); - } catch (error) { - next(error); - } - } -); - -// ============================================================================= -// Health Check -// ============================================================================= - -/** - * GET /v1/control/health - * Health check endpoint - */ -router.get("/health", (_req: Request, res: Response) => { - res.json({ - status: "healthy", - timestamp: new Date().toISOString(), - websocket: !!global._ADEN_CONTROL_EMITTER, - }); -}); - -export default router; diff --git a/hive/src/controllers/iam.controller.ts b/hive/src/controllers/iam.controller.ts deleted file mode 100644 index 8a734bac..00000000 --- a/hive/src/controllers/iam.controller.ts +++ /dev/null @@ -1,154 
+0,0 @@ -/** - * IAM Controller - * - * Handles Identity and Access Management endpoints. - */ - -import { Router, Request, Response } from 'express'; - -const router = Router(); - -/** - * Extract token from Authorization header - * Supports: "jwt ", "Bearer ", or raw "" - */ -function extractToken(authHeader: string): string { - if (authHeader.startsWith('jwt ')) { - return authHeader.slice(4); - } - if (authHeader.startsWith('Bearer ')) { - return authHeader.slice(7); - } - return authHeader; -} - -/** - * GET /iam/get-current-team - * - * Get the current team/organization for the authenticated user. - */ -router.get('/get-current-team', async (req: Request, res: Response) => { - try { - const authHeader = req.headers.authorization; - if (!authHeader) { - return res.status(401).json({ - success: false, - msg: 'No token provided', - }); - } - - const userDbService = req.app.locals.userDbService; - const user = await userDbService.findByToken(extractToken(authHeader)); - - if (!user) { - return res.status(401).json({ - success: false, - msg: 'Invalid token', - }); - } - - const pgPool = req.app.locals.pgPool; - if (!pgPool) { - // Return default team if no database - return res.json({ - orgId: user.current_team_id || 1, - orgName: 'Default Organization', - teamId: user.current_team_id || 1, - teamName: 'Default Team', - }); - } - - // Get team info from database - const result = await pgPool.query( - `SELECT id, name, slug FROM teams WHERE id = $1`, - [user.current_team_id || 1] - ); - - const team = result.rows[0]; - - if (!team) { - // Return default if team not found - return res.json({ - orgId: user.current_team_id || 1, - orgName: 'Default Organization', - teamId: user.current_team_id || 1, - teamName: 'Default Team', - }); - } - - res.json({ - orgId: team.id, - orgName: team.name, - teamId: team.id, - teamName: team.name, - }); - } catch (err) { - console.error('[IAMController] /get-current-team error:', err instanceof Error ? 
err.message : err); - res.status(500).json({ - success: false, - msg: 'Failed to get current team', - }); - } -}); - -/** - * GET /iam/team/get-team-role-by-id/:teamId - * - * Get the user's role in a specific team. - */ -router.get('/team/get-team-role-by-id/:teamId', async (req: Request, res: Response) => { - try { - const authHeader = req.headers.authorization; - if (!authHeader) { - return res.status(401).json({ - success: false, - msg: 'No token provided', - }); - } - - const userDbService = req.app.locals.userDbService; - const user = await userDbService.findByToken(extractToken(authHeader)); - - if (!user) { - return res.status(401).json({ - success: false, - msg: 'Invalid token', - }); - } - - const teamId = parseInt(req.params.teamId, 10); - - const pgPool = req.app.locals.pgPool; - if (!pgPool) { - // Return default role if no database - return res.json({ roleId: 1 }); - } - - // Get user's role in this team - const result = await pgPool.query( - `SELECT role FROM team_members WHERE user_id = $1 AND team_id = $2`, - [user.id, teamId] - ); - - const membership = result.rows[0]; - - // Map role name to roleId (admin=1, member=2, viewer=3) - const roleMap: Record = { - admin: 1, - member: 2, - viewer: 3, - }; - - const roleId = membership ? (roleMap[membership.role] || 2) : 2; - - res.json({ roleId }); - } catch (err) { - console.error('[IAMController] /team/get-team-role-by-id error:', err instanceof Error ? 
err.message : err); - res.status(500).json({ - success: false, - msg: 'Failed to get team role', - }); - } -}); - -export default router; diff --git a/hive/src/controllers/quickstart.controller.ts b/hive/src/controllers/quickstart.controller.ts deleted file mode 100644 index 1e66d58c..00000000 --- a/hive/src/controllers/quickstart.controller.ts +++ /dev/null @@ -1,192 +0,0 @@ -/** - * Quickstart Documentation API Controller - * Generates SDK quickstart documentation based on agent framework - */ -import express, { Request, Response, NextFunction } from "express"; -import passport from "passport"; -// Passport is initialized in app.js - -import * as quickstartService from "../services/quickstart/quickstart_service"; - -const router = express.Router(); - -interface AuthenticatedUser { - id: number; - current_team_id: number; - [key: string]: unknown; -} - -interface AuthenticatedRequest extends Request { - user?: AuthenticatedUser; -} - -/** - * @swagger - * /quickstart/options: - * get: - * summary: Get available options for quickstart generation - * tags: - * - Quickstart - * responses: - * 200: - * description: Available options for quickstart document generation - */ -router.get("/options", async (req: Request, res: Response, next: NextFunction) => { - try { - const options = quickstartService.getQuickstartOptions(); - res.send(options); - } catch (error) { - next(error); - } -}); - -/** - * @swagger - * /quickstart/generate: - * post: - * summary: Generate quickstart documentation with user's system token - * tags: - * - Quickstart - * security: - * - jwtAuth: [] - * requestBody: - * required: true - * content: - * application/json: - * schema: - * type: object - * required: - * - agentFramework - * properties: - * agentFramework: - * type: string - * enum: [generic, langgraph, livekit] - * description: The agent framework to use - * responses: - * 200: - * description: Generated quickstart documentation - * 400: - * description: Invalid parameters - * 401: - * 
description: Unauthorized - JWT token required - */ -router.post( - "/generate", - passport.authenticate("jwt", { session: false }), - async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { - try { - const { user, body } = req; - const { agentFramework, llmVendor, sdkLanguage } = body; - - // Get the user's latest non-system API key - const userDbService = req.app.locals.userDbService; - const tokenObj = user ? await userDbService.getLatestUserDevToken(user) : null; - - let apiKey: string; - let tokenName: string; - if (tokenObj) { - apiKey = tokenObj.token; - tokenName = tokenObj.label; - } else { - // No user API key - use placeholder - apiKey = "eyJ-xxx"; - tokenName = "No Key"; - } - - // Generate the quickstart document - const markdown = quickstartService.generateQuickstart({ - agentFramework, - llmVendor, - sdkLanguage, - apiKey, - }); - - res.send({ - markdown, - metadata: { - agentFramework, - llmVendor, - sdkLanguage, - tokenName, - generatedAt: new Date().toISOString(), - }, - }); - } catch (error) { - if ((error as Error).message.includes("Invalid")) { - return res.status(400).send({ error: (error as Error).message }); - } - next(error); - } - } -); - -/** - * @swagger - * /quickstart/generate-with-key: - * post: - * summary: Generate quickstart documentation with a provided API key - * description: Generate documentation without requiring authentication - API key is provided directly - * tags: - * - Quickstart - * requestBody: - * required: true - * content: - * application/json: - * schema: - * type: object - * required: - * - agentFramework - * - apiKey - * properties: - * agentFramework: - * type: string - * enum: [generic, livekit] - * apiKey: - * type: string - * description: The Aden API key to embed in the documentation - * responses: - * 200: - * description: Generated quickstart documentation - * 400: - * description: Invalid parameters - */ -router.post("/generate-with-key", async (req: Request, res: Response, next: 
NextFunction) => { - try { - const { agentFramework, llmVendor, sdkLanguage, apiKey } = req.body; - - if (!apiKey) { - return res.status(400).send({ - error: "API key is required", - message: "Please provide an apiKey in the request body", - }); - } - - // Generate the quickstart document - const markdown = quickstartService.generateQuickstart({ - agentFramework, - llmVendor, - sdkLanguage, - apiKey, - }); - - res.send({ - markdown, - metadata: { - agentFramework, - llmVendor, - sdkLanguage, - generatedAt: new Date().toISOString(), - }, - }); - } catch (error) { - if ( - (error as Error).message.includes("Invalid") || - (error as Error).message.includes("required") - ) { - return res.status(400).send({ error: (error as Error).message }); - } - next(error); - } -}); - -export default router; diff --git a/hive/src/controllers/tsdb.controller.ts b/hive/src/controllers/tsdb.controller.ts deleted file mode 100644 index d9944622..00000000 --- a/hive/src/controllers/tsdb.controller.ts +++ /dev/null @@ -1,1205 +0,0 @@ -/** - * TSDB ingestion and preview endpoints (protected) - */ -import express, { Request, Response } from "express"; -import passport from "passport"; -import type { PoolClient } from "pg"; - -import { - ensureSchema, - upsertEvents, -} from "../services/tsdb/tsdb_service"; -import pricingService from "../services/tsdb/pricing_service"; -import { parseToken, getTeamPool, buildSchemaName } from "../services/tsdb/team_context"; -import { buildAnalytics } from "../services/tsdb/analytics_service"; - -const router = express.Router(); - -const AUTH_MIDDLEWARE = passport.authenticate("jwt", { session: false }); - - -interface TokenContext { - team_id: string; - user_id?: string; -} - -interface QueryRow { - [key: string]: unknown; -} - - -interface MetricRow { - period: string; - total_requests: string | number; - unique_traces: string | number; - unique_users: string | number; - total_input_tokens: string | number; - total_output_tokens: string | number; - 
total_tokens: string | number; - cached_tokens: string | number; - reasoning_tokens: string | number; - total_cost: string | number; - avg_latency_ms: string | number; - p50_latency_ms: string | number; - p95_latency_ms: string | number; - p99_latency_ms: string | number; - max_latency_ms: string | number; - streaming_requests: string | number; -} - -interface LLMEventRow { - timestamp: Date; - trace_id: string; - call_sequence: number; - model: string; - provider: string; - usage_input_tokens: number; - usage_output_tokens: number; - usage_cached_tokens: number; - cost_total: string | number; -} - -interface MergedRow { - request_count: number; - total_input_tokens: number; - total_output_tokens: number; - total_tokens: number; - total_cost: number; - avg_latency_ms: number; - latency_sum: number; - first_seen: Date; - last_seen: Date; - [key: string]: unknown; -} - -const getAuthorizationHeader = (req: Request): string | undefined => { - return req.headers.authorization || (req.headers as Record).Authorization; -}; - -const getTokenContext = (req: Request): TokenContext | null => { - return parseToken(getAuthorizationHeader(req)) as TokenContext | null; -}; - -const connectTeamClient = async (teamId: string | number): Promise => { - const pool = await getTeamPool(teamId, {}); - const schema = buildSchemaName(teamId); - const client = await pool.connect(); - await client.query(`CREATE SCHEMA IF NOT EXISTS ${schema}`); - await client.query(`SET search_path TO ${schema}, public`); - await ensureSchema(client); - return client; -}; - -router.post("/events", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - let client: PoolClient | undefined; - try { - const ctx = getTokenContext(req); - if (!ctx || !ctx.team_id) { - return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); - } - const payload = Array.isArray(req.body) ? 
req.body : req.body?.events; - if (!Array.isArray(payload) || payload.length === 0) { - return res.status(400).json({ error: "events array required" }); - } - if (payload.length > 2000) { - return res.status(400).json({ error: "events array too large (max 2000)" }); - } - - client = await connectTeamClient(ctx.team_id); - const enriched = payload.map((e: Record) => ({ ...e, team_id: ctx.team_id, user_id: (ctx.user_id || e.user_id) as string | undefined })); - const result = await upsertEvents(enriched, client); - return res.json({ - message: "ingested", - rows_written: result.rowsWritten, - normalized: result.normalized, - }); - } catch (err) { - console.error("[tsdb] ingest error", err); - return res.status(500).json({ error: "ingest_failed", detail: (err as Error).message }); - } finally { - if (client) client.release(); - } -}); - -router.get("/sample", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - let client: PoolClient | undefined; - try { - const ctx = getTokenContext(req); - if (!ctx || !ctx.team_id) { - return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); - } - client = await connectTeamClient(ctx.team_id); - const limit = Math.min(parseInt((req.query.limit as string) || "20", 10), 100); - const { rows } = await client.query( - 'SELECT * FROM llm_events ORDER BY "timestamp" DESC LIMIT $1', - [limit] - ); - return res.json({ rows }); - } catch (err) { - console.error("[tsdb] sample error", err); - return res.status(500).json({ error: "sample_failed", detail: (err as Error).message }); - } finally { - if (client) client.release(); - } -}); - -router.get("/counts", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - let client: PoolClient | undefined; - try { - const ctx = getTokenContext(req); - if (!ctx || !ctx.team_id) { - return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); - } - client = await connectTeamClient(ctx.team_id); - const window = 
(req.query.window as string) || "1 day"; - const { rows } = await client.query( - 'SELECT COUNT(*)::bigint AS count FROM llm_events WHERE "timestamp" >= NOW() - $1::interval', - [window] - ); - return res.json({ window, count: Number(rows[0].count) }); - } catch (err) { - console.error("[tsdb] counts error", err); - return res.status(500).json({ error: "counts_failed", detail: (err as Error).message }); - } finally { - if (client) client.release(); - } -}); - -router.get("/health", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - let client: PoolClient | undefined; - try { - const ctx = getTokenContext(req); - if (!ctx || !ctx.team_id) { - return res.status(401).json({ status: "error", detail: "Missing team_id in token" }); - } - client = await connectTeamClient(ctx.team_id); - const { rows } = await client.query("SELECT NOW() AS now"); - return res.json({ status: "ok", now: rows[0].now }); - } catch (err) { - console.error("[tsdb] health error", err); - return res.status(500).json({ status: "error", detail: (err as Error).message }); - } finally { - if (client) client.release(); - } -}); - -// GET /tsdb/logs?start=2025-01-01T00:00:00Z&end=2025-01-02T00:00:00Z&limit=500&offset=0 -// Optional: group_by=model|agent|model,agent for aggregation -router.get("/logs", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - let poolClient: PoolClient | undefined; - try { - const ctx = getTokenContext(req); - if (!ctx || !ctx.team_id) { - return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); - } - const { start, end, group_by } = req.query as { start?: string; end?: string; group_by?: string }; - const startDate = start ? new Date(start) : null; - const endDate = end ? 
new Date(end) : null; - if (!startDate || Number.isNaN(startDate.getTime()) || !endDate || Number.isNaN(endDate.getTime())) { - return res.status(400).json({ error: "invalid_time_window", detail: "start and end must be valid ISO dates" }); - } - - const limit = Math.min(parseInt((req.query.limit as string) || "500", 10), 5000); - const offset = Math.max(parseInt((req.query.offset as string) || "0", 10), 0); - - poolClient = await connectTeamClient(ctx.team_id); - - // Handle aggregation if group_by is specified - if (group_by) { - const validGroupFields = ["model", "agent", "provider"]; - const groupFields = group_by.split(",").map((f) => f.trim()).filter((f) => validGroupFields.includes(f)); - - if (groupFields.length === 0) { - return res.status(400).json({ - error: "invalid_group_by", - detail: `group_by must be one or more of: ${validGroupFields.join(", ")}`, - }); - } - - // Try to use continuous aggregates for better performance - // Use CA when: single group field (model or agent) and provider not requested - // Hybrid approach: CA for completed days + base table for today's partial data - const useModelCA = groupFields.length === 1 && groupFields[0] === "model"; - const useModelProviderCA = groupFields.length === 2 && groupFields.includes("model") && groupFields.includes("provider"); - const useAgentCA = groupFields.length === 1 && groupFields[0] === "agent"; - - let rows: QueryRow[]; - let usedCA = false; - - const utcDayStart = (d: Date): Date => { - const x = new Date(d); - x.setUTCHours(0, 0, 0, 0); - return x; - }; - - const addUtcDays = (d: Date, days: number): Date => { - return new Date(d.getTime() + days * 24 * 60 * 60 * 1000); - }; - - const startDayStart = utcDayStart(startDate); - const endDayStart = utcDayStart(endDate); - - const fullBucketStart = startDate.getTime() === startDayStart.getTime() - ? 
startDayStart - : addUtcDays(startDayStart, 1); - - const fullBucketEnd = endDayStart; - - const hasFullBuckets = fullBucketStart < fullBucketEnd; - - const partialRanges: Array<{ start: Date; end: Date }> = []; - const pushRange = (rangeStart: Date, rangeEnd: Date): void => { - if (rangeEnd.getTime() <= rangeStart.getTime()) return; - partialRanges.push({ start: rangeStart, end: rangeEnd }); - }; - - pushRange(startDate, new Date(Math.min(endDate.getTime(), fullBucketStart.getTime()))); - pushRange(new Date(Math.max(startDate.getTime(), fullBucketEnd.getTime())), endDate); - - const mergeResults = (caRows: QueryRow[], baseRows: QueryRow[], keyFields: string[]): MergedRow[] => { - const merged = new Map(); - - const addRow = (row: QueryRow): void => { - const key = keyFields.map((f) => row[f]).join("|"); - const requestCount = parseInt(row.request_count as string) || 0; - const inputTokens = parseInt(row.total_input_tokens as string) || 0; - const outputTokens = parseInt(row.total_output_tokens as string) || 0; - const totalTokens = parseInt(row.total_tokens as string) || 0; - const totalCost = parseFloat(row.total_cost as string) || 0; - const avgLatency = parseFloat(row.avg_latency_ms as string) || 0; - const firstSeen = row.first_seen as Date; - const lastSeen = row.last_seen as Date; - - const existing = merged.get(key); - if (!existing) { - merged.set(key, { - ...Object.fromEntries(keyFields.map((f) => [f, row[f]])), - request_count: requestCount, - total_input_tokens: inputTokens, - total_output_tokens: outputTokens, - total_tokens: totalTokens, - total_cost: totalCost, - avg_latency_ms: avgLatency, - latency_sum: avgLatency * requestCount, - first_seen: firstSeen, - last_seen: lastSeen, - }); - return; - } - - const newCount = existing.request_count + requestCount; - const newLatencySum = existing.latency_sum + avgLatency * requestCount; - - merged.set(key, { - ...existing, - request_count: newCount, - total_input_tokens: existing.total_input_tokens + 
inputTokens, - total_output_tokens: existing.total_output_tokens + outputTokens, - total_tokens: existing.total_tokens + totalTokens, - total_cost: existing.total_cost + totalCost, - avg_latency_ms: newCount > 0 ? newLatencySum / newCount : 0, - latency_sum: newLatencySum, - first_seen: existing.first_seen < firstSeen ? existing.first_seen : firstSeen, - last_seen: existing.last_seen > lastSeen ? existing.last_seen : lastSeen, - }); - }; - - for (const row of caRows) addRow(row); - for (const row of baseRows) addRow(row); - - // Convert to array and sort by cost desc - return Array.from(merged.values()) - .map(({ latency_sum: _latency_sum, ...rest }) => ({ ...rest, latency_sum: 0 })) - .sort((a, b) => b.total_cost - a.total_cost); - }; - - const getBaseAggData = async (rangeStart: Date, rangeEnd: Date, selectFields: string, groupByClause: string): Promise => { - if (rangeEnd.getTime() <= rangeStart.getTime()) return []; - - const baseSql = ` - SELECT - ${selectFields}, - COUNT(*) as request_count, - COALESCE(SUM(COALESCE(usage_input_tokens, 0)), 0) as total_input_tokens, - COALESCE(SUM(COALESCE(usage_output_tokens, 0)), 0) as total_output_tokens, - COALESCE(SUM(COALESCE(usage_total_tokens, COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0))), 0) as total_tokens, - COALESCE(SUM(cost_total), 0) as total_cost, - COALESCE(AVG(latency_ms), 0) as avg_latency_ms, - MIN("timestamp") as first_seen, - MAX("timestamp") as last_seen - FROM llm_events - WHERE "timestamp" >= $1 AND "timestamp" <= $2 AND team_id = $3 - GROUP BY ${groupByClause} - `; - - const result = await poolClient.query(baseSql, [rangeStart.toISOString(), rangeEnd.toISOString(), String(ctx.team_id)]); - return result.rows; - }; - - if (useModelCA || useModelProviderCA) { - // Try model CA - includes provider so works for both cases - try { - const keyFields = useModelProviderCA ? ["model", "provider"] : ["model"]; - - const selectFields = useModelProviderCA ? 
"model, provider" : "model"; - - const baseRows = (await Promise.all( - partialRanges.map((r) => getBaseAggData(r.start, r.end, selectFields, selectFields)) - )).flat(); - - let caRows: QueryRow[] = []; - if (hasFullBuckets) { - const caSql = ` - SELECT - model, - ${useModelProviderCA ? "provider," : ""} - SUM(requests) as request_count, - COALESCE(SUM(input_tokens), 0) as total_input_tokens, - COALESCE(SUM(output_tokens), 0) as total_output_tokens, - (COALESCE(SUM(input_tokens), 0) + COALESCE(SUM(output_tokens), 0)) as total_tokens, - COALESCE(SUM(cost_total), 0) as total_cost, - COALESCE(SUM(avg_latency_ms * requests) / NULLIF(SUM(requests), 0), 0) as avg_latency_ms, - MIN(bucket) as first_seen, - MAX(bucket) as last_seen - FROM llm_events_daily_by_model_ca - WHERE bucket >= $1 AND bucket < $2 - GROUP BY model${useModelProviderCA ? ", provider" : ""} - `; - const result = await poolClient.query(caSql, [fullBucketStart.toISOString(), fullBucketEnd.toISOString()]); - caRows = result.rows; - } - - rows = mergeResults(caRows, baseRows, keyFields).slice(offset, offset + limit) as unknown as QueryRow[]; - usedCA = hasFullBuckets; - } catch (err) { - // CA not available, fall through to base table query - } - } else if (useAgentCA) { - // Try agent CA - try { - const baseRows = (await Promise.all( - partialRanges.map((r) => getBaseAggData(r.start, r.end, "agent", "agent")) - )).flat(); - - let caRows: QueryRow[] = []; - if (hasFullBuckets) { - const caSql = ` - SELECT - agent, - SUM(requests) as request_count, - COALESCE(SUM(input_tokens), 0) as total_input_tokens, - COALESCE(SUM(output_tokens), 0) as total_output_tokens, - (COALESCE(SUM(input_tokens), 0) + COALESCE(SUM(output_tokens), 0)) as total_tokens, - COALESCE(SUM(cost_total), 0) as total_cost, - COALESCE(SUM(avg_latency_ms * requests) / NULLIF(SUM(requests), 0), 0) as avg_latency_ms, - MIN(bucket) as first_seen, - MAX(bucket) as last_seen - FROM llm_events_daily_by_agent_ca - WHERE bucket >= $1 AND bucket < $2 - 
GROUP BY agent - `; - const result = await poolClient.query(caSql, [fullBucketStart.toISOString(), fullBucketEnd.toISOString()]); - caRows = result.rows; - } - - rows = mergeResults(caRows, baseRows, ["agent"]).slice(offset, offset + limit) as unknown as QueryRow[]; - usedCA = hasFullBuckets; - } catch (err) { - // CA not available, fall through to base table query - } - } - - // Fallback to base table query if CA not used or failed - if (!usedCA) { - const groupByClause = groupFields.join(", "); - const selectFields = groupFields.map((f) => f).join(", "); - - const aggSql = ` - SELECT - ${selectFields}, - COUNT(*) as request_count, - COALESCE(SUM(COALESCE(usage_input_tokens, 0)), 0) as total_input_tokens, - COALESCE(SUM(COALESCE(usage_output_tokens, 0)), 0) as total_output_tokens, - COALESCE(SUM(COALESCE(usage_total_tokens, COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0))), 0) as total_tokens, - COALESCE(SUM(cost_total), 0) as total_cost, - COALESCE(AVG(latency_ms), 0) as avg_latency_ms, - MIN("timestamp") as first_seen, - MAX("timestamp") as last_seen - FROM llm_events - WHERE "timestamp" >= $1 AND "timestamp" <= $2 AND team_id = $3 - GROUP BY ${groupByClause} - ORDER BY total_cost DESC - LIMIT $4 OFFSET $5 - `; - const result = await poolClient.query(aggSql, [startDate.toISOString(), endDate.toISOString(), String(ctx.team_id), limit, offset]); - rows = result.rows; - } - - return res.json({ - window: { start: startDate.toISOString(), end: endDate.toISOString() }, - group_by: groupFields, - count: rows!.length, - source: usedCA ? 
"continuous_aggregate" : "base_table", - aggregations: rows!.map((row) => ({ - ...Object.fromEntries(groupFields.map((f) => [f, row[f]])), - request_count: parseInt(row.request_count as string) || 0, - total_input_tokens: parseInt(row.total_input_tokens as string) || 0, - total_output_tokens: parseInt(row.total_output_tokens as string) || 0, - total_tokens: parseInt(row.total_tokens as string) || 0, - total_cost: parseFloat(row.total_cost as string) || 0, - avg_latency_ms: parseFloat(row.avg_latency_ms as string) || 0, - first_seen: row.first_seen, - last_seen: row.last_seen, - })), - }); - } - - // Default: return raw rows with derived type and success fields - const { type: typeFilter, success: successFilter } = req.query as { type?: string; success?: string }; - - // Build WHERE conditions for optional filters - const whereConditions = [ - '"timestamp" >= $1', - '"timestamp" <= $2', - 'team_id = $3', - ]; - const params: (string | number | boolean)[] = [startDate.toISOString(), endDate.toISOString(), String(ctx.team_id)]; - - // Add type filter if specified - if (typeFilter && typeFilter !== 'all') { - if (typeFilter === 'tool_call') { - whereConditions.push('COALESCE(tool_call_count, 0) > 0'); - } else if (typeFilter === 'error') { - whereConditions.push('(finish_reason IS NULL OR finish_reason IN (\'error\', \'content_filter\'))'); - } else if (typeFilter === 'llm_request') { - whereConditions.push('COALESCE(tool_call_count, 0) = 0'); - whereConditions.push('(finish_reason IS NOT NULL AND finish_reason NOT IN (\'error\', \'content_filter\'))'); - } - } - - // Add success filter if specified - if (successFilter !== undefined && successFilter !== '') { - const isSuccess = successFilter === 'true'; - if (isSuccess) { - whereConditions.push('finish_reason IN (\'stop\', \'end_turn\', \'tool_calls\', \'length\')'); - } else { - whereConditions.push('(finish_reason IS NULL OR finish_reason NOT IN (\'stop\', \'end_turn\', \'tool_calls\', \'length\'))'); - } - } - - 
params.push(limit, offset); - - const sql = ` - SELECT *, - CASE - WHEN COALESCE(tool_call_count, 0) > 0 THEN 'tool_call' - WHEN finish_reason IS NULL OR finish_reason IN ('error', 'content_filter') THEN 'error' - ELSE 'llm_request' - END as derived_type, - CASE - WHEN finish_reason IN ('stop', 'end_turn', 'tool_calls', 'length') THEN true - ELSE false - END as derived_success - FROM llm_events - WHERE ${whereConditions.join(' AND ')} - ORDER BY "timestamp" DESC - LIMIT $${params.length - 1} OFFSET $${params.length} - `; - const { rows } = await poolClient.query(sql, params); - return res.json({ - window: { start: startDate.toISOString(), end: endDate.toISOString() }, - count: rows.length, - filters: { type: typeFilter || 'all', success: successFilter }, - rows, - }); - } catch (err) { - console.error("[tsdb] logs error", err); - return res.status(500).json({ error: "logs_failed", detail: (err as Error).message }); - } finally { - if (poolClient) poolClient.release(); - } -}); - -// GET /tsdb/metrics?days=30 -// Returns summary metrics with period-over-period % change -router.get("/metrics", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - let client: PoolClient | undefined; - try { - const ctx = getTokenContext(req); - if (!ctx || !ctx.team_id) { - return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); - } - - const days = Math.min(parseInt((req.query.days as string) || "30", 10), 365); - - client = await connectTeamClient(ctx.team_id); - - // Calculate date ranges for current and previous periods - const now = new Date(); - const currentStart = new Date(now); - currentStart.setDate(currentStart.getDate() - days); - const previousStart = new Date(currentStart); - previousStart.setDate(previousStart.getDate() - days); - - // Query metrics for both periods in a single query using CASE statements - const metricsSql = ` - WITH period_data AS ( - SELECT - CASE - WHEN "timestamp" >= $2 THEN 'current' - ELSE 'previous' - 
END as period, - 1 as request, - COALESCE(usage_input_tokens, 0) as input_tokens, - COALESCE(usage_output_tokens, 0) as output_tokens, - COALESCE( - usage_total_tokens, - COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0), - 0 - ) as total_tokens, - COALESCE(usage_cached_tokens, 0) as cached_tokens, - COALESCE(usage_reasoning_tokens, 0) as reasoning_tokens, - COALESCE(cost_total, 0) as cost, - latency_ms, - trace_id, - user_id, - CASE WHEN stream = true THEN 1 ELSE 0 END as is_streaming - FROM llm_events - WHERE "timestamp" >= $1 AND "timestamp" <= $3 AND team_id = $4 - ), - aggregated AS ( - SELECT - period, - COUNT(*) as total_requests, - COUNT(DISTINCT trace_id) as unique_traces, - COUNT(DISTINCT user_id) as unique_users, - SUM(input_tokens) as total_input_tokens, - SUM(output_tokens) as total_output_tokens, - SUM(total_tokens) as total_tokens, - SUM(cached_tokens) as cached_tokens, - SUM(reasoning_tokens) as reasoning_tokens, - SUM(cost) as total_cost, - AVG(latency_ms) as avg_latency_ms, - PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY latency_ms) as p50_latency_ms, - PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY latency_ms) as p95_latency_ms, - PERCENTILE_CONT(0.99) WITHIN GROUP (ORDER BY latency_ms) as p99_latency_ms, - MAX(latency_ms) as max_latency_ms, - SUM(is_streaming) as streaming_requests - FROM period_data - GROUP BY period - ) - SELECT * FROM aggregated - `; - - const { rows } = await client.query(metricsSql, [ - previousStart.toISOString(), - currentStart.toISOString(), - now.toISOString(), - String(ctx.team_id), - ]); - - // Parse results into current and previous periods - const current = rows.find((r) => r.period === "current") || {} as Partial; - const previous = rows.find((r) => r.period === "previous") || {} as Partial; - - // Helper to calculate % change - const pctChange = (curr: string | number | undefined, prev: string | number | undefined): number => { - const c = parseFloat(curr as string) || 0; - const p = parseFloat(prev 
as string) || 0; - if (p === 0) return c > 0 ? 100 : 0; - return ((c - p) / p) * 100; - }; - - // Helper to safely parse numbers - const num = (val: string | number | undefined): number => parseFloat(val as string) || 0; - const int = (val: string | number | undefined): number => parseInt(val as string) || 0; - - // Calculate derived metrics - const totalRequests = int(current.total_requests); - const totalTokens = num(current.total_tokens); - const cachedTokens = num(current.cached_tokens); - const inputTokens = num(current.total_input_tokens); - const uniqueTraces = int(current.unique_traces); - const streamingRequests = int(current.streaming_requests); - - const cacheHitRate = inputTokens > 0 ? (cachedTokens / inputTokens) * 100 : 0; - const prevCacheHitRate = num(previous.total_input_tokens) > 0 - ? (num(previous.cached_tokens) / num(previous.total_input_tokens)) * 100 - : 0; - - const streamingRate = totalRequests > 0 ? (streamingRequests / totalRequests) * 100 : 0; - const prevStreamingRate = int(previous.total_requests) > 0 - ? (int(previous.streaming_requests) / int(previous.total_requests)) * 100 - : 0; - - const avgCallsPerTrace = uniqueTraces > 0 ? totalRequests / uniqueTraces : 0; - const prevAvgCallsPerTrace = int(previous.unique_traces) > 0 - ? int(previous.total_requests) / int(previous.unique_traces) - : 0; - - const totalCost = num(current.total_cost); - const costPer1kTokens = totalTokens > 0 ? (totalCost / (totalTokens / 1000)) : 0; - const prevTotalTokens = num(previous.total_tokens); - const prevCostPer1kTokens = prevTotalTokens > 0 - ? 
(num(previous.total_cost) / (prevTotalTokens / 1000)) - : 0; - - const metrics = { - period: { - days, - current: { start: currentStart.toISOString(), end: now.toISOString() }, - previous: { start: previousStart.toISOString(), end: currentStart.toISOString() }, - }, - volume: { - total_requests: { - value: totalRequests, - unit: "requests", - change_pct: pctChange(current.total_requests, previous.total_requests), - }, - unique_traces: { - value: uniqueTraces, - unit: "traces", - change_pct: pctChange(current.unique_traces, previous.unique_traces), - }, - unique_users: { - value: int(current.unique_users), - unit: "users", - change_pct: pctChange(current.unique_users, previous.unique_users), - }, - avg_calls_per_trace: { - value: Math.round(avgCallsPerTrace * 100) / 100, - unit: "calls/trace", - change_pct: pctChange(avgCallsPerTrace, prevAvgCallsPerTrace), - }, - }, - tokens: { - total_input_tokens: { - value: int(current.total_input_tokens), - unit: "tokens", - change_pct: pctChange(current.total_input_tokens, previous.total_input_tokens), - }, - total_output_tokens: { - value: int(current.total_output_tokens), - unit: "tokens", - change_pct: pctChange(current.total_output_tokens, previous.total_output_tokens), - }, - total_tokens: { - value: int(totalTokens), - unit: "tokens", - change_pct: pctChange(current.total_tokens, previous.total_tokens), - }, - cached_tokens: { - value: int(cachedTokens), - unit: "tokens", - change_pct: pctChange(current.cached_tokens, previous.cached_tokens), - }, - reasoning_tokens: { - value: int(current.reasoning_tokens), - unit: "tokens", - change_pct: pctChange(current.reasoning_tokens, previous.reasoning_tokens), - }, - cache_hit_rate: { - value: Math.round(cacheHitRate * 100) / 100, - unit: "%", - change_pct: pctChange(cacheHitRate, prevCacheHitRate), - }, - avg_tokens_per_request: { - value: totalRequests > 0 ? Math.round(totalTokens / totalRequests) : 0, - unit: "tokens/req", - change_pct: pctChange( - totalRequests > 0 ? 
totalTokens / totalRequests : 0, - int(previous.total_requests) > 0 ? prevTotalTokens / int(previous.total_requests) : 0 - ), - }, - }, - performance: { - avg_latency_ms: { - value: Math.round(num(current.avg_latency_ms) * 100) / 100, - unit: "ms", - change_pct: pctChange(current.avg_latency_ms, previous.avg_latency_ms), - }, - p50_latency_ms: { - value: Math.round(num(current.p50_latency_ms) * 100) / 100, - unit: "ms", - change_pct: pctChange(current.p50_latency_ms, previous.p50_latency_ms), - }, - p95_latency_ms: { - value: Math.round(num(current.p95_latency_ms) * 100) / 100, - unit: "ms", - change_pct: pctChange(current.p95_latency_ms, previous.p95_latency_ms), - }, - p99_latency_ms: { - value: Math.round(num(current.p99_latency_ms) * 100) / 100, - unit: "ms", - change_pct: pctChange(current.p99_latency_ms, previous.p99_latency_ms), - }, - max_latency_ms: { - value: Math.round(num(current.max_latency_ms) * 100) / 100, - unit: "ms", - change_pct: pctChange(current.max_latency_ms, previous.max_latency_ms), - }, - }, - cost: { - total_cost: { - value: Math.round(totalCost * 100) / 100, - unit: "USD", - change_pct: pctChange(current.total_cost, previous.total_cost), - }, - avg_cost_per_request: { - value: totalRequests > 0 ? Math.round((totalCost / totalRequests) * 10000) / 10000 : 0, - unit: "USD/req", - change_pct: pctChange( - totalRequests > 0 ? totalCost / totalRequests : 0, - int(previous.total_requests) > 0 ? 
num(previous.total_cost) / int(previous.total_requests) : 0 - ), - }, - cost_per_1k_tokens: { - value: Math.round(costPer1kTokens * 10000) / 10000, - unit: "USD/1k tokens", - change_pct: pctChange(costPer1kTokens, prevCostPer1kTokens), - }, - }, - usage_patterns: { - streaming_rate: { - value: Math.round(streamingRate * 100) / 100, - unit: "%", - change_pct: pctChange(streamingRate, prevStreamingRate), - }, - }, - }; - - return res.json(metrics); - } catch (err) { - console.error("[tsdb] metrics error", err); - return res.status(500).json({ error: "metrics_failed", detail: (err as Error).message }); - } finally { - if (client) client.release(); - } -}); - -// POST /tsdb/refresh-aggregates -// Manually refresh all continuous aggregates to ensure data is up-to-date -router.post("/refresh-aggregates", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - let client: PoolClient | undefined; - try { - const ctx = getTokenContext(req); - if (!ctx || !ctx.team_id) { - return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); - } - - client = await connectTeamClient(ctx.team_id); - - const results: Array<{ ca: string; status: string; error?: string }> = []; - - // Refresh all CAs from beginning of time to now - const cas = [ - "llm_events_daily_ca", - "llm_events_daily_by_model_ca", - "llm_events_daily_by_agent_ca", - ]; - - for (const ca of cas) { - try { - await client.query(`CALL refresh_continuous_aggregate('${ca}', NULL, NOW())`); - results.push({ ca, status: "refreshed" }); - } catch (err) { - results.push({ ca, status: "error", error: (err as Error).message }); - } - } - - return res.json({ - message: "Continuous aggregates refresh completed", - results, - refreshed_at: new Date().toISOString(), - }); - } catch (err) { - console.error("[tsdb] refresh-aggregates error", err); - return res.status(500).json({ error: "refresh_failed", detail: (err as Error).message }); - } finally { - if (client) client.release(); - } -}); - -// 
==================== PRICING CRUD ENDPOINTS ==================== - -// GET /tsdb/pricing - List all pricing -// Optional: ?group_by=provider to group by provider -router.get("/pricing", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - try { - const { group_by } = req.query; - - if (group_by === "provider") { - const pricing = await pricingService.getPricingByProvider(); - return res.json({ pricing, grouped_by: "provider" }); - } - - const pricing = await pricingService.getAllPricing(); - return res.json({ pricing, count: Object.keys(pricing).length }); - } catch (err) { - console.error("[tsdb] pricing list error", err); - return res.status(500).json({ error: "pricing_list_failed", detail: (err as Error).message }); - } -}); - -// GET /tsdb/pricing/:model - Get specific model pricing -router.get("/pricing/:model", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - try { - const { model } = req.params; - const { provider } = req.query; - - const pricing = await pricingService.getModelPricing(model, provider as string | undefined); - return res.json({ pricing }); - } catch (err) { - console.error("[tsdb] pricing get error", err); - return res.status(500).json({ error: "pricing_get_failed", detail: (err as Error).message }); - } -}); - -// POST /tsdb/pricing - Add new model pricing -router.post("/pricing", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - try { - const ctx = getTokenContext(req); - const { model, provider, input_per_1m, output_per_1m, cached_input_per_1m, aliases } = req.body; - - if (!model) { - return res.status(400).json({ error: "model is required" }); - } - if (input_per_1m === undefined || output_per_1m === undefined) { - return res.status(400).json({ error: "input_per_1m and output_per_1m are required" }); - } - - const result = await pricingService.upsertPricing( - model, - { - provider, - input_per_1m, - output_per_1m, - cached_input_per_1m: cached_input_per_1m ?? 
input_per_1m * 0.5, - aliases: aliases || [], - }, - ctx?.user_id - ); - - return res.json({ message: "pricing_created", pricing: result }); - } catch (err) { - console.error("[tsdb] pricing create error", err); - return res.status(500).json({ error: "pricing_create_failed", detail: (err as Error).message }); - } -}); - -// PUT /tsdb/pricing/:model - Update model pricing -router.put("/pricing/:model", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - try { - const ctx = getTokenContext(req); - const { model } = req.params; - const { provider, input_per_1m, output_per_1m, cached_input_per_1m, aliases } = req.body; - - const result = await pricingService.upsertPricing( - model, - { - provider, - input_per_1m, - output_per_1m, - cached_input_per_1m, - aliases, - }, - ctx?.user_id - ); - - return res.json({ message: "pricing_updated", pricing: result }); - } catch (err) { - console.error("[tsdb] pricing update error", err); - return res.status(500).json({ error: "pricing_update_failed", detail: (err as Error).message }); - } -}); - -// DELETE /tsdb/pricing/:model - Remove model pricing -router.delete("/pricing/:model", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - try { - const { model } = req.params; - const deleted = await pricingService.deletePricing(model); - - if (!deleted) { - return res.status(404).json({ error: "pricing_not_found", model }); - } - - return res.json({ message: "pricing_deleted", model }); - } catch (err) { - console.error("[tsdb] pricing delete error", err); - return res.status(500).json({ error: "pricing_delete_failed", detail: (err as Error).message }); - } -}); - -// POST /tsdb/pricing/seed - Seed default pricing to DB -router.post("/pricing/seed", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - try { - const ctx = getTokenContext(req); - const { overwrite } = req.body; - - const result = await pricingService.seedDefaultPricing(ctx?.user_id, overwrite === true); - - return res.json({ - message: 
"pricing_seeded", - ...result, - }); - } catch (err) { - console.error("[tsdb] pricing seed error", err); - return res.status(500).json({ error: "pricing_seed_failed", detail: (err as Error).message }); - } -}); - -// POST /tsdb/pricing/refresh - Force refresh pricing cache -router.post("/pricing/refresh", AUTH_MIDDLEWARE, async (_req: Request, res: Response) => { - try { - await pricingService.loadPricingFromDb(true); - const pricing = await pricingService.getAllPricing(); - - return res.json({ - message: "cache_refreshed", - count: Object.keys(pricing).length, - refreshed_at: new Date().toISOString(), - }); - } catch (err) { - console.error("[tsdb] pricing refresh error", err); - return res.status(500).json({ error: "pricing_refresh_failed", detail: (err as Error).message }); - } -}); - -router.get("/analytics-wide", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - let client: PoolClient | undefined; - try { - const ctx = getTokenContext(req); - if (!ctx || !ctx.team_id) { - return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); - } - - const windowLabel = (req.query.window as string) || "this_month"; - - client = await connectTeamClient(ctx.team_id); - - const analytics = await buildAnalytics({ - windowLabel, - client, - resolution: "day", - }); - - return res.json({ analytics }); - } catch (err) { - console.error("[tsdb] analytics-wide error", err); - return res.status(500).json({ error: "analytics_failed", detail: (err as Error).message }); - } finally { - if (client) client.release(); - } -}); - -router.get("/analytics-narrow", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - let client: PoolClient | undefined; - try { - const ctx = getTokenContext(req); - if (!ctx || !ctx.team_id) { - return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); - } - - client = await connectTeamClient(ctx.team_id); - - const analytics = await buildAnalytics({ - windowLabel: "today", - 
client, - resolution: "hour", - }); - - return res.json({ analytics }); - } catch (err) { - console.error("[tsdb] analytics-narrow error", err); - return res.status(500).json({ error: "analytics_failed", detail: (err as Error).message }); - } finally { - if (client) client.release(); - } -}); - -// POST /tsdb/recalculate-costs - Recalculate historical costs with current pricing -router.post("/recalculate-costs", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { - let poolClient: PoolClient | undefined; - try { - const ctx = getTokenContext(req); - if (!ctx || !ctx.team_id) { - return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); - } - - const { start, end, batch_size = 1000 } = req.body; - - if (!start || !end) { - return res.status(400).json({ error: "start and end dates are required" }); - } - - const startDate = new Date(start); - const endDate = new Date(end); - - if (isNaN(startDate.getTime()) || isNaN(endDate.getTime())) { - return res.status(400).json({ error: "invalid_dates", detail: "start and end must be valid ISO dates" }); - } - - if (endDate < startDate) { - return res.status(400).json({ error: "invalid_range", detail: "end must be after start" }); - } - - poolClient = await connectTeamClient(ctx.team_id); - - // Ensure pricing is loaded - await pricingService.loadPricingFromDb(true); - - const results: { - updated: number; - processed: number; - errors: Array<{ trace_id?: string; call_sequence?: number; batch?: number; error?: string; warning?: string }>; - batches: number; - } = { - updated: 0, - processed: 0, - errors: [], - batches: 0, - }; - - const startTime = Date.now(); - let offset = 0; - let hasMore = true; - - // Process in batches - while (hasMore) { - // Fetch batch of events - const selectSql = ` - SELECT - "timestamp", - trace_id, - call_sequence, - model, - provider, - usage_input_tokens, - usage_output_tokens, - usage_cached_tokens, - cost_total - FROM llm_events - WHERE "timestamp" >= $1 
AND "timestamp" <= $2 AND team_id = $3 - ORDER BY "timestamp" - LIMIT $4 OFFSET $5 - `; - - const { rows } = await poolClient.query(selectSql, [ - startDate.toISOString(), - endDate.toISOString(), - String(ctx.team_id), - batch_size, - offset, - ]); - - if (rows.length === 0) { - hasMore = false; - break; - } - - results.batches++; - - // Calculate new costs and prepare updates - const updates: Array<{ timestamp: Date; trace_id: string; call_sequence: number; new_cost: number }> = []; - for (const row of rows) { - try { - const costResult = pricingService.calculateCostSync({ - model: row.model || "", - provider: row.provider, - input_tokens: row.usage_input_tokens || 0, - output_tokens: row.usage_output_tokens || 0, - cached_tokens: row.usage_cached_tokens || 0, - }); - - // Only update if cost changed - const oldCost = parseFloat(row.cost_total as string) || 0; - const newCost = costResult.total; - - if (Math.abs(newCost - oldCost) > 0.000001) { - updates.push({ - timestamp: row.timestamp, - trace_id: row.trace_id, - call_sequence: row.call_sequence, - new_cost: newCost, - }); - } - - results.processed++; - } catch (err) { - results.errors.push({ - trace_id: row.trace_id, - call_sequence: row.call_sequence, - error: (err as Error).message, - }); - } - } - - // Apply batch updates - if (updates.length > 0) { - // Use a single UPDATE with CASE for efficiency - const updateSql = ` - UPDATE llm_events - SET cost_total = updates.new_cost - FROM (VALUES ${updates.map((_, i) => `($${i * 4 + 1}::timestamptz, $${i * 4 + 2}::text, $${i * 4 + 3}::integer, $${i * 4 + 4}::numeric)`).join(", ")}) AS updates(ts, tid, cs, new_cost) - WHERE llm_events."timestamp" = updates.ts - AND llm_events.trace_id = updates.tid - AND llm_events.call_sequence = updates.cs - `; - - const updateValues = updates.flatMap((u) => [u.timestamp, u.trace_id, u.call_sequence, u.new_cost]); - - try { - await poolClient.query(updateSql, updateValues); - results.updated += updates.length; - } catch (err) { 
- results.errors.push({ batch: results.batches, error: (err as Error).message }); - } - } - - offset += batch_size; - - // Safety check - stop if taking too long (5 minutes) - if (Date.now() - startTime > 5 * 60 * 1000) { - results.errors.push({ warning: "Timeout reached after 5 minutes. Partial recalculation completed." }); - hasMore = false; - } - } - - // Refresh continuous aggregates after recalculation - const caRefreshResults: Array<{ ca: string; status: string; error?: string }> = []; - const cas = ["llm_events_daily_ca", "llm_events_daily_by_model_ca", "llm_events_daily_by_agent_ca"]; - - for (const ca of cas) { - try { - await poolClient.query(`CALL refresh_continuous_aggregate('${ca}', $1::timestamptz, $2::timestamptz)`, [ - startDate.toISOString(), - endDate.toISOString(), - ]); - caRefreshResults.push({ ca, status: "refreshed" }); - } catch (err) { - caRefreshResults.push({ ca, status: "error", error: (err as Error).message }); - } - } - - return res.json({ - message: "recalculation_complete", - period: { start: startDate.toISOString(), end: endDate.toISOString() }, - stats: { - processed: results.processed, - updated: results.updated, - batches: results.batches, - duration_ms: Date.now() - startTime, - }, - continuous_aggregates: caRefreshResults, - errors: results.errors.slice(0, 10), // Limit error output - error_count: results.errors.length, - }); - } catch (err) { - console.error("[tsdb] recalculate-costs error", err); - return res.status(500).json({ error: "recalculate_failed", detail: (err as Error).message }); - } finally { - if (poolClient) poolClient.release(); - } -}); - -export default router; diff --git a/hive/src/controllers/user.controller.ts b/hive/src/controllers/user.controller.ts deleted file mode 100644 index 277e1ed9..00000000 --- a/hive/src/controllers/user.controller.ts +++ /dev/null @@ -1,618 +0,0 @@ -/** - * User Controller - * - * Handles user authentication endpoints including login-v2. 
- */ - -import { Router, Request, Response, NextFunction } from "express"; -import config from "../config"; - -const router = Router(); - -/** - * Extract token from Authorization header - * Supports: "jwt ", "Bearer ", or raw "" - */ -function extractToken(authHeader: string): string { - if (authHeader.startsWith("jwt ")) { - return authHeader.slice(4); - } - if (authHeader.startsWith("Bearer ")) { - return authHeader.slice(7); - } - return authHeader; -} - -// Email validation regex -const EMAIL_REGEX = - /[\w!#$%&'*+/=?^_`{|}~-]+(?:\.[\w!#$%&'*+/=?^_`{|}~-]+)*@(?:[\w](?:[\w-]*[\w])?\.)+[\w](?:[\w-]*[\w])?/; - -/** - * POST /user/login-v2 - * - * Authenticate a user with email and password. - * Returns a JWT token on success. - */ -router.post( - "/login-v2", - async (req: Request, res: Response, _next: NextFunction) => { - try { - let { email } = req.body; - const { password } = req.body; - - // Validate required fields - if ( - !email || - typeof email !== "string" || - !password || - typeof password !== "string" - ) { - return res.status(400).json({ - success: false, - msg: "Email and password are required", - }); - } - - // Validate email format - if (!EMAIL_REGEX.test(email)) { - return res.status(400).json({ - success: false, - msg: "Please enter a valid email", - }); - } - - // Trim email - email = email.trim().toLowerCase(); - - // Validate password length - if (password.length < 6) { - return res.status(400).json({ - success: false, - msg: "Password must be at least 6 characters", - }); - } - - // Get userDbService from app.locals - const userDbService = req.app.locals.userDbService; - if (!userDbService) { - console.error("[UserController] userDbService not found in app.locals"); - return res.status(500).json({ - success: false, - msg: "Internal server error", - }); - } - - // Attempt login - const result = await userDbService.login(email, password, { - jwtSecret: config.jwt.secret, - expiresIn: config.jwt.expiresIn, - }); - - console.log( - 
`[UserController] login-v2: User ${email} logged in successfully` - ); - - // Return success response - res.json({ - success: true, - token: result.token, - email: result.email, - firstname: result.firstname, - lastname: result.lastname, - name: result.name, - current_team_id: result.current_team_id, - create_time: result.created_at, - }); - } catch (err) { - const error = err as { message?: string; code?: string }; - console.error("[UserController] login-v2 error:", error.message); - - // Handle specific error codes - if ( - error.code === "USER_NOT_FOUND" || - error.code === "INVALID_CREDENTIALS" - ) { - return res.status(401).json({ - success: false, - msg: "Invalid email or password", - }); - } - - if (error.code === "OAUTH_REQUIRED") { - return res.status(400).json({ - success: false, - msg: error.message, - }); - } - - if (error.code === "ACCOUNT_DISABLED") { - return res.status(403).json({ - success: false, - msg: "Your account has been disabled", - }); - } - - // Generic error - return res.status(500).json({ - success: false, - msg: "Login failed. Please try again.", - }); - } - } -); - -/** - * POST /user/register - * - * Register a new user account. - * Returns a JWT token on success. 
- */ -router.post("/register", async (req: Request, res: Response) => { - try { - let { email } = req.body; - const { password, name, firstname, lastname } = req.body; - - // Validate required fields - if ( - !email || - typeof email !== "string" || - !password || - typeof password !== "string" - ) { - return res.status(400).json({ - success: false, - msg: "Email and password are required", - }); - } - - // Validate email format - if (!EMAIL_REGEX.test(email)) { - return res.status(400).json({ - success: false, - msg: "Please enter a valid email", - }); - } - - // Trim and lowercase email - email = email.trim().toLowerCase(); - - // Validate password length - if (password.length < 8) { - return res.status(400).json({ - success: false, - msg: "Password must be at least 8 characters", - }); - } - - // Get userDbService from app.locals - const userDbService = req.app.locals.userDbService; - if (!userDbService) { - console.error("[UserController] userDbService not found in app.locals"); - return res.status(500).json({ - success: false, - msg: "Internal server error", - }); - } - - // Attempt registration - const result = await userDbService.register( - { email, password, name, firstname, lastname }, - { - jwtSecret: config.jwt.secret, - expiresIn: config.jwt.expiresIn, - defaultTeamId: 1, // Default to team 1 for local dev - } - ); - - console.log( - `[UserController] register: User ${email} registered successfully` - ); - - // Return success response - res.status(201).json({ - success: true, - token: result.token, - email: result.email, - name: result.name, - firstname: result.firstname, - lastname: result.lastname, - current_team_id: result.current_team_id, - create_time: result.created_at, - }); - } catch (err: any) { - console.error("[UserController] register error:", err.message); - - // Handle specific error codes - if (err.code === "EMAIL_EXISTS") { - return res.status(409).json({ - success: false, - msg: "Email already registered", - }); - } - - // Generic 
error - return res.status(500).json({ - success: false, - msg: "Registration failed. Please try again.", - }); - } -}); - -/** - * GET /user/profile - * - * Get current user profile. - * Requires authentication. - */ -router.get("/profile", async (req: Request, res: Response) => { - try { - const authHeader = req.headers.authorization; - if (!authHeader) { - return res.status(401).json({ - success: false, - msg: "No token provided", - }); - } - - const userDbService = req.app.locals.userDbService; - const user = await userDbService.findByToken(extractToken(authHeader)); - - if (!user) { - return res.status(401).json({ - success: false, - msg: "Invalid token", - }); - } - - // Return in format expected by frontend - res.json({ - data: { - firstname: user.firstname || "", - lastname: user.lastname || "", - email: user.email, - company_name: user.company_name || null, - profile_img_url: user.avatar_url || null, - roleId: user.role_id || 1, - user_id: String(user.id), - team_id: String(user.current_team_id || 1), - roles: user.roles || ["user"], - }, - }); - } catch (err: any) { - console.error("[UserController] /profile error:", err.message); - res.status(500).json({ - success: false, - msg: "Failed to get user profile", - }); - } -}); - -/** - * PUT /user/profile - * - * Update current user profile. - * Requires authentication. 
- */ -router.put("/profile", async (req: Request, res: Response) => { - try { - const authHeader = req.headers.authorization; - if (!authHeader) { - return res.status(401).json({ - success: false, - msg: "No token provided", - }); - } - - const userDbService = req.app.locals.userDbService; - const user = await userDbService.findByToken(extractToken(authHeader)); - - if (!user) { - return res.status(401).json({ - success: false, - msg: "Invalid token", - }); - } - - const { firstname, lastname } = req.body; - - // Update user profile (basic implementation) - if (userDbService.updateProfile) { - await userDbService.updateProfile(user.id, { firstname, lastname }); - } - - res.json({ message: "Profile updated successfully" }); - } catch (err: any) { - console.error("[UserController] PUT /profile error:", err.message); - res.status(500).json({ - success: false, - msg: "Failed to update profile", - }); - } -}); - -/** - * GET /user/me - * - * Get current user info from token. - * Requires authentication. - */ -router.get("/me", async (req: Request, res: Response) => { - try { - const authHeader = req.headers.authorization; - if (!authHeader) { - return res.status(401).json({ - success: false, - msg: "No token provided", - }); - } - - const userDbService = req.app.locals.userDbService; - const user = await userDbService.findByToken(extractToken(authHeader)); - - if (!user) { - return res.status(401).json({ - success: false, - msg: "Invalid token", - }); - } - - res.json({ - success: true, - user: { - id: user.id, - email: user.email, - name: user.name, - firstname: user.firstname, - lastname: user.lastname, - current_team_id: user.current_team_id, - avatar_url: user.avatar_url, - }, - }); - } catch (err: any) { - console.error("[UserController] /me error:", err.message); - res.status(500).json({ - success: false, - msg: "Failed to get user info", - }); - } -}); - -/** - * GET /user/get-dev-tokens - * - * Get all developer API tokens for the current user. 
- * Requires authentication. - */ -router.get("/get-dev-tokens", async (req: Request, res: Response) => { - try { - const authHeader = req.headers.authorization; - if (!authHeader) { - return res.status(401).json({ - success: false, - msg: "No token provided", - }); - } - - const userDbService = req.app.locals.userDbService; - const user = await userDbService.findByToken(extractToken(authHeader)); - - if (!user) { - return res.status(401).json({ - success: false, - msg: "Invalid token", - }); - } - - const tokens = await userDbService.getDevTokens(user); - - res.json({ - success: true, - data: tokens, - }); - } catch (err: any) { - console.error("[UserController] /get-dev-tokens error:", err.message); - res.status(500).json({ - success: false, - msg: "Failed to get API tokens", - }); - } -}); - -/** - * POST /user/generate-dev-token - * - * Generate a new developer API token. - * Requires authentication. - */ -router.post("/generate-dev-token", async (req: Request, res: Response) => { - try { - const authHeader = req.headers.authorization; - if (!authHeader) { - return res.status(401).json({ - success: false, - msg: "No token provided", - }); - } - - const userDbService = req.app.locals.userDbService; - const user = await userDbService.findByToken(extractToken(authHeader)); - - if (!user) { - return res.status(401).json({ - success: false, - msg: "Invalid token", - }); - } - - const { label, ttl } = req.body; - - const tokenResult = await userDbService.generateDevToken(user, { - label, - ttl, - jwtSecret: config.jwt.secret, - }); - - console.log( - `[UserController] generate-dev-token: Created token for user ${user.id}` - ); - - res.status(201).json({ - success: true, - data: tokenResult, - }); - } catch (err: any) { - console.error("[UserController] /generate-dev-token error:", err.message); - res.status(500).json({ - success: false, - msg: "Failed to generate API token", - }); - } -}); - -// 
============================================================================= -// UI Settings Endpoints -// ============================================================================= - -/** - * Default UI settings for new users - */ -const DEFAULT_UI_SETTINGS = { - sidebarCollapsed: false, - performanceDashboardTimeRange: "today", -}; - -/** - * GET /user/settings - * - * Get user UI settings from preferences column. - * Returns defaults if no settings exist. - */ -router.get("/settings", async (req: Request, res: Response) => { - try { - const authHeader = req.headers.authorization; - if (!authHeader) { - return res.status(401).json({ - success: false, - msg: "No token provided", - }); - } - - const userDbService = req.app.locals.userDbService; - const user = await userDbService.findByToken(extractToken(authHeader)); - - if (!user) { - return res.status(401).json({ - success: false, - msg: "Invalid token", - }); - } - - // Extract UI settings from preferences, merge with defaults - const preferences = user.preferences || {}; - const uiSettings = { - sidebarCollapsed: - preferences.sidebarCollapsed ?? DEFAULT_UI_SETTINGS.sidebarCollapsed, - performanceDashboardTimeRange: - preferences.performanceDashboardTimeRange ?? - DEFAULT_UI_SETTINGS.performanceDashboardTimeRange, - }; - - res.json({ - success: true, - data: uiSettings, - }); - } catch (err: any) { - console.error("[UserController] GET /settings error:", err.message); - res.status(500).json({ - success: false, - msg: "Failed to get settings", - }); - } -}); - -/** - * PUT /user/settings - * - * Update user UI settings in preferences column. - * Supports partial updates - merges with existing preferences. 
- */ -router.put("/settings", async (req: Request, res: Response) => { - try { - const authHeader = req.headers.authorization; - if (!authHeader) { - return res.status(401).json({ - success: false, - msg: "No token provided", - }); - } - - const userDbService = req.app.locals.userDbService; - const user = await userDbService.findByToken(extractToken(authHeader)); - - if (!user) { - return res.status(401).json({ - success: false, - msg: "Invalid token", - }); - } - - const { sidebarCollapsed, performanceDashboardTimeRange } = req.body; - - // Build update object with only provided fields - const updates: Record = {}; - if (typeof sidebarCollapsed === "boolean") { - updates.sidebarCollapsed = sidebarCollapsed; - } - if (performanceDashboardTimeRange !== undefined) { - updates.performanceDashboardTimeRange = performanceDashboardTimeRange; - } - - // Merge with existing preferences - const currentPreferences = user.preferences || {}; - const newPreferences = { ...currentPreferences, ...updates }; - - // Update in database - use pgPool for Postgres, mysqlPool for MySQL - const pgPool = req.app.locals.pgPool; - const mysqlPool = req.app.locals.mysqlPool; - - if (pgPool) { - // PostgreSQL - use JSONB - await pgPool.query( - "UPDATE users SET preferences = $1, updated_at = NOW() WHERE id = $2", - [JSON.stringify(newPreferences), user.id] - ); - } else if (mysqlPool) { - // MySQL - use JSON column - await mysqlPool.query( - "UPDATE user SET preferences = ?, updated_at = NOW() WHERE id = ?", - [JSON.stringify(newPreferences), user.id] - ); - } else { - console.warn( - "[UserController] PUT /settings: No database pool available, settings not persisted" - ); - } - - // Return updated settings - const uiSettings = { - sidebarCollapsed: - newPreferences.sidebarCollapsed ?? DEFAULT_UI_SETTINGS.sidebarCollapsed, - performanceDashboardTimeRange: - newPreferences.performanceDashboardTimeRange ?? 
- DEFAULT_UI_SETTINGS.performanceDashboardTimeRange, - }; - - res.json({ - success: true, - data: uiSettings, - }); - } catch (err: any) { - console.error("[UserController] PUT /settings error:", err.message); - res.status(500).json({ - success: false, - msg: "Failed to update settings", - }); - } -}); - -export default router; diff --git a/hive/src/index.ts b/hive/src/index.ts deleted file mode 100644 index 41b9e0e5..00000000 --- a/hive/src/index.ts +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Aden Hive - DevTool Backend Entry Point - * - * LLM observability and control plane service. - */ - -import "dotenv/config"; - -import http from "http"; -import { MongoClient } from "mongodb"; -import app from "./app"; -import config from "./config"; -import { initializeSockets, setUserDbService } from "./sockets/control.socket"; - -const PORT = process.env.PORT || 4000; - -// Declare globals for MongoDB (used by services) -// eslint-disable-next-line no-var -declare global { - // eslint-disable-next-line no-var - var _ACHO_MG_DB: MongoClient; - // eslint-disable-next-line no-var - var _ACHO_MDB_CONFIG: { ERP_DBNAME: string; DBNAME: string }; - // eslint-disable-next-line no-var - var _ACHO_MDB_COLLECTIONS: { - ADEN_CONTROL_POLICIES: string; - ADEN_CONTROL_CONTENT: string; - LLM_PRICING: string; - }; -} - -/** - * Initialize MongoDB connection - */ -async function initMongoDB(): Promise { - if (!config.mongodb.url) { - console.warn( - "[MongoDB] No MONGODB_URL configured, skipping MongoDB initialization" - ); - return; - } - - try { - const client = new MongoClient(config.mongodb.url); - await client.connect(); - - // Set global MongoDB client and config - global._ACHO_MG_DB = client; - global._ACHO_MDB_CONFIG = { - ERP_DBNAME: config.mongodb.erpDbName, - DBNAME: config.mongodb.dbName, - }; - global._ACHO_MDB_COLLECTIONS = { - ADEN_CONTROL_POLICIES: "aden_control_policies", - ADEN_CONTROL_CONTENT: "aden_control_content", - LLM_PRICING: "llm_pricing", - }; - - 
console.log("[MongoDB] Connected successfully"); - } catch (error) { - console.error("[MongoDB] Connection error:", error); - throw error; - } -} - -// Create HTTP server -const server = http.createServer(app); - -/** - * Start the server - */ -async function start(): Promise { - // Initialize MongoDB - await initMongoDB(); - - // Pass userDbService to socket layer for JWT verification - if (app.locals.userDbService) { - setUserDbService(app.locals.userDbService, config.jwt.secret); - } - - // Initialize WebSockets - const { controlEmitter } = await initializeSockets(server); - - // Make control emitter available for policy updates - app.locals.controlEmitter = controlEmitter; - console.log("[Aden Hive] WebSocket initialized"); - - // Start server - server.listen(PORT, () => { - console.log(`[Aden Hive] Server running on port ${PORT}`); - console.log( - `[Aden Hive] Environment: ${process.env.NODE_ENV || "development"}` - ); - }); -} - -// Start the application -start().catch((error) => { - console.error("[Aden Hive] Failed to start:", error); - process.exit(1); -}); - -// Graceful shutdown -process.on("SIGTERM", () => { - console.log("[Aden Hive] SIGTERM received, shutting down gracefully"); - server.close(() => { - console.log("[Aden Hive] Server closed"); - process.exit(0); - }); -}); - -process.on("SIGINT", () => { - console.log("[Aden Hive] SIGINT received, shutting down gracefully"); - server.close(() => { - console.log("[Aden Hive] Server closed"); - process.exit(0); - }); -}); - -export default server; diff --git a/hive/src/mcp/index.ts b/hive/src/mcp/index.ts deleted file mode 100644 index 97674d46..00000000 --- a/hive/src/mcp/index.ts +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Aden Hive MCP Server - * - * Model Context Protocol server for LLM governance. 
- * Exposes 19 tools: - * - * Budget Tools (6): - * - hive_budget_get, hive_budget_reset, hive_budget_validate - * - hive_budget_rule_create, hive_budget_rule_update, hive_budget_rule_delete - * - * Agent Status Tools (3): - * - hive_agents_list, hive_agent_health_check, hive_agents_summary - * - * Analytics Tools (5): - * - hive_analytics_wide, hive_analytics_narrow, hive_insights - * - hive_metrics, hive_logs - * - * Policy Tools (5): - * - hive_policies_list, hive_policy_get, hive_policy_create - * - hive_policy_update, hive_policy_clear - * - * Usage: - * import { createMcpRouter } from './mcp'; - * app.use('/mcp', createMcpRouter(getControlEmitter)); - */ - -// Server creation -export { createHiveMcpServer, TOOL_CATALOG } from "./server"; -export type { HiveMcpServerOptions } from "./server"; - -// HTTP transport -export { - createMcpRouter, - getActiveMcpSessionCount, - getTeamMcpSessions, -} from "./transport/http"; - -// API client for direct usage -export { createApiClient } from "./utils/api-client"; -export type { ApiClient, ApiContext } from "./utils/api-client"; - -// Response helpers -export { - createSuccessResponse, - createErrorResponse, - handleToolError, -} from "./utils/response-helpers"; - -// Schema helpers -export { - idSchema, - dateSchema, - dateTimeSchema, - amountSchema, - budgetTypeSchema, - limitActionSchema, - analyticsWindowSchema, - validationContextSchema, - budgetAlertSchema, - budgetNotificationsSchema, - paginationSchema, -} from "./utils/schema-helpers"; diff --git a/hive/src/mcp/server.ts b/hive/src/mcp/server.ts deleted file mode 100644 index cc2e6d6e..00000000 --- a/hive/src/mcp/server.ts +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Aden Hive MCP Server - * - * MCP server with tools for: - * - Cost control (budget management) - * - Agent status (fleet monitoring) - * - Analytics (insights, metrics, logs) - * - Policy management - */ -import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; -import { createApiClient, 
type ApiContext } from "./utils/api-client"; -import { registerBudgetTools } from "./tools/budget"; -import { registerAgentTools, type ControlEmitter } from "./tools/agents"; -import { registerAnalyticsTools } from "./tools/analytics"; -import { registerPolicyTools } from "./tools/policies"; - -export interface HiveMcpServerOptions { - context: ApiContext; - getControlEmitter?: () => ControlEmitter | undefined; -} - -/** - * Create and configure the Aden Hive MCP server - */ -export function createHiveMcpServer(options: HiveMcpServerOptions): McpServer { - const { context, getControlEmitter } = options; - - // Create MCP server - const server = new McpServer({ - name: "aden-hive", - version: "1.0.0", - }); - - // Create API client bound to team context - const api = createApiClient(context); - - // Register all tool categories - registerBudgetTools(server, api); - registerAgentTools(server, api, getControlEmitter || (() => undefined)); - registerAnalyticsTools(server, api); - registerPolicyTools(server, api); - - console.log( - `[MCP] Aden Hive server created with ${19} tools for team ${context.teamId}` - ); - - return server; -} - -/** - * Tool categories and counts for reference - */ -export const TOOL_CATALOG = { - budget: { - count: 6, - tools: [ - "hive_budget_get", - "hive_budget_reset", - "hive_budget_validate", - "hive_budget_rule_create", - "hive_budget_rule_update", - "hive_budget_rule_delete", - ], - }, - agents: { - count: 3, - tools: ["hive_agents_list", "hive_agent_health_check", "hive_agents_summary"], - }, - analytics: { - count: 5, - tools: [ - "hive_analytics_wide", - "hive_analytics_narrow", - "hive_insights", - "hive_metrics", - "hive_logs", - ], - }, - policies: { - count: 5, - tools: [ - "hive_policies_list", - "hive_policy_get", - "hive_policy_create", - "hive_policy_update", - "hive_policy_clear", - ], - }, - total: 19, -}; diff --git a/hive/src/mcp/tools/agents.ts b/hive/src/mcp/tools/agents.ts deleted file mode 100644 index 
a6bbe36f..00000000 --- a/hive/src/mcp/tools/agents.ts +++ /dev/null @@ -1,197 +0,0 @@ -/** - * Agent Status MCP Tools - * - * Tools for monitoring connected SDK agent instances: - * - hive_agents_list: List all connected SDK instances - * - hive_agent_health_check: Check health of specific agent - * - hive_agents_summary: Get fleet health overview - */ -import { z } from "zod"; -import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; -import type { ApiClient } from "../utils/api-client"; -import { - createSuccessResponse, - handleToolError, -} from "../utils/response-helpers"; - -export interface ControlEmitter { - getConnectedCount: (teamId: string) => number; - getConnectedInstances: (teamId: string) => Array<{ - instance_id: string; - agent?: string; - policy_id?: string | null; - connected_at: string; - last_heartbeat: string; - }>; -} - -export function registerAgentTools( - server: McpServer, - api: ApiClient, - getControlEmitter: () => ControlEmitter | undefined -) { - // ==================== hive_agents_list ==================== - server.tool( - "hive_agents_list", - "Get list of all connected SDK agent instances with health status and connection details", - { - includeMetrics: z - .boolean() - .default(false) - .describe("Include per-agent metrics (connection duration, heartbeat lag)"), - }, - async (params) => { - try { - const controlEmitter = getControlEmitter(); - const result = api.agents.getList(controlEmitter); - - if (params.includeMetrics && result.instances) { - const now = Date.now(); - const enrichedInstances = (result.instances as Array<{ - instance_id: string; - connected_at: string; - last_heartbeat: string; - }>).map((instance) => { - const connectedAt = new Date(instance.connected_at).getTime(); - const lastHeartbeat = new Date(instance.last_heartbeat).getTime(); - - return { - ...instance, - metrics: { - connection_duration_ms: now - connectedAt, - connection_duration_seconds: Math.round((now - connectedAt) / 1000), - 
heartbeat_lag_ms: now - lastHeartbeat, - heartbeat_lag_seconds: Math.round((now - lastHeartbeat) / 1000), - is_healthy: now - lastHeartbeat < 60000, - }, - }; - }); - - return createSuccessResponse({ - ...result, - instances: enrichedInstances, - }); - } - - return createSuccessResponse(result); - } catch (error) { - return handleToolError(error, "hive_agents_list"); - } - } - ); - - // ==================== hive_agent_health_check ==================== - server.tool( - "hive_agent_health_check", - "Check health of a specific agent by instance ID or agent name. Returns health status, last heartbeat, and connection details.", - { - instanceId: z - .string() - .optional() - .describe("SDK instance ID to check"), - agentName: z - .string() - .optional() - .describe("Agent name to filter (returns all instances with this name)"), - }, - async (params) => { - try { - if (!params.instanceId && !params.agentName) { - return handleToolError( - new Error("Either instanceId or agentName is required"), - "hive_agent_health_check" - ); - } - - const controlEmitter = getControlEmitter(); - const result = api.agents.getList(controlEmitter); - - if (!result.instances || result.instances.length === 0) { - return createSuccessResponse({ - found: false, - message: "No agents connected", - query: params, - }); - } - - const now = Date.now(); - const STALE_THRESHOLD_MS = 60000; // 60 seconds - - // Filter instances based on query - const instances = (result.instances as Array<{ - instance_id: string; - agent?: string; - connected_at: string; - last_heartbeat: string; - }>).filter((instance) => { - if (params.instanceId && instance.instance_id === params.instanceId) { - return true; - } - if (params.agentName && instance.agent === params.agentName) { - return true; - } - return false; - }); - - if (instances.length === 0) { - return createSuccessResponse({ - found: false, - message: params.instanceId - ? 
`Instance ${params.instanceId} not found` - : `No instances found for agent ${params.agentName}`, - query: params, - total_connected: result.count, - }); - } - - // Enrich with health status - const healthResults = instances.map((instance) => { - const lastHeartbeat = new Date(instance.last_heartbeat).getTime(); - const heartbeatLag = now - lastHeartbeat; - const isHealthy = heartbeatLag < STALE_THRESHOLD_MS; - - return { - instance_id: instance.instance_id, - agent_name: instance.agent || "unknown", - status: isHealthy ? "healthy" : "unhealthy", - last_heartbeat: instance.last_heartbeat, - last_heartbeat_ago_seconds: Math.round(heartbeatLag / 1000), - connected_at: instance.connected_at, - connection_duration_seconds: Math.round( - (now - new Date(instance.connected_at).getTime()) / 1000 - ), - health_threshold_seconds: STALE_THRESHOLD_MS / 1000, - }; - }); - - return createSuccessResponse({ - found: true, - count: healthResults.length, - instances: healthResults, - summary: { - healthy: healthResults.filter((h) => h.status === "healthy").length, - unhealthy: healthResults.filter((h) => h.status === "unhealthy").length, - }, - }); - } catch (error) { - return handleToolError(error, "hive_agent_health_check"); - } - } - ); - - // ==================== hive_agents_summary ==================== - server.tool( - "hive_agents_summary", - "Get summary of agent fleet health: total active, healthy count, unhealthy count, and breakdown by agent name", - {}, - async () => { - try { - const controlEmitter = getControlEmitter(); - const result = api.agents.getSummary(controlEmitter); - return createSuccessResponse(result); - } catch (error) { - return handleToolError(error, "hive_agents_summary"); - } - } - ); -} diff --git a/hive/src/mcp/tools/analytics.ts b/hive/src/mcp/tools/analytics.ts deleted file mode 100644 index 7dc38dab..00000000 --- a/hive/src/mcp/tools/analytics.ts +++ /dev/null @@ -1,169 +0,0 @@ -/** - * Analytics MCP Tools - * - * Tools for querying analytics and 
insights: - * - hive_analytics_wide: Dashboard analytics with daily resolution - * - hive_analytics_narrow: Hourly analytics for today - * - hive_insights: Actionable insights and anomalies - * - hive_metrics: Summary metrics with period-over-period change - * - hive_logs: Raw or aggregated event logs - */ -import { z } from "zod"; -import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; -import type { ApiClient } from "../utils/api-client"; -import { - createSuccessResponse, - handleToolError, -} from "../utils/response-helpers"; -import { analyticsWindowSchema, dateTimeSchema } from "../utils/schema-helpers"; - -export function registerAnalyticsTools(server: McpServer, api: ApiClient) { - // ==================== hive_analytics_wide ==================== - server.tool( - "hive_analytics_wide", - "Get dashboard analytics with daily resolution. Use for trend analysis over days/weeks/months. Returns volume, cost, tokens, and performance data points by day.", - { - window: analyticsWindowSchema.describe( - "Time window: all_time, this_month, this_week, last_2_weeks, or today" - ), - }, - async (params) => { - try { - const result = await api.analytics.getWide(params.window); - return createSuccessResponse(result); - } catch (error) { - return handleToolError(error, "hive_analytics_wide"); - } - } - ); - - // ==================== hive_analytics_narrow ==================== - server.tool( - "hive_analytics_narrow", - "Get hourly analytics for today. Use for intraday monitoring, detecting recent spikes, and real-time cost tracking.", - {}, - async () => { - try { - const result = await api.analytics.getNarrow(); - return createSuccessResponse(result); - } catch (error) { - return handleToolError(error, "hive_analytics_narrow"); - } - } - ); - - // ==================== hive_insights ==================== - server.tool( - "hive_insights", - "Get actionable insights: cost spikes, anomalies, trends, cache efficiency, and recommendations. 
Critical for autonomous monitoring and cost control.", - { - days: z - .number() - .min(1) - .max(90) - .default(30) - .describe("Analysis period in days (1-90)"), - }, - async (params) => { - try { - const result = await api.analytics.getInsights(params.days); - return createSuccessResponse(result); - } catch (error) { - return handleToolError(error, "hive_insights"); - } - } - ); - - // ==================== hive_metrics ==================== - server.tool( - "hive_metrics", - "Get summary metrics with period-over-period percentage change. Good for quick health checks and comparing current vs previous period.", - { - days: z - .number() - .min(1) - .max(365) - .default(30) - .describe("Period in days for current window and comparison"), - }, - async (params) => { - try { - const result = await api.analytics.getMetrics(params.days); - return createSuccessResponse(result); - } catch (error) { - return handleToolError(error, "hive_metrics"); - } - } - ); - - // ==================== hive_logs ==================== - server.tool( - "hive_logs", - "Query raw or aggregated event logs. Use for investigation, drill-down, and detailed analysis. Supports grouping by model, agent, or provider.", - { - start: dateTimeSchema.describe("Start time (ISO 8601 format)"), - end: dateTimeSchema.describe("End time (ISO 8601 format)"), - groupBy: z - .enum(["model", "agent", "provider", "model,agent", "model,provider"]) - .optional() - .describe( - "Aggregate by field(s). If not specified, returns raw log rows." 
- ), - limit: z - .number() - .min(1) - .max(5000) - .default(500) - .describe("Maximum rows/aggregations to return"), - offset: z - .number() - .min(0) - .default(0) - .describe("Number of rows to skip (for pagination)"), - }, - async (params) => { - try { - // Validate date range - const startDate = new Date(params.start); - const endDate = new Date(params.end); - - if (isNaN(startDate.getTime()) || isNaN(endDate.getTime())) { - return handleToolError( - new Error("Invalid date format. Use ISO 8601 format."), - "hive_logs" - ); - } - - if (endDate < startDate) { - return handleToolError( - new Error("End date must be after start date"), - "hive_logs" - ); - } - - // Warn if range is too large - const rangeDays = - (endDate.getTime() - startDate.getTime()) / (1000 * 60 * 60 * 24); - if (rangeDays > 90 && !params.groupBy) { - console.warn( - `[MCP] hive_logs: Large date range (${rangeDays.toFixed( - 0 - )} days) without aggregation may be slow` - ); - } - - const result = await api.analytics.getLogs({ - start: params.start, - end: params.end, - groupBy: params.groupBy, - limit: params.limit, - offset: params.offset, - }); - - return createSuccessResponse(result); - } catch (error) { - return handleToolError(error, "hive_logs"); - } - } - ); -} diff --git a/hive/src/mcp/tools/budget.ts b/hive/src/mcp/tools/budget.ts deleted file mode 100644 index 06c4f3d3..00000000 --- a/hive/src/mcp/tools/budget.ts +++ /dev/null @@ -1,335 +0,0 @@ -/** - * Budget MCP Tools - * - * Tools for cost control and budget management: - * - hive_budget_get: Get budget status - * - hive_budget_reset: Reset budget spend - * - hive_budget_validate: Validate request against budgets - * - hive_budget_rule_create: Create budget rule - * - hive_budget_rule_update: Update budget rule - * - hive_budget_rule_delete: Delete budget rule - */ -import { z } from "zod"; -import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; -import type { ApiClient } from "../utils/api-client"; -import 
{ - createSuccessResponse, - handleToolError, -} from "../utils/response-helpers"; -import { - idSchema, - budgetTypeSchema, - limitActionSchema, - validationContextSchema, - budgetAlertSchema, - budgetNotificationsSchema, -} from "../utils/schema-helpers"; - -export function registerBudgetTools(server: McpServer, api: ApiClient) { - // ==================== hive_budget_get ==================== - server.tool( - "hive_budget_get", - "Get budget status including spend, limit, burn rate, and projected spend for a specific budget ID", - { - budgetId: idSchema.describe("Budget ID to query"), - }, - async (params) => { - try { - const result = await api.budget.getStatus(params.budgetId); - return createSuccessResponse(result); - } catch (error) { - return handleToolError(error, "hive_budget_get"); - } - } - ); - - // ==================== hive_budget_reset ==================== - server.tool( - "hive_budget_reset", - "Reset a budget spend counter to zero. Use when starting new billing cycle or after resolving overage.", - { - budgetId: idSchema.describe("Budget ID to reset"), - reason: z - .string() - .optional() - .describe("Reason for reset (for audit trail)"), - }, - async (params) => { - try { - const result = await api.budget.reset(params.budgetId); - return createSuccessResponse({ - ...result, - reason: params.reason, - reset_at: new Date().toISOString(), - }); - } catch (error) { - return handleToolError(error, "hive_budget_reset"); - } - } - ); - - // ==================== hive_budget_validate ==================== - server.tool( - "hive_budget_validate", - "Validate if a request should be allowed based on budget constraints. 
Returns allow/throttle/degrade/block decision with authoritative spend data.", - { - budgetId: z - .string() - .optional() - .describe("Specific budget ID to validate against"), - estimatedCost: z - .number() - .min(0) - .describe("Estimated cost of the request in USD"), - context: validationContextSchema - .optional() - .describe( - "Context for multi-budget matching (agent, tenant_id, customer_id, feature, tags)" - ), - localSpend: z - .number() - .optional() - .describe("Local spend tracked by SDK (for drift detection)"), - }, - async (params) => { - try { - const result = await api.budget.validate({ - budgetId: params.budgetId, - estimatedCost: params.estimatedCost, - context: params.context, - localSpend: params.localSpend, - }); - return createSuccessResponse(result); - } catch (error) { - return handleToolError(error, "hive_budget_validate"); - } - } - ); - - // ==================== hive_budget_rule_create ==================== - server.tool( - "hive_budget_rule_create", - "Create a new budget rule within a policy. 
Budget rules define spending limits and actions when exceeded.", - { - policyId: z - .string() - .default("default") - .describe('Policy ID (use "default" for default policy)'), - id: idSchema.describe("Unique budget rule ID"), - name: z.string().min(1).describe("Human-readable budget name"), - type: budgetTypeSchema.describe("Budget scope type"), - limit: z.number().min(0).describe("Budget limit in USD"), - limitAction: limitActionSchema - .default("kill") - .describe("Action when limit exceeded"), - degradeToModel: z - .string() - .optional() - .describe('Target model for degradation (required when limitAction is "degrade")'), - degradeToProvider: z - .string() - .optional() - .describe('Target provider for degradation (required when limitAction is "degrade")'), - tags: z - .array(z.string()) - .optional() - .describe('Tags for tag-type budgets (required when type is "tag")'), - alerts: z - .array(budgetAlertSchema) - .default([ - { threshold: 80, enabled: true }, - { threshold: 95, enabled: true }, - ]) - .describe("Alert thresholds as percentage of limit"), - notifications: budgetNotificationsSchema - .default({ - inApp: true, - email: false, - emailRecipients: [], - webhook: false, - }) - .describe("Notification settings"), - }, - async (params) => { - try { - // Validate degradation requirements - if (params.limitAction === "degrade") { - if (!params.degradeToModel || !params.degradeToProvider) { - return handleToolError( - new Error( - "degradeToModel and degradeToProvider are required when limitAction is 'degrade'" - ), - "hive_budget_rule_create" - ); - } - } - - // Validate tag requirements - if (params.type === "tag") { - if (!params.tags || params.tags.length === 0) { - return handleToolError( - new Error("tags array is required when type is 'tag'"), - "hive_budget_rule_create" - ); - } - } - - const result = await api.policy.addBudgetRule(params.policyId, { - id: params.id, - name: params.name, - type: params.type, - limit: params.limit, - spent: 0, - 
limitAction: params.limitAction, - degradeToModel: params.degradeToModel, - degradeToProvider: params.degradeToProvider, - tags: params.tags, - alerts: params.alerts, - notifications: params.notifications, - }); - - return createSuccessResponse({ - success: true, - budget_id: params.id, - policy: result, - }); - } catch (error) { - return handleToolError(error, "hive_budget_rule_create"); - } - } - ); - - // ==================== hive_budget_rule_update ==================== - server.tool( - "hive_budget_rule_update", - "Update an existing budget rule. Only provided fields will be updated.", - { - policyId: z - .string() - .default("default") - .describe('Policy ID (use "default" for default policy)'), - budgetId: idSchema.describe("Budget rule ID to update"), - name: z.string().optional().describe("New budget name"), - limit: z.number().min(0).optional().describe("New budget limit in USD"), - limitAction: limitActionSchema.optional().describe("New action when limit exceeded"), - degradeToModel: z - .string() - .optional() - .describe("New target model for degradation"), - degradeToProvider: z - .string() - .optional() - .describe("New target provider for degradation"), - alerts: z - .array(budgetAlertSchema) - .optional() - .describe("New alert thresholds"), - }, - async (params) => { - try { - // Get current policy to find and update the budget - const policy = await api.policy.get(params.policyId); - - if (!policy) { - return handleToolError( - new Error("Policy not found"), - "hive_budget_rule_update" - ); - } - - const budgets = policy.budgets || []; - const budgetIndex = budgets.findIndex( - (b: { id: string }) => b.id === params.budgetId - ); - - if (budgetIndex === -1) { - return handleToolError( - new Error(`Budget ${params.budgetId} not found in policy`), - "hive_budget_rule_update" - ); - } - - // Update the budget with new values - const updatedBudget = { - ...budgets[budgetIndex], - ...(params.name && { name: params.name }), - ...(params.limit !== 
undefined && { limit: params.limit }), - ...(params.limitAction && { limitAction: params.limitAction }), - ...(params.degradeToModel && { degradeToModel: params.degradeToModel }), - ...(params.degradeToProvider && { degradeToProvider: params.degradeToProvider }), - ...(params.alerts && { alerts: params.alerts }), - }; - - budgets[budgetIndex] = updatedBudget; - - const result = await api.policy.update(params.policyId, { budgets }); - - return createSuccessResponse({ - success: true, - budget_id: params.budgetId, - updated_fields: Object.keys(params).filter( - (k) => - k !== "policyId" && - k !== "budgetId" && - params[k as keyof typeof params] !== undefined - ), - policy: result, - }); - } catch (error) { - return handleToolError(error, "hive_budget_rule_update"); - } - } - ); - - // ==================== hive_budget_rule_delete ==================== - server.tool( - "hive_budget_rule_delete", - "Delete a budget rule from a policy", - { - policyId: z - .string() - .default("default") - .describe('Policy ID (use "default" for default policy)'), - budgetId: idSchema.describe("Budget rule ID to delete"), - }, - async (params) => { - try { - // Get current policy to remove the budget - const policy = await api.policy.get(params.policyId); - - if (!policy) { - return handleToolError( - new Error("Policy not found"), - "hive_budget_rule_delete" - ); - } - - const budgets = policy.budgets || []; - const budgetIndex = budgets.findIndex( - (b: { id: string }) => b.id === params.budgetId - ); - - if (budgetIndex === -1) { - return handleToolError( - new Error(`Budget ${params.budgetId} not found in policy`), - "hive_budget_rule_delete" - ); - } - - // Remove the budget - budgets.splice(budgetIndex, 1); - - const result = await api.policy.update(params.policyId, { budgets }); - - return createSuccessResponse({ - success: true, - deleted_budget_id: params.budgetId, - remaining_budgets: budgets.length, - policy: result, - }); - } catch (error) { - return handleToolError(error, 
"hive_budget_rule_delete"); - } - } - ); -} diff --git a/hive/src/mcp/tools/policies.ts b/hive/src/mcp/tools/policies.ts deleted file mode 100644 index 06001642..00000000 --- a/hive/src/mcp/tools/policies.ts +++ /dev/null @@ -1,224 +0,0 @@ -/** - * Policy Management MCP Tools - * - * Tools for managing control policies: - * - hive_policies_list: List all policies - * - hive_policy_get: Get specific policy with rules - * - hive_policy_create: Create new policy - * - hive_policy_update: Update policy - * - hive_policy_clear: Clear all rules from policy - */ -import { z } from "zod"; -import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; -import type { ApiClient } from "../utils/api-client"; -import { - createSuccessResponse, - handleToolError, -} from "../utils/response-helpers"; - -export function registerPolicyTools(server: McpServer, api: ApiClient) { - // ==================== hive_policies_list ==================== - server.tool( - "hive_policies_list", - "List all policies for the team. 
Returns policy IDs, names, and rule counts.", - { - limit: z - .number() - .min(1) - .max(100) - .default(100) - .describe("Maximum policies to return"), - offset: z - .number() - .min(0) - .default(0) - .describe("Number of policies to skip"), - }, - async (params) => { - try { - const policies = await api.policy.list({ - limit: params.limit, - offset: params.offset, - }); - - // Summarize policies - const summary = (policies as unknown as Array<{ - _id?: string; - id?: string; - name?: string; - budgets?: unknown[]; - throttles?: unknown[]; - blocks?: unknown[]; - degradations?: unknown[]; - }>).map((p) => ({ - id: p._id || p.id || "unknown", - name: p.name || "Unnamed Policy", - rule_counts: { - budgets: p.budgets?.length || 0, - throttles: p.throttles?.length || 0, - blocks: p.blocks?.length || 0, - degradations: p.degradations?.length || 0, - }, - })); - - return createSuccessResponse({ - count: policies.length, - policies: summary, - }); - } catch (error) { - return handleToolError(error, "hive_policies_list"); - } - } - ); - - // ==================== hive_policy_get ==================== - server.tool( - "hive_policy_get", - 'Get a specific policy with all rules (budgets, throttles, blocks, degradations). Use "default" to get the team\'s default policy.', - { - policyId: z - .string() - .default("default") - .describe('Policy ID or "default" for team default'), - }, - async (params) => { - try { - const policy = await api.policy.get(params.policyId); - - if (!policy) { - return handleToolError( - new Error(`Policy ${params.policyId} not found`), - "hive_policy_get" - ); - } - - return createSuccessResponse(policy); - } catch (error) { - return handleToolError(error, "hive_policy_get"); - } - } - ); - - // ==================== hive_policy_create ==================== - server.tool( - "hive_policy_create", - "Create a new policy for the team. 
New policies start empty (no rules).", - { - name: z.string().min(1).describe("Policy name"), - }, - async (params) => { - try { - const policy = await api.policy.create(params.name); - - return createSuccessResponse({ - success: true, - message: "Policy created", - policy, - }); - } catch (error) { - return handleToolError(error, "hive_policy_create"); - } - } - ); - - // ==================== hive_policy_update ==================== - server.tool( - "hive_policy_update", - "Update a policy's name or replace all rules. For individual rule changes, use budget/throttle/block rule tools.", - { - policyId: z - .string() - .default("default") - .describe('Policy ID or "default" for team default'), - name: z.string().optional().describe("New policy name"), - budgets: z - .array(z.any()) - .optional() - .describe("Complete budgets array (replaces all budgets)"), - throttles: z - .array(z.any()) - .optional() - .describe("Complete throttles array (replaces all throttles)"), - blocks: z - .array(z.any()) - .optional() - .describe("Complete blocks array (replaces all blocks)"), - degradations: z - .array(z.any()) - .optional() - .describe("Complete degradations array (replaces all degradations)"), - }, - async (params) => { - try { - // Only pass defined fields - const updates: { - name?: string; - budgets?: unknown[]; - throttles?: unknown[]; - blocks?: unknown[]; - degradations?: unknown[]; - } = {}; - - if (params.name !== undefined) updates.name = params.name; - if (params.budgets !== undefined) updates.budgets = params.budgets; - if (params.throttles !== undefined) updates.throttles = params.throttles; - if (params.blocks !== undefined) updates.blocks = params.blocks; - if (params.degradations !== undefined) - updates.degradations = params.degradations; - - if (Object.keys(updates).length === 0) { - return handleToolError( - new Error("No updates provided"), - "hive_policy_update" - ); - } - - const policy = await api.policy.update(params.policyId, updates); - - return 
createSuccessResponse({ - success: true, - updated_fields: Object.keys(updates), - policy, - }); - } catch (error) { - return handleToolError(error, "hive_policy_update"); - } - } - ); - - // ==================== hive_policy_clear ==================== - server.tool( - "hive_policy_clear", - "Clear all rules from a policy (budgets, throttles, blocks, degradations). The policy itself is preserved.", - { - policyId: z - .string() - .default("default") - .describe('Policy ID or "default" for team default'), - confirm: z - .boolean() - .describe("Set to true to confirm clearing all rules"), - }, - async (params) => { - try { - if (!params.confirm) { - return createSuccessResponse({ - warning: - "This will delete ALL rules from the policy. Set confirm=true to proceed.", - policy_id: params.policyId, - }); - } - - const policy = await api.policy.clear(params.policyId); - - return createSuccessResponse({ - success: true, - message: "All rules cleared from policy", - policy, - }); - } catch (error) { - return handleToolError(error, "hive_policy_clear"); - } - } - ); -} diff --git a/hive/src/mcp/transport/http.ts b/hive/src/mcp/transport/http.ts deleted file mode 100644 index fd30f6f5..00000000 --- a/hive/src/mcp/transport/http.ts +++ /dev/null @@ -1,238 +0,0 @@ -/** - * HTTP/SSE Transport for Aden Hive MCP Server - * - * Provides HTTP-based transport for autonomous LLM agents: - * - GET /mcp - SSE stream for server-to-client messages - * - POST /mcp/message - Client-to-server messages - */ -import express, { Request, Response, Router } from "express"; -import passport from "passport"; -import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js"; -import { createHiveMcpServer, type HiveMcpServerOptions } from "../server"; -import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; - -interface AuthenticatedRequest extends Request { - user?: { - id: string; - current_team_id: string; - }; -} - -interface McpSession { - server: McpServer; - 
transport: SSEServerTransport; - teamId: string; - userId: string; - createdAt: Date; -} - -// Active MCP sessions by session ID -const sessions = new Map(); - -/** - * Create MCP HTTP router - */ -export function createMcpRouter( - getControlEmitter?: HiveMcpServerOptions["getControlEmitter"] -): Router { - const router = express.Router(); - - // All MCP routes require authentication - const authMiddleware = passport.authenticate("jwt", { session: false }); - - /** - * GET /mcp - * SSE endpoint - establishes persistent connection for server-to-client messages - */ - router.get( - "/", - authMiddleware, - async (req: AuthenticatedRequest, res: Response) => { - const teamId = req.user?.current_team_id; - const userId = req.user?.id; - - if (!teamId) { - res.status(401).json({ error: "Team ID required" }); - return; - } - - // Set custom headers (SSE headers are set by the transport) - res.setHeader("X-Accel-Buffering", "no"); - - // Create MCP server for this session - const server = createHiveMcpServer({ - context: { - teamId, - userId, - }, - getControlEmitter, - }); - - // Create SSE transport - it generates its own sessionId internally - const transport = new SSEServerTransport("/mcp/message", res); - - // Get the SDK's session ID (used in query params for POST requests) - const sdkSessionId = transport.sessionId; - - console.log(`[MCP] New SSE connection: session=${sdkSessionId}, team=${teamId}`); - - // Store session by the SDK's session ID - sessions.set(sdkSessionId, { - server, - transport, - teamId, - userId: userId || "unknown", - createdAt: new Date(), - }); - - // Connect server to transport - await server.connect(transport); - - // Handle client disconnect - req.on("close", () => { - console.log(`[MCP] SSE connection closed: session=${sdkSessionId}`); - sessions.delete(sdkSessionId); - server.close(); - }); - } - ); - - /** - * POST /mcp/message - * Receives messages from client - */ - // Note: Do NOT use express.json() here - handlePostMessage reads 
the raw body stream - router.post( - "/message", - authMiddleware, - async (req: AuthenticatedRequest, res: Response) => { - // SDK passes session ID as query parameter: /mcp/message?sessionId=xxx - const sessionId = req.query.sessionId as string; - - if (!sessionId) { - res.status(400).json({ error: "sessionId query parameter required" }); - return; - } - - const session = sessions.get(sessionId); - - if (!session) { - res.status(404).json({ - error: "Session not found", - hint: "Establish SSE connection first via GET /mcp", - }); - return; - } - - // Verify team ID matches - if (session.teamId !== req.user?.current_team_id) { - res.status(403).json({ error: "Session team mismatch" }); - return; - } - - try { - // Handle the message through the transport - await session.transport.handlePostMessage(req, res); - } catch (error) { - console.error(`[MCP] Error handling message:`, error); - res.status(500).json({ - error: "Failed to process message", - details: error instanceof Error ? error.message : "Unknown error", - }); - } - } - ); - - /** - * GET /mcp/sessions - * List active MCP sessions (admin/debug endpoint) - */ - router.get( - "/sessions", - authMiddleware, - (req: AuthenticatedRequest, res: Response) => { - const teamId = req.user?.current_team_id; - - // Only show sessions for the requesting team - const teamSessions = Array.from(sessions.entries()) - .filter(([, session]) => session.teamId === teamId) - .map(([id, session]) => ({ - session_id: id, - team_id: session.teamId, - user_id: session.userId, - created_at: session.createdAt.toISOString(), - age_seconds: Math.round( - (Date.now() - session.createdAt.getTime()) / 1000 - ), - })); - - res.json({ - count: teamSessions.length, - sessions: teamSessions, - }); - } - ); - - /** - * DELETE /mcp/sessions/:sessionId - * Close a specific MCP session - */ - router.delete( - "/sessions/:sessionId", - authMiddleware, - (req: AuthenticatedRequest, res: Response) => { - const { sessionId } = req.params; - const 
teamId = req.user?.current_team_id; - - const session = sessions.get(sessionId); - - if (!session) { - res.status(404).json({ error: "Session not found" }); - return; - } - - // Verify team ID matches - if (session.teamId !== teamId) { - res.status(403).json({ error: "Cannot close session from another team" }); - return; - } - - // Close the session - session.server.close(); - sessions.delete(sessionId); - - res.json({ - success: true, - message: `Session ${sessionId} closed`, - }); - } - ); - - /** - * GET /mcp/health - * Health check endpoint - */ - router.get("/health", (_req: Request, res: Response) => { - res.json({ - status: "healthy", - active_sessions: sessions.size, - timestamp: new Date().toISOString(), - }); - }); - - return router; -} - -/** - * Get count of active MCP sessions - */ -export function getActiveMcpSessionCount(): number { - return sessions.size; -} - -/** - * Get active sessions for a specific team - */ -export function getTeamMcpSessions(teamId: string): McpSession[] { - return Array.from(sessions.values()).filter((s) => s.teamId === teamId); -} diff --git a/hive/src/mcp/utils/api-client.ts b/hive/src/mcp/utils/api-client.ts deleted file mode 100644 index 04953c58..00000000 --- a/hive/src/mcp/utils/api-client.ts +++ /dev/null @@ -1,610 +0,0 @@ -/** - * Internal API client for MCP tools - * - * This client makes direct calls to the control and tsdb services - * rather than HTTP calls, since we're running inside the same process. 
- */ -import controlService from "../../services/control/control_service"; -import * as tsdbService from "../../services/tsdb/tsdb_service"; -import { buildAnalytics } from "../../services/tsdb/analytics_service"; -import { getTeamPool, buildSchemaName } from "../../services/tsdb/team_context"; -import type { PoolClient } from "pg"; - -export interface ApiContext { - teamId: string; - userId?: string; -} - -export interface BudgetRule { - id: string; - name?: string; - type?: string; - tags?: string[]; - limit?: number; - spent?: number; - limitAction?: string; - degradeToModel?: string; - degradeToProvider?: string; - alerts?: Array<{ threshold: number; enabled: boolean }>; - notifications?: { - inApp: boolean; - email: boolean; - emailRecipients: string[]; - webhook: boolean; - }; -} - -export interface ValidationContext { - agent?: string; - tenant_id?: string; - customer_id?: string; - feature?: string; - tags?: string[]; -} - -/** - * Create an API client bound to a specific team context - */ -export function createApiClient(context: ApiContext) { - const userContext = { - user_id: context.userId || "mcp-agent", - team_id: context.teamId, - }; - - return { - // ==================== Budget Operations ==================== - budget: { - /** - * Get budget status by ID - */ - async getStatus(budgetId: string) { - return controlService.getBudgetStatus(budgetId); - }, - - /** - * Reset budget spend to zero - */ - async reset(budgetId: string) { - await controlService.resetBudget(budgetId); - return { success: true, id: budgetId }; - }, - - /** - * Validate a request against budgets - */ - async validate(params: { - budgetId?: string; - estimatedCost: number; - context?: ValidationContext; - localSpend?: number; - }) { - // Get the policy to validate against - const policy = await controlService.getPolicy( - context.teamId, - null, - userContext - ); - - if (!policy) { - return { - allowed: true, - action: "allow", - reason: "No policy found", - budgets_checked: [], 
- }; - } - - // Multi-budget validation using context - if (params.context && typeof params.context === "object") { - const matchingBudgets = controlService.findMatchingBudgetsForContext( - policy.budgets || [], - params.context - ); - - if (matchingBudgets.length === 0) { - return { - allowed: true, - action: "allow", - reason: "No budgets match the provided context", - authoritative_spend: 0, - budget_limit: 0, - usage_percent: 0, - projected_percent: 0, - budgets_checked: [], - }; - } - - return controlService.validateMultipleBudgets( - matchingBudgets, - params.estimatedCost, - params.localSpend - ); - } - - // Single budget validation - if (params.budgetId) { - const budget = policy.budgets?.find( - (b: { id: string }) => b.id === params.budgetId - ); - if (!budget) { - return { - allowed: true, - action: "allow", - reason: "Budget not found in policy", - budgets_checked: [], - }; - } - - return controlService.validateMultipleBudgets( - [budget], - params.estimatedCost, - params.localSpend - ); - } - - return { - allowed: true, - action: "allow", - reason: "No budget_id or context provided", - budgets_checked: [], - }; - }, - }, - - // ==================== Policy Operations ==================== - policy: { - /** - * Get all policies for the team - */ - async list(pagination?: { limit?: number; offset?: number }) { - return controlService.getPoliciesByTeam(context.teamId, { - limit: pagination?.limit || 100, - offset: pagination?.offset || 0, - }); - }, - - /** - * Get a specific policy - */ - async get(policyId: string | null) { - const resolvedId = - policyId === "default" || !policyId ? 
null : policyId; - return controlService.getPolicy(context.teamId, resolvedId, userContext); - }, - - /** - * Create a new policy - */ - async create(name: string) { - return controlService.updatePolicy( - context.teamId, - null, - { name }, - userContext - ); - }, - - /** - * Update a policy - */ - async update( - policyId: string | null, - updates: { - name?: string; - budgets?: unknown[]; - throttles?: unknown[]; - blocks?: unknown[]; - degradations?: unknown[]; - } - ) { - const resolvedId = - policyId === "default" || !policyId ? null : policyId; - return controlService.updatePolicy( - context.teamId, - resolvedId, - updates as Record, - userContext - ); - }, - - /** - * Clear all rules from a policy - */ - async clear(policyId: string | null) { - const resolvedId = - policyId === "default" || !policyId ? null : policyId; - return controlService.clearPolicy( - context.teamId, - resolvedId, - userContext - ); - }, - - /** - * Delete a policy - */ - async delete(policyId: string) { - return controlService.deletePolicy( - context.teamId, - policyId, - userContext - ); - }, - - /** - * Add a budget rule to a policy - */ - async addBudgetRule(policyId: string | null, rule: BudgetRule) { - const resolvedId = - policyId === "default" || !policyId ? 
null : policyId; - // eslint-disable-next-line @typescript-eslint/no-explicit-any - return controlService.addBudgetRule( - context.teamId, - resolvedId, - rule as any, - userContext - ); - }, - }, - - // ==================== Analytics Operations ==================== - analytics: { - /** - * Get wide analytics (daily resolution) - */ - async getWide(window: string = "this_month") { - const pool = await getTeamPool(context.teamId); - const schema = buildSchemaName(context.teamId); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - return buildAnalytics({ - windowLabel: window, - client, - resolution: "day", - }); - } finally { - client.release(); - } - }, - - /** - * Get narrow analytics (hourly resolution for today) - */ - async getNarrow() { - const pool = await getTeamPool(context.teamId); - const schema = buildSchemaName(context.teamId); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - return buildAnalytics({ - windowLabel: "today", - client, - resolution: "hour", - }); - } finally { - client.release(); - } - }, - - /** - * Get actionable insights - */ - async getInsights(days: number = 30) { - const pool = await getTeamPool(context.teamId); - const schema = buildSchemaName(context.teamId); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - // Use the insights generation logic from tsdb controller - // This is a simplified version - full implementation would mirror the controller - return this._generateInsights(client, days); - } finally { - client.release(); - } - }, - - /** - * Get summary metrics with period-over-period change - */ - async getMetrics(days: number = 30) { - const pool = await getTeamPool(context.teamId); - const schema = 
buildSchemaName(context.teamId); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - return this._generateMetrics(client, days); - } finally { - client.release(); - } - }, - - /** - * Get logs (raw or aggregated) - */ - async getLogs(params: { - start: string; - end: string; - groupBy?: string; - limit?: number; - offset?: number; - }) { - const pool = await getTeamPool(context.teamId); - const schema = buildSchemaName(context.teamId); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - return this._getLogs(client, params); - } finally { - client.release(); - } - }, - - // Internal helper methods - async _generateInsights(client: PoolClient, days: number) { - // Simplified insights generation - const now = new Date(); - const periodStart = new Date(now); - periodStart.setDate(periodStart.getDate() - days); - - const { rows } = await client.query( - ` - SELECT - COUNT(*) as total_requests, - COALESCE(SUM(cost_total), 0) as total_cost, - COALESCE(AVG(latency_ms), 0) as avg_latency - FROM llm_events - WHERE "timestamp" >= $1 AND "timestamp" <= $2 - `, - [periodStart.toISOString(), now.toISOString()] - ); - - const stats = rows[0]; - const insights = []; - - // Basic usage summary insight - insights.push({ - id: "usage_snapshot", - severity: "summary", - title: "Period usage summary", - description: `${parseInt(stats.total_requests).toLocaleString()} requests totaling $${parseFloat(stats.total_cost).toFixed(2)} over the last ${days} days.`, - metric: { - total_requests: parseInt(stats.total_requests), - total_cost: parseFloat(stats.total_cost), - }, - }); - - return { - period: { days, start: periodStart.toISOString(), end: now.toISOString() }, - insights, - summary: { - total: insights.length, - critical: insights.filter((i) => i.severity === "critical").length, 
- warning: insights.filter((i) => i.severity === "warning").length, - info: insights.filter((i) => i.severity === "info").length, - }, - }; - }, - - async _generateMetrics(client: PoolClient, days: number) { - const now = new Date(); - const currentStart = new Date(now); - currentStart.setDate(currentStart.getDate() - days); - - const { rows } = await client.query( - ` - SELECT - COUNT(*) as total_requests, - COUNT(DISTINCT trace_id) as unique_traces, - COALESCE(SUM(usage_input_tokens), 0) as total_input_tokens, - COALESCE(SUM(usage_output_tokens), 0) as total_output_tokens, - COALESCE(SUM(cost_total), 0) as total_cost, - COALESCE(AVG(latency_ms), 0) as avg_latency_ms - FROM llm_events - WHERE "timestamp" >= $1 AND "timestamp" <= $2 - `, - [currentStart.toISOString(), now.toISOString()] - ); - - const stats = rows[0]; - - return { - period: { days, start: currentStart.toISOString(), end: now.toISOString() }, - volume: { - total_requests: parseInt(stats.total_requests), - unique_traces: parseInt(stats.unique_traces), - }, - tokens: { - total_input_tokens: parseInt(stats.total_input_tokens), - total_output_tokens: parseInt(stats.total_output_tokens), - }, - cost: { - total_cost: parseFloat(stats.total_cost), - }, - performance: { - avg_latency_ms: parseFloat(stats.avg_latency_ms), - }, - }; - }, - - async _getLogs( - client: PoolClient, - params: { - start: string; - end: string; - groupBy?: string; - limit?: number; - offset?: number; - } - ) { - const { start, end, groupBy, limit = 500, offset = 0 } = params; - - if (groupBy) { - const validFields = ["model", "agent", "provider"]; - const groupFields = groupBy - .split(",") - .map((f) => f.trim()) - .filter((f) => validFields.includes(f)); - - if (groupFields.length > 0) { - const selectFields = groupFields.join(", "); - const { rows } = await client.query( - ` - SELECT - ${selectFields}, - COUNT(*) as request_count, - COALESCE(SUM(cost_total), 0) as total_cost - FROM llm_events - WHERE "timestamp" >= $1 AND 
"timestamp" <= $2 - GROUP BY ${selectFields} - ORDER BY total_cost DESC - LIMIT $3 OFFSET $4 - `, - [start, end, limit, offset] - ); - - return { - window: { start, end }, - group_by: groupFields, - count: rows.length, - aggregations: rows, - }; - } - } - - // Raw logs - const { rows } = await client.query( - ` - SELECT * - FROM llm_events - WHERE "timestamp" >= $1 AND "timestamp" <= $2 - ORDER BY "timestamp" DESC - LIMIT $3 OFFSET $4 - `, - [start, end, limit, offset] - ); - - return { - window: { start, end }, - count: rows.length, - rows, - }; - }, - }, - - // ==================== Agent Status Operations ==================== - agents: { - /** - * Get connected agent instances - * This requires access to the controlEmitter which is set on the Express app - */ - getList(controlEmitter?: { - getConnectedCount: (teamId: string) => number; - getConnectedInstances: (teamId: string) => unknown[]; - }) { - if (!controlEmitter) { - return { - active: false, - count: 0, - instances: [], - timestamp: new Date().toISOString(), - error: "WebSocket not initialized", - }; - } - - const count = controlEmitter.getConnectedCount(context.teamId); - const instances = controlEmitter.getConnectedInstances(context.teamId); - - return { - active: count > 0, - count, - instances, - timestamp: new Date().toISOString(), - }; - }, - - /** - * Get agent fleet summary - */ - getSummary(controlEmitter?: { - getConnectedCount: (teamId: string) => number; - getConnectedInstances: (teamId: string) => Array<{ - instance_id: string; - agent?: string; - last_heartbeat: string; - }>; - }) { - if (!controlEmitter) { - return { - total_active: 0, - healthy: 0, - unhealthy: 0, - stale_connections: 0, - by_agent_name: {}, - timestamp: new Date().toISOString(), - error: "WebSocket not initialized", - }; - } - - const instances = controlEmitter.getConnectedInstances(context.teamId); - const now = Date.now(); - const STALE_THRESHOLD_MS = 60000; // 60 seconds - - let healthy = 0; - let unhealthy = 0; - 
const byAgentName: Record< - string, - { count: number; healthy: number; unhealthy: number } - > = {}; - - for (const instance of instances) { - const lastHeartbeat = new Date(instance.last_heartbeat).getTime(); - const isHealthy = now - lastHeartbeat < STALE_THRESHOLD_MS; - - if (isHealthy) { - healthy++; - } else { - unhealthy++; - } - - const agentName = instance.agent || "unknown"; - if (!byAgentName[agentName]) { - byAgentName[agentName] = { count: 0, healthy: 0, unhealthy: 0 }; - } - byAgentName[agentName].count++; - if (isHealthy) { - byAgentName[agentName].healthy++; - } else { - byAgentName[agentName].unhealthy++; - } - } - - return { - total_active: instances.length, - healthy, - unhealthy, - stale_connections: unhealthy, - by_agent_name: byAgentName, - timestamp: new Date().toISOString(), - }; - }, - }, - }; -} - -export type ApiClient = ReturnType; diff --git a/hive/src/mcp/utils/response-helpers.ts b/hive/src/mcp/utils/response-helpers.ts deleted file mode 100644 index 1ef37fa4..00000000 --- a/hive/src/mcp/utils/response-helpers.ts +++ /dev/null @@ -1,65 +0,0 @@ -/** - * MCP response formatting helpers - */ - -export interface MCPResponse { - [key: string]: unknown; - content: Array<{ - type: "text"; - text: string; - }>; - isError?: boolean; -} - -/** - * Create a successful MCP response - */ -export function createSuccessResponse(data: unknown): MCPResponse { - return { - content: [ - { - type: "text", - text: JSON.stringify(data, null, 2), - }, - ], - }; -} - -/** - * Create an error MCP response - */ -export function createErrorResponse( - error: string, - details?: unknown -): MCPResponse { - const errorData = { - error, - ...(details && { details }), - }; - - return { - content: [ - { - type: "text", - text: JSON.stringify(errorData, null, 2), - }, - ], - isError: true, - }; -} - -/** - * Handle tool errors consistently - */ -export function handleToolError(error: unknown, toolName: string): MCPResponse { - console.error(`[MCP] Error in 
${toolName}:`, error); - - if (error instanceof Error) { - return createErrorResponse(error.message, { - tool: toolName, - stack: process.env.NODE_ENV === "development" ? error.stack : undefined, - }); - } - - return createErrorResponse("Unknown error occurred", { tool: toolName }); -} diff --git a/hive/src/mcp/utils/schema-helpers.ts b/hive/src/mcp/utils/schema-helpers.ts deleted file mode 100644 index 3a6b1d38..00000000 --- a/hive/src/mcp/utils/schema-helpers.ts +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Zod schema helpers for MCP tools - */ -import { z } from "zod"; - -// Basic types -export const idSchema = z.string().min(1).describe("Unique identifier"); -export const dateSchema = z - .string() - .regex(/^\d{4}-\d{2}-\d{2}$/) - .describe("Date in YYYY-MM-DD format"); -export const dateTimeSchema = z - .string() - .datetime() - .describe("ISO 8601 datetime string"); -export const amountSchema = z.number().describe("Monetary amount in USD"); - -// Budget types -export const budgetTypeSchema = z - .enum(["global", "agent", "tenant", "customer", "feature", "tag"]) - .describe("Type of budget scope"); - -export const limitActionSchema = z - .enum(["kill", "throttle", "degrade"]) - .describe("Action when budget limit exceeded"); - -// Pagination -export const paginationSchema = z.object({ - limit: z - .number() - .min(1) - .max(1000) - .default(100) - .describe("Max items to return"), - offset: z.number().min(0).default(0).describe("Number of items to skip"), -}); - -// Analytics window -export const analyticsWindowSchema = z - .enum(["all_time", "this_month", "this_week", "last_2_weeks", "today"]) - .default("this_month") - .describe("Time window for analytics data"); - -// Budget validation context -export const validationContextSchema = z - .object({ - agent: z.string().optional().describe("Agent name for agent-type budgets"), - tenant_id: z - .string() - .optional() - .describe("Tenant ID for tenant-type budgets"), - customer_id: z - .string() - .optional() - 
.describe("Customer ID for customer-type budgets"), - feature: z.string().optional().describe("Feature name for feature-type budgets"), - tags: z.array(z.string()).optional().describe("Tags for tag-type budgets"), - }) - .describe("Context for multi-budget matching"); - -// Budget alert configuration -export const budgetAlertSchema = z.object({ - threshold: z - .number() - .min(0) - .max(100) - .describe("Alert threshold as percentage of limit"), - enabled: z.boolean().describe("Whether alert is enabled"), -}); - -// Budget notifications configuration -export const budgetNotificationsSchema = z.object({ - inApp: z.boolean().default(true).describe("Enable in-app notifications"), - email: z.boolean().default(false).describe("Enable email notifications"), - emailRecipients: z - .array(z.string().email()) - .default([]) - .describe("Email recipients"), - webhook: z.boolean().default(false).describe("Enable webhook notifications"), -}); diff --git a/hive/src/middleware/error-handler.middleware.ts b/hive/src/middleware/error-handler.middleware.ts deleted file mode 100644 index 6add4754..00000000 --- a/hive/src/middleware/error-handler.middleware.ts +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Global Error Handler Middleware - * - * Handles all errors and sends consistent JSON responses. 
- */ - -import { Request, Response, NextFunction } from 'express'; - -interface HttpError extends Error { - status?: number; - statusCode?: number; -} - -/** - * Error handler middleware - * @param {Error} err - Error object - * @param {Object} req - Express request - * @param {Object} res - Express response - * @param {Function} next - Next middleware - */ -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function errorHandler(err: HttpError, req: Request, res: Response, _next: NextFunction): void { - // Log error - console.error('[Error]', { - message: err.message, - status: err.status || err.statusCode || 500, - path: req.path, - method: req.method, - stack: process.env.NODE_ENV === 'development' ? err.stack : undefined, - }); - - // Get status code - const status = err.status || err.statusCode || 500; - - // Send error response - res.status(status).json({ - error: err.name || 'Error', - message: err.message || 'An unexpected error occurred', - status, - ...(process.env.NODE_ENV === 'development' && { stack: err.stack }), - }); -} - -export { errorHandler }; diff --git a/hive/src/routes.ts b/hive/src/routes.ts deleted file mode 100644 index eaed4283..00000000 --- a/hive/src/routes.ts +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Route Definitions - * - * Central route registration for all DevTool APIs. 
- */ - -import express from 'express'; - -// Controllers -import tsdbController from './controllers/tsdb.controller'; -import controlController from './controllers/control.controller'; -import quickstartController from './controllers/quickstart.controller'; -import userController from './controllers/user.controller'; -import iamController from './controllers/iam.controller'; - -const router = express.Router(); - -// ============================================================================= -// User Routes - Authentication and user management -// ============================================================================= -router.use('/user', userController); - -// ============================================================================= -// IAM Routes - Identity and Access Management -// ============================================================================= -router.use('/iam', iamController); - -// ============================================================================= -// TSDB Routes - Time Series Database for LLM metrics -// ============================================================================= -router.use('/tsdb', tsdbController); - -// ============================================================================= -// Control Routes - SDK control plane -// ============================================================================= -router.use('/v1/control', controlController); - -// ============================================================================= -// Quickstart Routes - SDK documentation generation -// ============================================================================= -router.use('/quickstart', quickstartController); - -export default router; diff --git a/hive/src/services/control/control_service.ts b/hive/src/services/control/control_service.ts deleted file mode 100644 index a7ee69a3..00000000 --- a/hive/src/services/control/control_service.ts +++ /dev/null @@ -1,2067 +0,0 @@ -/** - * Aden Control Service - * 
- * Manages control policies and events for the Aden SDK. - * Provides policy management, event storage, and budget tracking. - */ - -import { randomUUID } from "crypto"; -import * as tsdbService from "../tsdb/tsdb_service"; -import pricingService from "../tsdb/pricing_service"; -import { getTeamPool, buildSchemaName } from "../tsdb/team_context"; -// TODO: Integrate mail service from @aden/administration -// import mailService from "../mail_service/mail_service"; -import llmEventBatcher from "./llm_event_batcher"; -import { registerHttpAgent } from "./control_sockets"; - -// In-memory budget tracking (could be moved to Redis for distributed tracking) -// Map: budget_id -> { spent: number, lastReset: Date } -const budgetTracker = new Map(); - -// Notification cooldown tracking to prevent spam -// Map: "budget_id:alert_type:threshold" -> timestamp -const notificationCooldowns = new Map(); -const NOTIFICATION_COOLDOWN_MS = 15 * 60 * 1000; // 15 minutes - -interface MongoCollection { - find: (query: Record) => { toArray: () => Promise; sort: (sort: Record) => { skip: (n: number) => { limit: (n: number) => { toArray: () => Promise } } } }; - findOne: (query: Record) => Promise; - insertOne: (doc: Record) => Promise; - updateOne: (query: Record, update: Record, options?: Record) => Promise; - deleteOne: (query: Record) => Promise<{ deletedCount: number }>; -} - -declare const _ACHO_MG_DB: { db: (name: string) => { collection: (name: string) => MongoCollection } }; -declare const _ACHO_MDB_CONFIG: { ERP_DBNAME: string }; -declare const _ACHO_MDB_COLLECTIONS: { ADEN_CONTROL_POLICIES: string; ADEN_CONTROL_CONTENT: string }; -declare const _GLOBAL_CONST: { ARP_URL: string }; - -interface UserContext { - user_id?: string; - team_id?: string | number; -} - -interface Budget { - id: string; - name: string; - type: string; - limit: number; - spent?: number; - limitAction?: string; - degradeToModel?: string; - degradeToProvider?: string; - tagCategory?: string; - tags?: 
string[]; - alerts?: Array<{ threshold: number; enabled: boolean }>; - notifications?: { - email?: boolean; - emailRecipients?: string[]; - webhook?: boolean; - webhookUrl?: string; - }; - analytics?: { - burnRate: number; - projectedSpend: number; - daysUntilLimit: number | null; - usagePercent: number; - projectedPercent: number; - status: string; - period: { - daysInMonth: number; - daysElapsed: number; - daysRemaining: number; - startOfMonth: string; - endOfMonth: string; - }; - }; -} - -interface Policy { - id: string; - team_id: string | number; - name: string; - version: string; - budgets: Budget[]; - throttles: unknown[]; - blocks: unknown[]; - degradations: unknown[]; - alerts: unknown[]; - created_at: string; - updated_at: string; - created_by?: string; - updated_by?: string; -} - -interface ContentCapture { - system_prompt?: string; - messages?: unknown[]; - tools?: unknown[]; - params?: Record; - response_content?: string; - finish_reason?: string; - choice_count?: number; - has_images?: boolean; - image_urls?: string[]; -} - -interface MetricData { - provider?: string; - model?: string; - total_tokens?: number; - input_tokens?: number; - output_tokens?: number; - cached_tokens?: number; - reasoning_tokens?: number; - agent?: string; - metadata?: Record; - trace_id?: string; - span_id?: string; - request_id?: string; - call_sequence?: number; - stream?: boolean; - agent_stack?: string[]; - latency_ms?: number; - content_capture?: ContentCapture; -} - -interface Event { - event_type: string; - timestamp?: string; - trace_id?: string; - data?: MetricData; - action?: string; - original_model?: string; - provider?: string; - reason?: string; - budget_id?: string; - policy_id?: string; - agent?: string; - agent_name?: string; - sdk_instance_id?: string; - status?: string; - requests_since_last?: number; - message?: string; - stack?: string; -} - -/** - * Get the MongoDB collection for control policies - * @returns MongoDB collection - */ -function 
getPolicyCollection(): MongoCollection { - return _ACHO_MG_DB - .db(_ACHO_MDB_CONFIG.ERP_DBNAME) - .collection(_ACHO_MDB_COLLECTIONS.ADEN_CONTROL_POLICIES); -} - -/** - * Calculate actual spend and burn rate analytics for a budget from TSDB data - * Uses hybrid CA + base table approach for lowest latency - */ -async function calculateBudgetAnalyticsFromTsdb(teamId: string | number, budget: Budget): Promise<{ - spent: number; - burnRate: number; - projectedSpend: number; - daysUntilLimit: number | null; - usagePercent: number; - projectedPercent: number; - status: string; - source: string; - period: { - daysInMonth: number; - daysElapsed: number; - daysRemaining: number; - startOfMonth: string; - endOfMonth: string; - }; -}> { - const now = new Date(); - const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1); - const endOfMonth = new Date(now.getFullYear(), now.getMonth() + 1, 0); - const daysInMonth = endOfMonth.getDate(); - const daysElapsed = Math.max( - 1, - Math.floor((now.getTime() - startOfMonth.getTime()) / (1000 * 60 * 60 * 24)) + 1 - ); - const daysRemaining = daysInMonth - daysElapsed + 1; - - // Today's midnight for CA vs base table split - const todayMidnight = new Date(); - todayMidnight.setUTCHours(0, 0, 0, 0); - - try { - const pool = await getTeamPool(teamId); - const schema = buildSchemaName(teamId); - const client = await pool.connect(); - - try { - // Explicitly set search_path to team schema before querying - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - let spent = 0; - const usedCA = false; - - // Determine which CA to use based on budget type - // Note: CA (continuous aggregates) are disabled for now because: - // 1. Team-specific schemas don't have CA tables populated - // 2. 
CA tables need to be refreshed periodically - // TODO: Enable CA once aggregation is set up per-team - const canUseGlobalCA = false; // Disabled: CA not populated in team schemas - const canUseAgentCA = false; // Disabled: agent may be in metadata->>'agent' - - // --- Query CA for historical data (before today) --- - if (startOfMonth < todayMidnight) { - if (canUseGlobalCA) { - // Use daily CA for global budgets - try { - const caSql = ` - SELECT COALESCE(SUM(cost_total), 0) as total_cost - FROM llm_events_daily_ca - WHERE bucket >= $1 AND bucket < $2 - `; - const caResult = await client.query(caSql, [ - startOfMonth.toISOString(), - todayMidnight.toISOString(), - ]); - spent += parseFloat(caResult.rows[0]?.total_cost) || 0; - } catch (caErr) { - // CA not available, will fall back to base table - } - } else if (canUseAgentCA) { - // Use agent CA for agent budgets - try { - const caSql = ` - SELECT COALESCE(SUM(cost_total), 0) as total_cost - FROM llm_events_daily_by_agent_ca - WHERE bucket >= $1 AND bucket < $2 AND agent = $3 - `; - const caResult = await client.query(caSql, [ - startOfMonth.toISOString(), - todayMidnight.toISOString(), - budget.name, - ]); - spent += parseFloat(caResult.rows[0]?.total_cost) || 0; - } catch (caErr) { - // CA not available, will fall back to base table - } - } - } - - // --- Query base table for today's data (always) + historical if CA failed --- - const baseTableStart = usedCA ? 
todayMidnight : startOfMonth; - - const conditions = [`team_id = $1`, `"timestamp" >= $2`, `"timestamp" <= $3`]; - const values: unknown[] = [String(teamId), baseTableStart, now]; - const paramIndex = 4; - - // Apply budget-specific filter based on budget type - const budgetFilter = getBudgetFilter(budget, paramIndex); - if (budgetFilter) { - conditions.push(budgetFilter.condition); - values.push(budgetFilter.value); - } - - const baseSql = ` - SELECT COALESCE(SUM(cost_total), 0) as total_cost - FROM llm_events - WHERE ${conditions.join(" AND ")} - `; - - console.log(`[Aden Control] Budget analytics query for team ${teamId}, schema ${schema}:`); - console.log(`[Aden Control] SQL: ${baseSql}`); - console.log(`[Aden Control] Values:`, values); - - // Debug: check row count and cost - const countResult = await client.query(`SELECT COUNT(*) as cnt FROM llm_events WHERE team_id = $1`, [String(teamId)]); - console.log(`[Aden Control] Total rows in llm_events for team ${teamId}: ${countResult.rows[0]?.cnt}`); - - // Debug: check total cost regardless of timestamp filter - const debugResult = await client.query(`SELECT SUM(cost_total) as total, MIN("timestamp") as min_ts, MAX("timestamp") as max_ts FROM llm_events WHERE team_id = $1`, [String(teamId)]); - console.log(`[Aden Control] All-time cost: $${debugResult.rows[0]?.total}, timestamps: ${debugResult.rows[0]?.min_ts} to ${debugResult.rows[0]?.max_ts}`); - - const baseResult = await client.query(baseSql, values); - console.log(`[Aden Control] Result:`, baseResult.rows[0]); - - spent += parseFloat(baseResult.rows[0]?.total_cost) || 0; - console.log(`[Aden Control] Total spent for budget ${budget.name}: $${spent}`); - - // Calculate burn rate analytics - const burnRate = daysElapsed > 0 ? spent / daysElapsed : 0; - const projectedSpend = burnRate * daysInMonth; - const remaining = Math.max(0, budget.limit - spent); - const daysUntilLimit = burnRate > 0 ? 
remaining / burnRate : Infinity; - const usagePercent = budget.limit > 0 ? (spent / budget.limit) * 100 : 0; - const projectedPercent = - budget.limit > 0 ? (projectedSpend / budget.limit) * 100 : 0; - - // Determine status based on projected spend and current usage - let status = "healthy"; - if (usagePercent >= 100) { - status = "exceeded"; - } else if (projectedPercent >= 100 || daysUntilLimit <= daysRemaining) { - status = "at_risk"; - } else if (usagePercent >= 80 || projectedPercent >= 80) { - status = "warning"; - } - - return { - spent, - burnRate, - projectedSpend, - daysUntilLimit: daysUntilLimit === Infinity ? null : daysUntilLimit, - usagePercent, - projectedPercent, - status, - source: usedCA ? "hybrid_ca" : "base_table", - period: { - daysInMonth, - daysElapsed, - daysRemaining, - startOfMonth: startOfMonth.toISOString(), - endOfMonth: endOfMonth.toISOString(), - }, - }; - } finally { - client.release(); - } - } catch (error) { - console.error( - `[Aden Control] Failed to calculate budget analytics from TSDB:`, - (error as Error).message - ); - // Fall back to in-memory tracker with minimal analytics - const tracker = budgetTracker.get(budget.id); - const spent = tracker?.spent ?? budget.spent ?? 0; - const burnRate = daysElapsed > 0 ? spent / daysElapsed : 0; - const projectedSpend = burnRate * daysInMonth; - - return { - spent, - burnRate, - projectedSpend, - daysUntilLimit: burnRate > 0 ? Math.max(0, budget.limit - spent) / burnRate : null, - usagePercent: budget.limit > 0 ? (spent / budget.limit) * 100 : 0, - projectedPercent: budget.limit > 0 ? 
(projectedSpend / budget.limit) * 100 : 0, - status: "unknown", - source: "fallback", - period: { - daysInMonth, - daysElapsed, - daysRemaining, - startOfMonth: startOfMonth.toISOString(), - endOfMonth: endOfMonth.toISOString(), - }, - }; - } -} - -/** - * Get policy for a team by policy ID - */ -async function getPolicy(teamId: string | number | null, policyId: string | null = null, userContext: UserContext | null = null): Promise { - if (!teamId) { - teamId = userContext?.team_id ?? null; - } - if (!teamId) { - throw new Error("team_id is required to get policy"); - } - - // Use "default" as the actual policy ID when not specified - const actualPolicyId = policyId || "default"; - - const collection = getPolicyCollection(); - let policyDoc = await collection.findOne({ team_id: teamId, id: actualPolicyId }) as Policy & { _id?: unknown } | null; - - if (!policyDoc) { - // Create empty policy with the specified ID - const newPolicy: Policy & { _id?: unknown } = { - id: actualPolicyId, - team_id: teamId, - name: actualPolicyId === "default" ? 
"Default Policy" : "New Policy", - version: randomUUID().slice(0, 8), - budgets: [], - throttles: [], - blocks: [], - degradations: [], - alerts: [], - created_at: new Date().toISOString(), - updated_at: new Date().toISOString(), - ...(userContext?.user_id && { created_by: userContext.user_id }), - }; - await collection.insertOne(newPolicy as unknown as Record); - policyDoc = newPolicy; - } - - // Remove MongoDB _id from response - const { _id, ...policy } = policyDoc; - - // Enrich budget rules with actual spend and analytics from TSDB - if (policy.budgets && policy.budgets.length > 0) { - policy.budgets = await Promise.all( - policy.budgets.map(async (budget) => { - const analytics = await calculateBudgetAnalyticsFromTsdb(teamId!, budget); - return { - ...budget, - spent: analytics.spent, - analytics: { - burnRate: analytics.burnRate, - projectedSpend: analytics.projectedSpend, - daysUntilLimit: analytics.daysUntilLimit, - usagePercent: analytics.usagePercent, - projectedPercent: analytics.projectedPercent, - status: analytics.status, - period: analytics.period, - }, - }; - }) - ); - } - - return policy as Policy; -} - -/** - * Update policy for a team (or create new if policyId is null) - */ -async function updatePolicy(teamId: string | number | null, policyId: string | null, policyUpdate: Partial, userContext: UserContext | null = null): Promise { - if (!teamId) { - teamId = userContext?.team_id ?? 
null; - } - if (!teamId) { - throw new Error("team_id is required to update policy"); - } - - // Use "default" as the actual policy ID when not specified - const actualPolicyId = policyId || "default"; - - const collection = getPolicyCollection(); - - const updateFields = { - ...policyUpdate, - version: randomUUID().slice(0, 8), - updated_at: new Date().toISOString(), - ...(userContext?.user_id && { updated_by: userContext.user_id }), - }; - - // Build setOnInsert with only fields NOT in policyUpdate to avoid MongoDB conflicts - // Fields in both $set and $setOnInsert cause "would create a conflict" errors - const defaultName = actualPolicyId === "default" ? "Default Policy" : "New Policy"; - const setOnInsert: Record = { - id: actualPolicyId, - team_id: teamId, - ...(!policyUpdate.name && { name: defaultName }), - ...(!("budgets" in policyUpdate) && { budgets: [] }), - ...(!("throttles" in policyUpdate) && { throttles: [] }), - ...(!("blocks" in policyUpdate) && { blocks: [] }), - ...(!("degradations" in policyUpdate) && { degradations: [] }), - ...(!("alerts" in policyUpdate) && { alerts: [] }), - created_at: new Date().toISOString(), - ...(userContext?.user_id && { created_by: userContext.user_id }), - }; - - await collection.updateOne( - { team_id: teamId, id: actualPolicyId }, - { - $set: updateFields, - $setOnInsert: setOnInsert, - }, - { upsert: true } - ); - - // Return the updated policy - return getPolicy(teamId, actualPolicyId); -} - -/** - * Transform a metric event to TSDB format - */ -function transformMetricToTsdbEvent(event: Event, teamId: string | number, policyId: string | null): Record { - const data = event.data || {}; - const now = new Date(); - // Extract agent - metadata.agent takes precedence over top-level agent - const effectiveAgent = (data.metadata?.agent as string) || data.agent || null; - - // Calculate cost for real-time streaming - const cost = pricingService.calculateCostSync({ - model: data.model || "", - provider: data.provider, - 
input_tokens: data.input_tokens || 0, - output_tokens: data.output_tokens || 0, - cached_tokens: data.cached_tokens || 0, - }).total; - - return { - timestamp: event.timestamp || now.toISOString(), - team_id: String(teamId), - user_id: (data.metadata?.user_id as string) || null, - trace_id: data.trace_id || event.trace_id || randomUUID(), - span_id: data.span_id || null, - request_id: data.request_id || null, - provider: data.provider || null, - call_sequence: data.call_sequence ?? 0, - model: data.model || "", - stream: Boolean(data.stream), - agent: effectiveAgent, - agent_name: event.agent_name || null, - agent_stack: data.agent_stack || [], - latency_ms: data.latency_ms || null, - usage: { - input_tokens: data.input_tokens || 0, - output_tokens: data.output_tokens || 0, - total_tokens: data.total_tokens || 0, - cached_tokens: data.cached_tokens || 0, - reasoning_tokens: data.reasoning_tokens || 0, - }, - cost_total: cost, - metadata: { - ...data.metadata, - policy_id: policyId, - event_type: event.event_type, - }, - // Layer 0 content capture (if enabled in SDK) - content_capture: data.content_capture || null, - }; -} - -/** - * Process incoming events from SDK - */ -async function processEvents(teamId: string | number | null, policyId: string | null, events: Event[], userContext: UserContext | null = null): Promise { - if (!teamId) { - teamId = userContext?.team_id ?? 
null; - } - if (!teamId) { - throw new Error("team_id is required to process events"); - } - - const tsdbEvents: Record[] = []; - - for (const event of events) { - // Process specific event types - switch (event.event_type) { - case "metric": - await processMetricEvent(teamId, policyId, event, userContext); - // Transform and collect metric events for TSDB - tsdbEvents.push(transformMetricToTsdbEvent(event, teamId, policyId)); - break; - case "control": - await processControlEvent(teamId, event, policyId); - break; - case "heartbeat": - await processHeartbeatEvent(teamId, policyId, event); - break; - case "error": - await processErrorEvent(teamId, event); - break; - } - } - - // Store metric events in TSDB if we have team context - if (tsdbEvents.length > 0) { - try { - const pool = await getTeamPool(teamId); - const schema = buildSchemaName(teamId); - const client = await pool.connect(); - try { - // Explicitly set search_path to team schema before inserting - await client.query(`CREATE SCHEMA IF NOT EXISTS ${schema}`); - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - const result = await tsdbService.upsertEvents(tsdbEvents as unknown[], client); - console.log( - `[Aden Control] Stored ${result.rowsWritten} events in TSDB for team ${teamId}` - ); - - // Push to real-time WebSocket stream - if (result.rowsWritten > 0) { - llmEventBatcher.add(teamId, tsdbEvents as unknown[]); - } - } finally { - client.release(); - } - } catch (error) { - console.error(`[Aden Control] Failed to store events in TSDB:`, (error as Error).message); - } - } -} - -/** - * Process a metric event - update budget tracking - * Updates spend for all matching budgets based on their type - */ -async function processMetricEvent(teamId: string | number, policyId: string | null, event: Event, userContext: UserContext | null = null): Promise { - const metricData = event.data; - if (!metricData) return; - - // Calculate cost from tokens 
(simplified pricing) - const cost = estimateCost(metricData); - - // Get the policy to find matching budgets - const policy = await getPolicy(teamId, policyId, userContext); - let budgetUpdated = false; - - if (policy.budgets && policy.budgets.length > 0) { - for (const budget of policy.budgets) { - // Determine if this metric applies to this budget based on type - const shouldApply = matchesBudgetType(budget, metricData); - - if (shouldApply) { - const tracker = budgetTracker.get(budget.id) || { - spent: 0, - lastReset: new Date(), - }; - tracker.spent += cost; - budgetTracker.set(budget.id, tracker); - budgetUpdated = true; - - // Check if budget alerts should be triggered - checkBudgetAlerts(budget, tracker.spent, teamId, policyId); - } - } - } - - // Push updated policy with new spend to SDK via WebSocket - if (budgetUpdated && (global as unknown as Record)._ADEN_CONTROL_EMITTER) { - const updatedPolicy = await getPolicy(teamId, policyId); - ((global as unknown as Record)._ADEN_CONTROL_EMITTER as { emitPolicyUpdate: (teamId: string | number, policyId: string | null, policy: Policy) => void }).emitPolicyUpdate(teamId, policyId, updatedPolicy); - } - - console.log( - `[Aden Control] Metric: ${metricData.provider}/${metricData.model} - ${ - metricData.total_tokens - } tokens, $${cost.toFixed(6)}` - ); -} - -/** - * Check if a metric event matches a budget's type criteria - */ -function matchesBudgetType(budget: Budget, metricData: MetricData): boolean { - const metadata = metricData.metadata || {}; - // metadata.agent takes precedence over top-level agent - const effectiveAgent = (metadata.agent as string) || metricData.agent; - - switch (budget.type) { - case "global": - // Global budgets apply to all metrics - return true; - - case "agent": - // Agent budgets apply when agent name matches (from top-level or metadata) - return !!effectiveAgent && budget.name === effectiveAgent; - - case "tenant": - // Tenant budgets apply when tenant_id matches - return 
!!metadata.tenant_id && budget.name === metadata.tenant_id; - - case "customer": - // Customer budgets apply when customer_id matches - return !!metadata.customer_id && budget.name === metadata.customer_id; - - case "feature": - // Feature budgets apply when feature name matches - return !!metadata.feature && budget.name === metadata.feature; - - case "tag": { - // Tag budgets apply when the tagCategory value matches budget name - if (!budget.tagCategory || !metadata.tags) return false; - const tagValue = (metadata.tags as Record)[budget.tagCategory]; - return !!tagValue && budget.name === tagValue; - } - - default: - return false; - } -} - -/** - * Send budget notifications via configured channels (email, webhook) - * Includes cooldown logic to prevent notification spam. - */ -async function sendBudgetNotifications(budget: Budget, alertData: Record, alertType: string = "threshold"): Promise { - const notifications = budget.notifications; - if (!notifications) { - console.log( - `[Aden Control] No notifications configured for budget ${budget.name} (${budget.id})` - ); - return false; - } - - // Check if any notification channel is enabled - if (!notifications.email && !notifications.webhook) { - console.log( - `[Aden Control] Notifications disabled for budget ${budget.name} (email: ${notifications.email}, webhook: ${notifications.webhook})` - ); - return false; - } - - // Check cooldown to prevent spam - const cooldownKey = `${budget.id}:${alertType}:${ - alertData.threshold || alertData.action || "default" - }`; - const lastSent = notificationCooldowns.get(cooldownKey); - const now = Date.now(); - - if (lastSent && now - lastSent < NOTIFICATION_COOLDOWN_MS) { - console.log( - `[Aden Control] Notification for budget ${budget.name} (${alertType}) skipped - cooldown active` - ); - return false; - } - - const { spent, limit, threshold, action } = alertData as { spent: number; limit: number; threshold?: number; action?: string }; - const spentPercentage = limit > 0 ? 
((spent / limit) * 100).toFixed(1) : "0"; - - // Determine alert severity color - const isLimitAction = alertType === "limit_action"; - const alertColor = isLimitAction - ? "#dc2626" - : parseFloat(spentPercentage) >= 90 - ? "#f59e0b" - : "#3b82f6"; - const alertBgColor = isLimitAction - ? "#fef2f2" - : parseFloat(spentPercentage) >= 90 - ? "#fffbeb" - : "#eff6ff"; - - // Build notification content - let title: string, description: string; - if (isLimitAction) { - title = "Budget Limit Triggered"; - description = `The budget ${budget.name} has exceeded its limit and triggered a control action.`; - } else { - title = "Budget Threshold Alert"; - description = `The budget ${budget.name} has reached ${threshold}% of its limit.`; - } - - // Email subject and content prepared for future email notification implementation - const _subject = isLimitAction - ? `[Aden] Budget "${budget.name}" - ${(action || "").toUpperCase()}` - : `[Aden] Budget "${budget.name}" at ${spentPercentage}%`; - - const _htmlContent = ` - - - - - - - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - -
- - ${isLimitAction ? action : "Alert"} - -
-

${title}

-
-

${description}

-
-
- - - - - - - -
- ${spentPercentage}% - of budget used -
-
-
-
-
-
- - - - - - - - - - - ${alertData.model ? ` - - - - ` : ""} -
- - - - - -
Spent$${(spent || 0).toFixed(4)}
-
- - - - - -
Limit$${(limit || 0).toFixed(2)}
-
- - - - - -
Budget Type${budget.type}
-
- - - - - -
Model${alertData.model}
-
-
- - - - -
- - View Cost Control Center - -
-
-

- Sent by Aden Cost Control -

-
-
- -`; - - // Send email notifications - if (notifications.email) { - if (!notifications.emailRecipients?.length) { - console.log( - `[Aden Control] Email enabled but no recipients configured for budget ${budget.name}` - ); - } else { - // TODO: Re-enable when mailService is integrated from @aden/administration - console.log( - `[Aden Control] Email notification skipped (mail service not configured) for budget ${budget.name}` - ); - } - } - - // Send webhook notifications - if (notifications.webhook && notifications.webhookUrl) { - try { - const response = await fetch(notifications.webhookUrl, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - type: "budget_alert", - alert_type: alertType, - budget_id: budget.id, - budget_name: budget.name, - budget_type: budget.type, - ...alertData, - timestamp: new Date().toISOString(), - }), - }); - if (!response.ok) { - console.error(`[Aden Control] Webhook returned ${response.status}`); - } else { - console.log(`[Aden Control] Sent webhook notification for budget ${budget.name}`); - } - } catch (err) { - console.error(`[Aden Control] Failed to send webhook notification:`, (err as Error).message); - } - } - - // Record cooldown timestamp - notificationCooldowns.set(cooldownKey, now); - return true; -} - -/** - * Check budget alerts and emit notifications if thresholds are crossed - */ -async function checkBudgetAlerts(budget: Budget, currentSpent: number, teamId: string | number, policyId: string | null): Promise { - if (!budget.alerts || !budget.alerts.length || !budget.limit) return; - - const spentPercentage = (currentSpent / budget.limit) * 100; - - for (const alert of budget.alerts) { - if (!alert.enabled) continue; - if (spentPercentage >= alert.threshold) { - const alertData = { - budget_id: budget.id, - budget_name: budget.name, - threshold: alert.threshold, - current_percentage: spentPercentage, - spent: currentSpent, - limit: budget.limit, - }; - - // Emit alert event via 
WebSocket (inApp notification) - if ((global as unknown as Record)._ADEN_CONTROL_EMITTER) { - ((global as unknown as Record)._ADEN_CONTROL_EMITTER as { emitAlert: (teamId: string | number, policyId: string | null, alert: Record) => void }).emitAlert(teamId, policyId, { - ...alertData, - notifications: budget.notifications, - }); - } - - // Send email/webhook notifications - await sendBudgetNotifications(budget, alertData, "threshold"); - } - } -} - -/** - * Estimate cost from metric data using unified pricing service - */ -function estimateCost(metricData: MetricData): number { - const result = pricingService.calculateCostSync({ - model: metricData.model || "", - provider: metricData.provider, - input_tokens: metricData.input_tokens || 0, - output_tokens: metricData.output_tokens || 0, - cached_tokens: metricData.cached_tokens || 0, - }); - return result.total; -} - -/** - * Process a control event - log control decisions and send notifications - */ -async function processControlEvent(teamId: string | number, event: Event, policyId: string | null = null): Promise { - console.log( - `[Aden Control] Control action: ${event.action} on ${event.provider}/${ - event.original_model - }${event.reason ? 
` - ${event.reason}` : ""}` - ); - - // Check if this is a budget-related control action - const isBudgetAction = - event.budget_id || - event.reason?.includes("budget") || - ["kill", "throttle", "degrade", "block"].includes(event.action || ""); - - // Fall back to default policy if not provided - const effectivePolicyId = policyId || event.policy_id || "default"; - - console.log( - `[Aden Control] Control event notification check: isBudgetAction=${isBudgetAction}, policyId=${effectivePolicyId}, budget_id=${event.budget_id}` - ); - - if (isBudgetAction) { - try { - // Get the policy to find the budget - const policy = await getPolicy(teamId, effectivePolicyId); - console.log( - `[Aden Control] Found policy with ${policy?.budgets?.length || 0} budgets` - ); - - if (policy?.budgets?.length) { - // Find matching budget by ID or by type/name - let budget = event.budget_id - ? policy.budgets.find((b) => b.id === event.budget_id) - : null; - - // If no budget_id, try to find by context (agent, etc.) - if (!budget && event.agent) { - budget = policy.budgets.find( - (b) => b.type === "agent" && b.name === event.agent - ); - } - - // Fallback to global budget - if (!budget) { - budget = policy.budgets.find((b) => b.type === "global"); - } - - console.log( - `[Aden Control] Budget lookup result: ${ - budget ? 
`found "${budget.name}" (${budget.id})` : "not found" - }` - ); - - if (budget) { - const alertData = { - action: event.action, - reason: event.reason, - model: event.original_model, - provider: event.provider, - spent: budget.spent || 0, - limit: budget.limit || 0, - }; - - console.log( - `[Aden Control] Sending notification for budget "${ - budget.name - }", notifications: ${JSON.stringify(budget.notifications)}` - ); - await sendBudgetNotifications(budget, alertData, "limit_action"); - } - } - } catch (err) { - console.error( - `[Aden Control] Failed to send control event notifications:`, - (err as Error).message, - (err as Error).stack - ); - } - } else { - console.log( - `[Aden Control] Skipping notification: isBudgetAction=${isBudgetAction}, policyId=${ - policyId || "null" - }` - ); - } -} - -/** - * Process a heartbeat event - track SDK health - */ -async function processHeartbeatEvent( - teamId: string | number, - policyId: string | null, - event: Event -): Promise { - console.log( - `[Aden Control] Heartbeat from ${event.agent_name || event.sdk_instance_id}: ${event.status}, ${event.requests_since_last} requests` - ); - - // Register/update HTTP agent tracking - if (event.sdk_instance_id) { - registerHttpAgent( - teamId, - event.sdk_instance_id, - event.policy_id || policyId, - event.agent_name || null, - event.status || "unknown" - ); - } -} - -/** - * Process an error event - */ -async function processErrorEvent(teamId: string | number, event: Event): Promise { - console.error(`[Aden Control] Error from SDK: ${event.message}`, event.stack); -} - -/** - * Get events for a team (for dashboard) - */ -async function getEvents(teamId: string | number, policyId: string | null = null, options: { limit?: number; offset?: number; start_date?: string; end_date?: string } = {}): Promise { - const { limit = 100, offset = 0, start_date, end_date } = options; - - if (!teamId) { - console.warn(`[Aden Control] No team_id provided, returning empty events`); - return []; 
- } - - try { - const pool = await getTeamPool(teamId); - const schema = buildSchemaName(teamId); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - // Build query with filters - const conditions = [`team_id = $1`]; - const values: unknown[] = [String(teamId)]; - let paramIndex = 2; - - // Filter by policy_id in metadata if provided - if (policyId) { - conditions.push(`metadata->>'policy_id' = $${paramIndex}`); - values.push(policyId); - paramIndex++; - } - - if (start_date) { - conditions.push(`"timestamp" >= $${paramIndex}`); - values.push(new Date(start_date)); - paramIndex++; - } - - if (end_date) { - conditions.push(`"timestamp" <= $${paramIndex}`); - values.push(new Date(end_date)); - paramIndex++; - } - - const sql = ` - SELECT - "timestamp", - trace_id, - span_id, - provider, - model, - agent, - latency_ms, - usage_input_tokens as input_tokens, - usage_output_tokens as output_tokens, - usage_total_tokens as total_tokens, - cost_total, - metadata - FROM llm_events - WHERE ${conditions.join(" AND ")} - ORDER BY "timestamp" DESC - LIMIT $${paramIndex} OFFSET $${paramIndex + 1} - `; - - values.push(limit, offset); - - const result = await client.query(sql, values); - - return result.rows.map((row: Record) => ({ - timestamp: row.timestamp, - trace_id: row.trace_id, - span_id: row.span_id, - provider: row.provider, - model: row.model, - agent: row.agent, - latency_ms: row.latency_ms, - input_tokens: row.input_tokens, - output_tokens: row.output_tokens, - total_tokens: row.total_tokens, - cost_usd: row.cost_total, - metadata: row.metadata, - })); - } finally { - client.release(); - } - } catch (error) { - console.error(`[Aden Control] Failed to get events from TSDB:`, (error as Error).message); - return []; - } -} - -/** - * Get metrics summary for a team (for dashboard analytics) - */ -async function getMetricsSummary(teamId: string | number, options: { 
start_date?: string; end_date?: string; group_by?: string } = {}): Promise<{ - total_requests: number; - total_cost: number; - total_input_tokens: number; - total_output_tokens: number; - total_tokens: number; - breakdown_by_model: Array<{ model: string; provider: string; requests: number; cost: number; tokens: number }>; -}> { - const { start_date, end_date } = options; - - if (!teamId) { - return { total_requests: 0, total_cost: 0, total_input_tokens: 0, total_output_tokens: 0, total_tokens: 0, breakdown_by_model: [] }; - } - - try { - const pool = await getTeamPool(teamId); - const schema = buildSchemaName(teamId); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - const conditions = [`team_id = $1`]; - const values: unknown[] = [String(teamId)]; - let paramIndex = 2; - - if (start_date) { - conditions.push(`"timestamp" >= $${paramIndex}`); - values.push(new Date(start_date)); - paramIndex++; - } - - if (end_date) { - conditions.push(`"timestamp" <= $${paramIndex}`); - values.push(new Date(end_date)); - paramIndex++; - } - - // Get totals - const totalsSql = ` - SELECT - COUNT(*) as total_requests, - COALESCE(SUM(cost_total), 0) as total_cost, - COALESCE(SUM(usage_input_tokens), 0) as total_input_tokens, - COALESCE(SUM(usage_output_tokens), 0) as total_output_tokens, - COALESCE(SUM(usage_total_tokens), 0) as total_tokens - FROM llm_events - WHERE ${conditions.join(" AND ")} - `; - - const totalsResult = await client.query(totalsSql, values); - const totals = totalsResult.rows[0] || {}; - - // Get breakdown by model - const breakdownSql = ` - SELECT - model, - provider, - COUNT(*) as requests, - COALESCE(SUM(cost_total), 0) as cost, - COALESCE(SUM(usage_total_tokens), 0) as tokens - FROM llm_events - WHERE ${conditions.join(" AND ")} - GROUP BY model, provider - ORDER BY cost DESC - LIMIT 20 - `; - - const breakdownResult = await client.query(breakdownSql, 
values); - - return { - total_requests: parseInt(totals.total_requests) || 0, - total_cost: parseFloat(totals.total_cost) || 0, - total_input_tokens: parseInt(totals.total_input_tokens) || 0, - total_output_tokens: parseInt(totals.total_output_tokens) || 0, - total_tokens: parseInt(totals.total_tokens) || 0, - breakdown_by_model: breakdownResult.rows.map((row: Record) => ({ - model: row.model as string, - provider: row.provider as string, - requests: parseInt(row.requests as string) || 0, - cost: parseFloat(row.cost as string) || 0, - tokens: parseInt(row.tokens as string) || 0, - })), - }; - } finally { - client.release(); - } - } catch (error) { - console.error(`[Aden Control] Failed to get metrics summary:`, (error as Error).message); - return { total_requests: 0, total_cost: 0, total_input_tokens: 0, total_output_tokens: 0, total_tokens: 0, breakdown_by_model: [] }; - } -} - -/** - * Get budget status for a budget ID - */ -async function getBudgetStatus(budgetId: string): Promise<{ id: string; spent: number; last_reset: string | null }> { - const tracker = budgetTracker.get(budgetId); - return { - id: budgetId, - spent: tracker?.spent || 0, - last_reset: tracker?.lastReset?.toISOString() || null, - }; -} - -/** - * Reset budget for a budget ID - */ -async function resetBudget(budgetId: string): Promise { - budgetTracker.set(budgetId, { spent: 0, lastReset: new Date() }); -} - -/** - * Add a budget rule to a policy - */ -async function addBudgetRule(teamId: string | number, policyId: string | null, rule: Budget, userContext: UserContext | null = null): Promise { - const policy = await getPolicy(teamId, policyId, userContext); - policy.budgets = policy.budgets || []; - policy.budgets.push(rule); - return updatePolicy(teamId, policyId, { budgets: policy.budgets }, userContext); -} - -/** - * Add a throttle rule to a policy - */ -async function addThrottleRule(teamId: string | number, policyId: string | null, rule: unknown, userContext: UserContext | null = null): 
Promise { - const policy = await getPolicy(teamId, policyId, userContext); - policy.throttles = policy.throttles || []; - policy.throttles.push(rule); - return updatePolicy(teamId, policyId, { throttles: policy.throttles }, userContext); -} - -/** - * Add a block rule to a policy - */ -async function addBlockRule(teamId: string | number, policyId: string | null, rule: unknown, userContext: UserContext | null = null): Promise { - const policy = await getPolicy(teamId, policyId, userContext); - policy.blocks = policy.blocks || []; - policy.blocks.push(rule); - return updatePolicy(teamId, policyId, { blocks: policy.blocks }, userContext); -} - -/** - * Add a degradation rule to a policy - */ -async function addDegradeRule(teamId: string | number, policyId: string | null, rule: unknown, userContext: UserContext | null = null): Promise { - const policy = await getPolicy(teamId, policyId, userContext); - policy.degradations = policy.degradations || []; - policy.degradations.push(rule); - return updatePolicy( - teamId, - policyId, - { degradations: policy.degradations }, - userContext - ); -} - -/** - * Add an alert rule to a policy - */ -async function addAlertRule(teamId: string | number, policyId: string | null, rule: unknown, userContext: UserContext | null = null): Promise { - const policy = await getPolicy(teamId, policyId, userContext); - policy.alerts = policy.alerts || []; - policy.alerts.push(rule); - return updatePolicy(teamId, policyId, { alerts: policy.alerts }, userContext); -} - -/** - * Clear all rules from a policy - */ -async function clearPolicy(teamId: string | number, policyId: string | null, userContext: UserContext | null = null): Promise { - return updatePolicy( - teamId, - policyId, - { - budgets: [], - throttles: [], - blocks: [], - degradations: [], - alerts: [], - }, - userContext - ); -} - -/** - * Delete a policy - */ -async function deletePolicy(teamId: string | number | null, policyId: string | null, userContext: UserContext | null = null): 
Promise { - if (!teamId) { - teamId = userContext?.team_id ?? null; - } - if (!teamId) { - throw new Error("team_id is required to delete policy"); - } - if (!policyId) { - throw new Error("policy_id is required to delete policy"); - } - - const collection = getPolicyCollection(); - const result = await collection.deleteOne({ team_id: teamId, id: policyId }); - - if (result.deletedCount === 0) { - throw new Error("Policy not found"); - } - - return true; -} - -/** - * Get all policies for a team - */ -async function getPoliciesByTeam(teamId: string | number, options: { limit?: number; offset?: number } = {}): Promise { - const { limit = 100, offset = 0 } = options; - const collection = getPolicyCollection(); - - const policies = await collection - .find({ team_id: teamId }) - .sort({ updated_at: -1 }) - .skip(offset) - .limit(limit) - .toArray() as (Policy & { _id?: unknown })[]; - - // Enrich each policy's budgets with actual spend and analytics from TSDB - const enrichedPolicies = await Promise.all( - policies.map(async ({ _id, ...policy }) => { - if (policy.budgets && policy.budgets.length > 0) { - policy.budgets = await Promise.all( - policy.budgets.map(async (budget) => { - const analytics = await calculateBudgetAnalyticsFromTsdb(teamId, budget); - return { - ...budget, - spent: analytics.spent, - analytics: { - burnRate: analytics.burnRate, - projectedSpend: analytics.projectedSpend, - daysUntilLimit: analytics.daysUntilLimit, - usagePercent: analytics.usagePercent, - projectedPercent: analytics.projectedPercent, - status: analytics.status, - period: analytics.period, - }, - }; - }) - ); - } - return policy as Policy; - }) - ); - - return enrichedPolicies; -} - -/** - * Get usage breakdown for dashboard analytics - */ -async function getUsageBreakdown(teamId: string | number, options: { days?: number; context_id?: string; budget?: Budget } = {}): Promise<{ - daily: Array<{ date: Date; cost: number; requests: number; tokens: number }>; - by_model: Array<{ 
model: string; provider: string; cost: number; requests: number; tokens: number }>; - by_feature: Array<{ feature: string; cost: number; requests: number; tokens: number; percentage: number }>; -}> { - const { days = 7, context_id, budget } = options; - - if (!teamId) { - return { daily: [], by_model: [], by_feature: [] }; - } - - try { - const pool = await getTeamPool(teamId); - const schema = buildSchemaName(teamId); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - const startDate = new Date(); - startDate.setDate(startDate.getDate() - days); - - const conditions = [`team_id = $1`, `"timestamp" >= $2`]; - const values: unknown[] = [String(teamId), startDate]; - let paramIndex = 3; - - // Apply budget-specific filter based on budget type - if (budget) { - const budgetFilter = getBudgetFilter(budget, paramIndex); - if (budgetFilter) { - conditions.push(budgetFilter.condition); - values.push(budgetFilter.value); - paramIndex++; - } - } else if (context_id) { - // Fallback to context_id filter - conditions.push(`metadata->>'context_id' = $${paramIndex}`); - values.push(context_id); - paramIndex++; - } - - // Daily usage breakdown - const dailySql = ` - SELECT - DATE_TRUNC('day', "timestamp") as date, - COALESCE(SUM(cost_total), 0) as cost, - COUNT(*) as requests, - COALESCE(SUM(usage_total_tokens), 0) as tokens - FROM llm_events - WHERE ${conditions.join(" AND ")} - GROUP BY DATE_TRUNC('day', "timestamp") - ORDER BY date DESC - LIMIT ${days} - `; - const dailyResult = await client.query(dailySql, values); - - // Usage by model - const byModelSql = ` - SELECT - model, - provider, - COALESCE(SUM(cost_total), 0) as cost, - COUNT(*) as requests, - COALESCE(SUM(usage_total_tokens), 0) as tokens - FROM llm_events - WHERE ${conditions.join(" AND ")} - GROUP BY model, provider - ORDER BY cost DESC - LIMIT 10 - `; - const byModelResult = await client.query(byModelSql, 
values); - - // Usage by feature (from metadata) - const byFeatureSql = ` - SELECT - COALESCE(metadata->>'feature', agent, 'unknown') as feature, - COALESCE(SUM(cost_total), 0) as cost, - COUNT(*) as requests, - COALESCE(SUM(usage_total_tokens), 0) as tokens - FROM llm_events - WHERE ${conditions.join(" AND ")} - GROUP BY COALESCE(metadata->>'feature', agent, 'unknown') - ORDER BY cost DESC - LIMIT 10 - `; - const byFeatureResult = await client.query(byFeatureSql, values); - - // Calculate totals for percentages - const totalCost = byFeatureResult.rows.reduce( - (sum: number, row: Record) => sum + parseFloat((row.cost as string) || "0"), - 0 - ); - - return { - daily: dailyResult.rows - .map((row: Record) => ({ - date: row.date as Date, - cost: parseFloat(row.cost as string) || 0, - requests: parseInt(row.requests as string) || 0, - tokens: parseInt(row.tokens as string) || 0, - })) - .reverse(), - by_model: byModelResult.rows.map((row: Record) => ({ - model: row.model as string, - provider: row.provider as string, - cost: parseFloat(row.cost as string) || 0, - requests: parseInt(row.requests as string) || 0, - tokens: parseInt(row.tokens as string) || 0, - })), - by_feature: byFeatureResult.rows.map((row: Record) => ({ - feature: row.feature as string, - cost: parseFloat(row.cost as string) || 0, - requests: parseInt(row.requests as string) || 0, - tokens: parseInt(row.tokens as string) || 0, - percentage: totalCost > 0 ? 
((parseFloat(row.cost as string) || 0) / totalCost) * 100 : 0, - })), - }; - } finally { - client.release(); - } - } catch (error) { - console.error(`[Aden Control] Failed to get usage breakdown:`, (error as Error).message); - return { daily: [], by_model: [], by_feature: [] }; - } -} - -/** - * Get SQL filter condition for a budget based on its type - */ -function getBudgetFilter(budget: Budget, paramIndex: number): { condition: string; value: unknown } | null { - switch (budget.type) { - case "global": - // Global budgets apply to all events - no filter needed - return null; - - case "agent": - // Agent budgets filter by agent column OR metadata.agent (for legacy data) - return { - condition: `(agent = $${paramIndex} OR metadata->>'agent' = $${paramIndex})`, - value: budget.name, - }; - - case "tenant": - // Tenant budgets filter by tenant_id in metadata - return { condition: `metadata->>'tenant_id' = $${paramIndex}`, value: budget.name }; - - case "customer": - // Customer budgets filter by customer_id in metadata - return { - condition: `metadata->>'customer_id' = $${paramIndex}`, - value: budget.name, - }; - - case "feature": - // Feature budgets filter by feature in metadata or agent - return { - condition: `(metadata->>'feature' = $${paramIndex} OR agent = $${paramIndex})`, - value: budget.name, - }; - - case "tag": - // Tag budgets filter by tags array matching - if (budget.tags && budget.tags.length > 0) { - // Match if any of the budget's tags are in the event's tags array - return { - condition: `metadata->'tags' ?| $${paramIndex}`, - value: budget.tags, - }; - } - return null; - - default: - return null; - } -} - -/** - * Get rate metrics for dashboard analytics - */ -async function getRateMetrics(teamId: string | number, options: { days?: number; context_id?: string; budget?: Budget } = {}): Promise<{ - peak_rate: number; - p95_rate: number; - avg_rate: number; - min_rate: number; - max_burst: number; -}> { - const { days = 30, context_id, budget } = 
options; - - if (!teamId) { - return { - peak_rate: 0, - p95_rate: 0, - avg_rate: 0, - min_rate: 0, - max_burst: 0, - }; - } - - try { - const pool = await getTeamPool(teamId); - const schema = buildSchemaName(teamId); - const client = await pool.connect(); - - try { - await client.query(`SET search_path TO ${schema}, public`); - await tsdbService.ensureSchema(client); - - const startDate = new Date(); - startDate.setDate(startDate.getDate() - days); - - const conditions = [`team_id = $1`, `"timestamp" >= $2`]; - const values: unknown[] = [String(teamId), startDate]; - let paramIndex = 3; - - // Apply budget-specific filter based on budget type - if (budget) { - const budgetFilter = getBudgetFilter(budget, paramIndex); - if (budgetFilter) { - conditions.push(budgetFilter.condition); - values.push(budgetFilter.value); - paramIndex++; - } - } else if (context_id) { - conditions.push(`metadata->>'context_id' = $${paramIndex}`); - values.push(context_id); - paramIndex++; - } - - // Calculate requests per second in 1-minute buckets - const ratesSql = ` - WITH minute_buckets AS ( - SELECT - DATE_TRUNC('minute', "timestamp") as minute, - COUNT(*) as requests - FROM llm_events - WHERE ${conditions.join(" AND ")} - GROUP BY DATE_TRUNC('minute', "timestamp") - ) - SELECT - MAX(requests / 60.0) as peak_rate, - PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY requests / 60.0) as p95_rate, - AVG(requests / 60.0) as avg_rate, - MIN(requests / 60.0) as min_rate - FROM minute_buckets - `; - const ratesResult = await client.query(ratesSql, values); - const rates = ratesResult.rows[0] || {}; - - // Calculate max burst in 5-second windows - const burstSql = ` - WITH five_second_buckets AS ( - SELECT - DATE_TRUNC('second', "timestamp") - - (EXTRACT(SECOND FROM "timestamp")::integer % 5) * INTERVAL '1 second' as bucket, - COUNT(*) as requests - FROM llm_events - WHERE ${conditions.join(" AND ")} - GROUP BY DATE_TRUNC('second', "timestamp") - - (EXTRACT(SECOND FROM "timestamp")::integer % 
5) * INTERVAL '1 second' - ) - SELECT MAX(requests) as max_burst - FROM five_second_buckets - `; - const burstResult = await client.query(burstSql, values); - const maxBurst = burstResult.rows[0]?.max_burst || 0; - - return { - peak_rate: parseFloat(rates.peak_rate) || 0, - p95_rate: parseFloat(rates.p95_rate) || 0, - avg_rate: parseFloat(rates.avg_rate) || 0, - min_rate: parseFloat(rates.min_rate) || 0, - max_burst: parseInt(maxBurst) || 0, - }; - } finally { - client.release(); - } - } catch (error) { - console.error(`[Aden Control] Failed to get rate metrics:`, (error as Error).message); - return { - peak_rate: 0, - p95_rate: 0, - avg_rate: 0, - min_rate: 0, - max_burst: 0, - }; - } -} - -/** - * Get detailed budget info including spend tracking - */ -async function getBudgetDetails(teamId: string | number, policyId: string | null, budgetId: string): Promise { - const policy = await getPolicy(teamId, policyId); - const budget = policy.budgets?.find((b) => b.id === budgetId); - - if (!budget) { - return null; - } - - // Get real-time tracker status - const tracker = budgetTracker.get(budgetId); - const spent = tracker?.spent ?? budget.spent ?? 
0; - - return { - ...budget, - spent, - }; -} - -interface BudgetContext { - agent?: string; - metadata?: Record; - tenant_id?: string; - customer_id?: string; - feature?: string; - tags?: string[]; -} - -/** - * Find all budgets that match a given context - * Used for multi-budget validation to check ALL applicable budgets - */ -function findMatchingBudgetsForContext(budgets: Budget[], context: BudgetContext = {}): Budget[] { - if (!budgets || !Array.isArray(budgets)) return []; - - // metadata.agent takes precedence over top-level agent - const metadata = context.metadata || {}; - const effectiveAgent = (metadata.agent as string) || context.agent; - - return budgets.filter((budget) => { - switch (budget.type) { - case "global": - // Global budgets always match - return true; - - case "agent": - // Agent budgets match when agent name matches (from top-level or metadata) - return !!effectiveAgent && budget.name === effectiveAgent; - - case "tenant": - // Tenant budgets match when tenant_id matches - return !!context.tenant_id && budget.name === context.tenant_id; - - case "customer": - // Customer budgets match when customer_id matches - return !!context.customer_id && budget.name === context.customer_id; - - case "feature": - // Feature budgets match when feature name matches - return !!context.feature && budget.name === context.feature; - - case "tag": - // Tag budgets match when any budget tag is in context tags - if (!budget.tags || !context.tags) return false; - return budget.tags.some((t) => context.tags!.includes(t)); - - default: - return false; - } - }); -} - -interface BudgetValidationResult { - budget_id: string; - budget_name: string; - budget_type: string; - allowed: boolean; - action: string; - reason: string | null; - authoritative_spend: number; - budget_limit: number; - usage_percent: number; - projected_percent: number; - degrade_to_model: string | null; - degrade_to_provider: string | null; -} - -interface MultiValidationResult { - allowed: 
boolean; - action: string; - reason: string | undefined; - authoritative_spend: number; - budget_limit: number; - usage_percent: number; - projected_percent: number; - degrade_to_model: string | undefined; - degrade_to_provider: string | undefined; - restricting_budget_id: string | undefined; - restricting_budget_name: string | undefined; - budgets_checked: BudgetValidationResult[]; -} - -/** - * Validate multiple budgets and return the most restrictive result - */ -function validateMultipleBudgets(budgets: Budget[], estimatedCost: number, localSpend: number | null = null): MultiValidationResult { - if (!budgets || budgets.length === 0) { - return { - allowed: true, - action: "allow", - reason: "No budgets to validate", - authoritative_spend: 0, - budget_limit: 0, - usage_percent: 0, - projected_percent: 0, - degrade_to_model: undefined, - degrade_to_provider: undefined, - restricting_budget_id: undefined, - restricting_budget_name: undefined, - budgets_checked: [], - }; - } - - // Action priority (higher = more restrictive) - const actionPriority: Record = { allow: 0, throttle: 1, degrade: 2, block: 3 }; - - let mostRestrictiveResult: BudgetValidationResult | null = null; - const budgetsChecked: BudgetValidationResult[] = []; - - for (const budget of budgets) { - // Calculate projected spend - const tsdbSpend = budget.spent || 0; - const authoritativeSpend = - typeof localSpend === "number" && localSpend > tsdbSpend ? localSpend : tsdbSpend; - const projectedSpend = authoritativeSpend + estimatedCost; - const usagePercent = budget.limit > 0 ? (authoritativeSpend / budget.limit) * 100 : 0; - const projectedPercent = budget.limit > 0 ? 
(projectedSpend / budget.limit) * 100 : 0; - - // Determine action for this budget - let allowed = true; - let action = "allow"; - let reason: string | null = null; - let degradeToModel: string | null = null; - let degradeToProvider: string | null = null; - - if (projectedPercent >= 100) { - const limitAction = budget.limitAction || "kill"; - - switch (limitAction) { - case "kill": - allowed = false; - action = "block"; - reason = `Budget "${budget.name}" exceeded: $${projectedSpend.toFixed(4)} > $${ - budget.limit - } (${projectedPercent.toFixed(1)}%)`; - break; - case "degrade": - allowed = true; - action = "degrade"; - reason = `Budget "${budget.name}" at limit, degrading model`; - degradeToModel = budget.degradeToModel || null; - degradeToProvider = budget.degradeToProvider || null; - break; - case "throttle": - allowed = true; - action = "throttle"; - reason = `Budget "${budget.name}" at limit, throttling`; - break; - default: - allowed = false; - action = "block"; - reason = `Budget "${budget.name}" exceeded with unknown action`; - } - } else if ( - projectedPercent >= 90 && - budget.limitAction === "degrade" && - budget.degradeToModel - ) { - allowed = true; - action = "degrade"; - reason = `Budget "${budget.name}" approaching limit (${projectedPercent.toFixed( - 1 - )}%), pre-emptive degradation`; - degradeToModel = budget.degradeToModel; - degradeToProvider = budget.degradeToProvider || null; - } - - const budgetResult: BudgetValidationResult = { - budget_id: budget.id, - budget_name: budget.name, - budget_type: budget.type, - allowed, - action, - reason, - authoritative_spend: authoritativeSpend, - budget_limit: budget.limit, - usage_percent: usagePercent, - projected_percent: projectedPercent, - degrade_to_model: degradeToModel, - degrade_to_provider: degradeToProvider, - }; - - budgetsChecked.push(budgetResult); - - // Track most restrictive result - if ( - !mostRestrictiveResult || - actionPriority[action] > actionPriority[mostRestrictiveResult.action] 
- ) { - mostRestrictiveResult = budgetResult; - } - } - - return { - allowed: mostRestrictiveResult?.allowed ?? true, - action: mostRestrictiveResult?.action ?? "allow", - reason: mostRestrictiveResult?.reason ?? undefined, - authoritative_spend: mostRestrictiveResult?.authoritative_spend ?? 0, - budget_limit: mostRestrictiveResult?.budget_limit ?? 0, - usage_percent: mostRestrictiveResult?.usage_percent ?? 0, - projected_percent: mostRestrictiveResult?.projected_percent ?? 0, - degrade_to_model: mostRestrictiveResult?.degrade_to_model ?? undefined, - degrade_to_provider: mostRestrictiveResult?.degrade_to_provider ?? undefined, - restricting_budget_id: mostRestrictiveResult?.budget_id, - restricting_budget_name: mostRestrictiveResult?.budget_name, - budgets_checked: budgetsChecked, - }; -} - -// ============================================================================= -// Content Storage (for Layer 0 content capture) -// ============================================================================= - -interface ContentItem { - content_id: string; - content_hash: string; - content: string; - byte_size: number; -} - -/** - * Get the MongoDB collection for content storage - */ -function getContentCollection(): MongoCollection { - return _ACHO_MG_DB - .db(_ACHO_MDB_CONFIG.ERP_DBNAME) - .collection(_ACHO_MDB_COLLECTIONS.ADEN_CONTROL_CONTENT); -} - -/** - * Store large content items from SDK - * Used by Layer 0 content capture for storing content that exceeds max_content_bytes threshold - */ -async function storeContent(teamId: string | number, items: ContentItem[]): Promise<{ stored: number }> { - if (!items || items.length === 0) { - return { stored: 0 }; - } - - const collection = getContentCollection(); - const now = new Date().toISOString(); - - let stored = 0; - for (const item of items) { - try { - await collection.updateOne( - { content_id: item.content_id, team_id: teamId }, - { - $set: { - content_hash: item.content_hash, - content: item.content, - 
byte_size: item.byte_size, - updated_at: now, - }, - $setOnInsert: { - content_id: item.content_id, - team_id: teamId, - created_at: now, - }, - }, - { upsert: true } - ); - stored++; - } catch (error) { - console.error(`[Aden Control] Failed to store content ${item.content_id}:`, (error as Error).message); - } - } - - console.log(`[Aden Control] Stored ${stored}/${items.length} content items for team ${teamId}`); - return { stored }; -} - -/** - * Retrieve content by ID - */ -async function getContent(teamId: string | number, contentId: string): Promise { - const collection = getContentCollection(); - const doc = await collection.findOne({ content_id: contentId, team_id: teamId }) as (ContentItem & { _id?: unknown }) | null; - - if (!doc) { - return null; - } - - const { _id, ...content } = doc; - return content as ContentItem; -} - -export default { - getPolicy, - updatePolicy, - deletePolicy, - processEvents, - getEvents, - getMetricsSummary, - getUsageBreakdown, - getRateMetrics, - getBudgetStatus, - getBudgetDetails, - resetBudget, - addBudgetRule, - addThrottleRule, - addBlockRule, - addDegradeRule, - addAlertRule, - clearPolicy, - getPoliciesByTeam, - findMatchingBudgetsForContext, - validateMultipleBudgets, - storeContent, - getContent, -}; diff --git a/hive/src/services/control/control_sockets.ts b/hive/src/services/control/control_sockets.ts deleted file mode 100644 index 0f954e8f..00000000 --- a/hive/src/services/control/control_sockets.ts +++ /dev/null @@ -1,733 +0,0 @@ -/** - * Aden Control Sockets - * - * WebSocket namespace for real-time control plane communication. 
- * Handles: - * - SDK connections and authentication - * - Real-time policy updates - * - Event ingestion - * - Heartbeat monitoring - */ - -import jwt from "jsonwebtoken"; -// Note: userDB.findSaltByToken will be injected via initialization -import controlService from "./control_service"; -import llmEventBatcher from "./llm_event_batcher"; -import type { Server, Socket, Namespace } from "socket.io"; - -interface UserDbService { - findSaltByToken: (token: string) => Promise; -} - -let userDbService: UserDbService | null = null; -let jwtSecret: string = ""; - -/** - * Set user DB service for JWT verification - * @param service - User DB service with findSaltByToken method - * @param secret - JWT secret for token verification - */ -function setUserDbService(service: UserDbService, secret?: string): void { - userDbService = service; - if (secret) { - jwtSecret = secret; - } -} - -interface InstanceInfo { - socket: Socket; - instanceId: string; - policyId: string | null; - connectedAt: Date; - lastHeartbeat: Date; -} - -// HTTP-only agents (no socket connection) -interface HttpInstanceInfo { - instanceId: string; - policyId: string | null; - agentName: string | null; - status: string; - firstSeen: Date; - lastHeartbeat: Date; -} - -// Track connected SDK instances (WebSocket) -// teamId -> Map -const connectedInstances = new Map>(); - -// Track HTTP-only SDK instances (no WebSocket, identified by heartbeats) -// teamId -> Map -const httpInstances = new Map>(); - -// TTL for HTTP agents (remove if no heartbeat for this duration) -const HTTP_AGENT_TTL_MS = 60000; // 60 seconds - -// Store the control emitter globally for agent status broadcasts -let globalControlEmitter: ControlEmitterInner | null = null; - -// Track which teams have active subscriptions for agent status (team -> subscriber count) -const teamSubscriberCounts = new Map(); - -// Helper to get teams with active subscribers -function getTeamsWithSubscribers(): string[] { - return 
Array.from(teamSubscriberCounts.entries()) - .filter(([, count]) => count > 0) - .map(([teamId]) => teamId); -} - -// Interval for periodic agent status broadcasts -let agentStatusInterval: ReturnType | null = null; - -/** - * Register or update an HTTP-only agent from heartbeat - * Called from control_service when processing heartbeat events - */ -function registerHttpAgent( - teamId: string | number, - instanceId: string, - policyId: string | null, - agentName: string | null, - status: string -): void { - const teamKey = String(teamId); - - // Check if this instance is already connected via WebSocket - const wsInstances = connectedInstances.get(teamKey); - if (wsInstances) { - for (const info of wsInstances.values()) { - if (info.instanceId === instanceId) { - // Already tracked via WebSocket, just update heartbeat there - info.lastHeartbeat = new Date(); - return; - } - } - } - - // Track as HTTP-only agent - if (!httpInstances.has(teamKey)) { - httpInstances.set(teamKey, new Map()); - } - - const existing = httpInstances.get(teamKey)!.get(instanceId); - if (existing) { - // Update existing - existing.lastHeartbeat = new Date(); - existing.status = status; - existing.policyId = policyId; - existing.agentName = agentName; - } else { - // New HTTP agent - httpInstances.get(teamKey)!.set(instanceId, { - instanceId, - policyId, - agentName, - status, - firstSeen: new Date(), - lastHeartbeat: new Date(), - }); - console.log( - `[Aden Control] HTTP agent registered: ${agentName || instanceId.slice(0, 8)}... 
(team: ${teamKey})` - ); - - // Broadcast updated agent status to subscribers - broadcastAgentStatus(teamKey); - } -} - -/** - * Clean up stale HTTP agents that haven't sent heartbeats - */ -function cleanupStaleHttpAgents(): void { - const now = Date.now(); - const teamsWithRemovedAgents: string[] = []; - - for (const [teamId, instances] of httpInstances) { - let removed = false; - for (const [instanceId, info] of instances) { - if (now - info.lastHeartbeat.getTime() > HTTP_AGENT_TTL_MS) { - instances.delete(instanceId); - removed = true; - console.log( - `[Aden Control] HTTP agent expired: ${instanceId.slice(0, 8)}... (team: ${teamId})` - ); - } - } - - if (removed) { - teamsWithRemovedAgents.push(teamId); - } - - // Clean up empty team maps - if (instances.size === 0) { - httpInstances.delete(teamId); - } - } - - // Broadcast updated status to teams that had agents removed - for (const teamId of teamsWithRemovedAgents) { - broadcastAgentStatus(teamId); - } -} - -// Run cleanup every 30 seconds -setInterval(cleanupStaleHttpAgents, 30000); - -/** - * Get agent status for a team - */ -function getAgentStatusForTeam(teamId: string): { - type: string; - active: boolean; - count: number; - instances: Array<{ - instance_id: string; - policy_id: string | null; - agent_name: string | null; - connected_at: string; - last_heartbeat: string; - connection_type: "websocket" | "http"; - status?: string; - }>; - timestamp: string; -} { - const wsInstances = connectedInstances.get(teamId); - const httpInsts = httpInstances.get(teamId); - - const instances: Array<{ - instance_id: string; - policy_id: string | null; - agent_name: string | null; - connected_at: string; - last_heartbeat: string; - connection_type: "websocket" | "http"; - status?: string; - }> = []; - - // Add WebSocket-connected instances - if (wsInstances) { - for (const info of wsInstances.values()) { - instances.push({ - instance_id: info.instanceId, - policy_id: info.policyId, - agent_name: null, - connected_at: 
info.connectedAt.toISOString(), - last_heartbeat: info.lastHeartbeat.toISOString(), - connection_type: "websocket", - }); - } - } - - // Add HTTP-only instances - if (httpInsts) { - for (const info of httpInsts.values()) { - instances.push({ - instance_id: info.instanceId, - policy_id: info.policyId, - agent_name: info.agentName, - connected_at: info.firstSeen.toISOString(), - last_heartbeat: info.lastHeartbeat.toISOString(), - connection_type: "http", - status: info.status, - }); - } - } - - const count = instances.length; - - return { - type: "agent-status", - active: count > 0, - count, - instances, - timestamp: new Date().toISOString(), - }; -} - -/** - * Broadcast agent status to all subscribed clients for a team - */ -function broadcastAgentStatus(teamId: string): void { - if (!globalControlEmitter) return; - - const status = getAgentStatusForTeam(teamId); - const room = `team:${teamId}:llm-events`; - globalControlEmitter.to(room).emit("message", status); -} - -/** - * Broadcast agent status to all teams with subscribers - */ -function broadcastAgentStatusToAllTeams(): void { - const teams = getTeamsWithSubscribers(); - for (const teamId of teams) { - broadcastAgentStatus(teamId); - } -} - -interface AdenSocket extends Socket { - user?: Record; - teamId?: string; - policyId?: string | null; - sdkInstanceId?: string; -} - -interface RedisEmitter { - of: (namespace: string) => ControlEmitterInner; -} - -interface ControlEmitterInner { - to: (room: string) => { emit: (event: string, payload: unknown) => void }; - emit: (event: string, payload: unknown) => void; -} - -interface MessageData { - event_type?: string; - [key: string]: unknown; -} - -interface ControlEmitter { - emitPolicyUpdate: (teamId: string | number, policyId: string | null, policy: unknown) => void; - emitCommand: (teamId: string | number, command: { action: string; [key: string]: unknown }) => void; - emitAlert: (teamId: string | number, policyId: string | null, alert: unknown) => void; - 
emitToInstance: (teamId: string | number, instanceId: string, message: unknown) => boolean; - getConnectedCount: (teamId: string | number) => number; - getConnectedInstances: (teamId: string | number) => Array<{ - instance_id: string; - policy_id: string | null; - agent_name: string | null; - connected_at: string; - last_heartbeat: string; - connection_type: "websocket" | "http"; - status?: string; - }>; - getTotalConnectedCount: () => number; -} - -/** - * Initialize Aden Control WebSocket namespace - * @param io - Socket.IO server instance - * @param rootEmitter - Redis emitter for cross-instance communication - * @returns Control emitter for sending updates - */ -function initAdenControlSockets(io: Server, rootEmitter: RedisEmitter): ControlEmitter { - // Create namespace for control plane - const controlNamespace: Namespace = io.of("/v1/control/ws"); - - // Create emitter for this namespace - const controlEmitter: ControlEmitterInner = rootEmitter.of("/v1/control/ws"); - - // Store globally for agent status broadcasts - globalControlEmitter = controlEmitter; - - // Start periodic agent status broadcast (every 2 seconds) - if (agentStatusInterval) { - clearInterval(agentStatusInterval); - } - agentStatusInterval = setInterval(broadcastAgentStatusToAllTeams, 2000); - - // Initialize LLM event batcher with emitter for real-time streaming - llmEventBatcher.setEmitter(controlEmitter as unknown as { to: (room: string) => { emit: (event: string, payload: unknown) => void } }); - - // Authentication middleware - verify JWT token - controlNamespace.use(async (socket: AdenSocket, next: (err?: Error) => void) => { - try { - let token: string | undefined = - socket.handshake.auth?.token || - socket.handshake.headers?.authorization || - (socket.handshake.query?.token as string | undefined); - - if (!token) { - console.error("[Aden Control WS] No authorization provided"); - return next(new Error("Authentication required")); - } - - // Extract token (support "Bearer " and 
"jwt " formats) - if (token.startsWith("Bearer ")) { - token = token.slice(7); - } else if (token.startsWith("jwt ")) { - token = token.slice(4); - } - - if (!token) { - return next(new Error("Invalid token")); - } - - // Verify JWT token using user's salt - if (!userDbService) { - console.error("[Aden Control WS] userDbService not initialized"); - return next(new Error("Server configuration error")); - } - const salt = await userDbService.findSaltByToken(token); - if (!salt) { - console.error("[Aden Control WS] No salt found for token"); - return next(new Error("Invalid token")); - } - // Token is signed with jwtSecret + salt - const verifySecret = jwtSecret ? jwtSecret + salt : salt; - const decoded = await new Promise>((resolve, reject) => { - jwt.verify(token!, verifySecret, (err, decoded) => { - if (err) reject(err); - else resolve(decoded as Record); - }); - }); - - // Store user info on socket - socket.user = decoded; - socket.teamId = decoded.current_team_id as string; - socket.policyId = - (socket.handshake.headers?.["x-policy-id"] as string) || - (socket.handshake.query?.policy_id as string) || - null; - socket.sdkInstanceId = - (socket.handshake.headers?.["x-sdk-instance-id"] as string) || - (socket.handshake.query?.instance_id as string) || - socket.id; - - console.log( - `[Aden Control WS] SDK connecting: ${socket.sdkInstanceId!.slice(0, 8)}... (team: ${socket.teamId})` - ); - - next(); - } catch (error) { - console.error("[Aden Control WS] Auth error:", (error as Error).message); - next(new Error("Authentication failed")); - } - }); - - // Handle connections - controlNamespace.on("connection", async (socket: AdenSocket) => { - const { teamId, policyId, sdkInstanceId } = socket; - - console.log( - `[Aden Control WS] SDK connected: ${sdkInstanceId!.slice(0, 8)}... 
(socket: ${socket.id}, team: ${teamId})` - ); - - // Track this instance by team - if (!connectedInstances.has(teamId!)) { - connectedInstances.set(teamId!, new Map()); - } - connectedInstances.get(teamId!)!.set(socket.id, { - socket, - instanceId: sdkInstanceId!, - policyId: policyId || null, - connectedAt: new Date(), - lastHeartbeat: new Date(), - }); - - // Join room for this team (for policy broadcasts) - socket.join(`team:${teamId}`); - // Also join policy-specific room if policy specified - if (policyId) { - socket.join(`team:${teamId}:policy:${policyId}`); - } - - // Send current policy immediately - try { - const policy = await controlService.getPolicy(teamId!, policyId || null); - socket.emit("message", { - type: "policy", - policy, - }); - } catch (error) { - console.error("[Aden Control WS] Error sending initial policy:", error); - } - - // Handle incoming messages from SDK - socket.on("message", async (data: MessageData | string) => { - try { - await handleSdkMessage(socket, data); - } catch (error) { - console.error("[Aden Control WS] Error handling message:", error); - socket.emit("message", { - type: "error", - error: (error as Error).message, - }); - } - }); - - // Handle direct event submission (alternative to message) - socket.on("event", async (event: Record) => { - try { - await controlService.processEvents(teamId!, policyId || null, [event as any]); - } catch (error) { - console.error("[Aden Control WS] Error processing event:", error); - } - }); - - // Handle disconnection - socket.on("disconnect", (reason: string) => { - console.log( - `[Aden Control WS] SDK disconnected: ${sdkInstanceId!.slice(0, 8)}... 
(reason: ${reason})` - ); - - // Remove from tracking - const instances = connectedInstances.get(teamId!); - if (instances) { - instances.delete(socket.id); - if (instances.size === 0) { - connectedInstances.delete(teamId!); - } - } - }); - - // Handle errors - socket.on("error", (error: Error) => { - console.error( - `[Aden Control WS] Socket error for ${sdkInstanceId!.slice(0, 8)}...:`, - error.message - ); - }); - - // Handle LLM events stream subscription (for dashboard real-time updates) - socket.on("subscribe-llm-events", () => { - const room = `team:${teamId}:llm-events`; - socket.join(room); - console.log(`[Aden Control WS] Socket ${socket.id} subscribed to ${room}`); - - // Track subscriber count for this team - const currentCount = teamSubscriberCounts.get(teamId!) || 0; - teamSubscriberCounts.set(teamId!, currentCount + 1); - - socket.emit("message", { - type: "subscribed", - stream: "llm-events", - teamId: teamId, - }); - - // Send initial agent status - const status = getAgentStatusForTeam(teamId!); - socket.emit("message", status); - }); - - socket.on("unsubscribe-llm-events", () => { - const room = `team:${teamId}:llm-events`; - socket.leave(room); - console.log(`[Aden Control WS] Socket ${socket.id} unsubscribed from ${room}`); - - // Decrement subscriber count - const currentCount = teamSubscriberCounts.get(teamId!) 
|| 0; - if (currentCount > 0) { - teamSubscriberCounts.set(teamId!, currentCount - 1); - } - - socket.emit("message", { - type: "unsubscribed", - stream: "llm-events", - teamId: teamId, - }); - }); - }); - - /** - * Handle incoming message from SDK - */ - async function handleSdkMessage(socket: AdenSocket, data: MessageData | string): Promise { - // Parse if string - let parsedData: MessageData; - if (typeof data === "string") { - parsedData = JSON.parse(data); - } else { - parsedData = data; - } - - const { teamId, policyId, sdkInstanceId } = socket; - - // Route based on event type - switch (parsedData.event_type) { - case "metric": - case "control": - case "heartbeat": - case "error": - // Process as event - await controlService.processEvents(teamId!, policyId || null, [parsedData as any]); - - // Update last heartbeat time - if (parsedData.event_type === "heartbeat") { - const instances = connectedInstances.get(teamId!); - const instance = instances?.get(socket.id); - if (instance) { - instance.lastHeartbeat = new Date(); - } - } - break; - - case "get_policy": { - // Request for current policy - const policy = await controlService.getPolicy(teamId!, policyId || null); - socket.emit("message", { - type: "policy", - policy, - }); - break; - } - - default: - console.warn( - `[Aden Control WS] Unknown event type from ${sdkInstanceId!.slice(0, 8)}...: ${parsedData.event_type}` - ); - } - } - - /** - * Create emitter object for external use - */ - const emitter: ControlEmitter = { - /** - * Emit policy update to all SDK instances for a team/policy - * @param teamId - The team ID - * @param policyId - The policy ID (optional, broadcasts to all team instances if not specified) - * @param policy - The policy object - */ - emitPolicyUpdate(teamId: string | number, policyId: string | null, policy: unknown): void { - console.log(`[Aden Control WS] Broadcasting policy update for team ${teamId}`); - - // If policyId specified, emit only to instances using that policy - if 
(policyId) { - controlEmitter.to(`team:${teamId}:policy:${policyId}`).emit("message", { - type: "policy", - policy, - }); - } else { - // Broadcast to all team instances - controlEmitter.to(`team:${teamId}`).emit("message", { - type: "policy", - policy, - }); - } - }, - - /** - * Emit a command to all SDK instances for a team - */ - emitCommand(teamId: string | number, command: { action: string; [key: string]: unknown }): void { - console.log(`[Aden Control WS] Broadcasting command: ${command.action}`); - - controlEmitter.to(`team:${teamId}`).emit("message", { - type: "command", - command, - }); - }, - - /** - * Emit alert to team instances - */ - emitAlert(teamId: string | number, policyId: string | null, alert: unknown): void { - console.log(`[Aden Control WS] Broadcasting alert for team ${teamId}`); - - const room = policyId ? `team:${teamId}:policy:${policyId}` : `team:${teamId}`; - controlEmitter.to(room).emit("message", { - type: "alert", - alert, - }); - }, - - /** - * Emit to a specific SDK instance - */ - emitToInstance(teamId: string | number, instanceId: string, message: unknown): boolean { - const instances = connectedInstances.get(String(teamId)); - if (!instances) return false; - - for (const [, info] of instances) { - if (info.instanceId === instanceId) { - info.socket.emit("message", message); - return true; - } - } - return false; - }, - - /** - * Get connected instance count for a team (WebSocket + HTTP) - */ - getConnectedCount(teamId: string | number): number { - const teamKey = String(teamId); - const wsCount = connectedInstances.get(teamKey)?.size || 0; - const httpCount = httpInstances.get(teamKey)?.size || 0; - return wsCount + httpCount; - }, - - /** - * Get all connected instances info (for dashboard) - * Includes both WebSocket and HTTP-only agents - */ - getConnectedInstances(teamId: string | number): Array<{ - instance_id: string; - policy_id: string | null; - agent_name: string | null; - connected_at: string; - last_heartbeat: string; 
- connection_type: "websocket" | "http"; - status?: string; - }> { - const teamKey = String(teamId); - const results: Array<{ - instance_id: string; - policy_id: string | null; - agent_name: string | null; - connected_at: string; - last_heartbeat: string; - connection_type: "websocket" | "http"; - status?: string; - }> = []; - - // Add WebSocket-connected instances - const wsInstances = connectedInstances.get(teamKey); - if (wsInstances) { - for (const info of wsInstances.values()) { - results.push({ - instance_id: info.instanceId, - policy_id: info.policyId, - agent_name: null, // WebSocket connections don't have agent_name yet - connected_at: info.connectedAt.toISOString(), - last_heartbeat: info.lastHeartbeat.toISOString(), - connection_type: "websocket", - }); - } - } - - // Add HTTP-only instances - const httpInsts = httpInstances.get(teamKey); - if (httpInsts) { - for (const info of httpInsts.values()) { - results.push({ - instance_id: info.instanceId, - policy_id: info.policyId, - agent_name: info.agentName, - connected_at: info.firstSeen.toISOString(), - last_heartbeat: info.lastHeartbeat.toISOString(), - connection_type: "http", - status: info.status, - }); - } - } - - return results; - }, - - /** - * Get total connected SDK count across all teams (WebSocket + HTTP) - */ - getTotalConnectedCount(): number { - let total = 0; - for (const instances of connectedInstances.values()) { - total += instances.size; - } - for (const instances of httpInstances.values()) { - total += instances.size; - } - return total; - }, - }; - - // Note: Emitter is returned instead of stored globally - // Use app.locals.controlEmitter to access in routes - - console.log("[Aden Control WS] WebSocket namespace initialized at /v1/control/ws"); - - return emitter; -} - -export default initAdenControlSockets; -export { setUserDbService, registerHttpAgent }; diff --git a/hive/src/services/control/llm_event_batcher.ts b/hive/src/services/control/llm_event_batcher.ts deleted file mode 
100644 index 5e1e247a..00000000 --- a/hive/src/services/control/llm_event_batcher.ts +++ /dev/null @@ -1,349 +0,0 @@ -/** - * LLMEventBatcher - Batches LLM events for efficient WebSocket delivery - * - * Features: - * - Per-team in-memory buffers - * - 5-second flush interval (configurable) - * - Buffer size cap with graceful degradation (drop oldest) - * - Payload optimization (only essential fields) - * - Periodic cleanup for idle teams - */ - -const FLUSH_REASONS = { - TIMER: 1, - BUFFER_FULL: 2, - MANUAL: 3, -} as const; - -type FlushReason = typeof FLUSH_REASONS[keyof typeof FLUSH_REASONS]; - -interface TsdbEvent { - timestamp?: Date | string; - trace_id?: string; - model?: string; - provider?: string; - agent?: string; - cost_total?: number; - latency_ms?: number; - usage?: { - input_tokens?: number; - output_tokens?: number; - }; - usage_input_tokens?: number; - usage_output_tokens?: number; -} - -interface EventSummary { - timestamp: string | undefined; - trace_id: string | undefined; - model: string; - provider: string | null; - agent: string | null; - input_tokens: number; - output_tokens: number; - cost: number; - latency_ms: number | null; -} - -interface TeamBuffer { - teamId: string; - events: EventSummary[]; - flushTimer: ReturnType | null; - lastFlush: Date; - droppedCount: number; -} - -interface BatchPayload { - type: string; - teamId: string; - events: EventSummary[]; - meta: { - batchSize: number; - droppedCount: number; - windowStart: string | undefined; - windowEnd: string | undefined; - flushReason: FlushReason; - }; -} - -interface Emitter { - to: (room: string) => { emit: (event: string, payload: BatchPayload) => void }; -} - -interface BatcherOptions { - flushIntervalMs?: number; - maxBufferSize?: number; - maxEventsPerFlush?: number; -} - -class LLMEventBatcher { - private flushIntervalMs: number; - private maxBufferSize: number; - private maxEventsPerFlush: number; - private teamBuffers: Map; - private emitter: Emitter | null; - private 
totalEventsBuffered: number; - private totalBatchesSent: number; - private totalEventsDropped: number; - private _cleanupInterval: ReturnType; - - constructor(options: BatcherOptions = {}) { - // Configuration - this.flushIntervalMs = options.flushIntervalMs || 5000; // 5 seconds - this.maxBufferSize = options.maxBufferSize || 500; // Max events per team buffer - this.maxEventsPerFlush = options.maxEventsPerFlush || 100; // Max events per batch - - // State - this.teamBuffers = new Map(); // teamId -> TeamBuffer - this.emitter = null; // Set by setEmitter() - - // Metrics - this.totalEventsBuffered = 0; - this.totalBatchesSent = 0; - this.totalEventsDropped = 0; - - // Start periodic cleanup - this._cleanupInterval = setInterval(() => { - this.cleanup(); - }, 300000); // Every 5 minutes - } - - /** - * Set the Socket.IO emitter for broadcasting - * Called during control_sockets initialization - * @param {Object} controlEmitter - Socket.IO namespace emitter - */ - setEmitter(controlEmitter: Emitter): void { - this.emitter = controlEmitter; - console.log("[LLMEventBatcher] Emitter configured"); - } - - /** - * Add events to the buffer for a team - * Called from control_service.js after TSDB insert - * @param {string|number} teamId - Team identifier - * @param {Array} tsdbEvents - Array of TSDB events - */ - add(teamId: string | number, tsdbEvents: TsdbEvent[]): void { - if (!tsdbEvents || tsdbEvents.length === 0) return; - - const teamIdStr = String(teamId); - - // Transform to lightweight summaries - const summaries = tsdbEvents.map((e) => this._transformToSummary(e)); - - // Get or create buffer - let buffer = this.teamBuffers.get(teamIdStr); - if (!buffer) { - buffer = this._createBuffer(teamIdStr); - this.teamBuffers.set(teamIdStr, buffer); - } - - // Add events with overflow handling - this._addToBuffer(buffer, summaries); - - // Start/reset flush timer if not already running - this._scheduleFlush(teamIdStr, buffer); - } - - /** - * Transform full TSDB event to 
lightweight summary - * Only includes fields needed for dashboard display - * @param {Object} event - Full TSDB event - * @returns {Object} Lightweight event summary - */ - private _transformToSummary(event: TsdbEvent): EventSummary { - // Handle both nested usage object (from transformMetricToTsdbEvent) - // and flat fields (from TSDB query results) - const inputTokens = event.usage?.input_tokens ?? event.usage_input_tokens ?? 0; - const outputTokens = event.usage?.output_tokens ?? event.usage_output_tokens ?? 0; - - return { - timestamp: event.timestamp instanceof Date ? event.timestamp.toISOString() : event.timestamp, - trace_id: event.trace_id, - model: event.model || "", - provider: event.provider || null, - agent: event.agent || null, - input_tokens: inputTokens, - output_tokens: outputTokens, - cost: event.cost_total || 0, - latency_ms: event.latency_ms || null, - }; - } - - /** - * Add events to buffer with overflow handling - * @param {Object} buffer - Team buffer - * @param {Array} summaries - Event summaries to add - */ - private _addToBuffer(buffer: TeamBuffer, summaries: EventSummary[]): void { - for (const summary of summaries) { - if (buffer.events.length >= this.maxBufferSize) { - // Drop oldest event - buffer.events.shift(); - buffer.droppedCount++; - this.totalEventsDropped++; - } - buffer.events.push(summary); - this.totalEventsBuffered++; - } - - // Force flush if buffer is full - if (buffer.events.length >= this.maxBufferSize) { - this._flush(buffer.teamId, FLUSH_REASONS.BUFFER_FULL); - } - } - - /** - * Schedule flush timer for a team - * @param {string} teamId - Team identifier - * @param {Object} buffer - Team buffer - */ - private _scheduleFlush(teamId: string, buffer: TeamBuffer): void { - // Don't reschedule if timer already running - if (buffer.flushTimer) return; - - buffer.flushTimer = setTimeout(() => { - this._flush(teamId, FLUSH_REASONS.TIMER); - }, this.flushIntervalMs); - } - - /** - * Flush buffered events to WebSocket - * @param 
{string} teamId - Team identifier - * @param {number} flushReason - Reason for flush - */ - private _flush(teamId: string, flushReason: FlushReason): void { - const buffer = this.teamBuffers.get(teamId); - if (!buffer || buffer.events.length === 0) return; - - // Clear timer - if (buffer.flushTimer) { - clearTimeout(buffer.flushTimer); - buffer.flushTimer = null; - } - - // Extract batch (up to maxEventsPerFlush) - const batch = buffer.events.splice(0, this.maxEventsPerFlush); - const droppedCount = buffer.droppedCount; - buffer.droppedCount = 0; - buffer.lastFlush = new Date(); - - // Build payload - const payload: BatchPayload = { - type: "llm-events-batch", - teamId: teamId, - events: batch, - meta: { - batchSize: batch.length, - droppedCount: droppedCount, - windowStart: batch[0]?.timestamp, - windowEnd: batch[batch.length - 1]?.timestamp, - flushReason: flushReason, - }, - }; - - // Emit to team room - if (this.emitter) { - const room = `team:${teamId}:llm-events`; - this.emitter.to(room).emit("message", payload); - this.totalBatchesSent++; - - if (batch.length > 0) { - console.log( - `[LLMEventBatcher] Flushed ${batch.length} events to ${room} ` + - `(dropped: ${droppedCount}, reason: ${flushReason})` - ); - } - } - - // Schedule next flush if buffer still has events - if (buffer.events.length > 0) { - this._scheduleFlush(teamId, buffer); - } - } - - /** - * Create a new buffer for a team - * @param {string} teamId - Team identifier - * @returns {Object} New team buffer - */ - private _createBuffer(teamId: string): TeamBuffer { - return { - teamId: teamId, - events: [], - flushTimer: null, - lastFlush: new Date(), - droppedCount: 0, - }; - } - - /** - * Manually flush all buffers (useful for shutdown) - */ - flushAll(): void { - for (const [teamId] of this.teamBuffers) { - this._flush(teamId, FLUSH_REASONS.MANUAL); - } - } - - /** - * Get metrics for monitoring - * @returns {Object} Batcher metrics - */ - getMetrics(): { activeTeams: number; totalBuffered: 
number; totalEventsBuffered: number; totalBatchesSent: number; totalEventsDropped: number } { - const activeTeams = this.teamBuffers.size; - const totalBuffered = Array.from(this.teamBuffers.values()).reduce( - (sum, b) => sum + b.events.length, - 0 - ); - - return { - activeTeams, - totalBuffered, - totalEventsBuffered: this.totalEventsBuffered, - totalBatchesSent: this.totalBatchesSent, - totalEventsDropped: this.totalEventsDropped, - }; - } - - /** - * Cleanup buffers for teams with no recent activity - * Prevents memory leaks from inactive teams - * @param {number} maxIdleMs - Max idle time before cleanup (default: 5 minutes) - */ - cleanup(maxIdleMs = 300000): void { - const now = Date.now(); - let cleaned = 0; - - for (const [teamId, buffer] of this.teamBuffers.entries()) { - if (buffer.events.length === 0 && now - buffer.lastFlush.getTime() > maxIdleMs) { - if (buffer.flushTimer) { - clearTimeout(buffer.flushTimer); - } - this.teamBuffers.delete(teamId); - cleaned++; - } - } - - if (cleaned > 0) { - console.log(`[LLMEventBatcher] Cleaned up ${cleaned} idle team buffers`); - } - } - - /** - * Shutdown the batcher (cleanup intervals and flush remaining) - */ - shutdown(): void { - if (this._cleanupInterval) { - clearInterval(this._cleanupInterval); - } - this.flushAll(); - console.log("[LLMEventBatcher] Shutdown complete"); - } -} - -// Singleton instance -const llmEventBatcher = new LLMEventBatcher(); - -export default llmEventBatcher; diff --git a/hive/src/services/mongo/mongo_db.ts b/hive/src/services/mongo/mongo_db.ts deleted file mode 100644 index dcecfd32..00000000 --- a/hive/src/services/mongo/mongo_db.ts +++ /dev/null @@ -1,26 +0,0 @@ -import config from "../../config"; -import { MongoClient } from "mongodb"; - -declare const _ACHO_MG_DB: undefined | { db: (name: string) => unknown }; - -let client: MongoClient | null = null; - -const getMongoClient = async (): Promise => { - if (client) return client; - if (!config.mongodb.url) { - throw new 
Error("Missing MONGODB_URL in environment"); - } - client = new MongoClient(config.mongodb.url); - await client.connect(); - return client; -}; - -const getMongoDb = async (dbName = config.mongodb.dbName): Promise => { - if (typeof _ACHO_MG_DB !== "undefined" && _ACHO_MG_DB && typeof _ACHO_MG_DB.db === "function") { - return _ACHO_MG_DB.db(dbName); - } - const c = await getMongoClient(); - return c.db(dbName); -}; - -export { getMongoDb }; diff --git a/hive/src/services/quickstart/quickstart_service.ts b/hive/src/services/quickstart/quickstart_service.ts deleted file mode 100644 index 5f3b4a4e..00000000 --- a/hive/src/services/quickstart/quickstart_service.ts +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Quickstart Document Generation Service - * Template-based SDK quickstart documentation generator - * - * Structure: - * - docs/aden-sdk-documents/config/*.json - Configuration for vendors, languages, frameworks - * - docs/aden-sdk-documents/templates/{language}/*.md - Complete template files - */ - -import fs from "fs"; -import path from "path"; - -// Base paths -const DOCS_BASE = path.join(__dirname, "../../../docs/aden-sdk-documents"); -const CONFIG_PATH = path.join(DOCS_BASE, "config"); -const TEMPLATES_PATH = path.join(DOCS_BASE, "templates"); - -interface VendorConfig { - name: string; - envVarComment?: string; -} - -interface LanguageConfig { - name: string; -} - -interface FrameworkConfig { - name: string; - description: string; - templateFile: string; - pythonSupport: boolean; - typescriptSupport: boolean; -} - -interface ConfigCache { - vendors: Record; - languages: Record; - frameworks: Record; -} - -// Cache for configs and templates -let configCache: ConfigCache | null = null; -let templateCache: Record = {}; - -/** - * Load all configuration files - */ -function loadConfigs(): ConfigCache { - if (configCache) return configCache; - - configCache = { - vendors: JSON.parse( - fs.readFileSync(path.join(CONFIG_PATH, "llm-vendors.json"), "utf-8") - ), - 
languages: JSON.parse( - fs.readFileSync(path.join(CONFIG_PATH, "sdk-languages.json"), "utf-8") - ), - frameworks: JSON.parse( - fs.readFileSync(path.join(CONFIG_PATH, "agent-frameworks.json"), "utf-8") - ), - }; - - return configCache; -} - -/** - * Load a template file - */ -function loadTemplate(language: string, templateName: string): string | null { - const cacheKey = `${language}/${templateName}`; - if (templateCache[cacheKey]) return templateCache[cacheKey]; - - const templatePath = path.join( - TEMPLATES_PATH, - language, - `${templateName}.md` - ); - - if (!fs.existsSync(templatePath)) { - return null; - } - - templateCache[cacheKey] = fs.readFileSync(templatePath, "utf-8"); - return templateCache[cacheKey]; -} - -/** - * Clear caches (useful for development/testing) - */ -function clearCaches(): void { - configCache = null; - templateCache = {}; -} - -/** - * Replace variables in template: {{variableName}} - */ -function replaceVariables( - template: string, - variables: Record -): string { - return template.replace(/\{\{(\w+)\}\}/g, (_match, key) => { - return variables[key] !== undefined ? variables[key] : ""; - }); -} - -interface GenerateQuickstartParams { - llmVendor?: string; - sdkLanguage?: string; - agentFramework: string; - apiKey: string; -} - -/** - * Generate quickstart document based on parameters - */ -function generateQuickstart({ - llmVendor = "openai", - sdkLanguage = "python", - agentFramework, - apiKey, -}: GenerateQuickstartParams): string { - const config = loadConfigs(); - - // Validate inputs - if (!config.vendors[llmVendor]) { - throw new Error( - `Invalid LLM vendor: ${llmVendor}. Valid options: ${Object.keys( - config.vendors - ).join(", ")}` - ); - } - if (!config.languages[sdkLanguage]) { - throw new Error( - `Invalid SDK language: ${sdkLanguage}. 
Valid options: ${Object.keys( - config.languages - ).join(", ")}` - ); - } - if (!config.frameworks[agentFramework]) { - throw new Error( - `Invalid agent framework: ${agentFramework}. Valid options: ${Object.keys( - config.frameworks - ).join(", ")}` - ); - } - if (!apiKey) { - throw new Error("API key is required"); - } - - const vendor = config.vendors[llmVendor]; - const framework = config.frameworks[agentFramework]; - - // Check language support - if (sdkLanguage === "python" && !framework.pythonSupport) { - throw new Error(`${framework.name} does not support Python`); - } - if (sdkLanguage !== "python" && !framework.typescriptSupport) { - throw new Error(`${framework.name} does not support ${sdkLanguage}`); - } - - // Load template - const template = loadTemplate(sdkLanguage, framework.templateFile); - - if (!template) { - throw new Error( - `Template not found: ${sdkLanguage}/${framework.templateFile}` - ); - } - - // Build variables - const variables: Record = { - apiKey, - serverUrl: process.env.HIVE_HOST || "https://hive.adenhq.com", - envVarComment: vendor.envVarComment || "", - }; - - // Replace variables and return - return replaceVariables(template, variables); -} - -interface QuickstartOptions { - llmVendors: Array<{ id: string; name: string }>; - sdkLanguages: Array<{ id: string; name: string }>; - agentFrameworks: Array<{ - id: string; - name: string; - description: string; - pythonSupport: boolean; - typescriptSupport: boolean; - }>; -} - -/** - * Get available options for quickstart generation - */ -function getQuickstartOptions(): QuickstartOptions { - const config = loadConfigs(); - - return { - llmVendors: Object.entries(config.vendors).map(([key, value]) => ({ - id: key, - name: value.name, - })), - sdkLanguages: Object.entries(config.languages).map(([key, value]) => ({ - id: key, - name: value.name, - })), - agentFrameworks: Object.entries(config.frameworks).map(([key, value]) => ({ - id: key, - name: value.name, - description: 
value.description, - pythonSupport: value.pythonSupport, - typescriptSupport: value.typescriptSupport, - })), - }; -} - -/** - * Reload configs (useful after updating config files) - */ -function reloadConfigs(): ConfigCache { - clearCaches(); - return loadConfigs(); -} - -export { generateQuickstart, getQuickstartOptions, reloadConfigs, clearCaches }; diff --git a/hive/src/services/tsdb/00-init-timescaledb.sql b/hive/src/services/tsdb/00-init-timescaledb.sql deleted file mode 100644 index 40e8046c..00000000 --- a/hive/src/services/tsdb/00-init-timescaledb.sql +++ /dev/null @@ -1,11 +0,0 @@ --- Initialize TimescaleDB extension --- This must run BEFORE schema.sql to enable hypertables and continuous aggregates - --- Create TimescaleDB extension -CREATE EXTENSION IF NOT EXISTS timescaledb; - --- Log successful initialization -DO $$ -BEGIN - RAISE NOTICE 'TimescaleDB extension initialized successfully'; -END$$; diff --git a/hive/src/services/tsdb/analytics_service.ts b/hive/src/services/tsdb/analytics_service.ts deleted file mode 100644 index ea52e330..00000000 --- a/hive/src/services/tsdb/analytics_service.ts +++ /dev/null @@ -1,748 +0,0 @@ -/** - * TSDB Analytics Service - * Computes windowed aggregations from llm_events for dashboard analytics. 
- */ - -import { PoolClient } from 'pg'; -import pricingService from './pricing_service'; - -const BUCKETS = [ - { label: '0-1s', min: 0, max: 1000 }, - { label: '1-2s', min: 1000, max: 2000 }, - { label: '2-5s', min: 2000, max: 5000 }, - { label: '5-10s', min: 5000, max: 10000 }, - { label: '10-20s', min: 10000, max: 20000 }, - { label: '20s+', min: 20000, max: null as number | null }, -]; - -interface WindowDef { - label: string; - start: Date | null; - end: Date; -} - -interface DailyRow { - bucket: string; - requests: number; - cost_total: number; - tokens: { - total: number; - input: number; - output: number; - cached: number; - }; -} - -interface LatencyRow { - bucket: string; - count: number; - avg_ms: number | null; - p50_ms: number | null; - p95_ms: number | null; - p99_ms: number | null; -} - -interface ModelCostRow { - model: string; - cost_total: number; - cached_tokens: number; -} - -interface AgentCostRow { - agent: string; - requests: number; - cost_total: number; - input_tokens: number; - output_tokens: number; - avg_latency_ms: number | null; -} - -const toNumber = (val: unknown, fallback = 0): number => { - const n = Number(val); - return Number.isFinite(n) ? 
n : fallback; -}; - -const percentile = (values: number[], pct: number): number | null => { - if (!values.length) return null; - const sorted = [...values].sort((a, b) => a - b); - if (sorted.length === 1) return sorted[0]; - const idx = Math.max(0, Math.min(sorted.length - 1, Math.floor(pct * (sorted.length - 1)))); - return sorted[idx]; -}; - -const startOfWeekUtc = (d: Date): Date => { - const day = d.getUTCDay(); - const diff = (day + 6) % 7; - const monday = new Date(Date.UTC(d.getUTCFullYear(), d.getUTCMonth(), d.getUTCDate(), 0, 0, 0, 0)); - monday.setUTCDate(monday.getUTCDate() - diff); - return monday; -}; - -const startOfMonthUtc = (d: Date): Date => - new Date(Date.UTC(d.getUTCFullYear(), d.getUTCMonth(), 1, 0, 0, 0, 0)); - -const startOfDayUtc = (d: Date): Date => - new Date(Date.UTC(d.getUTCFullYear(), d.getUTCMonth(), d.getUTCDate(), 0, 0, 0, 0)); - -export const parseAnalyticsWindow = (label: string): WindowDef => { - const now = new Date(); - switch ((label || '').toLowerCase()) { - case 'all_time': - case 'all-time': - case 'alltime': - return { label: 'all_time', start: null, end: now }; - case 'today': { - const start = startOfDayUtc(now); - return { label: 'today', start, end: now }; - } - case 'last_2_weeks': - case 'last-2-weeks': - case 'last2weeks': { - const start = new Date(now.getTime() - 14 * 24 * 3600 * 1000); - return { label: 'last_2_weeks', start, end: now }; - } - case 'this_week': { - const start = startOfWeekUtc(now); - return { label: 'this_week', start, end: now }; - } - case 'this_month': - default: { - const start = startOfMonthUtc(now); - return { label: 'this_month', start, end: now }; - } - } -}; - -const bucketLatency = (latMs: number, buckets: typeof BUCKETS): string | null => { - if (latMs === null || latMs === undefined) return null; - for (const b of buckets) { - if (b.max === null) { - if (latMs >= b.min) return b.label; - } else if (latMs >= b.min && latMs < b.max) { - return b.label; - } - } - return null; -}; - 
-const buildLatencyDistribution = (rows: { bucket: string; count: number }[]) => { - const counts = new Map(rows.map((r) => [r.bucket, r.count])); - const total = rows.reduce((acc, r) => acc + (r.count || 0), 0); - return BUCKETS.map((b) => { - const count = counts.get(b.label) || 0; - return { - bucket: b.label, - count, - share: total ? count / total : null, - }; - }); -}; - -const bucketLabel = (date: Date, resolution: string): string => { - if (resolution === 'hour') { - const h = new Date( - Date.UTC(date.getUTCFullYear(), date.getUTCMonth(), date.getUTCDate(), date.getUTCHours(), 0, 0, 0) - ); - return h.toISOString().slice(0, 13) + ':00:00Z'; - } - return date.toISOString().slice(0, 10); -}; - -const fetchDailyCA = async ({ - client, - start, - end, -}: { - client: PoolClient; - start: Date | null; - end: Date | null; -}): Promise => { - const params: (Date | null)[] = []; - const conds: string[] = []; - if (start) { - params.push(start); - conds.push(`bucket >= $${params.length}`); - } - if (end) { - params.push(end); - conds.push(`bucket < $${params.length}`); - } - const sql = ` - SELECT bucket, requests, cost_total, input_tokens, output_tokens, total_tokens, cached_tokens - FROM llm_events_daily_ca - ${conds.length ? `WHERE ${conds.join(' AND ')}` : ''} - ORDER BY bucket ASC - `; - const { rows } = await client.query(sql, params); - return rows.map((r: any) => ({ - bucket: r.bucket instanceof Date ? 
r.bucket.toISOString().slice(0, 10) : r.bucket, - requests: Number(r.requests) || 0, - cost_total: toNumber(r.cost_total, 0), - tokens: { - total: toNumber(r.total_tokens, 0), - input: toNumber(r.input_tokens, 0), - output: toNumber(r.output_tokens, 0), - cached: toNumber(r.cached_tokens, 0), - }, - })); -}; - -const fetchTodayFromBaseTable = async ({ - client, - todayStart, - end, -}: { - client: PoolClient; - todayStart: Date; - end: Date; -}): Promise => { - const sql = ` - SELECT - $1::date as bucket, - COUNT(*) as requests, - COALESCE(SUM(cost_total), 0) as cost_total, - COALESCE(SUM(usage_input_tokens), 0) as input_tokens, - COALESCE(SUM(usage_output_tokens), 0) as output_tokens, - COALESCE(SUM(COALESCE(usage_total_tokens, usage_input_tokens + usage_output_tokens)), 0) as total_tokens, - COALESCE(SUM(usage_cached_tokens), 0) as cached_tokens - FROM llm_events - WHERE "timestamp" >= $1 AND "timestamp" <= $2 - `; - const { rows } = await client.query(sql, [todayStart, end]); - if (!rows.length || rows[0].requests === 0 || rows[0].requests === '0') { - return null; - } - const r = rows[0]; - return { - bucket: todayStart.toISOString().slice(0, 10), - requests: Number(r.requests) || 0, - cost_total: toNumber(r.cost_total, 0), - tokens: { - total: toNumber(r.total_tokens, 0), - input: toNumber(r.input_tokens, 0), - output: toNumber(r.output_tokens, 0), - cached: toNumber(r.cached_tokens, 0), - }, - }; -}; - -const fetchLatencyDaily = async ({ - client, - start, - end, -}: { - client: PoolClient; - start: Date | null; - end: Date | null; -}): Promise => { - const params: (string | Date)[] = ['1 day']; - const conds = ['latency_ms IS NOT NULL']; - if (start) { - params.push(start); - conds.push(`"timestamp" >= $${params.length}`); - } - if (end) { - params.push(end); - conds.push(`"timestamp" < $${params.length}`); - } - const sql = ` - SELECT - time_bucket($1::interval, "timestamp") AS bucket, - COUNT(latency_ms) AS count, - AVG(latency_ms) AS avg_ms, - 
percentile_cont(0.5) WITHIN GROUP (ORDER BY latency_ms) AS p50_ms, - percentile_cont(0.95) WITHIN GROUP (ORDER BY latency_ms) AS p95_ms, - percentile_cont(0.99) WITHIN GROUP (ORDER BY latency_ms) AS p99_ms - FROM llm_events - WHERE ${conds.join(' AND ')} - GROUP BY 1 - ORDER BY 1 ASC - `; - const { rows } = await client.query(sql, params); - return rows.map((r: any) => ({ - bucket: r.bucket instanceof Date ? r.bucket.toISOString().slice(0, 10) : r.bucket, - count: Number(r.count) || 0, - avg_ms: r.avg_ms === null ? null : Number(r.avg_ms), - p50_ms: r.p50_ms === null ? null : Number(r.p50_ms), - p95_ms: r.p95_ms === null ? null : Number(r.p95_ms), - p99_ms: r.p99_ms === null ? null : Number(r.p99_ms), - })); -}; - -const fetchLatencyDistributionDaily = async ({ - client, - start, - end, -}: { - client: PoolClient; - start: Date | null; - end: Date | null; -}): Promise<{ bucket: string; count: number }[]> => { - const params: Date[] = []; - const conds = ['latency_ms IS NOT NULL']; - if (start) { - params.push(start); - conds.push(`"timestamp" >= $${params.length}`); - } - if (end) { - params.push(end); - conds.push(`"timestamp" < $${params.length}`); - } - const sql = ` - SELECT - CASE - WHEN latency_ms < 1000 THEN '0-1s' - WHEN latency_ms < 2000 THEN '1-2s' - WHEN latency_ms < 5000 THEN '2-5s' - WHEN latency_ms < 10000 THEN '5-10s' - WHEN latency_ms < 20000 THEN '10-20s' - ELSE '20s+' - END AS bucket, - COUNT(*) AS count - FROM llm_events - WHERE ${conds.join(' AND ')} - GROUP BY 1 - `; - const { rows } = await client.query(sql, params); - return rows.map((r: any) => ({ bucket: r.bucket, count: Number(r.count) || 0 })); -}; - -const fetchModelCost = async ({ - client, - start, - end, -}: { - client: PoolClient; - start: Date | null; - end: Date | null; -}): Promise => { - const params: Date[] = []; - const conds: string[] = []; - if (start) { - params.push(start); - conds.push(`"timestamp" >= $${params.length}`); - } - if (end) { - params.push(end); - 
conds.push(`"timestamp" < $${params.length}`); - } - const sql = ` - SELECT model, - SUM(cost_total) AS cost_total, - SUM(usage_cached_tokens) AS cached_tokens - FROM llm_events - ${conds.length ? `WHERE ${conds.join(' AND ')}` : ''} - GROUP BY model - `; - const { rows } = await client.query(sql, params); - return rows - .filter((r: any) => r.model) - .map((r: any) => ({ - model: r.model, - cost_total: toNumber(r.cost_total, 0), - cached_tokens: toNumber(r.cached_tokens, 0), - })); -}; - -const fetchAgentCost = async ({ - client, - start, - end, -}: { - client: PoolClient; - start: Date | null; - end: Date | null; -}): Promise => { - const params: Date[] = []; - const conds: string[] = []; - if (start) { - params.push(start); - conds.push(`"timestamp" >= $${params.length}`); - } - if (end) { - params.push(end); - conds.push(`"timestamp" < $${params.length}`); - } - const sql = ` - SELECT agent, - COUNT(*) AS requests, - SUM(cost_total) AS cost_total, - SUM(usage_input_tokens) AS input_tokens, - SUM(usage_output_tokens) AS output_tokens, - AVG(latency_ms) AS avg_latency_ms - FROM llm_events - ${conds.length ? `WHERE ${conds.join(' AND ')}` : ''} - GROUP BY agent - `; - const { rows } = await client.query(sql, params); - return rows - .filter((r: any) => r.agent) - .map((r: any) => ({ - agent: r.agent, - requests: Number(r.requests) || 0, - cost_total: toNumber(r.cost_total, 0), - input_tokens: toNumber(r.input_tokens, 0), - output_tokens: toNumber(r.output_tokens, 0), - avg_latency_ms: r.avg_latency_ms === null ? 
null : Number(r.avg_latency_ms), - })); -}; - -export const buildAnalytics = async ({ - windowLabel, - client, - resolution = 'day', -}: { - windowLabel: string; - client: PoolClient; - resolution?: 'day' | 'hour'; -}) => { - const windowDef = parseAnalyticsWindow(windowLabel); - - if (resolution === 'day') { - try { - const now = windowDef.end || new Date(); - const todayMidnight = new Date( - Date.UTC(now.getUTCFullYear(), now.getUTCMonth(), now.getUTCDate(), 0, 0, 0, 0) - ); - - const caRows = await fetchDailyCA({ client, start: windowDef.start, end: todayMidnight }); - - let todayData: DailyRow | null = null; - if (now >= todayMidnight) { - try { - todayData = await fetchTodayFromBaseTable({ client, todayStart: todayMidnight, end: now }); - } catch { - // Ignore errors fetching today's data - } - } - - const allRows = [...(caRows || [])]; - if (todayData) { - const todayBucket = todayData.bucket; - const existingIdx = allRows.findIndex((r) => r.bucket === todayBucket); - if (existingIdx >= 0) { - allRows[existingIdx] = todayData; - } else { - allRows.push(todayData); - } - } - - if (allRows && allRows.length) { - const total_cost = allRows.reduce((acc, r) => acc + (r.cost_total || 0), 0); - const total_requests = allRows.reduce((acc, r) => acc + (r.requests || 0), 0); - const total_tokens = allRows.reduce((acc, r) => acc + (r.tokens.total || 0), 0); - - const bucket_cost = allRows.map((r) => ({ bucket: r.bucket, cost_total: r.cost_total })); - const bucket_requests = allRows.map((r) => ({ bucket: r.bucket, requests: r.requests })); - const bucket_tokens = allRows.map((r) => ({ - bucket: r.bucket, - total_tokens: r.tokens.total, - input_tokens: r.tokens.input, - output_tokens: r.tokens.output, - cached_tokens: r.tokens.cached, - })); - - const latencyBuckets = await fetchLatencyDaily({ - client, - start: windowDef.start, - end: windowDef.end, - }); - const latencyDistributionRows = await fetchLatencyDistributionDaily({ - client, - start: windowDef.start, - end: 
windowDef.end, - }); - const latency_distribution = buildLatencyDistribution(latencyDistributionRows); - const latency_total = latencyDistributionRows.reduce((acc, r) => acc + (r.count || 0), 0); - const avg_latency_ms = - latencyBuckets.reduce( - (acc, r) => acc + (r.avg_ms !== null ? r.avg_ms * (r.count || 0) : 0), - 0 - ) / (latency_total || 1); - - const modelRows = await fetchModelCost({ client, start: windowDef.start, end: windowDef.end }); - const models = modelRows - .sort((a, b) => (b.cost_total || 0) - (a.cost_total || 0)) - .map((r) => ({ - model: r.model, - cost_total: r.cost_total, - share: total_cost ? r.cost_total / total_cost : null, - })); - const cache_savings = modelRows.reduce((acc, r) => { - const pricing = pricingService.getModelPricingSync(r.model || ''); - return acc + (r.cached_tokens / 1_000_000) * pricing.input; - }, 0); - - const agentRows = await fetchAgentCost({ client, start: windowDef.start, end: windowDef.end }); - const agents = agentRows - .sort((a, b) => (b.cost_total || 0) - (a.cost_total || 0)) - .map((r) => ({ - agent: r.agent, - requests: r.requests, - cost_total: r.cost_total, - share: total_cost ? r.cost_total / total_cost : null, - avg_latency_ms: r.avg_latency_ms, - })); - - return { - window: { - label: windowDef.label, - start: windowDef.start ? windowDef.start.toISOString() : null, - end: windowDef.end ? windowDef.end.toISOString() : null, - }, - summary: { - total_cost, - total_requests, - total_tokens, - avg_latency_ms: Number.isFinite(avg_latency_ms) ? 
avg_latency_ms : null, - cache_savings, - }, - timeline: { - resolution: 'day', - daily: { - cost: bucket_cost, - requests: bucket_requests, - tokens: bucket_tokens, - latency_percentiles: latencyBuckets, - }, - }, - cost_by_model: { - total_cost, - models, - }, - cost_by_agent: { - total_cost, - agents, - }, - latency_distribution: { - total: latency_total, - buckets: latency_distribution, - }, - }; - } - } catch (err) { - // Fall through to base-table path - } - } - - // Fallback: scan base table directly - const params: Date[] = []; - const conditions: string[] = []; - if (windowDef.start) { - params.push(windowDef.start); - conditions.push(`"timestamp" >= $${params.length}`); - } - if (windowDef.end) { - params.push(windowDef.end); - conditions.push(`"timestamp" < $${params.length}`); - } - - const sql = ` - SELECT - "timestamp", - model, - agent, - latency_ms, - cost_total, - usage_input_tokens, - usage_output_tokens, - usage_total_tokens, - usage_cached_tokens - FROM llm_events - ${conditions.length ? `WHERE ${conditions.join(' AND ')}` : ''} - ORDER BY "timestamp" ASC - `; - - const { rows } = await client.query(sql, params); - - const bucketCost = new Map(); - const bucketRequests = new Map(); - const bucketTokens = new Map(); - const bucketLatencies = new Map(); - const modelCost = new Map(); - const agentStats = new Map(); - const latencyBucketCounts = new Map(); - - let totalCost = 0; - let totalRequests = 0; - let totalTokens = 0; - let totalLatency = 0; - let latencyCount = 0; - let cacheSavings = 0; - - rows.forEach((r: any) => { - const ts = r.timestamp instanceof Date ? 
r.timestamp : new Date(r.timestamp); - if (!ts || Number.isNaN(ts.getTime())) return; - const bucket = bucketLabel(ts, resolution); - - const cost = toNumber(r.cost_total, 0); - const inTok = toNumber(r.usage_input_tokens, 0); - const outTok = toNumber(r.usage_output_tokens, 0); - const totalTokRaw = toNumber(r.usage_total_tokens, inTok + outTok); - const cachedTok = toNumber(r.usage_cached_tokens, 0); - const lat = r.latency_ms === null || r.latency_ms === undefined ? null : Number(r.latency_ms); - - totalRequests += 1; - totalCost += cost; - totalTokens += totalTokRaw; - if (lat !== null && !Number.isNaN(lat)) { - totalLatency += lat; - latencyCount += 1; - } - - bucketCost.set(bucket, (bucketCost.get(bucket) || 0) + cost); - bucketRequests.set(bucket, (bucketRequests.get(bucket) || 0) + 1); - const tok = bucketTokens.get(bucket) || { total: 0, input: 0, output: 0, cached: 0 }; - tok.total += totalTokRaw; - tok.input += inTok; - tok.output += outTok; - tok.cached += cachedTok; - bucketTokens.set(bucket, tok); - - if (lat !== null && !Number.isNaN(lat)) { - const arr = bucketLatencies.get(bucket) || []; - arr.push(lat); - bucketLatencies.set(bucket, arr); - - const latBucket = bucketLatency(lat, BUCKETS); - if (latBucket) latencyBucketCounts.set(latBucket, (latencyBucketCounts.get(latBucket) || 0) + 1); - } - - if (r.model) { - modelCost.set(r.model, (modelCost.get(r.model) || 0) + cost); - } - - if (r.agent) { - const stats = agentStats.get(r.agent) || { cost: 0, requests: 0, latencies: [] }; - stats.cost += cost; - stats.requests += 1; - if (lat !== null && !Number.isNaN(lat)) { - stats.latencies.push(lat); - } - agentStats.set(r.agent, stats); - } - - if (cachedTok > 0) { - const pricing = pricingService.getModelPricingSync(r.model || ''); - cacheSavings += (cachedTok / 1_000_000) * pricing.input; - } - }); - - const sortedBuckets = Array.from( - new Set([ - ...bucketCost.keys(), - ...bucketRequests.keys(), - ...bucketTokens.keys(), - ...bucketLatencies.keys(), 
- ]) - ).sort(); - - const bucket_cost = sortedBuckets.map((key) => ({ bucket: key, cost_total: bucketCost.get(key) || 0 })); - const bucket_requests = sortedBuckets.map((key) => ({ - bucket: key, - requests: bucketRequests.get(key) || 0, - })); - const bucket_tokens = sortedBuckets.map((key) => { - const tok = bucketTokens.get(key) || { total: 0, input: 0, output: 0, cached: 0 }; - return { - bucket: key, - total_tokens: tok.total, - input_tokens: tok.input, - output_tokens: tok.output, - cached_tokens: tok.cached, - }; - }); - const bucket_latency_percentiles = sortedBuckets.map((key) => { - const lats = bucketLatencies.get(key) || []; - return { - bucket: key, - count: lats.length, - avg_ms: lats.length ? lats.reduce((a, b) => a + b, 0) / lats.length : null, - p50_ms: percentile(lats, 0.5), - p95_ms: percentile(lats, 0.95), - p99_ms: percentile(lats, 0.99), - }; - }); - - const latency_total = Array.from(latencyBucketCounts.values()).reduce((a, b) => a + b, 0); - const latency_distribution = BUCKETS.map((b) => { - const count = latencyBucketCounts.get(b.label) || 0; - return { - bucket: b.label, - count, - share: latency_total ? count / latency_total : null, - }; - }); - - const models = Array.from(modelCost.entries()) - .sort((a, b) => (b[1] || 0) - (a[1] || 0)) - .map(([model, cost]) => ({ - model, - cost_total: cost, - share: totalCost ? cost / totalCost : null, - })); - - const agents = Array.from(agentStats.entries()) - .sort((a, b) => (b[1].cost || 0) - (a[1].cost || 0)) - .map(([agent, stats]) => ({ - agent, - requests: stats.requests, - cost_total: stats.cost, - share: totalCost ? stats.cost / totalCost : null, - avg_latency_ms: stats.latencies.length - ? stats.latencies.reduce((a, b) => a + b, 0) / stats.latencies.length - : null, - })); - - return { - window: { - label: windowDef.label, - start: windowDef.start ? windowDef.start.toISOString() : null, - end: windowDef.end ? 
windowDef.end.toISOString() : null, - }, - summary: { - total_cost: totalCost, - total_requests: totalRequests, - total_tokens: totalTokens, - avg_latency_ms: latencyCount ? totalLatency / latencyCount : null, - cache_savings: cacheSavings, - }, - timeline: - resolution === 'hour' - ? { - resolution: 'hour', - hourly: { - cost: bucket_cost, - requests: bucket_requests, - tokens: bucket_tokens, - latency_percentiles: bucket_latency_percentiles, - }, - } - : { - resolution: 'day', - daily: { - cost: bucket_cost, - requests: bucket_requests, - tokens: bucket_tokens, - latency_percentiles: bucket_latency_percentiles, - }, - }, - cost_by_model: { - total_cost: totalCost, - models, - }, - cost_by_agent: { - total_cost: totalCost, - agents, - }, - latency_distribution: { - total: latency_total, - buckets: latency_distribution, - }, - }; -}; - -export default { - buildAnalytics, - parseAnalyticsWindow, -}; diff --git a/hive/src/services/tsdb/pricing_service.ts b/hive/src/services/tsdb/pricing_service.ts deleted file mode 100644 index 7ee7ea29..00000000 --- a/hive/src/services/tsdb/pricing_service.ts +++ /dev/null @@ -1,743 +0,0 @@ -/** - * LLM Pricing Service - * - * Centralized pricing table for calculating costs by provider and model. - * Prices are stored in MongoDB and cached in memory for performance. - * Prices are in USD per 1M tokens (industry standard). 
- * - * Sources: - * - OpenAI: https://openai.com/pricing - * - Anthropic: https://www.anthropic.com/pricing - * - Google: https://ai.google.dev/pricing - * - AWS Bedrock: https://aws.amazon.com/bedrock/pricing/ - */ - -// In-memory cache for pricing data -const pricingCache = new Map(); -const aliasCacheMap = new Map(); // model alias -> canonical model -let cacheLoadedAt: number | null = null; -const CACHE_TTL_MS = 5 * 60 * 1000; // 5 minutes - -interface PricingEntry { - model: string; - provider: string; - input: number; - output: number; - cached_input: number; - aliases: string[]; - effective_date?: Date; - updated_at?: Date; - source?: string; -} - -interface PricingTableEntry { - provider: string; - input: number; - output: number; - cached_input: number; - aliases: string[]; -} - -// Fallback pricing for unknown models (conservative estimate) -const DEFAULT_PRICING = { input: 1.00, output: 3.00, cached_input: 0.25 }; - -// Default pricing table for seeding - USD per 1M tokens -// Updated: 2025-01-01 -const DEFAULT_PRICING_TABLE: Record = { - // OpenAI Models - "gpt-4o": { provider: "openai", input: 2.50, output: 10.00, cached_input: 1.25, aliases: ["gpt-4o-2024-11-20", "gpt-4o-2024-08-06"] }, - "gpt-4o-2024-05-13": { provider: "openai", input: 5.00, output: 15.00, cached_input: 2.50, aliases: [] }, - "gpt-4o-mini": { provider: "openai", input: 0.15, output: 0.60, cached_input: 0.075, aliases: ["gpt-4o-mini-2024-07-18"] }, - "gpt-4-turbo": { provider: "openai", input: 10.00, output: 30.00, cached_input: 5.00, aliases: ["gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview"] }, - "gpt-4": { provider: "openai", input: 30.00, output: 60.00, cached_input: 15.00, aliases: ["gpt-4-0613"] }, - "gpt-3.5-turbo": { provider: "openai", input: 0.50, output: 1.50, cached_input: 0.25, aliases: ["gpt-3.5-turbo-0125"] }, - "o1": { provider: "openai", input: 15.00, output: 60.00, cached_input: 7.50, aliases: ["o1-2024-12-17", "o1-preview"] }, - "o1-mini": { provider: "openai", 
input: 3.00, output: 12.00, cached_input: 1.50, aliases: ["o1-mini-2024-09-12"] }, - "o3-mini": { provider: "openai", input: 1.10, output: 4.40, cached_input: 0.55, aliases: [] }, - - // Anthropic Models - "claude-3-5-sonnet-20241022": { provider: "anthropic", input: 3.00, output: 15.00, cached_input: 0.30, aliases: ["claude-3-5-sonnet-20240620", "claude-3-5-sonnet-latest"] }, - "claude-sonnet-4-20250514": { provider: "anthropic", input: 3.00, output: 15.00, cached_input: 0.30, aliases: ["claude-sonnet-4-5-20250929"] }, - "claude-3-5-haiku-20241022": { provider: "anthropic", input: 0.80, output: 4.00, cached_input: 0.08, aliases: ["claude-3-5-haiku-latest"] }, - "claude-3-opus-20240229": { provider: "anthropic", input: 15.00, output: 75.00, cached_input: 1.50, aliases: ["claude-3-opus-latest"] }, - "claude-3-sonnet-20240229": { provider: "anthropic", input: 3.00, output: 15.00, cached_input: 0.30, aliases: [] }, - "claude-3-haiku-20240307": { provider: "anthropic", input: 0.25, output: 1.25, cached_input: 0.025, aliases: [] }, - "claude-opus-4-5-20251101": { provider: "anthropic", input: 15.00, output: 75.00, cached_input: 1.50, aliases: ["claude-opus-4-20250514"] }, - - // Google Models - "gemini-2.0-flash": { provider: "google", input: 0.10, output: 0.40, cached_input: 0.025, aliases: ["gemini-2.0-flash-exp"] }, - "gemini-1.5-flash": { provider: "google", input: 0.075, output: 0.30, cached_input: 0.01875, aliases: ["gemini-1.5-flash-latest"] }, - "gemini-1.5-flash-8b": { provider: "google", input: 0.0375, output: 0.15, cached_input: 0.01, aliases: [] }, - "gemini-1.5-pro": { provider: "google", input: 1.25, output: 5.00, cached_input: 0.3125, aliases: ["gemini-1.5-pro-latest"] }, - "gemini-1.0-pro": { provider: "google", input: 0.50, output: 1.50, cached_input: 0.125, aliases: ["gemini-pro"] }, - "gemini-exp-1206": { provider: "google", input: 0.00, output: 0.00, cached_input: 0.00, aliases: [] }, - - // AWS Bedrock - Claude (cross-region inference) - 
"anthropic.claude-3-5-sonnet-20241022-v2:0": { provider: "bedrock", input: 3.00, output: 15.00, cached_input: 0.30, aliases: [] }, - "anthropic.claude-3-5-haiku-20241022-v1:0": { provider: "bedrock", input: 0.80, output: 4.00, cached_input: 0.08, aliases: [] }, - "anthropic.claude-3-opus-20240229-v1:0": { provider: "bedrock", input: 15.00, output: 75.00, cached_input: 1.50, aliases: [] }, - "anthropic.claude-3-sonnet-20240229-v1:0": { provider: "bedrock", input: 3.00, output: 15.00, cached_input: 0.30, aliases: [] }, - "anthropic.claude-3-haiku-20240307-v1:0": { provider: "bedrock", input: 0.25, output: 1.25, cached_input: 0.025, aliases: [] }, - - // AWS Bedrock - Amazon Models - "amazon.nova-pro-v1:0": { provider: "bedrock", input: 0.80, output: 3.20, cached_input: 0.20, aliases: [] }, - "amazon.nova-lite-v1:0": { provider: "bedrock", input: 0.06, output: 0.24, cached_input: 0.015, aliases: [] }, - "amazon.nova-micro-v1:0": { provider: "bedrock", input: 0.035, output: 0.14, cached_input: 0.00875, aliases: [] }, - "amazon.titan-text-express-v1": { provider: "bedrock", input: 0.20, output: 0.60, cached_input: 0.05, aliases: [] }, - "amazon.titan-text-lite-v1": { provider: "bedrock", input: 0.15, output: 0.20, cached_input: 0.0375, aliases: [] }, - - // Mistral Models - "mistral-large-latest": { provider: "mistral", input: 2.00, output: 6.00, cached_input: 0.50, aliases: ["mistral-large-2411"] }, - "mistral-medium-latest": { provider: "mistral", input: 2.70, output: 8.10, cached_input: 0.675, aliases: [] }, - "mistral-small-latest": { provider: "mistral", input: 0.20, output: 0.60, cached_input: 0.05, aliases: ["mistral-small-2409"] }, - "codestral-latest": { provider: "mistral", input: 0.30, output: 0.90, cached_input: 0.075, aliases: [] }, - "pixtral-large-latest": { provider: "mistral", input: 2.00, output: 6.00, cached_input: 0.50, aliases: [] }, - "ministral-8b-latest": { provider: "mistral", input: 0.10, output: 0.10, cached_input: 0.025, aliases: [] }, - 
"ministral-3b-latest": { provider: "mistral", input: 0.04, output: 0.04, cached_input: 0.01, aliases: [] }, - - // Cohere Models - "command-r-plus": { provider: "cohere", input: 2.50, output: 10.00, cached_input: 0.625, aliases: [] }, - "command-r": { provider: "cohere", input: 0.15, output: 0.60, cached_input: 0.0375, aliases: [] }, - "command": { provider: "cohere", input: 1.00, output: 2.00, cached_input: 0.25, aliases: [] }, - "command-light": { provider: "cohere", input: 0.30, output: 0.60, cached_input: 0.075, aliases: [] }, - - // DeepSeek Models - "deepseek-chat": { provider: "deepseek", input: 0.14, output: 0.28, cached_input: 0.014, aliases: [] }, - "deepseek-reasoner": { provider: "deepseek", input: 0.55, output: 2.19, cached_input: 0.055, aliases: [] }, - - // Groq Models (inference pricing, not training) - "llama-3.3-70b-versatile": { provider: "groq", input: 0.59, output: 0.79, cached_input: 0.15, aliases: [] }, - "llama-3.1-70b-versatile": { provider: "groq", input: 0.59, output: 0.79, cached_input: 0.15, aliases: [] }, - "llama-3.1-8b-instant": { provider: "groq", input: 0.05, output: 0.08, cached_input: 0.0125, aliases: [] }, - "llama-3.2-90b-vision-preview": { provider: "groq", input: 0.90, output: 0.90, cached_input: 0.225, aliases: [] }, - "mixtral-8x7b-32768": { provider: "groq", input: 0.24, output: 0.24, cached_input: 0.06, aliases: [] }, -}; - -declare const _ACHO_MG_DB: { db: (name: string) => { collection: (name: string) => unknown } }; -declare const _ACHO_MDB_CONFIG: { ERP_DBNAME: string }; -declare const _ACHO_MDB_COLLECTIONS: { ADEN_LLM_PRICING: string }; - -interface MongoCollection { - find: (query: Record) => { toArray: () => Promise; sort: (sort: Record) => { toArray: () => Promise } }; - findOne: (query: Record) => Promise; - findOneAndUpdate: (query: Record, update: Record, options: Record) => Promise; - deleteOne: (query: Record) => Promise<{ deletedCount: number }>; - insertOne: (doc: Record) => Promise; - updateOne: (query: 
Record, update: Record) => Promise; -} - -/** - * Get the MongoDB collection for pricing - * @returns {Collection} MongoDB collection - */ -function getPricingCollection(): MongoCollection { - const db = _ACHO_MG_DB.db(_ACHO_MDB_CONFIG.ERP_DBNAME); - return db.collection(_ACHO_MDB_COLLECTIONS.ADEN_LLM_PRICING) as MongoCollection; -} - -/** - * Check if cache is still valid - * @returns {boolean} - */ -function isCacheValid(): boolean { - if (!cacheLoadedAt || pricingCache.size === 0) return false; - return Date.now() - cacheLoadedAt < CACHE_TTL_MS; -} - -interface DbPricingDoc { - model: string; - provider: string; - input_per_1m: number; - output_per_1m: number; - cached_input_per_1m: number; - aliases?: string[]; - effective_date?: Date; - updated_at?: Date; -} - -/** - * Load pricing from MongoDB into memory cache - * @param {boolean} force - Force reload even if cache is valid - * @returns {Promise} Pricing cache - */ -async function loadPricingFromDb(force = false): Promise> { - if (!force && isCacheValid()) { - return pricingCache; - } - - try { - const collection = getPricingCollection(); - const docs = await collection.find({}).toArray() as DbPricingDoc[]; - - if (docs.length === 0) { - console.log("[pricing_service] No pricing in DB, using defaults"); - loadFromDefaults(); - return pricingCache; - } - - // Clear and rebuild cache - pricingCache.clear(); - aliasCacheMap.clear(); - - for (const doc of docs) { - const pricing: PricingEntry = { - model: doc.model, - provider: doc.provider, - input: doc.input_per_1m, - output: doc.output_per_1m, - cached_input: doc.cached_input_per_1m, - aliases: doc.aliases || [], - effective_date: doc.effective_date, - updated_at: doc.updated_at, - }; - pricingCache.set(doc.model.toLowerCase(), pricing); - - // Build alias map - for (const alias of pricing.aliases) { - aliasCacheMap.set(alias.toLowerCase(), doc.model.toLowerCase()); - } - } - - cacheLoadedAt = Date.now(); - console.log(`[pricing_service] Loaded 
${pricingCache.size} pricing entries from DB`); - return pricingCache; - } catch (err) { - console.error("[pricing_service] Error loading from DB, using defaults:", (err as Error).message); - loadFromDefaults(); - return pricingCache; - } -} - -/** - * Load pricing from hardcoded defaults into cache - */ -function loadFromDefaults(): void { - pricingCache.clear(); - aliasCacheMap.clear(); - - for (const [model, data] of Object.entries(DEFAULT_PRICING_TABLE)) { - const pricing: PricingEntry = { - model, - provider: data.provider, - input: data.input, - output: data.output, - cached_input: data.cached_input, - aliases: data.aliases || [], - source: "default", - }; - pricingCache.set(model.toLowerCase(), pricing); - - // Build alias map - for (const alias of data.aliases || []) { - aliasCacheMap.set(alias.toLowerCase(), model.toLowerCase()); - } - } - - cacheLoadedAt = Date.now(); - console.log(`[pricing_service] Loaded ${pricingCache.size} pricing entries from defaults`); -} - -/** - * Invalidate cache to force reload on next access - */ -function invalidateCache(): void { - cacheLoadedAt = null; -} - -/** - * Resolve model name to canonical form using aliases - * @param {string} model - Model name (possibly an alias) - * @returns {string} Canonical model name - */ -function resolveAlias(model: string): string | null { - if (!model) return null; - const lower = model.toLowerCase().trim(); - - // Check if it's a direct match - if (pricingCache.has(lower)) { - return lower; - } - - // Check alias map - if (aliasCacheMap.has(lower)) { - return aliasCacheMap.get(lower)!; - } - - // Try partial matching for model families - for (const [key, pricing] of pricingCache.entries()) { - // Check if input starts with a known model prefix - if (lower.startsWith(key) || key.startsWith(lower)) { - return key; - } - // Check aliases - for (const alias of pricing.aliases || []) { - if (lower.startsWith(alias.toLowerCase()) || alias.toLowerCase().startsWith(lower)) { - return key; - } 
- } - } - - return lower; -} - -interface ModelPricingResult { - input: number; - output: number; - cached_input: number; - model: string; - provider: string; - source: string; -} - -/** - * Get pricing for a model - * @param {string} model - Model name - * @param {string} provider - Provider name (optional, for disambiguation) - * @returns {Promise} Pricing { input, output, cached_input } in USD per 1M tokens - */ -async function getModelPricing(model: string, provider: string | null = null): Promise { - await loadPricingFromDb(); - - const resolved = resolveAlias(model); - - // Try exact match - if (resolved && pricingCache.has(resolved)) { - const pricing = pricingCache.get(resolved)!; - return { - input: pricing.input, - output: pricing.output, - cached_input: pricing.cached_input, - model: pricing.model, - provider: pricing.provider, - source: "db", - }; - } - - // Try provider-prefixed lookup for Bedrock - if (provider === "bedrock" || provider === "aws") { - for (const [key, pricing] of pricingCache.entries()) { - if (key.includes(resolved || "") || (resolved || "").includes(key.split(".").pop()?.split("-")[0] || "")) { - return { - input: pricing.input, - output: pricing.output, - cached_input: pricing.cached_input, - model: pricing.model, - provider: pricing.provider, - source: "bedrock_match", - }; - } - } - } - - // Return default pricing - console.log(`[pricing_service] Unknown model: ${model}, using default pricing`); - return { - ...DEFAULT_PRICING, - model: model, - provider: provider || "unknown", - source: "default", - }; -} - -/** - * Get model pricing synchronously (uses cached data) - * @param {string} model - Model name - * @returns {Object} Pricing { input, output, cached_input } in USD per 1M tokens - */ -function getModelPricingSync(model: string): ModelPricingResult { - const resolved = resolveAlias(model); - - if (resolved && pricingCache.has(resolved)) { - const cached = pricingCache.get(resolved)!; - return { - input: cached.input, - 
output: cached.output, - cached_input: cached.cached_input, - model: cached.model, - provider: cached.provider, - source: "db", - }; - } - - return { ...DEFAULT_PRICING, model, provider: "unknown", source: "default" }; -} - -interface CostCalculationParams { - model: string; - provider?: string; - input_tokens?: number; - output_tokens?: number; - cached_tokens?: number; -} - -interface CostResult { - total: number; - input_cost: number; - output_cost: number; - cached_cost: number; - pricing: { - model: string; - source: string; - input_per_1m: number; - output_per_1m: number; - cached_per_1m: number; - }; -} - -/** - * Calculate cost for a request (synchronous version using cached data) - * @param {Object} params - Request parameters - * @returns {Object} Cost breakdown { total, input_cost, output_cost, cached_cost, pricing } - */ -function calculateCostSync({ model, provider: _provider, input_tokens = 0, output_tokens = 0, cached_tokens = 0 }: CostCalculationParams): CostResult { - const resolved = resolveAlias(model); - let pricing: { input: number; output: number; cached_input: number; model: string; source: string }; - - if (resolved && pricingCache.has(resolved)) { - const cached = pricingCache.get(resolved)!; - pricing = { - input: cached.input, - output: cached.output, - cached_input: cached.cached_input, - model: cached.model, - source: "db", - }; - } else { - pricing = { ...DEFAULT_PRICING, model, source: "default" }; - } - - // Non-cached input tokens - const nonCachedInput = Math.max(0, input_tokens - cached_tokens); - - // Calculate costs (pricing is per 1M tokens) - const inputCost = (nonCachedInput / 1_000_000) * pricing.input; - const outputCost = (output_tokens / 1_000_000) * pricing.output; - const cachedCost = (cached_tokens / 1_000_000) * pricing.cached_input; - - const total = inputCost + outputCost + cachedCost; - - return { - total, - input_cost: inputCost, - output_cost: outputCost, - cached_cost: cachedCost, - pricing: { - model: 
pricing.model, - source: pricing.source, - input_per_1m: pricing.input, - output_per_1m: pricing.output, - cached_per_1m: pricing.cached_input, - }, - }; -} - -/** - * Calculate cost for a request (async version) - * @param {Object} params - Request parameters - * @returns {Promise} Cost breakdown { total, input_cost, output_cost, cached_cost, pricing } - */ -async function calculateCost(params: CostCalculationParams): Promise { - await loadPricingFromDb(); - return calculateCostSync(params); -} - -interface UpsertPricingInput { - provider?: string; - input_per_1m?: number; - input?: number; - output_per_1m?: number; - output?: number; - cached_input_per_1m?: number; - cached_input?: number; - aliases?: string[]; - effective_date?: Date; -} - -/** - * Upsert pricing for a model - * @param {string} model - Model identifier - * @param {Object} pricing - Pricing data - * @param {string} userId - User making the change - * @returns {Promise} Updated document - */ -async function upsertPricing(model: string, pricing: UpsertPricingInput, userId: string | null = null): Promise { - const collection = getPricingCollection(); - - const doc = { - model: model, - provider: pricing.provider, - input_per_1m: pricing.input_per_1m ?? pricing.input, - output_per_1m: pricing.output_per_1m ?? pricing.output, - cached_input_per_1m: pricing.cached_input_per_1m ?? 
pricing.cached_input, - aliases: pricing.aliases || [], - effective_date: pricing.effective_date || new Date(), - updated_at: new Date(), - updated_by: userId, - }; - - const result = await collection.findOneAndUpdate( - { model: model }, - { $set: doc }, - { upsert: true, returnDocument: "after" } - ); - - // Invalidate cache to force reload - invalidateCache(); - - return result; -} - -/** - * Delete pricing for a model - * @param {string} model - Model identifier - * @returns {Promise} True if deleted - */ -async function deletePricing(model: string): Promise { - const collection = getPricingCollection(); - const result = await collection.deleteOne({ model: model }); - - // Invalidate cache to force reload - invalidateCache(); - - return result.deletedCount > 0; -} - -interface SeedResult { - inserted: number; - updated: number; - skipped: number; - errors: { model: string; error: string }[]; -} - -/** - * Seed default pricing to MongoDB - * @param {string} userId - User making the change - * @param {boolean} overwrite - If true, overwrite existing entries - * @returns {Promise} Seed results - */ -async function seedDefaultPricing(userId: string | null = null, overwrite = false): Promise { - const collection = getPricingCollection(); - const results: SeedResult = { inserted: 0, updated: 0, skipped: 0, errors: [] }; - - for (const [model, data] of Object.entries(DEFAULT_PRICING_TABLE)) { - try { - const existing = await collection.findOne({ model }); - - if (existing && !overwrite) { - results.skipped++; - continue; - } - - const doc = { - model, - provider: data.provider, - input_per_1m: data.input, - output_per_1m: data.output, - cached_input_per_1m: data.cached_input, - aliases: data.aliases || [], - effective_date: new Date(), - updated_at: new Date(), - updated_by: userId, - }; - - if (existing) { - await collection.updateOne({ model }, { $set: doc }); - results.updated++; - } else { - await collection.insertOne(doc); - results.inserted++; - } - } catch 
(err) { - results.errors.push({ model, error: (err as Error).message }); - } - } - - // Invalidate cache to force reload - invalidateCache(); - - console.log(`[pricing_service] Seeded pricing: ${results.inserted} inserted, ${results.updated} updated, ${results.skipped} skipped`); - return results; -} - -interface AllPricingResult { - [key: string]: { - provider: string; - input: number; - output: number; - cached_input: number; - aliases: string[]; - }; -} - -/** - * Get all available pricing data - * @returns {Promise} Full pricing table - */ -async function getAllPricing(): Promise { - await loadPricingFromDb(); - - const result: AllPricingResult = {}; - for (const [, pricing] of pricingCache.entries()) { - result[pricing.model] = { - provider: pricing.provider, - input: pricing.input, - output: pricing.output, - cached_input: pricing.cached_input, - aliases: pricing.aliases, - }; - } - return result; -} - -interface PricingByProviderResult { - [provider: string]: { - [model: string]: { - input: number; - output: number; - cached_input: number; - aliases: string[]; - }; - }; -} - -/** - * Get pricing summary grouped by provider - * @returns {Promise} Pricing by provider - */ -async function getPricingByProvider(): Promise { - await loadPricingFromDb(); - - const byProvider: PricingByProviderResult = {}; - - for (const [, pricing] of pricingCache.entries()) { - const provider = pricing.provider || "other"; - if (!byProvider[provider]) { - byProvider[provider] = {}; - } - byProvider[provider][pricing.model] = { - input: pricing.input, - output: pricing.output, - cached_input: pricing.cached_input, - aliases: pricing.aliases, - }; - } - - return byProvider; -} - -interface DegradationModel { - model: string; - label: string; - input_cost: number; - output_cost: number; - avg_cost: number; -} - -interface DegradationTargetsResult { - providers: string[]; - models: { [provider: string]: DegradationModel[] }; -} - -/** - * Get degradation target models grouped by 
provider - * Returns models sorted by cost (cheapest first) for budget control "degrade" mode - * @returns {Promise} { providers: [...], models: { provider: [...] } } - */ -async function getDegradationTargets(): Promise { - await loadPricingFromDb(); - - const byProvider: { [provider: string]: DegradationModel[] } = {}; - - for (const [, pricing] of pricingCache.entries()) { - const provider = pricing.provider || "other"; - if (!byProvider[provider]) { - byProvider[provider] = []; - } - - // Calculate average cost per 1M tokens (input + output) / 2 - const avgCost = (pricing.input + pricing.output) / 2; - - byProvider[provider].push({ - model: pricing.model, - label: pricing.model, - input_cost: pricing.input, - output_cost: pricing.output, - avg_cost: avgCost, - }); - } - - // Sort models within each provider by avg_cost (cheapest first) - for (const provider of Object.keys(byProvider)) { - byProvider[provider].sort((a, b) => a.avg_cost - b.avg_cost); - } - - // Get sorted list of providers - const providers = Object.keys(byProvider).sort(); - - return { - providers, - models: byProvider, - }; -} - -/** - * Get pricing directly from DB (bypasses cache) - * @param {string} model - Model identifier - * @returns {Promise} Pricing document or null - */ -async function getPricingFromDb(model: string): Promise { - const collection = getPricingCollection(); - return collection.findOne({ model }); -} - -/** - * List all pricing from DB (bypasses cache) - * @returns {Promise} All pricing documents - */ -async function listAllPricingFromDb(): Promise { - const collection = getPricingCollection(); - return collection.find({}).sort({ provider: 1, model: 1 }).toArray(); -} - -/** - * Initialize pricing service - call on server startup - * @returns {Promise} - */ -async function initialize(): Promise { - try { - await loadPricingFromDb(true); - console.log("[pricing_service] Initialized successfully"); - } catch (err) { - console.error("[pricing_service] Failed to initialize, 
using defaults:", (err as Error).message); - loadFromDefaults(); - } -} - -export default { - // Core functions - getModelPricing, - getModelPricingSync, - calculateCost, - calculateCostSync, - - // CRUD operations - upsertPricing, - deletePricing, - seedDefaultPricing, - - // Query functions - getAllPricing, - getPricingByProvider, - getDegradationTargets, - getPricingFromDb, - listAllPricingFromDb, - - // Cache management - loadPricingFromDb, - invalidateCache, - initialize, - - // Constants (for reference/testing) - DEFAULT_PRICING, - DEFAULT_PRICING_TABLE, -}; diff --git a/hive/src/services/tsdb/schema.sql b/hive/src/services/tsdb/schema.sql deleted file mode 100644 index f3811afa..00000000 --- a/hive/src/services/tsdb/schema.sql +++ /dev/null @@ -1,358 +0,0 @@ --- TSDB schema for team-scoped hypertable (Timescale) --- Architecture: Hot (metrics) / Warm (content refs) / Cold (content store) - --- ============================================================================= --- Enable TimescaleDB extension (required for hypertables and continuous aggregates) --- This is safe to run multiple times - CREATE EXTENSION IF NOT EXISTS is idempotent --- ============================================================================= -CREATE EXTENSION IF NOT EXISTS timescaledb; - --- ============================================================================= --- HOT TABLE: llm_events (metrics only - fast time-series queries) --- ============================================================================= -CREATE TABLE IF NOT EXISTS llm_events ( - "timestamp" timestamptz NOT NULL, - ingest_date date, - team_id text NOT NULL, - user_id text, - trace_id text NOT NULL, - span_id text, - parent_span_id text, - request_id text, - provider text, - call_sequence integer NOT NULL, - model text, - stream boolean DEFAULT false, - agent text, - agent_name text, - agent_stack jsonb, - call_site jsonb, - metadata jsonb, - latency_ms double precision, - usage_input_tokens double 
precision, - usage_output_tokens double precision, - usage_total_tokens double precision, - usage_cached_tokens double precision, - usage_reasoning_tokens double precision, - usage_accepted_prediction_tokens double precision, - usage_rejected_prediction_tokens double precision, - cost_total numeric, - -- Content flags (lightweight references instead of full content) - has_content boolean DEFAULT false, - finish_reason text, - tool_call_count integer DEFAULT 0, - -- Deprecated: content_capture jsonb (migrated to warm storage) - content_capture jsonb, - created_at timestamptz DEFAULT now(), - CONSTRAINT llm_events_pk PRIMARY KEY ("timestamp", trace_id, call_sequence) -); - --- ============================================================================= --- WARM TABLE: llm_event_content (content references per event) --- Links events to deduplicated content in the cold store --- ============================================================================= -CREATE TABLE IF NOT EXISTS llm_event_content ( - id bigserial, - "timestamp" timestamptz NOT NULL, - trace_id text NOT NULL, - call_sequence integer NOT NULL, - team_id text NOT NULL, - -- Content type: 'system_prompt', 'messages', 'response', 'tools', 'params' - content_type text NOT NULL, - -- Reference to cold storage (content-addressable) - content_hash text NOT NULL, - -- Quick access metadata (no need to fetch from cold store) - byte_size integer NOT NULL DEFAULT 0, - message_count integer, -- For messages type - truncated_preview text, -- First 200 chars for quick preview - created_at timestamptz DEFAULT now(), - CONSTRAINT llm_event_content_pk PRIMARY KEY (id) -); - --- Index for joining back to events -CREATE INDEX IF NOT EXISTS idx_llm_event_content_event - ON llm_event_content (trace_id, call_sequence, "timestamp"); - --- Index for content type queries -CREATE INDEX IF NOT EXISTS idx_llm_event_content_type - ON llm_event_content (team_id, content_type, "timestamp" DESC); - --- Index for content hash 
lookups (finding which events use a content) -CREATE INDEX IF NOT EXISTS idx_llm_event_content_hash - ON llm_event_content (content_hash); - --- ============================================================================= --- COLD TABLE: llm_content_store (deduplicated content storage) --- Content-addressable storage with SHA-256 hashes --- ============================================================================= -CREATE TABLE IF NOT EXISTS llm_content_store ( - content_hash text NOT NULL, - team_id text NOT NULL, - content text NOT NULL, - byte_size integer NOT NULL, - ref_count integer DEFAULT 1, -- Number of events referencing this content - first_seen_at timestamptz DEFAULT now(), - last_seen_at timestamptz DEFAULT now(), - CONSTRAINT llm_content_store_pk PRIMARY KEY (content_hash, team_id) -); - --- Index for cleanup queries (find orphaned content) -CREATE INDEX IF NOT EXISTS idx_llm_content_store_refs - ON llm_content_store (team_id, ref_count, last_seen_at); - --- ============================================================================= --- MIGRATION: Add new columns to existing llm_events tables --- ============================================================================= -ALTER TABLE llm_events ADD COLUMN IF NOT EXISTS has_content boolean DEFAULT false; -ALTER TABLE llm_events ADD COLUMN IF NOT EXISTS finish_reason text; -ALTER TABLE llm_events ADD COLUMN IF NOT EXISTS tool_call_count integer DEFAULT 0; - --- Ensure primary key includes timestamp if table already existed without it -DO $$ -BEGIN - IF EXISTS ( - SELECT 1 - FROM pg_constraint c - JOIN pg_class t ON c.conrelid = t.oid - WHERE t.relname = 'llm_events' - AND c.contype = 'p' - AND NOT EXISTS ( - SELECT 1 - FROM unnest(c.conkey) WITH ORDINALITY AS ck(attnum, ord) - JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = ck.attnum - WHERE a.attname = 'timestamp' - ) - ) THEN - ALTER TABLE llm_events DROP CONSTRAINT IF EXISTS llm_events_pk; - ALTER TABLE llm_events ADD CONSTRAINT 
llm_events_pk PRIMARY KEY ("timestamp", trace_id, call_sequence); - END IF; -END$$; - --- Promote to hypertable when Timescale is available -DO $$ -BEGIN - IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') THEN - PERFORM public.create_hypertable('llm_events', 'timestamp', if_not_exists => TRUE); - END IF; -END$$; - --- Ensure metadata column exists for flexible fields -ALTER TABLE llm_events - ADD COLUMN IF NOT EXISTS metadata jsonb; - --- Ensure content_capture column exists (for Layer 0 content capture) -ALTER TABLE llm_events - ADD COLUMN IF NOT EXISTS content_capture jsonb; - --- Helpful indexes -CREATE INDEX IF NOT EXISTS idx_llm_events_ts ON llm_events ("timestamp" DESC); -CREATE INDEX IF NOT EXISTS idx_llm_events_team_ts ON llm_events (team_id, "timestamp" DESC); -CREATE INDEX IF NOT EXISTS idx_llm_events_model ON llm_events (model); -CREATE INDEX IF NOT EXISTS idx_llm_events_agent ON llm_events (agent); -CREATE INDEX IF NOT EXISTS idx_llm_events_trace ON llm_events (trace_id); - --- Continuous aggregate: daily rollup for analytics-wide -DO $$ -BEGIN - IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') THEN - CREATE MATERIALIZED VIEW IF NOT EXISTS llm_events_daily_ca - WITH (timescaledb.continuous) AS - SELECT - time_bucket('1 day', "timestamp") AS bucket, - COUNT(*) AS requests, - SUM(cost_total) AS cost_total, - SUM(usage_input_tokens) AS input_tokens, - SUM(usage_output_tokens) AS output_tokens, - SUM(COALESCE(usage_total_tokens, COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0))) AS total_tokens, - SUM(usage_cached_tokens) AS cached_tokens - FROM llm_events - GROUP BY 1 - WITH NO DATA; - - -- Initial refresh to populate the CA immediately - CALL refresh_continuous_aggregate('llm_events_daily_ca', NULL, NOW()); - END IF; -EXCEPTION - WHEN others THEN NULL; -- Ignore errors if CA already exists or refresh fails -END$$; - --- Index on CA for fast range scans -DO $$ -BEGIN - IF EXISTS (SELECT 1 FROM 
pg_class WHERE relname = 'llm_events_daily_ca') THEN - CREATE INDEX IF NOT EXISTS idx_llm_events_daily_ca_bucket ON llm_events_daily_ca (bucket DESC); - END IF; -EXCEPTION WHEN undefined_table THEN - NULL; -END$$; - --- Continuous aggregate: daily rollup by model for fast model-grouped queries -DO $$ -BEGIN - IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') THEN - CREATE MATERIALIZED VIEW IF NOT EXISTS llm_events_daily_by_model_ca - WITH (timescaledb.continuous) AS - SELECT - time_bucket('1 day', "timestamp") AS bucket, - model, - provider, - COUNT(*) AS requests, - SUM(cost_total) AS cost_total, - SUM(usage_input_tokens) AS input_tokens, - SUM(usage_output_tokens) AS output_tokens, - SUM(COALESCE(usage_total_tokens, COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0))) AS total_tokens, - SUM(usage_cached_tokens) AS cached_tokens, - AVG(latency_ms) AS avg_latency_ms - FROM llm_events - GROUP BY 1, 2, 3 - WITH NO DATA; - - -- Initial refresh to populate the CA immediately - CALL refresh_continuous_aggregate('llm_events_daily_by_model_ca', NULL, NOW()); - END IF; -EXCEPTION - WHEN others THEN NULL; -- Ignore errors if CA already exists or refresh fails -END$$; - --- Index on model CA for fast range scans -DO $$ -BEGIN - IF EXISTS (SELECT 1 FROM pg_class WHERE relname = 'llm_events_daily_by_model_ca') THEN - CREATE INDEX IF NOT EXISTS idx_llm_events_daily_by_model_ca_bucket ON llm_events_daily_by_model_ca (bucket DESC); - CREATE INDEX IF NOT EXISTS idx_llm_events_daily_by_model_ca_model ON llm_events_daily_by_model_ca (model); - END IF; -EXCEPTION WHEN undefined_table THEN - NULL; -END$$; - --- Continuous aggregate: daily rollup by agent for fast agent-grouped queries -DO $$ -BEGIN - IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') THEN - CREATE MATERIALIZED VIEW IF NOT EXISTS llm_events_daily_by_agent_ca - WITH (timescaledb.continuous) AS - SELECT - time_bucket('1 day', "timestamp") AS bucket, - agent, - COUNT(*) 
AS requests, - SUM(cost_total) AS cost_total, - SUM(usage_input_tokens) AS input_tokens, - SUM(usage_output_tokens) AS output_tokens, - SUM(COALESCE(usage_total_tokens, COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0))) AS total_tokens, - SUM(usage_cached_tokens) AS cached_tokens, - AVG(latency_ms) AS avg_latency_ms - FROM llm_events - GROUP BY 1, 2 - WITH NO DATA; - - -- Initial refresh to populate the CA immediately - CALL refresh_continuous_aggregate('llm_events_daily_by_agent_ca', NULL, NOW()); - END IF; -EXCEPTION - WHEN others THEN NULL; -- Ignore errors if CA already exists or refresh fails -END$$; - --- Index on agent CA for fast range scans -DO $$ -BEGIN - IF EXISTS (SELECT 1 FROM pg_class WHERE relname = 'llm_events_daily_by_agent_ca') THEN - CREATE INDEX IF NOT EXISTS idx_llm_events_daily_by_agent_ca_bucket ON llm_events_daily_by_agent_ca (bucket DESC); - CREATE INDEX IF NOT EXISTS idx_llm_events_daily_by_agent_ca_agent ON llm_events_daily_by_agent_ca (agent); - END IF; -EXCEPTION WHEN undefined_table THEN - NULL; -END$$; - --- Refresh policies: keep recent buckets fresh --- Note: Using timescaledb_information.jobs (not the deprecated policy_refresh_continuous_aggregate view) -DO $$ -BEGIN - IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') - AND EXISTS ( - SELECT 1 - FROM timescaledb_information.continuous_aggregates - WHERE view_name = 'llm_events_daily_ca' - AND view_schema = current_schema() - ) - THEN - -- Add refresh policy if none exists for this CA - IF NOT EXISTS ( - SELECT 1 FROM timescaledb_information.jobs - WHERE proc_name = 'policy_refresh_continuous_aggregate' - AND hypertable_schema = current_schema() - AND hypertable_name = 'llm_events_daily_ca' - ) THEN - PERFORM add_continuous_aggregate_policy( - 'llm_events_daily_ca', - start_offset => interval '30 days', - end_offset => interval '1 hour', - schedule_interval => interval '15 minutes' - ); - END IF; - END IF; -EXCEPTION - WHEN undefined_table THEN 
NULL; - WHEN undefined_function THEN NULL; -END$$; - --- Refresh policies for llm_events_daily_by_model_ca -DO $$ -BEGIN - IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') - AND EXISTS ( - SELECT 1 - FROM timescaledb_information.continuous_aggregates - WHERE view_name = 'llm_events_daily_by_model_ca' - AND view_schema = current_schema() - ) - THEN - -- Add refresh policy if none exists for this CA - IF NOT EXISTS ( - SELECT 1 FROM timescaledb_information.jobs - WHERE proc_name = 'policy_refresh_continuous_aggregate' - AND hypertable_schema = current_schema() - AND hypertable_name = 'llm_events_daily_by_model_ca' - ) THEN - PERFORM add_continuous_aggregate_policy( - 'llm_events_daily_by_model_ca', - start_offset => interval '30 days', - end_offset => interval '1 hour', - schedule_interval => interval '15 minutes' - ); - END IF; - END IF; -EXCEPTION - WHEN undefined_table THEN NULL; - WHEN undefined_function THEN NULL; -END$$; - --- Refresh policies for llm_events_daily_by_agent_ca -DO $$ -BEGIN - IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') - AND EXISTS ( - SELECT 1 - FROM timescaledb_information.continuous_aggregates - WHERE view_name = 'llm_events_daily_by_agent_ca' - AND view_schema = current_schema() - ) - THEN - -- Add refresh policy if none exists for this CA - IF NOT EXISTS ( - SELECT 1 FROM timescaledb_information.jobs - WHERE proc_name = 'policy_refresh_continuous_aggregate' - AND hypertable_schema = current_schema() - AND hypertable_name = 'llm_events_daily_by_agent_ca' - ) THEN - PERFORM add_continuous_aggregate_policy( - 'llm_events_daily_by_agent_ca', - start_offset => interval '30 days', - end_offset => interval '1 hour', - schedule_interval => interval '15 minutes' - ); - END IF; - END IF; -EXCEPTION - WHEN undefined_table THEN NULL; - WHEN undefined_function THEN NULL; -END$$; diff --git a/hive/src/services/tsdb/team_context.ts b/hive/src/services/tsdb/team_context.ts deleted file mode 100644 index 
578dfefb..00000000 --- a/hive/src/services/tsdb/team_context.ts +++ /dev/null @@ -1,114 +0,0 @@ -import { Pool, PoolConfig, PoolClient } from "pg"; -import jwt from "jsonwebtoken"; - -// Cache pools per team schema -const poolCache = new Map(); - -interface TokenPayload { - team_id?: string; - team?: string; - teamId?: string; - current_team_id?: string; - user_id?: string; - sub?: string; - user?: string; - userId?: string; - [key: string]: unknown; -} - -interface ParsedToken { - team_id: string; - user_id: string | null; - token: string; - payload: TokenPayload; -} - -/** - * Parse JWT to extract team_id and user_id. - * - Supports Authorization header formats: "Bearer " or "jwt " or raw token. - * - team_id: payload.team_id || payload.team || payload.teamId - * - user_id: payload.user_id || payload.sub || payload.user || payload.userId - */ -const parseToken = (authHeader: string | undefined): ParsedToken | null => { - if (!authHeader) return null; - const parts = authHeader.trim().split(" "); - const token = parts.length === 2 ? parts[1] : parts[0]; - if (!token) return null; - - // Token is already verified by passport middleware; decode only to extract team/user fields. 
- const payload = jwt.decode(token) as TokenPayload | null; - if (!payload || typeof payload !== "object") return null; - - const team_id = payload.team_id || payload.team || payload.teamId || payload.current_team_id; - const user_id = payload.user_id || payload.sub || payload.user || payload.userId || null; - if (!team_id) return null; - - return { team_id, user_id: user_id as string | null, token, payload }; -}; - -const buildSchemaName = (team_id: string | number): string => { - return `team_${team_id}`.replace(/[^a-zA-Z0-9_]/g, "_"); -}; - -declare const _GLOBAL_CONST: { ACHO_PG_CONFIG?: { USER: string; HOST: string; DATABASE: string; PASSWORD: string; PORT: number } }; - -const basePoolConfig = (): Partial => { - const connStr = (process.env.TSDB_PG_URL || "").replace(/\s+/g, ""); - if (connStr) { - // Only enable SSL for non-local connections or when explicitly requested - const isLocal = connStr.includes("localhost") || connStr.includes("127.0.0.1") || connStr.includes("timescaledb"); - const sslRequested = connStr.includes("sslmode=require") || process.env.TSDB_SSL === "true"; - const ssl = !isLocal || sslRequested ? 
{ rejectUnauthorized: false } : false; - return { connectionString: connStr, ssl }; - } - if (typeof _GLOBAL_CONST !== "undefined" && _GLOBAL_CONST.ACHO_PG_CONFIG) { - const cfg = _GLOBAL_CONST.ACHO_PG_CONFIG; - return { - user: cfg.USER, - host: cfg.HOST, - database: cfg.DATABASE, - password: cfg.PASSWORD, - port: cfg.PORT, - }; - } - return {}; -}; - -const getTeamPool = async (team_id: string | number, overrideConfig?: Partial): Promise => { - const schema = buildSchemaName(team_id); - if (poolCache.has(schema)) return poolCache.get(schema)!; - - const pool = new Pool({ - ...basePoolConfig(), - ...(overrideConfig || {}), - max: 10, - idleTimeoutMillis: 30000, - connectionTimeoutMillis: 10000, - }); - - // Handle pool-level errors to prevent unhandled rejections - pool.on("error", (err) => { - console.error(`[team_context] Pool error for schema ${schema}:`, err.message); - // Remove from cache to force fresh pool on next request - poolCache.delete(schema); - }); - - // Ensure schema exists and set search_path per connection - pool.on("connect", (client: PoolClient) => { - // Fire-and-forget with error handling - don't await in event handler - client.query(`CREATE SCHEMA IF NOT EXISTS ${schema}`) - .then(() => client.query(`SET search_path TO ${schema}, public`)) - .catch((err: Error) => { - console.error(`[team_context] Schema setup error for ${schema}:`, err.message); - }); - }); - - poolCache.set(schema, pool); - return pool; -}; - -export { - parseToken, - buildSchemaName, - getTeamPool, -}; diff --git a/hive/src/services/tsdb/tsdb_service.ts b/hive/src/services/tsdb/tsdb_service.ts deleted file mode 100644 index 02d5d2f2..00000000 --- a/hive/src/services/tsdb/tsdb_service.ts +++ /dev/null @@ -1,955 +0,0 @@ -import fs from "fs"; -import path from "path"; -import crypto from "crypto"; -import { Pool, PoolClient } from "pg"; -import pricingService from "./pricing_service"; - -let _tsdbPool: Pool | undefined; -let _schemaReadyPromise: Promise | null; -const 
_schemaReadyByName = new Map>(); // Per-schema initialization tracking -const SCHEMA_SQL = fs.readFileSync(path.join(__dirname, "schema.sql"), "utf8"); - -const safeParseJson = (val: unknown): unknown => { - if (val === null || val === undefined) return null; - if (typeof val === "object") return val; - if (typeof val === "string") { - try { - return JSON.parse(val); - } catch (_e) { - return null; - } - } - return null; -}; - -const asObject = (val: unknown, fallback: Record = {}): Record => { - const parsed = safeParseJson(val); - if (parsed && !Array.isArray(parsed) && typeof parsed === "object") return parsed as Record; - if (val && typeof val === "object" && !Array.isArray(val)) return val as Record; - return fallback; -}; - -const asArray = (val: unknown, fallback: unknown[] = []): unknown[] => { - if (Array.isArray(val)) return val; - const parsed = safeParseJson(val); - if (Array.isArray(parsed)) return parsed; - if (typeof val === "string") { - const trimmed = val.trim(); - if (trimmed.startsWith("{") && trimmed.endsWith("}")) { - const inner = trimmed.slice(1, -1).trim(); - if (!inner) return []; - return inner - .split(",") - .map((s) => s.trim().replace(/^"+|"+$/g, "")) - .filter(Boolean); - } - return [val]; - } - if (val !== null && val !== undefined && typeof val !== "object" && typeof val !== "function") { - return [val]; - } - return fallback; -}; - -const buildMetadata = (raw: Record): Record | null => { - const base = asObject(raw.metadata ?? raw.meta ?? raw.properties ?? raw.extra, {}); - const tags = asArray(raw.tags ?? raw.labels, []) as string[]; - if (tags && tags.length) { - base.tags = tags; - } - const sessionId = raw.session_id ?? raw.sessionId; - if (sessionId !== undefined && sessionId !== null && base.session_id === undefined) { - base.session_id = sessionId; - } - const environment = raw.environment ?? 
raw.env; - if (environment && base.environment === undefined) { - base.environment = environment; - } - return Object.keys(base).length ? base : null; -}; - -interface UsageData { - input_tokens?: number; - output_tokens?: number; - total_tokens?: number; - cached_tokens?: number; - reasoning_tokens?: number; - accepted_prediction_tokens?: number; - rejected_prediction_tokens?: number; -} - -const calcCost = (model: string, usage: UsageData = {}): number => { - const inputTokens = Number.isFinite(Number(usage.input_tokens)) ? Number(usage.input_tokens) : 0; - const outputTokens = Number.isFinite(Number(usage.output_tokens)) ? Number(usage.output_tokens) : 0; - const cachedTokens = Number.isFinite(Number(usage.cached_tokens)) ? Number(usage.cached_tokens) : 0; - - const result = pricingService.calculateCostSync({ - model: model || "", - input_tokens: inputTokens, - output_tokens: outputTokens, - cached_tokens: cachedTokens, - }); - - return result.total; -}; - -// ============================================================================= -// Content Storage Types and Utilities -// ============================================================================= - -interface ContentCapture { - system_prompt?: string; - messages?: unknown[]; - tools?: unknown[]; - params?: Record; - response_content?: string; - finish_reason?: string; - choice_count?: number; - has_images?: boolean; - image_urls?: string[]; -} - -interface ContentReference { - content_type: string; - content_hash: string; - byte_size: number; - message_count?: number; - truncated_preview?: string; -} - -interface ContentToStore { - content_hash: string; - content: string; - byte_size: number; -} - -/** - * Generate SHA-256 hash of content for content-addressable storage - */ -const hashContent = (content: string): string => { - return crypto.createHash("sha256").update(content, "utf8").digest("hex"); -}; - -/** - * Create a truncated preview of content (first 200 chars) - */ -const createPreview = 
(content: string, maxLength: number = 200): string => { - if (!content || content.length <= maxLength) return content || ""; - return content.slice(0, maxLength) + "..."; -}; - -/** - * Extract content from ContentCapture and prepare for storage - * Returns content references for warm table and content items for cold table - */ -const extractContent = ( - contentCapture: ContentCapture | null | undefined -): { refs: ContentReference[]; items: ContentToStore[] } => { - if (!contentCapture) { - return { refs: [], items: [] }; - } - - const refs: ContentReference[] = []; - const items: ContentToStore[] = []; - const seenHashes = new Set(); - - // Helper to process a content field - const processContent = ( - type: string, - value: unknown, - messageCount?: number - ): void => { - if (value === null || value === undefined) return; - - const contentStr = typeof value === "string" ? value : JSON.stringify(value); - if (!contentStr || contentStr === "null" || contentStr === "{}") return; - - const hash = hashContent(contentStr); - const byteSize = Buffer.byteLength(contentStr, "utf8"); - - refs.push({ - content_type: type, - content_hash: hash, - byte_size: byteSize, - message_count: messageCount, - truncated_preview: createPreview(contentStr), - }); - - // Only store content once per hash (deduplication within batch) - if (!seenHashes.has(hash)) { - seenHashes.add(hash); - items.push({ - content_hash: hash, - content: contentStr, - byte_size: byteSize, - }); - } - }; - - // Extract each content type - if (contentCapture.system_prompt) { - processContent("system_prompt", contentCapture.system_prompt); - } - - if (contentCapture.messages && Array.isArray(contentCapture.messages) && contentCapture.messages.length > 0) { - processContent("messages", contentCapture.messages, contentCapture.messages.length); - } - - if (contentCapture.response_content) { - processContent("response", contentCapture.response_content); - } - - if (contentCapture.tools && 
Array.isArray(contentCapture.tools) && contentCapture.tools.length > 0) { - processContent("tools", contentCapture.tools); - } - - // Only store params if they have meaningful values (not all nulls) - if (contentCapture.params) { - const hasValues = Object.values(contentCapture.params).some( - (v) => v !== null && v !== undefined - ); - if (hasValues) { - processContent("params", contentCapture.params); - } - } - - return { refs, items }; -}; - -const parseDate = (val: unknown): Date | null => { - if (!val) return null; - const d = new Date(val as string | number | Date); - return Number.isNaN(d.getTime()) ? null : d; -}; - -const numberOrNull = (val: unknown): number | null => { - const n = Number(val); - return Number.isFinite(n) ? n : null; -}; - -interface RawEvent { - timestamp?: unknown; - team_id?: unknown; - traceId?: string; - trace_id?: string; - spanId?: string; - span_id?: string; - parent_span_id?: string; - callSequence?: number; - call_sequence?: number; - requestId?: string; - request_id?: string; - provider?: string; - model?: string; - stream?: boolean; - agent?: string; - agent_name?: string; - user_id?: string; - latency_ms?: number; - usage?: UsageData; - input_tokens?: number; - output_tokens?: number; - total_tokens?: number; - cached_tokens?: number; - reasoning_tokens?: number; - accepted_prediction_tokens?: number; - rejected_prediction_tokens?: number; - metadata?: Record; - meta?: Record; - properties?: Record; - extra?: Record; - tags?: string[]; - labels?: string[]; - session_id?: string; - sessionId?: string; - environment?: string; - env?: string; - agentStack?: string[]; - agent_stack?: string[]; - callSite?: Record; - call_site?: Record; - call_site_file?: string; - call_site_line?: number; - call_site_column?: number; - call_site_function?: string; - call_stack?: string[]; - content_capture?: Record; -} - -interface NormalizedEvent { - timestamp: Date; - ingest_date: string; - team_id: string; - trace_id: string; - span_id: string 
| null; - parent_span_id: string | null; - request_id: string | null; - provider: string | null; - call_sequence: number; - model: string; - stream: boolean; - agent: string | null; - agent_name: string | null; - user_id: string | null; - latency_ms: number | null; - usage_input_tokens: number | null; - usage_output_tokens: number | null; - usage_total_tokens: number | null; - usage_cached_tokens: number | null; - usage_reasoning_tokens: number | null; - usage_accepted_prediction_tokens: number | null; - usage_rejected_prediction_tokens: number | null; - metadata: Record | null; - call_site: Record; - agent_stack: string[]; - cost_total: number; - // Hot table fields (lightweight content indicators) - has_content: boolean; - finish_reason: string | null; - tool_call_count: number; - // Content data for warm/cold storage (extracted separately) - content_refs: ContentReference[]; - content_items: ContentToStore[]; - // Deprecated: kept for backward compatibility during migration - content_capture: Record | null; -} - -const normalizeEvent = (raw: RawEvent): NormalizedEvent | null => { - const ts = raw.timestamp; - const teamId = raw.team_id; - const parsedTs = parseDate(ts); - if (!parsedTs) return null; - - const traceId = raw.traceId || raw.trace_id; - const spanId = raw.spanId || raw.span_id; - const parentSpanId = raw.parent_span_id || null; - const callSeqRaw = raw.callSequence ?? 
raw.call_sequence; - if ( - traceId === undefined || - callSeqRaw === undefined || - callSeqRaw === null || - teamId === undefined || - teamId === null - ) { - return null; - } - const callSeq = Number(callSeqRaw); - if (!Number.isInteger(callSeq)) return null; - - const usage: UsageData = raw.usage || { - input_tokens: raw.input_tokens, - output_tokens: raw.output_tokens, - total_tokens: raw.total_tokens, - cached_tokens: raw.cached_tokens, - reasoning_tokens: raw.reasoning_tokens, - accepted_prediction_tokens: raw.accepted_prediction_tokens, - rejected_prediction_tokens: raw.rejected_prediction_tokens, - }; - // Extract agent - metadata.agent takes precedence over top-level agent - const metadata = asObject(raw.metadata, {}); - const effectiveAgent = (metadata.agent as string) || raw.agent || null; - - let agentStack = asArray(raw.agentStack ?? raw.agent_stack, []) as string[]; - if (effectiveAgent) { - const agentVal = String(effectiveAgent); - if (!agentStack.includes(agentVal)) { - agentStack = [agentVal, ...agentStack]; - } - } - const callSite = - raw.callSite || - asObject(raw.call_site, { - file: raw.call_site_file, - line: raw.call_site_line, - column: raw.call_site_column, - function: raw.call_site_function, - stack: asArray(raw.call_stack, []), - }); - - // Extract content for warm/cold storage - const contentCapture = raw.content_capture as ContentCapture | undefined; - const { refs: contentRefs, items: contentItems } = extractContent(contentCapture); - - // Extract lightweight content indicators for hot table - const hasContent = contentRefs.length > 0; - const finishReason = contentCapture?.finish_reason || null; - - // Count tool calls from messages or tool_calls field - let toolCallCount = 0; - if (contentCapture?.messages && Array.isArray(contentCapture.messages)) { - for (const msg of contentCapture.messages) { - const msgObj = msg as Record; - if (msgObj.tool_calls && Array.isArray(msgObj.tool_calls)) { - toolCallCount += 
msgObj.tool_calls.length; - } - } - } - - return { - timestamp: parsedTs, - ingest_date: parsedTs.toISOString().slice(0, 10), - team_id: String(teamId), - trace_id: String(traceId), - span_id: spanId || null, - parent_span_id: parentSpanId, - request_id: raw.requestId || raw.request_id || null, - provider: raw.provider || null, - call_sequence: callSeq, - model: raw.model || "", - stream: Boolean(raw.stream), - agent: agentStack[0] || null, - agent_name: raw.agent_name || null, - user_id: raw.user_id || null, - latency_ms: numberOrNull(raw.latency_ms), - usage_input_tokens: numberOrNull(usage.input_tokens), - usage_output_tokens: numberOrNull(usage.output_tokens), - usage_total_tokens: numberOrNull(usage.total_tokens), - usage_cached_tokens: numberOrNull(usage.cached_tokens), - usage_reasoning_tokens: numberOrNull(usage.reasoning_tokens), - usage_accepted_prediction_tokens: numberOrNull(usage.accepted_prediction_tokens), - usage_rejected_prediction_tokens: numberOrNull(usage.rejected_prediction_tokens), - metadata: buildMetadata(raw as Record), - call_site: callSite as Record, - agent_stack: agentStack, - cost_total: calcCost(raw.model || "", usage), - // Hot table content indicators - has_content: hasContent, - finish_reason: finishReason, - tool_call_count: toolCallCount, - // Content for warm/cold storage - content_refs: contentRefs, - content_items: contentItems, - // Deprecated: kept for backward compatibility - content_capture: raw.content_capture || null, - }; -}; - -const dedupeEvents = (events: NormalizedEvent[]): NormalizedEvent[] => { - const deduped = new Map(); - events.forEach((ev) => { - const key = `${ev.trace_id}||${ev.call_sequence}`; - const existing = deduped.get(key); - if (!existing) { - deduped.set(key, ev); - return; - } - if (existing.timestamp && ev.timestamp && ev.timestamp > existing.timestamp) { - deduped.set(key, ev); - } - }); - return Array.from(deduped.values()); -}; - -const normalizeEvents = (rawEvents: RawEvent[] = []): 
NormalizedEvent[] => { - const normalized: NormalizedEvent[] = []; - rawEvents.forEach((ev) => { - const n = normalizeEvent(ev); - if (n) normalized.push(n); - }); - return dedupeEvents(normalized); -}; - -const getTsdbPool = (): Pool => { - if (_tsdbPool) return _tsdbPool; - const connStr = (process.env.TSDB_PG_URL || "").replace(/\s+/g, ""); - if (connStr) { - _tsdbPool = new Pool({ - connectionString: connStr, - ssl: { rejectUnauthorized: false }, - }); - return _tsdbPool; - } - if ((global as unknown as Record)._ACHO_PG_POOL) { - _tsdbPool = (global as unknown as Record)._ACHO_PG_POOL as Pool; - return _tsdbPool; - } - throw new Error("TSDB pool not available. Set TSDB_PG_URL or initialize _ACHO_PG_POOL."); -}; - -const ensureSchema = async (client?: PoolClient): Promise => { - if (client) { - // Get current schema name for per-schema caching - const schemaResult = await client.query("SELECT current_schema()"); - const schemaName = schemaResult.rows[0]?.current_schema || "public"; - - // Check if this schema is already initialized - if (_schemaReadyByName.has(schemaName)) { - return _schemaReadyByName.get(schemaName); - } - - // Create and cache the initialization promise - const initPromise = (async () => { - try { - await client.query(SCHEMA_SQL); - } catch (err: unknown) { - // Handle race condition - if object already exists, it's fine - const pgError = err as { code?: string }; - if (pgError.code === "23505" || pgError.code === "42P07") { - // 23505 = unique_violation, 42P07 = duplicate_table - console.log(`[tsdb] Schema ${schemaName} already initialized (concurrent request)`); - return; - } - throw err; - } - })(); - - _schemaReadyByName.set(schemaName, initPromise); - - try { - await initPromise; - } catch (err) { - _schemaReadyByName.delete(schemaName); - throw err; - } - return; - } - - if (_schemaReadyPromise) return _schemaReadyPromise; - - const pool = getTsdbPool(); - _schemaReadyPromise = (async () => { - const executor = await pool.connect(); - 
try { - await executor.query(SCHEMA_SQL); - } finally { - executor.release(); - } - })(); - - try { - await _schemaReadyPromise; - } catch (err) { - _schemaReadyPromise = null; - throw err; - } -}; - -interface UpsertResult { - rowsWritten: number; - normalized: number; - received?: number; - contentStored?: number; - contentDeduplicated?: number; -} - -/** - * Store content in cold storage (llm_content_store) with deduplication - * Uses ON CONFLICT to increment ref_count for existing content - */ -const storeContentCold = async ( - executor: PoolClient, - teamId: string, - items: ContentToStore[] -): Promise<{ stored: number; deduplicated: number }> => { - if (!items.length) return { stored: 0, deduplicated: 0 }; - - // Batch upsert content items - const cols = ["content_hash", "team_id", "content", "byte_size", "ref_count", "first_seen_at", "last_seen_at"]; - const values: unknown[] = []; - const placeholders: string[] = []; - const now = new Date(); - - items.forEach((item, idx) => { - const base = idx * cols.length; - placeholders.push(`(${cols.map((__, i) => `$${base + i + 1}`).join(", ")})`); - values.push(item.content_hash, teamId, item.content, item.byte_size, 1, now, now); - }); - - const sql = ` - INSERT INTO llm_content_store (${cols.join(", ")}) - VALUES ${placeholders.join(", ")} - ON CONFLICT (content_hash, team_id) - DO UPDATE SET - ref_count = llm_content_store.ref_count + 1, - last_seen_at = EXCLUDED.last_seen_at - RETURNING (xmax = 0) AS inserted - `; - - const result = await executor.query(sql, values); - const inserted = result.rows.filter((r: { inserted: boolean }) => r.inserted).length; - const deduplicated = items.length - inserted; - - return { stored: inserted, deduplicated }; -}; - -/** - * Store content references in warm storage (llm_event_content) - */ -const storeContentWarm = async ( - executor: PoolClient, - events: NormalizedEvent[] -): Promise => { - // Collect all content references from all events - const allRefs: Array<{ - 
timestamp: Date; - trace_id: string; - call_sequence: number; - team_id: string; - ref: ContentReference; - }> = []; - - for (const ev of events) { - for (const ref of ev.content_refs) { - allRefs.push({ - timestamp: ev.timestamp, - trace_id: ev.trace_id, - call_sequence: ev.call_sequence, - team_id: ev.team_id, - ref, - }); - } - } - - if (!allRefs.length) return 0; - - const cols = [ - '"timestamp"', - "trace_id", - "call_sequence", - "team_id", - "content_type", - "content_hash", - "byte_size", - "message_count", - "truncated_preview", - ]; - const values: unknown[] = []; - const placeholders: string[] = []; - - allRefs.forEach((item, idx) => { - const base = idx * cols.length; - placeholders.push(`(${cols.map((__, i) => `$${base + i + 1}`).join(", ")})`); - values.push( - item.timestamp, - item.trace_id, - item.call_sequence, - item.team_id, - item.ref.content_type, - item.ref.content_hash, - item.ref.byte_size, - item.ref.message_count || null, - item.ref.truncated_preview || null - ); - }); - - const sql = ` - INSERT INTO llm_event_content (${cols.join(", ")}) - VALUES ${placeholders.join(", ")} - `; - - await executor.query(sql, values); - return allRefs.length; -}; - -const upsertEvents = async (rawEvents: RawEvent[] = [], client?: PoolClient): Promise => { - const events = normalizeEvents(rawEvents); - if (!events.length) { - return { rowsWritten: 0, normalized: 0 }; - } - - // Hot table columns (metrics only, no full content_capture) - const cols = [ - '"timestamp"', - "ingest_date", - "team_id", - "user_id", - "trace_id", - "span_id", - "parent_span_id", - "request_id", - "provider", - "call_sequence", - "model", - "stream", - "agent", - "agent_name", - "latency_ms", - "usage_input_tokens", - "usage_output_tokens", - "usage_total_tokens", - "usage_cached_tokens", - "usage_reasoning_tokens", - "usage_accepted_prediction_tokens", - "usage_rejected_prediction_tokens", - "call_site", - "metadata", - "agent_stack", - "cost_total", - // New lightweight content 
fields - "has_content", - "finish_reason", - "tool_call_count", - // Deprecated: kept for backward compatibility during migration - "content_capture", - ]; - - const values: unknown[] = []; - const placeholders: string[] = []; - events.forEach((ev, idx) => { - const base = idx * cols.length; - placeholders.push(`(${cols.map((__, i) => `$${base + i + 1}`).join(", ")})`); - values.push( - ev.timestamp, - ev.ingest_date, - ev.team_id, - ev.user_id, - ev.trace_id, - ev.span_id, - ev.parent_span_id, - ev.request_id, - ev.provider, - ev.call_sequence, - ev.model, - ev.stream, - ev.agent, - ev.agent_name, - ev.latency_ms, - ev.usage_input_tokens, - ev.usage_output_tokens, - ev.usage_total_tokens, - ev.usage_cached_tokens, - ev.usage_reasoning_tokens, - ev.usage_accepted_prediction_tokens, - ev.usage_rejected_prediction_tokens, - JSON.stringify(ev.call_site || {}), - ev.metadata ? JSON.stringify(ev.metadata) : null, - JSON.stringify(ev.agent_stack || []), - ev.cost_total, - // New fields - ev.has_content, - ev.finish_reason, - ev.tool_call_count, - // Deprecated: store null for new events, keep for backward compat - null - ); - }); - - const sql = ` - INSERT INTO llm_events (${cols.join(", ")}) - VALUES ${placeholders.join(", ")} - ON CONFLICT ("timestamp", trace_id, call_sequence) - DO UPDATE SET - "timestamp" = EXCLUDED."timestamp", - ingest_date = EXCLUDED.ingest_date, - team_id = EXCLUDED.team_id, - user_id = EXCLUDED.user_id, - trace_id = EXCLUDED.trace_id, - span_id = EXCLUDED.span_id, - parent_span_id = EXCLUDED.parent_span_id, - request_id = EXCLUDED.request_id, - provider = EXCLUDED.provider, - model = EXCLUDED.model, - stream = EXCLUDED.stream, - agent = EXCLUDED.agent, - agent_name = EXCLUDED.agent_name, - latency_ms = EXCLUDED.latency_ms, - usage_input_tokens = EXCLUDED.usage_input_tokens, - usage_output_tokens = EXCLUDED.usage_output_tokens, - usage_total_tokens = EXCLUDED.usage_total_tokens, - usage_cached_tokens = EXCLUDED.usage_cached_tokens, - 
usage_reasoning_tokens = EXCLUDED.usage_reasoning_tokens, - usage_accepted_prediction_tokens = EXCLUDED.usage_accepted_prediction_tokens, - usage_rejected_prediction_tokens = EXCLUDED.usage_rejected_prediction_tokens, - call_site = EXCLUDED.call_site, - metadata = EXCLUDED.metadata, - agent_stack = EXCLUDED.agent_stack, - cost_total = EXCLUDED.cost_total, - has_content = EXCLUDED.has_content, - finish_reason = EXCLUDED.finish_reason, - tool_call_count = EXCLUDED.tool_call_count - WHERE EXCLUDED."timestamp" >= llm_events."timestamp" - `; - - const pool = client ? null : getTsdbPool(); - const executor = client || (await pool!.connect()); - - let contentStored = 0; - let contentDeduplicated = 0; - - try { - // 1. Insert into hot table (llm_events) - await executor.query(sql, values); - - // 2. Collect all content items for cold storage (deduplicated across events) - const allContentItems: ContentToStore[] = []; - const seenHashes = new Set(); - const teamId = events[0]?.team_id; - - for (const ev of events) { - for (const item of ev.content_items) { - if (!seenHashes.has(item.content_hash)) { - seenHashes.add(item.content_hash); - allContentItems.push(item); - } - } - } - - // 3. Store content in cold storage (llm_content_store) - if (allContentItems.length > 0 && teamId) { - const coldResult = await storeContentCold(executor, teamId, allContentItems); - contentStored = coldResult.stored; - contentDeduplicated = coldResult.deduplicated; - } - - // 4. 
Store content references in warm storage (llm_event_content) - await storeContentWarm(executor, events); - - } finally { - if (!client && executor && 'release' in executor) { - (executor as PoolClient).release(); - } - } - - return { - rowsWritten: events.length, - normalized: events.length, - received: rawEvents.length, - contentStored, - contentDeduplicated, - }; -}; - -/** - * Retrieve content from cold storage by hash - */ -const getContentByHash = async ( - teamId: string, - contentHash: string, - client?: PoolClient -): Promise => { - const pool = client ? null : getTsdbPool(); - const executor = client || (await pool!.connect()); - - try { - const result = await executor.query( - `SELECT content FROM llm_content_store WHERE content_hash = $1 AND team_id = $2`, - [contentHash, teamId] - ); - return result.rows[0]?.content || null; - } finally { - if (!client && executor && "release" in executor) { - (executor as PoolClient).release(); - } - } -}; - -/** - * Get all content references for an event - */ -const getEventContent = async ( - teamId: string, - traceId: string, - callSequence: number, - client?: PoolClient -): Promise> => { - const pool = client ? 
null : getTsdbPool(); - const executor = client || (await pool!.connect()); - - try { - // Get content references from warm storage - const refsResult = await executor.query( - `SELECT content_type, content_hash, byte_size, message_count, truncated_preview - FROM llm_event_content - WHERE team_id = $1 AND trace_id = $2 AND call_sequence = $3`, - [teamId, traceId, callSequence] - ); - - const refs = refsResult.rows as ContentReference[]; - - // Optionally fetch full content from cold storage - const results: Array = []; - for (const ref of refs) { - const content = await getContentByHash(teamId, ref.content_hash, executor); - results.push({ ...ref, content: content || undefined }); - } - - return results; - } finally { - if (!client && executor && "release" in executor) { - (executor as PoolClient).release(); - } - } -}; - -interface DistinctAgentRecord { - agent: string; - agent_name: string | null; - first_seen: Date; - last_seen: Date; - total_requests: number; - total_cost: number; -} - -/** - * Get all distinct agents from events for a team - * Returns agent identifiers with their first/last seen timestamps and usage stats - */ -const getDistinctAgents = async ( - teamId: string, - options: { - since?: Date; - limit?: number; - } = {}, - client?: PoolClient -): Promise => { - const pool = client ? 
null : getTsdbPool(); - const executor = client || (await pool!.connect()); - - try { - const { since, limit = 100 } = options; - - let sql = ` - SELECT - agent, - MAX(agent_name) as agent_name, - MIN("timestamp") as first_seen, - MAX("timestamp") as last_seen, - COUNT(*) as total_requests, - COALESCE(SUM(cost_total), 0) as total_cost - FROM llm_events - WHERE team_id = $1 - AND agent IS NOT NULL - AND agent != '' - `; - - const params: unknown[] = [teamId]; - - if (since) { - sql += ` AND "timestamp" >= $${params.length + 1}`; - params.push(since); - } - - sql += ` - GROUP BY agent - ORDER BY last_seen DESC - LIMIT $${params.length + 1} - `; - params.push(limit); - - const result = await executor.query(sql, params); - - return result.rows.map((row: Record) => ({ - agent: row.agent as string, - agent_name: row.agent_name as string | null, - first_seen: new Date(row.first_seen as string), - last_seen: new Date(row.last_seen as string), - total_requests: Number(row.total_requests), - total_cost: Number(row.total_cost), - })); - } finally { - if (!client && executor && "release" in executor) { - (executor as PoolClient).release(); - } - } -}; - -export { - normalizeEvent, - normalizeEvents, - ensureSchema, - upsertEvents, - getTsdbPool, - getContentByHash, - getEventContent, - getDistinctAgents, -}; diff --git a/hive/src/services/tsdb/users_schema.sql b/hive/src/services/tsdb/users_schema.sql deleted file mode 100644 index 7a9c7257..00000000 --- a/hive/src/services/tsdb/users_schema.sql +++ /dev/null @@ -1,149 +0,0 @@ --- User Authentication Schema for PostgreSQL (Local Development) --- This schema mirrors the MySQL user tables for local development --- Run this on your local PostgreSQL/TimescaleDB instance - --- ============================================================================= --- USERS TABLE: Core user accounts --- ============================================================================= -CREATE TABLE IF NOT EXISTS users ( - id SERIAL PRIMARY KEY, - 
email VARCHAR(255) UNIQUE NOT NULL, - password VARCHAR(255), - name VARCHAR(255), - firstname VARCHAR(255), - lastname VARCHAR(255), - -- JWT authentication (TEXT for long JWT tokens) - token TEXT UNIQUE, - salt TEXT, - -- Team association - current_team_id INTEGER, - -- Account status - status VARCHAR(50) DEFAULT 'active', - email_verified BOOLEAN DEFAULT false, - -- Metadata - avatar_url TEXT, - preferences JSONB DEFAULT '{}', - -- Timestamps - created_at TIMESTAMPTZ DEFAULT NOW(), - updated_at TIMESTAMPTZ DEFAULT NOW(), - last_login_at TIMESTAMPTZ -); - --- Indexes for common lookups -CREATE INDEX IF NOT EXISTS idx_users_email ON users (email); -CREATE INDEX IF NOT EXISTS idx_users_token ON users (token); -CREATE INDEX IF NOT EXISTS idx_users_team ON users (current_team_id); - --- ============================================================================= --- DEVELOPERS TABLE: API tokens for programmatic access --- ============================================================================= -CREATE TABLE IF NOT EXISTS developers ( - id SERIAL PRIMARY KEY, - user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, - team_id INTEGER NOT NULL, - token TEXT UNIQUE NOT NULL, - label VARCHAR(255), - -- System tokens are managed by the platform, not users - "system" BOOLEAN DEFAULT false, - -- Permissions and scope - scopes JSONB DEFAULT '[]', - -- Rate limiting - rate_limit INTEGER DEFAULT 1000, - -- Timestamps - create_time BIGINT DEFAULT EXTRACT(EPOCH FROM NOW())::BIGINT, - last_used_at TIMESTAMPTZ, - expires_at TIMESTAMPTZ, - -- Status - revoked BOOLEAN DEFAULT false, - revoked_at TIMESTAMPTZ -); - --- Indexes for token lookups -CREATE INDEX IF NOT EXISTS idx_developers_token ON developers (token); -CREATE INDEX IF NOT EXISTS idx_developers_user ON developers (user_id); -CREATE INDEX IF NOT EXISTS idx_developers_team ON developers (team_id); -CREATE INDEX IF NOT EXISTS idx_developers_user_team ON developers (user_id, team_id); - --- 
============================================================================= --- TEAMS TABLE: Team/Organization accounts --- ============================================================================= -CREATE TABLE IF NOT EXISTS teams ( - id SERIAL PRIMARY KEY, - name VARCHAR(255) NOT NULL, - slug VARCHAR(255) UNIQUE, - -- Billing and subscription - plan VARCHAR(50) DEFAULT 'free', - billing_email VARCHAR(255), - -- Settings - settings JSONB DEFAULT '{}', - -- Timestamps - created_at TIMESTAMPTZ DEFAULT NOW(), - updated_at TIMESTAMPTZ DEFAULT NOW() -); - --- ============================================================================= --- TEAM_MEMBERS TABLE: User-Team associations --- ============================================================================= -CREATE TABLE IF NOT EXISTS team_members ( - id SERIAL PRIMARY KEY, - user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, - team_id INTEGER NOT NULL REFERENCES teams(id) ON DELETE CASCADE, - role VARCHAR(50) DEFAULT 'member', - -- Timestamps - joined_at TIMESTAMPTZ DEFAULT NOW(), - UNIQUE(user_id, team_id) -); - -CREATE INDEX IF NOT EXISTS idx_team_members_user ON team_members (user_id); -CREATE INDEX IF NOT EXISTS idx_team_members_team ON team_members (team_id); - --- ============================================================================= --- SEED DATA: Default development user and team --- ============================================================================= - --- Create a default team -INSERT INTO teams (id, name, slug, plan) -VALUES (1, 'Development Team', 'dev-team', 'enterprise') -ON CONFLICT (id) DO NOTHING; - --- Create a default development user --- Email: dev@honeycomb.local --- Password: honeycomb123 -INSERT INTO users (id, email, password, name, firstname, lastname, token, salt, current_team_id, status, email_verified) -VALUES ( - 1, - 'dev@honeycomb.local', - '$2b$10$BgXnS6Cg7HwimTzBtsnh0.j8s8.ypWFooW9A.7YbNIC4e94HIFxYu', - 'Development User', - 'Dev', - 'User', - 
'dev-token-12345', - 'dev-salt-secret-key', - 1, - 'active', - true -) -ON CONFLICT (id) DO NOTHING; - --- Create a default API token for the development user -INSERT INTO developers (id, user_id, team_id, token, label, "system") -VALUES ( - 1, - 1, - 1, - 'hive_dev_token_abc123xyz', - 'Development API Token', - false -) -ON CONFLICT (id) DO NOTHING; - --- Add user to team -INSERT INTO team_members (user_id, team_id, role) -VALUES (1, 1, 'admin') -ON CONFLICT (user_id, team_id) DO NOTHING; - --- Reset sequences to avoid conflicts -SELECT setval('users_id_seq', COALESCE((SELECT MAX(id) FROM users), 1)); -SELECT setval('teams_id_seq', COALESCE((SELECT MAX(id) FROM teams), 1)); -SELECT setval('developers_id_seq', COALESCE((SELECT MAX(id) FROM developers), 1)); -SELECT setval('team_members_id_seq', COALESCE((SELECT MAX(id) FROM team_members), 1)); diff --git a/hive/src/sockets/control.socket.ts b/hive/src/sockets/control.socket.ts deleted file mode 100644 index e5061804..00000000 --- a/hive/src/sockets/control.socket.ts +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Control Socket Initialization - * - * Wrapper for initializing control plane WebSockets with proper dependencies. 
- */ - -import { Server } from 'socket.io'; -import { createAdapter } from '@socket.io/redis-adapter'; -import { Emitter } from '@socket.io/redis-emitter'; -import Redis from 'ioredis'; -import type { Server as HttpServer } from 'http'; - -import initAdenControlSockets, { setUserDbService } from '../services/control/control_sockets'; - -interface ControlEmitter { - emitPolicyUpdate: (teamId: string | number, policyId: string | null, policy: unknown) => void; - emitCommand: (teamId: string | number, command: { action: string; [key: string]: unknown }) => void; - emitAlert: (teamId: string | number, policyId: string | null, alert: unknown) => void; - emitToInstance: (teamId: string | number, instanceId: string, message: unknown) => boolean; - getConnectedCount: (teamId: string | number) => number; - getConnectedInstances: (teamId: string | number) => Array<{ - instance_id: string; - policy_id: string | null; - connected_at: string; - last_heartbeat: string; - }>; - getTotalConnectedCount: () => number; -} - -interface MockEmitter { - of: () => { - to: () => { emit: () => void }; - emit: () => void; - }; -} - -/** - * Initialize WebSockets for the control plane - * @param server - HTTP server instance - * @returns Promise<{io: Server, controlEmitter: Object}> - */ -async function initializeSockets(server: HttpServer): Promise<{ io: Server; controlEmitter: ControlEmitter }> { - // Create Socket.IO server - const io = new Server(server, { - cors: { - origin: '*', - methods: ['GET', 'POST'], - }, - transports: ['websocket', 'polling'], - }); - - let controlEmitter: ControlEmitter; - - // Try to setup Redis adapter for scaling - if (process.env.REDIS_URL) { - try { - const pubClient = new Redis(process.env.REDIS_URL); - const subClient = pubClient.duplicate(); - - await Promise.all([ - new Promise((resolve) => pubClient.on('connect', resolve)), - new Promise((resolve) => subClient.on('connect', resolve)), - ]); - - io.adapter(createAdapter(pubClient, subClient)); - - // 
Create Redis emitter for cross-instance communication - const redisEmitter = new Emitter(pubClient); - controlEmitter = initAdenControlSockets(io, redisEmitter as unknown as { of: (namespace: string) => { to: (room: string) => { emit: (event: string, payload: unknown) => void }; emit: (event: string, payload: unknown) => void } }); - - console.log('[Sockets] Redis adapter connected'); - } catch (err) { - console.warn('[Sockets] Redis connection failed, using local adapter:', (err as Error).message); - // Create a mock emitter for local development - const mockEmitter: MockEmitter = { - of: () => ({ - to: () => ({ emit: () => {} }), - emit: () => {}, - }), - }; - controlEmitter = initAdenControlSockets(io, mockEmitter as unknown as { of: (namespace: string) => { to: (room: string) => { emit: (event: string, payload: unknown) => void }; emit: (event: string, payload: unknown) => void } }); - } - } else { - console.warn('[Sockets] No REDIS_URL configured, using local adapter'); - // Create a mock emitter for local development - const mockEmitter: MockEmitter = { - of: () => ({ - to: () => ({ emit: () => {} }), - emit: () => {}, - }), - }; - controlEmitter = initAdenControlSockets(io, mockEmitter as unknown as { of: (namespace: string) => { to: (room: string) => { emit: (event: string, payload: unknown) => void }; emit: (event: string, payload: unknown) => void } }); - } - - return { io, controlEmitter }; -} - -export { initializeSockets, setUserDbService }; diff --git a/hive/src/types/acho-inc-administration.d.ts b/hive/src/types/acho-inc-administration.d.ts deleted file mode 100644 index 6e40ae19..00000000 --- a/hive/src/types/acho-inc-administration.d.ts +++ /dev/null @@ -1,123 +0,0 @@ -declare module '@acho-inc/administration' { - import { Pool } from 'pg'; - import { Strategy } from 'passport-jwt'; - - export interface MySQLPoolConfig { - host?: string; - port?: number; - user?: string; - password?: string; - database?: string; - ssl?: { - ca?: string | Buffer; - 
key?: string | Buffer; - cert?: string | Buffer; - } | null; - } - - export interface UserDbServiceConfig { - /** MySQL connection pool (for production) */ - mysqlPool?: any; - /** PostgreSQL connection pool (for local development) */ - pgPool?: Pool; - /** Database type: 'mysql' or 'postgres' */ - dbType?: 'mysql' | 'postgres'; - /** Redis client for caching (optional) */ - redisClient?: any; - /** Table name mapping */ - tables: { - USER: string; - DEVELOPERS?: string; - }; - /** Service account salt lookup function (optional) */ - findServiceAccountSalt?: (token: string) => Promise; - } - - export interface DevTokenObject { - id: number; - user_id: number; - team_id: number; - token: string; - label: string; - system?: boolean; - create_time: number; - } - - export interface LoginResult { - token: string; - email: string; - firstname?: string; - lastname?: string; - name?: string; - current_team_id?: number; - created_at?: Date | number; - } - - export interface TokenResult { - token: string; - salt: string; - } - - export interface LoginOptions { - jwtSecret: string; - expiresIn?: string; - } - - export interface RegisterOptions extends LoginOptions { - defaultTeamId?: number; - } - - export interface UserData { - email: string; - password: string; - name?: string; - firstname?: string; - lastname?: string; - } - - export interface RegisterResult { - id: number; - token: string; - email: string; - name?: string; - firstname?: string; - lastname?: string; - current_team_id?: number; - created_at?: Date; - } - - export interface UserDbService { - findSaltByToken: (token: string) => Promise; - findById: (id: number) => Promise; - findByToken: (token: string) => Promise; - findByEmail: (email: string) => Promise; - getLatestUserDevToken: (user: { id: number; current_team_id: number }) => Promise; - // Auth methods - verifyPassword: (password: string, hash: string) => Promise; - hashPassword: (password: string) => Promise; - generateToken: (user: any, options: 
LoginOptions) => Promise; - updateUserToken: (userId: number, token: string, salt: string) => Promise; - login: (email: string, password: string, options: LoginOptions) => Promise; - register: (userData: UserData, options: RegisterOptions) => Promise; - dbType?: 'mysql' | 'postgres'; - } - - export interface PassportStrategyConfig { - findSaltByToken: (token: string) => Promise; - jwtSecret?: string; - } - - export const auth: { - createPassportStrategy: (config: PassportStrategyConfig) => Strategy; - verifyToken: (token: string, secret: string) => Promise; - }; - - export const database: { - createMySQLPool: (config: MySQLPoolConfig) => any; - createPGPool: (connectionString: string) => Pool; - }; - - export const models: { - createUserDbService: (config: UserDbServiceConfig) => UserDbService; - }; -} diff --git a/hive/tsconfig.json b/hive/tsconfig.json deleted file mode 100644 index 29a905f7..00000000 --- a/hive/tsconfig.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "compilerOptions": { - "target": "ES2022", - "module": "commonjs", - "lib": ["ES2022"], - "outDir": "./dist", - "rootDir": "./src", - "strict": false, - "esModuleInterop": true, - "skipLibCheck": true, - "forceConsistentCasingInFileNames": true, - "resolveJsonModule": true, - "declaration": false, - "sourceMap": true, - "moduleResolution": "node", - "allowSyntheticDefaultImports": true, - "noImplicitAny": false, - "strictNullChecks": false, - "noUnusedLocals": false, - "noUnusedParameters": false, - "useUnknownInCatchVariables": false, - "typeRoots": ["./node_modules/@types", "./src/types"] - }, - "include": ["src/**/*"], - "exclude": ["node_modules", "dist", "tests"] -} diff --git a/honeycomb/.env.example b/honeycomb/.env.example deleted file mode 100644 index 407158f7..00000000 --- a/honeycomb/.env.example +++ /dev/null @@ -1,13 +0,0 @@ -# Frontend Environment Variables -# Copy this file to .env and update values as needed -# Or run `npm run generate:env` from the root to generate from config.yaml - -# 
Hive API URL (handles all backend endpoints: auth, user, IAM, agent control) -VITE_API_URL=http://localhost:4000 - -# Application settings -VITE_APP_NAME=Hive -VITE_APP_ENV=development - -# Google OAuth (optional) -VITE_GOOGLE_OAUTH_ID=your-google-oauth-client-id diff --git a/honeycomb/.eslintrc.cjs b/honeycomb/.eslintrc.cjs deleted file mode 100644 index d6c95379..00000000 --- a/honeycomb/.eslintrc.cjs +++ /dev/null @@ -1,18 +0,0 @@ -module.exports = { - root: true, - env: { browser: true, es2020: true }, - extends: [ - 'eslint:recommended', - 'plugin:@typescript-eslint/recommended', - 'plugin:react-hooks/recommended', - ], - ignorePatterns: ['dist', '.eslintrc.cjs'], - parser: '@typescript-eslint/parser', - plugins: ['react-refresh'], - rules: { - 'react-refresh/only-export-components': [ - 'warn', - { allowConstantExport: true }, - ], - }, -} diff --git a/honeycomb/Dockerfile b/honeycomb/Dockerfile deleted file mode 100644 index fa06b1c8..00000000 --- a/honeycomb/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -# Build stage -FROM node:20-alpine AS builder - -WORKDIR /app - -# Build argument for API URL (Vite needs this at build time) -ARG VITE_API_URL=http://localhost:4000 -ENV VITE_API_URL=$VITE_API_URL - -# Copy package files -COPY package*.json ./ - -# Install dependencies -RUN npm install - -# Copy source code -COPY . . 
- -# Build the application -RUN npm run build - -# Production stage -FROM nginx:alpine AS production - -# Copy custom nginx config -COPY nginx.conf /etc/nginx/conf.d/default.conf - -# Copy built assets from builder -COPY --from=builder /app/dist /usr/share/nginx/html - -# Expose port -EXPOSE 3000 - -# Override the default entrypoint to skip noisy scripts -ENTRYPOINT [] -CMD ["sh", "-c", "echo '[Hive] Frontend ready at http://localhost:3000' && exec nginx -g 'daemon off;' 2>/dev/null"] diff --git a/honeycomb/Dockerfile.dev b/honeycomb/Dockerfile.dev deleted file mode 100644 index 7bec8d10..00000000 --- a/honeycomb/Dockerfile.dev +++ /dev/null @@ -1,19 +0,0 @@ -# Development Dockerfile with hot reload -FROM node:20-alpine - -WORKDIR /app - -# Copy package files -COPY package*.json ./ - -# Install dependencies -RUN npm install - -# Copy source code -COPY . . - -# Expose port -EXPOSE 3000 - -# Start development server with hot reload -CMD ["npm", "run", "dev"] diff --git a/honeycomb/components.json b/honeycomb/components.json deleted file mode 100644 index d29aef0b..00000000 --- a/honeycomb/components.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "$schema": "https://ui.shadcn.com/schema.json", - "style": "default", - "rsc": false, - "tsx": true, - "tailwind": { - "config": "tailwind.config.js", - "css": "src/styles/index.css", - "baseColor": "slate", - "cssVariables": true, - "prefix": "" - }, - "aliases": { - "components": "@/components", - "utils": "@/lib/utils", - "ui": "@/components/ui", - "lib": "@/lib", - "hooks": "@/hooks" - } -} diff --git a/honeycomb/index.html b/honeycomb/index.html deleted file mode 100644 index f891a023..00000000 --- a/honeycomb/index.html +++ /dev/null @@ -1,17 +0,0 @@ - - - - - - - - - - - Hive - - -
- - - diff --git a/honeycomb/nginx.conf b/honeycomb/nginx.conf deleted file mode 100644 index ef067b7a..00000000 --- a/honeycomb/nginx.conf +++ /dev/null @@ -1,46 +0,0 @@ -# Suppress noisy logs - only log errors -error_log /var/log/nginx/error.log error; -access_log off; - -server { - listen 3000; - server_name localhost; - root /usr/share/nginx/html; - index index.html; - - # Gzip compression - gzip on; - gzip_vary on; - gzip_min_length 1024; - gzip_proxied expired no-cache no-store private auth; - gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml application/javascript; - - # Security headers - add_header X-Frame-Options "SAMEORIGIN" always; - add_header X-Content-Type-Options "nosniff" always; - add_header X-XSS-Protection "1; mode=block" always; - - # Handle SPA routing - serve index.html for all routes - location / { - try_files $uri $uri/ /index.html; - } - - # Proxy API requests to backend - location /api { - proxy_pass http://hive:4000; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection 'upgrade'; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_cache_bypass $http_upgrade; - } - - # Cache static assets - location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2)$ { - expires 1y; - add_header Cache-Control "public, immutable"; - } -} diff --git a/honeycomb/package.json b/honeycomb/package.json deleted file mode 100644 index 9eb77dec..00000000 --- a/honeycomb/package.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "name": "honeycomb", - "version": "0.1.0", - "private": true, - "type": "module", - "scripts": { - "dev": "vite", - "build": "tsc --noEmit && vite build", - "lint": "eslint . 
--ext ts,tsx --report-unused-disable-directives --max-warnings 0", - "preview": "vite preview", - "test": "vitest --passWithNoTests", - "test:coverage": "vitest run --coverage", - "clean": "rm -rf dist node_modules" - }, - "dependencies": { - "@hookform/resolvers": "^5.2.2", - "@radix-ui/react-avatar": "^1.1.11", - "@radix-ui/react-dialog": "^1.1.15", - "@radix-ui/react-dropdown-menu": "^2.1.16", - "@radix-ui/react-label": "^2.1.8", - "@radix-ui/react-popover": "^1.1.15", - "@radix-ui/react-progress": "^1.1.8", - "@radix-ui/react-scroll-area": "^1.2.10", - "@radix-ui/react-select": "^2.2.6", - "@radix-ui/react-separator": "^1.1.8", - "@radix-ui/react-slot": "^1.2.4", - "@radix-ui/react-switch": "^1.2.6", - "@radix-ui/react-tabs": "^1.1.13", - "@radix-ui/react-tooltip": "^1.2.8", - "@tanstack/react-query": "^5.90.16", - "class-variance-authority": "^0.7.1", - "clsx": "^2.1.1", - "date-fns": "^4.1.0", - "react-day-picker": "^9.13.0", - "lucide-react": "^0.562.0", - "react": "^18.2.0", - "react-dom": "^18.2.0", - "react-hook-form": "^7.71.0", - "react-markdown": "^10.1.0", - "react-router-dom": "^6.21.0", - "react-vega": "^8.0.0", - "recharts": "^3.6.0", - "socket.io-client": "^4.8.3", - "tailwind-merge": "^3.4.0", - "tailwindcss-animate": "^1.0.7", - "vega": "^6.2.0", - "vega-embed": "^7.1.0", - "vega-lite": "^6.4.1", - "zod": "^4.3.5", - "zustand": "^5.0.10" - }, - "devDependencies": { - "@types/react": "^18.2.43", - "@types/react-dom": "^18.2.17", - "@typescript-eslint/eslint-plugin": "^6.14.0", - "@typescript-eslint/parser": "^6.14.0", - "@vitejs/plugin-react": "^4.2.1", - "autoprefixer": "^10.4.23", - "eslint": "^8.55.0", - "eslint-plugin-react-hooks": "^4.6.0", - "eslint-plugin-react-refresh": "^0.4.5", - "postcss": "^8.5.6", - "tailwindcss": "^3.4.19", - "typescript": "^5.3.0", - "vite": "^5.0.8", - "vitest": "^1.1.0" - } -} diff --git a/honeycomb/postcss.config.js b/honeycomb/postcss.config.js deleted file mode 100644 index 2e7af2b7..00000000 --- 
a/honeycomb/postcss.config.js +++ /dev/null @@ -1,6 +0,0 @@ -export default { - plugins: { - tailwindcss: {}, - autoprefixer: {}, - }, -} diff --git a/honeycomb/public/favicon.svg b/honeycomb/public/favicon.svg deleted file mode 100644 index 41fb817d..00000000 --- a/honeycomb/public/favicon.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/honeycomb/src/App.tsx b/honeycomb/src/App.tsx deleted file mode 100644 index ddd777b6..00000000 --- a/honeycomb/src/App.tsx +++ /dev/null @@ -1,40 +0,0 @@ -import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom'; -import { AgentControlLayout } from './components/agent-control/AgentControlLayout'; -import { DataPanel } from './components/agent-control/DataPanel'; -import { AnalyticsPanel } from './components/agent-control/AnalyticsPanel'; -import { CostControls } from './components/agent-control/CostControls'; -import { WorkersPanel } from './components/agent-control/WorkersPanel'; -import { NotFoundPage } from './pages/NotFoundPage'; -import { LoginPage } from './pages/LoginPage'; -import { RegisterPage } from './pages/RegisterPage'; -import { ProtectedRoute } from './components/auth/ProtectedRoute'; - -export function App() { - return ( - - - {/* Public routes */} - } /> - } /> - } /> - } /> - - {/* Protected routes */} - } /> - - - - } - > - } /> - } /> - } /> - } /> - - } /> - - - ); -} diff --git a/honeycomb/src/assets/aden-icon.png b/honeycomb/src/assets/aden-icon.png deleted file mode 100644 index 08e94456..00000000 Binary files a/honeycomb/src/assets/aden-icon.png and /dev/null differ diff --git a/honeycomb/src/assets/aden-icon.svg b/honeycomb/src/assets/aden-icon.svg deleted file mode 100644 index 93cdfe97..00000000 --- a/honeycomb/src/assets/aden-icon.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - diff --git a/honeycomb/src/assets/aden-logo.svg b/honeycomb/src/assets/aden-logo.svg deleted file mode 100644 index 38f30b6d..00000000 --- a/honeycomb/src/assets/aden-logo.svg +++ /dev/null @@ -1,15 +0,0 @@ - - 
- - - - - - - - - diff --git a/honeycomb/src/components/.gitkeep b/honeycomb/src/components/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/honeycomb/src/components/ErrorBoundary.tsx b/honeycomb/src/components/ErrorBoundary.tsx deleted file mode 100644 index 1011e485..00000000 --- a/honeycomb/src/components/ErrorBoundary.tsx +++ /dev/null @@ -1,50 +0,0 @@ -import { Component, ErrorInfo, ReactNode } from 'react' - -interface Props { - children: ReactNode -} - -interface State { - hasError: boolean - error?: Error -} - -export class ErrorBoundary extends Component { - constructor(props: Props) { - super(props) - this.state = { hasError: false } - } - - static getDerivedStateFromError(error: Error): State { - return { hasError: true, error } - } - - componentDidCatch(error: Error, errorInfo: ErrorInfo) { - console.error('Uncaught error:', error, errorInfo) - } - - render() { - if (this.state.hasError) { - return ( -
-
-

- Something went wrong -

-

- {this.state.error?.message || 'An unexpected error occurred'} -

- -
-
- ) - } - - return this.props.children - } -} diff --git a/honeycomb/src/components/agent-control/AgentControlLayout.tsx b/honeycomb/src/components/agent-control/AgentControlLayout.tsx deleted file mode 100644 index fc7c5436..00000000 --- a/honeycomb/src/components/agent-control/AgentControlLayout.tsx +++ /dev/null @@ -1,306 +0,0 @@ -import { useEffect } from 'react' -import { Outlet, NavLink, useNavigate, useLocation } from 'react-router-dom' -import { useControlSocket } from '@/hooks/useControlSocket' -import { useAgentControlStore } from '@/stores/agentControlStore' -import { useUserStore, type UserState } from '@/stores/userStore' -import { useSidebarCollapsed } from '@/hooks/usePersistedSettings' -import { NotificationBell } from './shared/NotificationBell' -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuTrigger, -} from '@/components/ui/dropdown-menu' -import { LiveIndicator } from './shared/LiveIndicator' -import { UserAvatar } from '@/components/user/UserAvatar' -import { Button } from '@/components/ui/button' -import adenLogo from '@/assets/aden-logo.svg' -import adenIcon from '@/assets/aden-icon.png' -import { - Tooltip, - TooltipContent, - TooltipProvider, - TooltipTrigger, -} from '@/components/ui/tooltip' -import { cn } from '@/lib/utils' -import { - Database, - BarChart3, - DollarSign, - Users, - PanelLeftClose, - PanelLeft, - Settings, - Sparkles, - LogOut, - HelpCircle, - ExternalLink, - FileText, - MessageCircle, -} from 'lucide-react' -import { SettingsModal } from '@/components/settings/SettingsModal' -import { HelpDialog } from './shared/HelpDialog' - -const navItems = [ - { value: 'agents', label: 'Agents', path: '/agents', icon: Users }, - { value: 'data', label: 'Logs', path: '/data', icon: Database }, - { value: 'analytics', label: 'Performance Dashboard', path: '/performance-dashboard', icon: BarChart3 }, - { value: 'cost-control', label: 'Cost Control', path: '/cost-control', icon: DollarSign }, -] - -/** 
- * Main layout for Agent Control with sidebar navigation and socket lifecycle. - */ -export function AgentControlLayout() { - const { connect, disconnect, isConnected } = useControlSocket() - const hasActiveAgents = useAgentControlStore((state) => state.eventsBuffer.length > 0) - const user = useUserStore((state: UserState) => state.user) - const fullName = useUserStore((state: UserState) => state.fullName()) - const signOut = useUserStore((state: UserState) => state.signOut) - const isLoggingOut = useUserStore((state: UserState) => state.isLoggingOut) - const navigate = useNavigate() - const location = useLocation() - const { sidebarCollapsed, toggleSidebar } = useSidebarCollapsed() - - // Settings modal controlled by URL hash - const settingsOpen = location.hash === '#settings' - const handleSettingsClose = (open: boolean) => { - if (!open) { - navigate(location.pathname, { replace: true }) - } - } - - // Help dialog controlled by URL hash - const helpOpen = location.hash === '#help' - const handleHelpClose = (open: boolean) => { - if (!open) { - navigate(location.pathname, { replace: true }) - } - } - - // Connect socket on mount - useEffect(() => { - connect() - return () => disconnect() - }, [connect, disconnect]) - - return ( -
- {/* Sidebar - full height */} - - - {/* Right side - header bar + content */} -
- {/* Top bar with connection status + notifications */} -
- - - {/* Connection status - hidden during logout to prevent red flash */} - {!isLoggingOut && ( -
- - {isConnected ? 'Connected' : 'Disconnected'} -
- )} - - - - {/* Help dropdown */} - - - - - - navigate(`${location.pathname}#help`)}> - - Guide - - window.open('https://docs.adenhq.com/', '_blank')}> - - Documentation - - - window.open('https://discord.gg/MXE49hrKDk', '_blank')}> - - Discord - - - - -
- - {/* Content area */} -
-
- -
-
-
- - - -
- ) -} diff --git a/honeycomb/src/components/agent-control/AnalyticsPanel.tsx b/honeycomb/src/components/agent-control/AnalyticsPanel.tsx deleted file mode 100644 index 9a2add5b..00000000 --- a/honeycomb/src/components/agent-control/AnalyticsPanel.tsx +++ /dev/null @@ -1,351 +0,0 @@ -import { useMemo } from 'react' -import { Card } from '@/components/ui/card' -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from '@/components/ui/select' -import { Skeleton } from '@/components/ui/skeleton' -import { KpiCard } from './shared/KpiCard' -import { LiveIndicator } from './shared/LiveIndicator' -import { VegaLiteChart } from './charts/VegaLiteChart' -import { useAnalytics } from '@/hooks/queries/useAnalytics' -import { useAgentControlStore } from '@/stores/agentControlStore' -import { usePersistedTimeRange } from '@/hooks/usePersistedSettings' -import type { TimeRange } from '@/types/settings' -import { transformAnalyticsData, type CostByModelData } from './charts/transformers' -import { - createCostTrendSpec, - createTokenUsageSpec, - createCostByModelSpec, - createLatencyDistributionSpec, -} from './charts/specs' -import type { RawJsonData, KPIValues } from '@/types/agentControl' - -// Shape of analytics API response for type safety -interface AnalyticsResponse extends RawJsonData { - analytics?: { - summary?: { - total_cost?: number - total_requests?: number - total_tokens?: number - avg_latency_ms?: number - cache_savings?: number - } - } - kpis?: Record - summary?: Record -} - -const timeRangeOptions: { value: TimeRange; label: string }[] = [ - { value: 'all', label: 'All Time' }, - { value: 'month', label: 'Last Month' }, - { value: 'twoWeeks', label: 'Last 2 Weeks' }, - { value: 'week', label: 'Last Week' }, - { value: 'today', label: 'Today' }, -] - -// Helper to safely extract KPI values from raw API response -function extractKpis(data: RawJsonData | undefined): KPIValues { - const defaults: KPIValues = { - totalCost: 0, - 
projectedMonthlyCost: 0, - totalRequests: 0, - totalTokens: 0, - successRate: 0.99, - avgLatency: 0, - cacheSavings: 0, - } - - if (!data) return defaults - - // Handle new analytics response shape - const analyticsData = data as AnalyticsResponse - if (analyticsData?.analytics?.summary) { - const summary = analyticsData.analytics.summary - return { - totalCost: Number(summary.total_cost || 0), - projectedMonthlyCost: Number(summary.total_cost || 0) * 30, - totalRequests: Number(summary.total_requests || 0), - totalTokens: Number(summary.total_tokens || 0), - successRate: 0.99, // Not provided in new API - avgLatency: Number(summary.avg_latency_ms || 0), - cacheSavings: Number(summary.cache_savings || 0), - } - } - - // Fallback to old response shapes - const kpis = (data.kpis || data.summary || data) as Record - - return { - totalCost: Number(kpis.totalCost || kpis.total_cost || 0), - projectedMonthlyCost: Number(kpis.projectedMonthlyCost || kpis.projected_cost || 0), - totalRequests: Number(kpis.totalRequests || kpis.total_requests || 0), - totalTokens: Number(kpis.totalTokens || kpis.total_tokens || 0), - successRate: Number(kpis.successRate || kpis.success_rate || 0.99), - avgLatency: Number(kpis.avgLatency || kpis.avg_latency || 0), - cacheSavings: Number(kpis.cacheSavings || kpis.cache_savings || 0), - } -} - -/** - * Main analytics dashboard with KPIs and VegaLite charts. - */ -export function AnalyticsPanel() { - const { timeRange, setTimeRange } = usePersistedTimeRange() - const hasActiveAgents = useAgentControlStore((state) => state.eventsBuffer.length > 0) - - const { data: analytics, isLoading } = useAnalytics() - - const kpis = extractKpis(analytics as RawJsonData | undefined) - - // Transform API data to chart-ready format - const chartData = useMemo( - () => transformAnalyticsData(analytics), - [analytics] - ) - - // Create chart specs with memoization - const costTrendSpec = useMemo( - () => (chartData.costTrends.length > 0 ? 
createCostTrendSpec(chartData.costTrends) : null), - [chartData.costTrends] - ) - - const tokenUsageSpec = useMemo( - () => (chartData.tokenUsage.length > 0 ? createTokenUsageSpec(chartData.tokenUsage) : null), - [chartData.tokenUsage] - ) - - const costByModelSpec = useMemo( - () => (chartData.costByModel.length > 0 ? createCostByModelSpec(chartData.costByModel) : null), - [chartData.costByModel] - ) - - const latencyDistributionSpec = useMemo( - () => - chartData.latencyDistribution.length > 0 - ? createLatencyDistributionSpec(chartData.latencyDistribution) - : null, - [chartData.latencyDistribution] - ) - - const formatCurrency = (value: number) => - new Intl.NumberFormat('en-US', { - style: 'currency', - currency: 'USD', - minimumFractionDigits: 0, - maximumFractionDigits: 2, - }).format(value) - - const formatNumber = (value: number) => { - if (value >= 1000000) return `${(value / 1000000).toFixed(1)}M` - if (value >= 1000) return `${(value / 1000).toFixed(1)}K` - return value.toLocaleString() - } - - const formatPercent = (value: number) => `${(value * 100).toFixed(1)}%` - - return ( -
- {/* Header */} -
-
-

Analytics

- -
- -
- - {/* KPI Grid */} -
- - - - - } - /> - - - - } - /> - - - - } - /> - - - - } - /> - - - - } - /> -
- - {/* Charts Grid */} - {isLoading ? ( -
- {[...Array(4)].map((_, i) => ( - - - - - ))} -
- ) : ( -
- {/* Cost Trend Chart */} - -

Cost Trend

- {costTrendSpec ? ( - - ) : ( -
- No cost data available -
- )} -
- - {/* Token Usage Chart */} - -

Token Usage

- {tokenUsageSpec ? ( - - ) : ( -
- No token data available -
- )} -
- - {/* Cost by Model Chart */} - -

Cost by Model

- {costByModelSpec ? ( -
- -
- {chartData.costByModel.map((model: CostByModelData) => ( -
-
- {model.name} - {model.value}% - ${model.cost.toFixed(4)} -
- ))} -
-
- ) : ( -
- No model data available -
- )} - - - {/* Latency Distribution Chart */} - -

Latency Distribution

- {latencyDistributionSpec ? ( - - ) : ( -
- No latency data available -
- )} -
-
- )} -
- ) -} diff --git a/honeycomb/src/components/agent-control/CostControls.tsx b/honeycomb/src/components/agent-control/CostControls.tsx deleted file mode 100644 index 61074ecd..00000000 --- a/honeycomb/src/components/agent-control/CostControls.tsx +++ /dev/null @@ -1,279 +0,0 @@ -import { useState, useMemo } from 'react' -import { Button } from '@/components/ui/button' -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from '@/components/ui/select' -import { Skeleton } from '@/components/ui/skeleton' -import { BudgetCard } from './shared/BudgetCard' -import { KpiCard } from './shared/KpiCard' -import { AddBudgetDialog } from './budget/AddBudgetDialog' -import { BudgetDetailPanel } from './budget/BudgetDetailPanel' -import { useBudgets } from '@/hooks/queries/useBudgets' -import type { BudgetType, BudgetConfig, RawJsonData } from '@/types/agentControl' - -const budgetTypeOptions: { value: BudgetType | 'all'; label: string }[] = [ - { value: 'all', label: 'All Types' }, - { value: 'global', label: 'Global' }, - { value: 'agent', label: 'Agent' }, - { value: 'customer', label: 'Customer' }, - { value: 'feature', label: 'Feature' }, - { value: 'tag', label: 'Tag' }, -] - -// Extract budgets from API response (handles policy-based structure) -function extractBudgets(data: RawJsonData | undefined): BudgetConfig[] { - if (!data) return [] - if (Array.isArray(data)) return data as BudgetConfig[] - if (data.policies && Array.isArray(data.policies)) { - const allBudgets: BudgetConfig[] = [] - for (const policy of data.policies as Array<{ budgets?: BudgetConfig[] }>) { - if (policy.budgets) allBudgets.push(...policy.budgets) - } - return allBudgets - } - if (data.budgets && Array.isArray(data.budgets)) { - return data.budgets as BudgetConfig[] - } - return [] -} - -// Extract policyId from API response (uses first policy or 'default') -function extractPolicyId(data: RawJsonData | undefined): string | null { - if (!data) return null - if 
(data.policies && Array.isArray(data.policies) && data.policies.length > 0) { - return (data.policies[0] as { id?: string }).id || 'default' - } - return 'default' -} - -/** - * Budget management panel with summary cards and budget list. - */ -export function CostControls() { - const [typeFilter, setTypeFilter] = useState('all') - const [addDialogOpen, setAddDialogOpen] = useState(false) - const [selectedBudget, setSelectedBudget] = useState(null) - const [detailPanelOpen, setDetailPanelOpen] = useState(false) - - const handleBudgetClick = (budget: BudgetConfig) => { - setSelectedBudget(budget) - setDetailPanelOpen(true) - } - - const { data: rawData, isLoading, error } = useBudgets() - - // Parse budgets and policyId from API response - const budgets = useMemo( - () => extractBudgets(rawData as RawJsonData | undefined), - [rawData] - ) - - const policyId = useMemo( - () => extractPolicyId(rawData as RawJsonData | undefined), - [rawData] - ) - - // Compute summary stats - const summary = useMemo(() => { - if (!budgets.length) return null - return { - totalBudget: budgets.reduce((sum: number, b: BudgetConfig) => sum + b.limit, 0), - totalSpent: budgets.reduce((sum: number, b: BudgetConfig) => sum + b.spent, 0), - activeAlerts: budgets.filter((b: BudgetConfig) => - b.alerts.some((a) => a.enabled && b.spent / b.limit >= a.threshold / 100) - ).length, - budgetsAtRisk: budgets.filter((b: BudgetConfig) => b.spent / b.limit >= 0.9).length, - } - }, [budgets]) - - // Filter budgets by type - const filteredBudgets = useMemo( - () => budgets.filter((b: BudgetConfig) => typeFilter === 'all' || b.type === typeFilter), - [budgets, typeFilter] - ) - - const formatCurrency = (value: number) => - new Intl.NumberFormat('en-US', { - style: 'currency', - currency: 'USD', - minimumFractionDigits: 0, - maximumFractionDigits: 0, - }).format(value) - - if (error) { - return ( -
-

Failed to load budgets

- -
- ) - } - - return ( -
- {/* Summary Cards */} -
- - - - } - /> - 0 - ? { - value: Math.round((summary.totalSpent / summary.totalBudget) * 100), - direction: summary.totalSpent / summary.totalBudget > 0.8 ? 'up' : 'down', - } - : undefined - } - icon={ - - - - } - /> - 0} - icon={ - - - - } - /> - 0} - icon={ - - - - } - /> -
- - {/* Controls */} -
-

Budgets

-
- - -
-
- - {/* Budget List */} - {isLoading ? ( -
- {[...Array(4)].map((_, i) => ( - - ))} -
- ) : filteredBudgets.length === 0 ? ( -
-

No budgets found

-
- ) : ( -
- {filteredBudgets.map((budget: BudgetConfig) => ( - handleBudgetClick(budget)} - /> - ))} -
- )} - - {/* Add Budget Dialog */} - - - {/* Budget Detail Panel */} - -
- ) -} diff --git a/honeycomb/src/components/agent-control/DataPanel.tsx b/honeycomb/src/components/agent-control/DataPanel.tsx deleted file mode 100644 index b6fdd54b..00000000 --- a/honeycomb/src/components/agent-control/DataPanel.tsx +++ /dev/null @@ -1,521 +0,0 @@ -import { useState, useMemo } from 'react' -import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' -import { Button } from '@/components/ui/button' -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from '@/components/ui/select' -import { - Table, - TableBody, - TableCell, - TableHead, - TableHeader, - TableRow, -} from '@/components/ui/table' -import { Skeleton } from '@/components/ui/skeleton' -import { Badge } from '@/components/ui/badge' -import { LiveIndicator } from './shared/LiveIndicator' -import { DateRangePicker } from '@/components/ui/date-range-picker' -import { useLogs, useLogsAggregated } from '@/hooks/queries/useLogs' -import { useAgentControlStore } from '@/stores/agentControlStore' -import type { DateRange } from 'react-day-picker' - -type ViewType = 'raw' | 'metrics' | 'model' | 'agent' -type LogType = 'llm_request' | 'tool_call' | 'error' - -const viewOptions = [ - { value: 'raw', label: 'Raw Data' }, - { value: 'metrics', label: 'Metrics Summary' }, - { value: 'model', label: 'Model Usage' }, - { value: 'agent', label: 'Agent Activity' }, -] - -interface LogEntry { - id?: string - timestamp: string - derived_type: LogType - derived_success: boolean - agent?: string - model?: string - provider?: string - cost_total?: number - latency_ms?: number - finish_reason?: string - tool_call_count?: number - usage_input_tokens?: number - usage_output_tokens?: number - usage_total_tokens?: number - [key: string]: unknown -} - -interface AggregatedEntry { - model?: string - agent?: string - request_count: number - total_input_tokens: number - total_output_tokens: number - total_tokens: number - total_cost: number - avg_latency_ms: 
number - first_seen?: string - last_seen?: string -} - -export function DataPanel() { - const [viewType, setViewType] = useState('raw') - const [expandedRow, setExpandedRow] = useState(null) - - // Default date range: last 7 days - const [dateRange, setDateRange] = useState(() => { - const end = new Date() - end.setHours(23, 59, 59, 999) - const start = new Date() - start.setDate(start.getDate() - 7) - start.setHours(0, 0, 0, 0) - return { from: start, to: end } - }) - - const hasActiveAgents = useAgentControlStore((state) => state.eventsBuffer.length > 0) - - // Convert date range to ISO strings for API - const startDate = useMemo(() => { - return dateRange?.from?.toISOString() ?? new Date().toISOString() - }, [dateRange?.from]) - - const endDate = useMemo(() => { - return dateRange?.to?.toISOString() ?? new Date().toISOString() - }, [dateRange?.to]) - - // Fetch raw logs for 'raw' and 'metrics' views - const { - data: logsData, - isLoading: logsLoading, - error: logsError, - refetch: refetchLogs, - } = useLogs(startDate, endDate, 500, viewType === 'raw' || viewType === 'metrics') - - // Fetch aggregated data for 'model' view - const { - data: modelData, - isLoading: modelLoading, - error: modelError, - refetch: refetchModel, - } = useLogsAggregated(startDate, endDate, 'model', 100, viewType === 'model') - - // Fetch aggregated data for 'agent' view - const { - data: agentData, - isLoading: agentLoading, - error: agentError, - refetch: refetchAgent, - } = useLogsAggregated(startDate, endDate, 'agent', 100, viewType === 'agent') - - // Parse logs from API response - const logs = useMemo((): LogEntry[] => { - if (!logsData) return [] - const rawLogs = (logsData as { rows?: unknown[] }).rows || - (logsData as { logs?: unknown[] }).logs || - (Array.isArray(logsData) ? 
logsData : []) - return (rawLogs as LogEntry[]).map((log, idx) => ({ - ...log, - id: log.id || `log-${idx}`, - })) - }, [logsData]) - - // Parse aggregated data - const modelAggregations = useMemo((): AggregatedEntry[] => { - if (!modelData) return [] - return (modelData as { aggregations?: AggregatedEntry[] }).aggregations || [] - }, [modelData]) - - const agentAggregations = useMemo((): AggregatedEntry[] => { - if (!agentData) return [] - return (agentData as { aggregations?: AggregatedEntry[] }).aggregations || [] - }, [agentData]) - - // Determine loading/error state based on current view - const isLoading = viewType === 'raw' || viewType === 'metrics' - ? logsLoading - : viewType === 'model' - ? modelLoading - : agentLoading - - const error = viewType === 'raw' || viewType === 'metrics' - ? logsError - : viewType === 'model' - ? modelError - : agentError - - const refetch = viewType === 'raw' || viewType === 'metrics' - ? refetchLogs - : viewType === 'model' - ? refetchModel - : refetchAgent - - const handleExport = () => { - let csv = '' - - if (viewType === 'raw') { - if (!logs.length) return - csv = [ - ['Timestamp', 'Provider', 'Model', 'Agent', 'Tokens', 'Cost', 'Latency'].join(','), - ...logs.map((log) => - [ - log.timestamp, - log.provider || '-', - log.model || '-', - log.agent || '-', - log.usage_total_tokens ?? '-', - log.cost_total ? Number(log.cost_total).toFixed(6) : '-', - log.latency_ms ? 
`${Math.round(Number(log.latency_ms))}ms` : '-', - ].join(',') - ), - ].join('\n') - } else if (viewType === 'metrics') { - const successCount = logs.filter((l) => l.derived_success).length - const totalCost = logs.reduce((sum, l) => sum + (Number(l.cost_total) || 0), 0) - csv = [ - ['Metric', 'Value'].join(','), - ['Total Requests', logs.length].join(','), - ['Success Rate', `${((successCount / Math.max(logs.length, 1)) * 100).toFixed(1)}%`].join(','), - ['Total Cost', `$${totalCost.toFixed(2)}`].join(','), - ].join('\n') - } else if (viewType === 'model') { - if (!modelAggregations.length) return - csv = [ - ['Model', 'Requests', 'Input Tokens', 'Output Tokens', 'Total Cost', 'Avg Latency'].join(','), - ...modelAggregations.map((row) => - [ - row.model || '-', - row.request_count, - row.total_input_tokens, - row.total_output_tokens, - `$${row.total_cost.toFixed(4)}`, - `${Math.round(row.avg_latency_ms)}ms`, - ].join(',') - ), - ].join('\n') - } else if (viewType === 'agent') { - if (!agentAggregations.length) return - csv = [ - ['Agent', 'Requests', 'Input Tokens', 'Output Tokens', 'Total Cost', 'Avg Latency'].join(','), - ...agentAggregations.map((row) => - [ - row.agent || '-', - row.request_count, - row.total_input_tokens, - row.total_output_tokens, - `$${row.total_cost.toFixed(4)}`, - `${Math.round(row.avg_latency_ms)}ms`, - ].join(',') - ), - ].join('\n') - } - - if (!csv) return - - const blob = new Blob([csv], { type: 'text/csv' }) - const url = URL.createObjectURL(blob) - const a = document.createElement('a') - a.href = url - a.download = `logs-${viewType}-${new Date().toISOString().split('T')[0]}.csv` - a.click() - URL.revokeObjectURL(url) - } - - const formatTimestamp = (ts: string) => { - return new Date(ts).toLocaleString(undefined, { - month: 'short', - day: 'numeric', - hour: '2-digit', - minute: '2-digit', - second: '2-digit', - }) - } - - const getCardTitle = () => { - switch (viewType) { - case 'raw': - return 'Raw Data' - case 'metrics': - 
return 'Metrics Summary' - case 'model': - return 'Model Usage' - case 'agent': - return 'Agent Activity' - default: - return 'Data' - } - } - - const hasData = () => { - switch (viewType) { - case 'raw': - case 'metrics': - return logs.length > 0 - case 'model': - return modelAggregations.length > 0 - case 'agent': - return agentAggregations.length > 0 - default: - return false - } - } - - if (error) { - return ( -
-

Failed to load data

- -
- ) - } - - return ( -
- {/* Controls */} -
-
- - - -
- -
- - -
-
- - {/* Data Table */} - - - {getCardTitle()} - - - {isLoading ? ( -
- {[...Array(10)].map((_, i) => ( - - ))} -
- ) : !hasData() ? ( -
- No data found for the selected date range -
- ) : viewType === 'raw' ? ( - - - - Timestamp - Provider - Model - Agent - Tokens - Cost - Latency - - - - {logs.map((log) => ( - <> - - setExpandedRow(expandedRow === log.id ? null : log.id || null) - } - > - - {formatTimestamp(log.timestamp)} - - - - {log.provider || '-'} - - - - {log.model || '-'} - - - {log.agent || '-'} - - - {log.usage_total_tokens ?? '-'} - - - {log.cost_total ? `$${Number(log.cost_total).toFixed(6)}` : '-'} - - - {log.latency_ms ? `${Math.round(Number(log.latency_ms))}ms` : '-'} - - - {expandedRow === log.id && ( - - -
-                            {JSON.stringify(log, null, 2)}
-                          
-
-
- )} - - ))} -
-
- ) : viewType === 'metrics' ? ( - - - - Metric - Value - - - - - Total Requests - {logs.length} - - - Success Rate - - {( - (logs.filter((l) => l.derived_success).length / - Math.max(logs.length, 1)) * - 100 - ).toFixed(1)} - % - - - - Total Cost - - $ - {logs - .reduce((sum, l) => sum + (Number(l.cost_total) || 0), 0) - .toFixed(2)} - - - - Total Tokens - - {logs - .reduce((sum, l) => sum + (Number(l.usage_total_tokens) || 0), 0) - .toLocaleString()} - - - - Avg Latency - - {Math.round( - logs.reduce((sum, l) => sum + (Number(l.latency_ms) || 0), 0) / - Math.max(logs.length, 1) - )} - ms - - - -
- ) : viewType === 'model' ? ( - - - - Model - Requests - Input Tokens - Output Tokens - Total Cost - Avg Latency - - - - {modelAggregations.map((row, idx) => ( - - {row.model || '-'} - {row.request_count} - - {row.total_input_tokens.toLocaleString()} - - - {row.total_output_tokens.toLocaleString()} - - - ${row.total_cost.toFixed(4)} - - - {Math.round(row.avg_latency_ms)}ms - - - ))} - -
- ) : viewType === 'agent' ? ( - - - - Agent - Requests - Input Tokens - Output Tokens - Total Cost - Avg Latency - - - - {agentAggregations.map((row, idx) => ( - - {row.agent || '(no agent)'} - {row.request_count} - - {row.total_input_tokens.toLocaleString()} - - - {row.total_output_tokens.toLocaleString()} - - - ${row.total_cost.toFixed(4)} - - - {Math.round(row.avg_latency_ms)}ms - - - ))} - -
- ) : null} -
-
-
- ) -} diff --git a/honeycomb/src/components/agent-control/WorkersPanel.tsx b/honeycomb/src/components/agent-control/WorkersPanel.tsx deleted file mode 100644 index c22e61ab..00000000 --- a/honeycomb/src/components/agent-control/WorkersPanel.tsx +++ /dev/null @@ -1,276 +0,0 @@ -import { useState, useMemo } from 'react' -import { useQuery } from '@tanstack/react-query' -import { Card, CardContent } from '@/components/ui/card' -import { Avatar, AvatarFallback } from '@/components/ui/avatar' -import { Badge } from '@/components/ui/badge' -import { Skeleton } from '@/components/ui/skeleton' -import { KpiCard } from './shared/KpiCard' -import { WorkerProfilePanel } from './workers/WorkerProfilePanel' -import { useAgentControlStore } from '@/stores/agentControlStore' -import { getAgents } from '@/services/controlApi' -import { cn } from '@/lib/utils' -import type { AgentInfo, LLMEvent } from '@/types/agentControl' - -// Derive workers from events buffer -function deriveWorkersFromEvents(events: LLMEvent[]): AgentInfo[] { - const workerMap = new Map() - - for (const event of events) { - const existing = workerMap.get(event.agent) - if (existing) { - existing.total_requests++ - existing.total_cost += event.cost - if (new Date(event.timestamp) > new Date(existing.last_seen)) { - existing.last_seen = event.timestamp - } - } else { - workerMap.set(event.agent, { - agent: event.agent, - agent_name: null, - status: 'connected', - connection_type: 'websocket', - instance_id: null, - first_seen: event.timestamp, - last_seen: event.timestamp, - total_requests: 1, - total_cost: event.cost, - }) - } - } - - return Array.from(workerMap.values()) -} - -/** - * Worker/Agent management grid with status indicators. 
- */ -export function WorkersPanel() { - const [selectedWorker, setSelectedWorker] = useState(null) - const [profileOpen, setProfileOpen] = useState(false) - - // Fetch agents from API (past week) - const { data: agentsData, isLoading } = useQuery({ - queryKey: ['agents'], - queryFn: async () => { - const oneWeekAgo = new Date() - oneWeekAgo.setDate(oneWeekAgo.getDate() - 7) - return getAgents(oneWeekAgo.toISOString()) - }, - }) - - // Get real-time events from store - const eventsBuffer = useAgentControlStore((state) => state.eventsBuffer) - const realtimeAgents = useMemo(() => deriveWorkersFromEvents(eventsBuffer), [eventsBuffer]) - - // Merge API agents with real-time updates (real-time overrides API data) - const workers = useMemo(() => { - const apiAgents = agentsData?.agents || [] - const agentMap = new Map() - // Add API agents first - for (const agent of apiAgents) { - agentMap.set(agent.agent, agent) - } - // Override with real-time data - for (const agent of realtimeAgents) { - agentMap.set(agent.agent, agent) - } - return Array.from(agentMap.values()) - }, [agentsData?.agents, realtimeAgents]) - - // Compute summary stats - const onlineCount = workers.filter((w: AgentInfo) => w.status === 'connected').length - const offlineCount = workers.filter((w: AgentInfo) => w.status === 'disconnected').length - const totalRequests = workers.reduce((sum: number, w: AgentInfo) => sum + w.total_requests, 0) - - const handleWorkerClick = (worker: AgentInfo) => { - setSelectedWorker(worker) - setProfileOpen(true) - } - - const formatCurrency = (value: number) => - new Intl.NumberFormat('en-US', { - style: 'currency', - currency: 'USD', - minimumFractionDigits: 2, - }).format(value) - - return ( -
- {/* Summary Cards */} -
- - - - } - /> - 0} - icon={ - - - - } - /> - - - - } - /> - - - - } - /> -
- - {/* Workers Grid */} - {isLoading ? ( -
- {[...Array(6)].map((_, i) => ( - - ))} -
- ) : workers.length === 0 ? ( -
-

No agents found

-

- Agents will appear here when they connect and send events -

-
- ) : ( -
- {workers.map((worker: AgentInfo) => ( - handleWorkerClick(worker)} - formatCurrency={formatCurrency} - /> - ))} -
- )} - - {/* Worker Profile Panel */} - -
- ) -} - -interface WorkerCardProps { - worker: AgentInfo - onClick: () => void - formatCurrency: (value: number) => string -} - -function WorkerCard({ worker, onClick, formatCurrency }: WorkerCardProps) { - const isOnline = worker.status === 'connected' - - return ( - - -
- - - {(worker.agent_name || worker.agent).slice(0, 2).toUpperCase()} - - - -
-
- - {worker.agent_name || worker.agent} - - - {isOnline ? 'Online' : 'Offline'} - -
- -
-
- Requests - - {worker.total_requests.toLocaleString()} - -
-
- Cost - - {formatCurrency(worker.total_cost)} - -
-
-
-
-
-
- ) -} diff --git a/honeycomb/src/components/agent-control/budget/AddBudgetDialog.tsx b/honeycomb/src/components/agent-control/budget/AddBudgetDialog.tsx deleted file mode 100644 index ab9d4665..00000000 --- a/honeycomb/src/components/agent-control/budget/AddBudgetDialog.tsx +++ /dev/null @@ -1,184 +0,0 @@ -import { useState } from 'react' -import { - Dialog, - DialogContent, - DialogDescription, - DialogFooter, - DialogHeader, - DialogTitle, -} from '@/components/ui/dialog' -import { Button } from '@/components/ui/button' -import { Input } from '@/components/ui/input' -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from '@/components/ui/select' -import { useCreateBudget } from '@/hooks/queries/useBudgets' -import { useNotificationStore } from '@/stores/notificationStore' -import type { BudgetType } from '@/types/agentControl' - -interface AddBudgetDialogProps { - open: boolean - onOpenChange: (open: boolean) => void - policyId: string | null -} - -const budgetTypes: { value: BudgetType; label: string }[] = [ - { value: 'global', label: 'Global' }, - { value: 'agent', label: 'Agent' }, - { value: 'customer', label: 'Customer' }, - { value: 'feature', label: 'Feature' }, - { value: 'tag', label: 'Tag' }, -] - -/** - * Dialog for creating a new budget configuration. - */ -export function AddBudgetDialog({ open, onOpenChange, policyId }: AddBudgetDialogProps) { - const [name, setName] = useState('') - const [type, setType] = useState('agent') - const [limit, setLimit] = useState('100') - const [error, setError] = useState(null) - - const createBudget = useCreateBudget() - const addNotification = useNotificationStore((state) => state.addNotification) - - const handleSubmit = async (e: React.FormEvent) => { - e.preventDefault() - setError(null) - - if (!policyId) { - setError('No policy available. 
Please try again later.') - return - } - - if (!name.trim()) { - setError('Name is required') - return - } - - const limitValue = parseFloat(limit) - if (isNaN(limitValue) || limitValue <= 0) { - setError('Limit must be greater than 0') - return - } - - try { - await createBudget.mutateAsync({ - policyId, - budget: { - id: name.trim().toLowerCase().replace(/\s+/g, '-'), - name: name.trim(), - type, - limit: limitValue, - spent: 0, - limitAction: 'throttle', - throttleRate: 1.0, - alerts: [ - { threshold: 80, enabled: true }, - { threshold: 100, enabled: true }, - ], - notifications: { - inApp: true, - email: false, - emailRecipients: [], - webhook: false, - }, - }, - }) - addNotification({ - type: 'success', - title: 'Budget created', - message: `"${name.trim()}" has been created successfully.`, - }) - handleClose() - } catch (err) { - addNotification({ - type: 'error', - title: 'Creation failed', - message: err instanceof Error ? err.message : 'Failed to create budget', - }) - } - } - - const handleClose = () => { - setName('') - setType('agent') - setLimit('100') - setError(null) - onOpenChange(false) - } - - return ( - - - - Create Budget - - Set up a new budget to control costs for agents, models, or features. - - - -
- {error && ( -
- {error} -
- )} - - {/* Name */} -
- - setName(e.target.value)} - placeholder="e.g., Production Agent Budget" - /> -
- - {/* Type */} -
- - -
- - {/* Limit */} -
- - setLimit(e.target.value)} - placeholder="100.00" - /> -
- - - - - -
-
-
- ) -} diff --git a/honeycomb/src/components/agent-control/budget/BudgetDetailPanel.tsx b/honeycomb/src/components/agent-control/budget/BudgetDetailPanel.tsx deleted file mode 100644 index 9ccaed1e..00000000 --- a/honeycomb/src/components/agent-control/budget/BudgetDetailPanel.tsx +++ /dev/null @@ -1,541 +0,0 @@ -import { useState, useEffect } from 'react' -import { - Sheet, - SheetContent, - SheetHeader, - SheetTitle, - SheetFooter, -} from '@/components/ui/sheet' -import { Button } from '@/components/ui/button' -import { Input } from '@/components/ui/input' -import { Label } from '@/components/ui/label' -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from '@/components/ui/select' -import { Progress } from '@/components/ui/progress' -import { Separator } from '@/components/ui/separator' -import { Badge } from '@/components/ui/badge' -import { Switch } from '@/components/ui/switch' -import { cn } from '@/lib/utils' -import { - DollarSign, - Bot, - User, - LayoutGrid, - Tag, - Trash2, - Plus, - X, - Bell, - Mail, -} from 'lucide-react' -import { useUpdateBudget, useDeleteBudget } from '@/hooks/queries/useBudgets' -import { useNotificationStore } from '@/stores/notificationStore' -import type { BudgetConfig, BudgetType, LimitAction } from '@/types/agentControl' - -interface BudgetDetailPanelProps { - budget: BudgetConfig | null - open: boolean - onOpenChange: (open: boolean) => void - policyId: string | null - existingBudgets: BudgetConfig[] -} - -const typeIcons: Record = { - global: DollarSign, - agent: Bot, - customer: User, - feature: LayoutGrid, - tag: Tag, -} - -const typeColors: Record = { - global: 'bg-blue-100 text-blue-700', - agent: 'bg-red-100 text-red-700', - customer: 'bg-purple-100 text-purple-700', - feature: 'bg-orange-100 text-orange-700', - tag: 'bg-green-100 text-green-700', -} - -const limitActions: { value: LimitAction; label: string; description: string }[] = [ - { value: 'throttle', label: 'Throttle', 
description: 'Rate limit requests when budget is exceeded' }, - { value: 'kill', label: 'Block', description: 'Stop all requests when budget is exceeded' }, -] - -/** - * Right-side slide-over panel for viewing and editing budget details. - */ -export function BudgetDetailPanel({ - budget, - open, - onOpenChange, - policyId, - existingBudgets, -}: BudgetDetailPanelProps) { - // Local state for editing - const [limit, setLimit] = useState('') - const [limitAction, setLimitAction] = useState('throttle') - const [throttleRate, setThrottleRate] = useState('1.0') - const [alerts, setAlerts] = useState<{ threshold: number; enabled: boolean }[]>([]) - const [newThreshold, setNewThreshold] = useState('') - const [emailEnabled, setEmailEnabled] = useState(false) - const [emailRecipients, setEmailRecipients] = useState([]) - const [newEmail, setNewEmail] = useState('') - const [inAppEnabled, setInAppEnabled] = useState(false) - const [isDirty, setIsDirty] = useState(false) - - const updateBudget = useUpdateBudget() - const deleteBudgetMutation = useDeleteBudget() - const addNotification = useNotificationStore((state) => state.addNotification) - - // Reset form when budget changes - useEffect(() => { - if (budget) { - setLimit(budget.limit.toString()) - setLimitAction(budget.limitAction) - setThrottleRate(budget.throttleRate?.toString() ?? '1.0') - setAlerts([...budget.alerts]) - setEmailEnabled(budget.notifications.email) - setEmailRecipients([...budget.notifications.emailRecipients]) - setInAppEnabled(budget.notifications.inApp) - setIsDirty(false) - setNewThreshold('') - setNewEmail('') - } - }, [budget]) - - if (!budget) return null - - const percentage = budget.limit > 0 ? (budget.spent / budget.limit) * 100 : 0 - const status = percentage >= 100 ? 'critical' : percentage >= 80 ? 
'warning' : 'healthy' - const remaining = Math.max(0, budget.limit - budget.spent) - - const TypeIcon = typeIcons[budget.type] || DollarSign - - const formatCurrency = (value: number) => - new Intl.NumberFormat('en-US', { - style: 'currency', - currency: 'USD', - minimumFractionDigits: 0, - maximumFractionDigits: 0, - }).format(value) - - const handleChange = () => { - setIsDirty(true) - } - - const handleAddThreshold = () => { - const threshold = parseInt(newThreshold) - if (threshold > 0 && threshold <= 100 && !alerts.some(a => a.threshold === threshold)) { - setAlerts([...alerts, { threshold, enabled: true }].sort((a, b) => a.threshold - b.threshold)) - setNewThreshold('') - handleChange() - } - } - - const handleRemoveThreshold = (threshold: number) => { - setAlerts(alerts.filter(a => a.threshold !== threshold)) - handleChange() - } - - const handleToggleThreshold = (threshold: number) => { - setAlerts(alerts.map(a => - a.threshold === threshold ? { ...a, enabled: !a.enabled } : a - )) - handleChange() - } - - const handleSubmit = async () => { - if (!policyId || !budget) return - - try { - await updateBudget.mutateAsync({ - policyId, - budgetId: budget.id, - updates: { - limit: parseFloat(limit), - limitAction, - throttleRate: limitAction === 'throttle' ? parseFloat(throttleRate) : undefined, - alerts, - notifications: { - inApp: inAppEnabled, - email: emailEnabled, - emailRecipients, - webhook: budget.notifications.webhook, - }, - }, - existingBudgets, - }) - addNotification({ - type: 'success', - title: 'Budget updated', - message: `"${budget.name}" has been updated successfully.`, - }) - onOpenChange(false) - } catch (error) { - console.error('Failed to update budget:', error) - addNotification({ - type: 'error', - title: 'Update failed', - message: 'Failed to update budget. Please try again.', - }) - } - } - - const handleDelete = async () => { - if (!policyId || !budget) return - - if (confirm(`Are you sure you want to delete "${budget.name}"? 
This action cannot be undone.`)) { - try { - await deleteBudgetMutation.mutateAsync({ - policyId, - budgetId: budget.id, - existingBudgets, - }) - addNotification({ - type: 'success', - title: 'Budget deleted', - message: `"${budget.name}" has been deleted.`, - }) - onOpenChange(false) - } catch (error) { - console.error('Failed to delete budget:', error) - addNotification({ - type: 'error', - title: 'Delete failed', - message: 'Failed to delete budget. Please try again.', - }) - } - } - } - - return ( - - - {/* Header */} - -
-
- -
-
- {budget.name} -
- - {budget.type} - - {budget.tagCategory && ( - - {budget.tagCategory} - - )} -
-
-
-

- {formatCurrency(budget.spent)} of {formatCurrency(budget.limit)} used -

-
- - {/* Scrollable Content */} -
- {/* Budget Usage Section */} -
-

Budget Usage

-
-
- Progress - - {Math.round(percentage)}% - -
- div]:bg-green-500', - status === 'warning' && '[&>div]:bg-orange-500', - status === 'critical' && '[&>div]:bg-red-500' - )} - /> -
- Spent: {formatCurrency(budget.spent)} - Remaining: {formatCurrency(remaining)} -
-
-
- - - - {/* Monthly Limit Section */} -
- -
- $ - { - setLimit(e.target.value) - handleChange() - }} - className="pl-7" - /> -
-
- - - - {/* At Limit Action Section */} -
- - -

- {limitActions.find(a => a.value === limitAction)?.description} -

- - {/* Throttle Rate Config - shown when throttle is selected */} - {limitAction === 'throttle' && ( -
- - { - setThrottleRate(e.target.value) - handleChange() - }} - /> -

- Maximum requests per second when budget limit is reached -

-
- )} -
- - - - {/* Alert Thresholds Section */} -
- - - {alerts.length === 0 ? ( -

No alert thresholds configured

- ) : ( -
- {alerts.map((alert) => ( -
-
- handleToggleThreshold(alert.threshold)} - /> - - {alert.threshold}% - - - ({formatCurrency(budget.limit * alert.threshold / 100)}) - -
- -
- ))} -
- )} - - {/* Add new threshold */} -
-
- setNewThreshold(e.target.value)} - className="pr-8" - /> - % -
- -
-
- - - - {/* Notification Channels Section */} -
- -
- - -
- {emailEnabled && ( -
- {emailRecipients.length > 0 && ( -
- {emailRecipients.map((email) => ( -
- {email} - -
- ))} -
- )} -
- setNewEmail(e.target.value)} - onKeyDown={(e) => { - if (e.key === 'Enter') { - e.preventDefault() - const email = newEmail.trim().toLowerCase() - if (email && email.includes('@') && !emailRecipients.includes(email)) { - setEmailRecipients([...emailRecipients, email]) - setNewEmail('') - handleChange() - } - } - }} - className="flex-1" - /> - -
-
- )} -
-
- - {/* Footer */} - - -
- - - - - - ) -} diff --git a/honeycomb/src/components/agent-control/charts/CostByModelChart.tsx b/honeycomb/src/components/agent-control/charts/CostByModelChart.tsx deleted file mode 100644 index 293676ea..00000000 --- a/honeycomb/src/components/agent-control/charts/CostByModelChart.tsx +++ /dev/null @@ -1,114 +0,0 @@ -import { PieChart, Pie, Cell, ResponsiveContainer, Legend, Tooltip } from 'recharts' -import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' -import type { CostByModelData } from '@/types/agentControl' - -// Extended type with index signature for recharts compatibility -interface ChartData extends CostByModelData { - [key: string]: string | number | undefined -} - -interface CostByModelChartProps { - data: CostByModelData[] - title?: string - className?: string -} - -const COLORS = [ - 'hsl(var(--primary))', - 'hsl(var(--primary) / 0.8)', - 'hsl(var(--primary) / 0.6)', - 'hsl(var(--primary) / 0.4)', - 'hsl(220 70% 50%)', - 'hsl(280 70% 50%)', - 'hsl(340 70% 50%)', - 'hsl(160 70% 50%)', -] - -/** - * Donut chart showing cost distribution by model. - */ -export function CostByModelChart({ - data, - title = 'Cost by Model', - className, -}: CostByModelChartProps) { - const formatCurrency = (value: number) => - new Intl.NumberFormat('en-US', { - style: 'currency', - currency: 'USD', - minimumFractionDigits: 0, - maximumFractionDigits: 2, - }).format(value) - - const formatPercent = (value: number) => `${(value * 100).toFixed(1)}%` - - const totalCost = data.reduce((sum, item) => sum + item.cost, 0) - - // Cast data to chart-compatible type - const chartData: ChartData[] = data.map((d) => ({ ...d })) - - return ( - - - {title} - - -
- - - - {data.map((item, index) => ( - - ))} - - [formatCurrency(Number(value) || 0), 'Cost']} - contentStyle={{ - backgroundColor: 'hsl(var(--card))', - border: '1px solid hsl(var(--border))', - borderRadius: '6px', - }} - /> - { - const item = data.find((d) => d.name === value) - return ( - - {value}{' '} - - ({item ? formatPercent(item.cost / totalCost) : ''}) - - - ) - }} - /> - - -
-
- {formatCurrency(totalCost)} - Total Cost -
-
-
- ) -} diff --git a/honeycomb/src/components/agent-control/charts/CostTrendChart.tsx b/honeycomb/src/components/agent-control/charts/CostTrendChart.tsx deleted file mode 100644 index 3911b9d6..00000000 --- a/honeycomb/src/components/agent-control/charts/CostTrendChart.tsx +++ /dev/null @@ -1,109 +0,0 @@ -import { - AreaChart, - Area, - XAxis, - YAxis, - CartesianGrid, - Tooltip, - ResponsiveContainer, - ReferenceLine, -} from 'recharts' -import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' -import type { CostTrendData } from '@/types/agentControl' - -interface CostTrendChartProps { - data: CostTrendData[] - budgetLine?: number - title?: string - className?: string -} - -/** - * Area chart showing cost trends over time with optional budget line. - */ -export function CostTrendChart({ - data, - budgetLine, - title = 'Cost Trend', - className, -}: CostTrendChartProps) { - const formatCurrency = (value: number) => - new Intl.NumberFormat('en-US', { - style: 'currency', - currency: 'USD', - minimumFractionDigits: 0, - maximumFractionDigits: 0, - }).format(value) - - const formatDate = (dateStr: string) => { - const date = new Date(dateStr) - return date.toLocaleDateString(undefined, { month: 'short', day: 'numeric' }) - } - - return ( - - - {title} - - -
- - - - - - - - - - - - [formatCurrency(Number(value) || 0), 'Cost']} - labelFormatter={formatDate} - contentStyle={{ - backgroundColor: 'hsl(var(--card))', - border: '1px solid hsl(var(--border))', - borderRadius: '6px', - }} - /> - {budgetLine && ( - - )} - - - -
-
-
- ) -} diff --git a/honeycomb/src/components/agent-control/charts/LatencyChart.tsx b/honeycomb/src/components/agent-control/charts/LatencyChart.tsx deleted file mode 100644 index e71f904b..00000000 --- a/honeycomb/src/components/agent-control/charts/LatencyChart.tsx +++ /dev/null @@ -1,70 +0,0 @@ -import { - BarChart, - Bar, - XAxis, - YAxis, - CartesianGrid, - Tooltip, - ResponsiveContainer, -} from 'recharts' -import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' -import type { LatencyData } from '@/types/agentControl' - -interface LatencyChartProps { - data: LatencyData[] - title?: string - className?: string -} - -/** - * Bar chart showing latency distribution. - */ -export function LatencyChart({ - data, - title = 'Latency Distribution', - className, -}: LatencyChartProps) { - const formatCount = (value: number) => { - if (value >= 1000) return `${(value / 1000).toFixed(1)}K` - return value.toString() - } - - return ( - - - {title} - - -
- - - - - - [formatCount(Number(value) || 0), 'Requests']} - contentStyle={{ - backgroundColor: 'hsl(var(--card))', - border: '1px solid hsl(var(--border))', - borderRadius: '6px', - }} - /> - - - -
-
-
- ) -} diff --git a/honeycomb/src/components/agent-control/charts/ModelUsageChart.tsx b/honeycomb/src/components/agent-control/charts/ModelUsageChart.tsx deleted file mode 100644 index 38c8aa2f..00000000 --- a/honeycomb/src/components/agent-control/charts/ModelUsageChart.tsx +++ /dev/null @@ -1,81 +0,0 @@ -import { - BarChart, - Bar, - XAxis, - YAxis, - CartesianGrid, - Tooltip, - ResponsiveContainer, -} from 'recharts' -import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' -import type { ModelUsageData } from '@/types/agentControl' - -interface ModelUsageChartProps { - data: ModelUsageData[] - title?: string - className?: string -} - -/** - * Bar chart showing model usage by requests. - */ -export function ModelUsageChart({ - data, - title = 'Model Usage', - className, -}: ModelUsageChartProps) { - const formatNumber = (value: number) => { - if (value >= 1000000) return `${(value / 1000000).toFixed(1)}M` - if (value >= 1000) return `${(value / 1000).toFixed(1)}K` - return value.toString() - } - - // Sort by requests descending - const sortedData = [...data].sort((a, b) => b.requests - a.requests) - - return ( - - - {title} - - -
- - - - - - { - const numValue = Number(value) || 0 - if (name === 'requests') return [formatNumber(numValue), 'Requests'] - return [numValue, String(name)] - }} - contentStyle={{ - backgroundColor: 'hsl(var(--card))', - border: '1px solid hsl(var(--border))', - borderRadius: '6px', - }} - /> - - - -
-
-
- ) -} diff --git a/honeycomb/src/components/agent-control/charts/TokenUsageChart.tsx b/honeycomb/src/components/agent-control/charts/TokenUsageChart.tsx deleted file mode 100644 index 13c58c12..00000000 --- a/honeycomb/src/components/agent-control/charts/TokenUsageChart.tsx +++ /dev/null @@ -1,96 +0,0 @@ -import { - BarChart, - Bar, - XAxis, - YAxis, - CartesianGrid, - Tooltip, - ResponsiveContainer, - Legend, -} from 'recharts' -import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' -import type { TokenUsageData } from '@/types/agentControl' - -interface TokenUsageChartProps { - data: TokenUsageData[] - title?: string - className?: string -} - -/** - * Stacked bar chart showing input/output token usage over time. - */ -export function TokenUsageChart({ - data, - title = 'Token Usage', - className, -}: TokenUsageChartProps) { - const formatNumber = (value: number) => { - if (value >= 1000000) return `${(value / 1000000).toFixed(1)}M` - if (value >= 1000) return `${(value / 1000).toFixed(1)}K` - return value.toString() - } - - const formatDate = (dateStr: string) => { - const date = new Date(dateStr) - return date.toLocaleDateString(undefined, { month: 'short', day: 'numeric' }) - } - - return ( - - - {title} - - -
- - - - - - [ - formatNumber(Number(value) || 0), - name === 'input' ? 'Input Tokens' : 'Output Tokens', - ]} - labelFormatter={formatDate} - contentStyle={{ - backgroundColor: 'hsl(var(--card))', - border: '1px solid hsl(var(--border))', - borderRadius: '6px', - }} - /> - - - - - -
-
-
- ) -} diff --git a/honeycomb/src/components/agent-control/charts/TopAgentsChart.tsx b/honeycomb/src/components/agent-control/charts/TopAgentsChart.tsx deleted file mode 100644 index 218f0d83..00000000 --- a/honeycomb/src/components/agent-control/charts/TopAgentsChart.tsx +++ /dev/null @@ -1,97 +0,0 @@ -import { - BarChart, - Bar, - XAxis, - YAxis, - CartesianGrid, - Tooltip, - ResponsiveContainer, - Cell, -} from 'recharts' -import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' -import type { TopAgentData } from '@/types/agentControl' - -interface TopAgentsChartProps { - data: TopAgentData[] - title?: string - className?: string -} - -/** - * Horizontal bar chart showing top agents by spend. - */ -export function TopAgentsChart({ - data, - title = 'Top Agents by Spend', - className, -}: TopAgentsChartProps) { - const formatCurrency = (value: number) => - new Intl.NumberFormat('en-US', { - style: 'currency', - currency: 'USD', - minimumFractionDigits: 0, - maximumFractionDigits: 0, - }).format(value) - - // Sort by spend descending and take top 10 - const sortedData = [...data].sort((a, b) => b.spend - a.spend).slice(0, 10) - - return ( - - - {title} - - -
- - - - - - { - const numValue = Number(value) || 0 - if (name === 'spend') return [formatCurrency(numValue), 'Spend'] - return [numValue, String(name)] - }} - contentStyle={{ - backgroundColor: 'hsl(var(--card))', - border: '1px solid hsl(var(--border))', - borderRadius: '6px', - }} - /> - - {sortedData.map((entry, index) => { - // Color based on limit usage - const limitRatio = entry.limit ? entry.spend / entry.limit : 0 - let fill = 'hsl(var(--primary))' - if (limitRatio >= 0.9) fill = 'hsl(var(--destructive))' - else if (limitRatio >= 0.75) fill = 'hsl(38 92% 50%)' - - return - })} - - - -
-
-
- ) -} diff --git a/honeycomb/src/components/agent-control/charts/VegaLiteChart.tsx b/honeycomb/src/components/agent-control/charts/VegaLiteChart.tsx deleted file mode 100644 index 4ecbcf70..00000000 --- a/honeycomb/src/components/agent-control/charts/VegaLiteChart.tsx +++ /dev/null @@ -1,78 +0,0 @@ -import { useRef, useEffect, useState } from 'react' -import vegaEmbed, { type Result, type VisualizationSpec, type EmbedOptions } from 'vega-embed' -import { cn } from '@/lib/utils' - -interface VegaLiteChartProps { - spec: VisualizationSpec - className?: string - options?: EmbedOptions -} - -/** - * React wrapper component for VegaLite charts using vega-embed. - * Uses ResizeObserver to ensure container has valid dimensions before rendering. - * Handles mounting, updating, and cleanup of Vega views. - */ -export function VegaLiteChart({ spec, className, options }: VegaLiteChartProps) { - const containerRef = useRef(null) - const vegaResultRef = useRef(null) - const [isReady, setIsReady] = useState(false) - - // Wait for container to be ready with valid dimensions - useEffect(() => { - if (!containerRef.current) return - - const resizeObserver = new ResizeObserver((entries) => { - for (const entry of entries) { - if (entry.contentRect.width > 0) { - setIsReady(true) - } - } - }) - - resizeObserver.observe(containerRef.current) - - // Check initial dimensions - if (containerRef.current.clientWidth > 0) { - setIsReady(true) - } - - return () => resizeObserver.disconnect() - }, []) - - // Render chart when ready - useEffect(() => { - if (!containerRef.current || !spec || !isReady) return - - const renderChart = async () => { - // Cleanup previous render to prevent memory leaks - if (vegaResultRef.current) { - vegaResultRef.current.finalize() - vegaResultRef.current = null - } - - try { - const result = await vegaEmbed(containerRef.current!, spec, { - actions: false, - tooltip: { theme: 'dark' }, - ...options, - }) - vegaResultRef.current = result - } catch (error) { - 
console.error('Failed to render VegaLite chart:', error) - } - } - - renderChart() - - return () => { - // Cleanup on unmount or before re-render - if (vegaResultRef.current) { - vegaResultRef.current.finalize() - vegaResultRef.current = null - } - } - }, [spec, options, isReady]) - - return
-} diff --git a/honeycomb/src/components/agent-control/charts/index.ts b/honeycomb/src/components/agent-control/charts/index.ts deleted file mode 100644 index 799db1d2..00000000 --- a/honeycomb/src/components/agent-control/charts/index.ts +++ /dev/null @@ -1,6 +0,0 @@ -export { CostTrendChart } from './CostTrendChart' -export { TokenUsageChart } from './TokenUsageChart' -export { LatencyChart } from './LatencyChart' -export { CostByModelChart } from './CostByModelChart' -export { TopAgentsChart } from './TopAgentsChart' -export { ModelUsageChart } from './ModelUsageChart' diff --git a/honeycomb/src/components/agent-control/charts/specs.ts b/honeycomb/src/components/agent-control/charts/specs.ts deleted file mode 100644 index 8292cea0..00000000 --- a/honeycomb/src/components/agent-control/charts/specs.ts +++ /dev/null @@ -1,257 +0,0 @@ -/** - * VegaLite spec builders for analytics charts. - * Based on patterns from acho-launchpad's AnalyticsPanel.vue - */ - -import type { VisualizationSpec } from 'vega-embed' -import type { - CostTrendData, - TokenUsageData, - CostByModelData, - LatencyDistributionData, - LatencyPercentilesData, -} from './transformers' - -// ============================================================================= -// Cost Trend Chart (Area Chart with optional budget line) -// ============================================================================= - -export function createCostTrendSpec(data: CostTrendData[]): VisualizationSpec { - return { - $schema: 'https://vega.github.io/schema/vega-lite/v5.json', - width: 'container', - height: 220, - padding: { left: 10, right: 10, top: 10, bottom: 10 }, - data: { values: data }, - layer: [ - { - mark: { type: 'area', line: true, color: '#263A99', opacity: 0.3 }, - encoding: { - x: { - field: 'date', - type: 'ordinal', - sort: null, - axis: { title: null, labelAngle: -45 }, - }, - y: { - field: 'cost', - type: 'quantitative', - axis: { title: 'Cost ($)', format: '$.2f' }, - }, - tooltip: [ - { field: 
'date', title: 'Date' }, - { field: 'cost', title: 'Cost', format: '$.4f' }, - ], - }, - }, - // Budget reference line (optional) - { - mark: { type: 'rule', color: '#c1392b', strokeDash: [5, 5], strokeWidth: 2 }, - encoding: { - y: { datum: 66.67 }, - }, - }, - ], - config: { view: { stroke: null } }, - } as VisualizationSpec -} - -// ============================================================================= -// Request Volume Chart (Bar Chart) -// ============================================================================= - -export function createRequestVolumeSpec(data: CostTrendData[]): VisualizationSpec { - return { - $schema: 'https://vega.github.io/schema/vega-lite/v5.json', - width: 'container', - height: 220, - padding: { left: 10, right: 10, top: 10, bottom: 10 }, - data: { values: data }, - mark: { - type: 'bar', - color: '#22c55e', - cornerRadiusTopLeft: 4, - cornerRadiusTopRight: 4, - }, - encoding: { - x: { - field: 'date', - type: 'ordinal', - sort: null, - axis: { title: null, labelAngle: -45 }, - }, - y: { - field: 'requests', - type: 'quantitative', - axis: { title: 'Requests' }, - }, - tooltip: [ - { field: 'date', title: 'Date' }, - { field: 'requests', title: 'Requests' }, - ], - }, - config: { view: { stroke: null } }, - } as VisualizationSpec -} - -// ============================================================================= -// Token Usage Chart (Stacked Bar Chart) -// ============================================================================= - -export function createTokenUsageSpec(data: TokenUsageData[]): VisualizationSpec { - return { - $schema: 'https://vega.github.io/schema/vega-lite/v5.json', - width: 'container', - height: 220, - padding: { left: 10, right: 10, top: 10, bottom: 10 }, - data: { values: data }, - mark: { - type: 'bar', - cornerRadiusTopLeft: 4, - cornerRadiusTopRight: 4, - }, - encoding: { - x: { - field: 'date', - type: 'ordinal', - sort: null, - axis: { title: null, labelAngle: -45 }, - }, - y: { - field: 
'tokens', - type: 'quantitative', - axis: { title: 'Tokens', format: '.2s' }, - stack: true, - }, - color: { - field: 'type', - type: 'nominal', - scale: { domain: ['Input', 'Output'], range: ['#263A99', '#22c55e'] }, - legend: { orient: 'bottom', title: null }, - }, - tooltip: [ - { field: 'date', title: 'Date' }, - { field: 'type', title: 'Type' }, - { field: 'tokens', title: 'Tokens', format: ',.0f' }, - ], - }, - config: { view: { stroke: null } }, - } as VisualizationSpec -} - -// ============================================================================= -// Cost by Model Chart (Donut Chart) -// ============================================================================= - -export function createCostByModelSpec(data: CostByModelData[]): VisualizationSpec { - return { - $schema: 'https://vega.github.io/schema/vega-lite/v5.json', - width: 180, - height: 180, - data: { values: data }, - mark: { type: 'arc', innerRadius: 50 }, - encoding: { - theta: { field: 'value', type: 'quantitative' }, - color: { - field: 'name', - type: 'nominal', - scale: { - domain: data.map((m) => m.name), - range: data.map((m) => m.color), - }, - legend: null, - }, - tooltip: [ - { field: 'name', title: 'Model' }, - { field: 'value', title: 'Share (%)', format: '.0f' }, - { field: 'cost', title: 'Cost', format: '$.4f' }, - ], - }, - config: { view: { stroke: null } }, - } as VisualizationSpec -} - -// ============================================================================= -// Latency Distribution Chart (Bar Chart) -// ============================================================================= - -export function createLatencyDistributionSpec( - data: LatencyDistributionData[] -): VisualizationSpec { - return { - $schema: 'https://vega.github.io/schema/vega-lite/v5.json', - width: 'container', - height: 220, - padding: { left: 10, right: 10, top: 10, bottom: 10 }, - data: { values: data }, - mark: { - type: 'bar', - color: '#263A99', - cornerRadiusTopLeft: 4, - 
cornerRadiusTopRight: 4, - }, - encoding: { - x: { - field: 'range', - type: 'ordinal', - axis: { title: 'Latency Range' }, - sort: null, - }, - y: { - field: 'count', - type: 'quantitative', - axis: { title: 'Count' }, - }, - tooltip: [ - { field: 'range', title: 'Range' }, - { field: 'count', title: 'Count' }, - ], - }, - config: { view: { stroke: null } }, - } as VisualizationSpec -} - -// ============================================================================= -// Latency Percentiles Chart (Multi-line Chart) -// ============================================================================= - -export function createLatencyPercentilesSpec( - data: LatencyPercentilesData[] -): VisualizationSpec { - return { - $schema: 'https://vega.github.io/schema/vega-lite/v5.json', - width: 'container', - height: 220, - padding: { left: 10, right: 10, top: 10, bottom: 10 }, - data: { values: data }, - mark: { type: 'line', point: true }, - encoding: { - x: { - field: 'date', - type: 'ordinal', - sort: null, - axis: { title: null, labelAngle: -45 }, - }, - y: { - field: 'latency', - type: 'quantitative', - axis: { title: 'Latency (ms)' }, - }, - color: { - field: 'percentile', - type: 'nominal', - scale: { - domain: ['P50', 'P95', 'P99'], - range: ['#263A99', '#f59e0b', '#c1392b'], - }, - legend: null, - }, - tooltip: [ - { field: 'date', title: 'Date' }, - { field: 'percentile', title: 'Percentile' }, - { field: 'latency', title: 'Latency (ms)', format: '.0f' }, - ], - }, - config: { view: { stroke: null } }, - } as VisualizationSpec -} diff --git a/honeycomb/src/components/agent-control/charts/transformers.ts b/honeycomb/src/components/agent-control/charts/transformers.ts deleted file mode 100644 index 9538c3b2..00000000 --- a/honeycomb/src/components/agent-control/charts/transformers.ts +++ /dev/null @@ -1,339 +0,0 @@ -/** - * Data transformation utilities for converting API responses to chart-ready data. 
- * Based on patterns from acho-launchpad's useAgentControlData.ts - */ - -// Color palette for models (matches launchpad) -export const MODEL_COLORS = ['#263A99', '#22c55e', '#6b21a8', '#f59e0b', '#c1392b', '#06b6d4'] - -// ============================================================================= -// API Response Types (for type safety with unknown API data) -// ============================================================================= - -interface TimelineCostItem { - bucket: string - cost_total?: number -} - -interface TimelineRequestItem { - bucket: string - requests?: number -} - -interface TimelineTokenItem { - bucket: string - input_tokens?: number - output_tokens?: number -} - -interface TimelineLatencyItem { - bucket: string - p50_ms?: number - p95_ms?: number - p99_ms?: number -} - -interface TimelineData { - cost?: TimelineCostItem[] - requests?: TimelineRequestItem[] - tokens?: TimelineTokenItem[] - latency_percentiles?: TimelineLatencyItem[] -} - -interface CostByModelItem { - model?: string - cost_total?: number - share?: number -} - -interface CostByModelResponse { - models?: CostByModelItem[] -} - -interface LatencyBucketItem { - bucket: string - count?: number -} - -interface LatencyDistributionResponse { - buckets?: LatencyBucketItem[] -} - -interface CostByAgentItem { - agent?: string - cost_total?: number - requests?: number -} - -interface CostByAgentResponse { - agents?: CostByAgentItem[] -} - -interface AnalyticsData { - analytics?: { - timeline?: { - resolution?: string - hourly?: TimelineData - daily?: TimelineData - } - cost_by_model?: CostByModelResponse - latency_distribution?: LatencyDistributionResponse - cost_by_agent?: CostByAgentResponse - } -} - -// ============================================================================= -// Types for transformed chart data -// ============================================================================= - -export interface CostTrendData { - date: string - cost: number - requests: 
number - budget?: number -} - -export interface TokenUsageData { - date: string - type: 'Input' | 'Output' - tokens: number -} - -export interface CostByModelData { - name: string - cost: number - value: number // percentage - color: string -} - -export interface LatencyDistributionData { - range: string - count: number -} - -export interface LatencyPercentilesData { - date: string - percentile: 'P50' | 'P95' | 'P99' - latency: number -} - -export interface TopAgentData { - name: string - spend: number - requests: number - avgCost: number -} - -// ============================================================================= -// Format helpers -// ============================================================================= - -/** - * Format bucket label based on resolution - * For hourly: "2 PM", "3 PM", etc. - * For daily: "Dec 14", "Dec 15", etc. - */ -export function formatBucketLabel(bucket: string, resolution: 'day' | 'hour'): string { - const date = new Date(bucket) - if (resolution === 'hour') { - return date.toLocaleTimeString('en-US', { hour: 'numeric', hour12: true }) - } - return date.toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) -} - -// ============================================================================= -// Data transformers -// ============================================================================= - -/** - * Transform analytics API response to chart-ready data - */ -export function transformAnalyticsData(data: AnalyticsData | undefined) { - if (!data?.analytics) { - return { - costTrends: [], - tokenUsage: [], - costByModel: [], - latencyDistribution: [], - latencyPercentiles: [], - topAgents: [], - } - } - - const analytics = data.analytics - const resolution: 'day' | 'hour' = analytics.timeline?.resolution === 'hour' ? 'hour' : 'day' - const timeline = resolution === 'hour' ? 
analytics.timeline?.hourly : analytics.timeline?.daily - - return { - costTrends: transformCostTrends(timeline, resolution), - tokenUsage: transformTokenUsage(timeline, resolution), - costByModel: transformCostByModel(analytics.cost_by_model), - latencyDistribution: transformLatencyDistribution(analytics.latency_distribution), - latencyPercentiles: transformLatencyPercentiles(timeline, resolution), - topAgents: transformTopAgents(analytics.cost_by_agent), - } -} - -/** - * Transform cost timeline to cost trend data - */ -function transformCostTrends( - timeline: TimelineData | undefined, - resolution: 'day' | 'hour' -): CostTrendData[] { - if (!timeline?.cost || !Array.isArray(timeline.cost)) { - return [] - } - - // Create requests lookup map - const requestsMap = new Map( - (timeline.requests || []).map((r: TimelineRequestItem) => [r.bucket, r.requests ?? 0]) - ) - - return timeline.cost.map((d: TimelineCostItem) => ({ - date: formatBucketLabel(d.bucket, resolution), - cost: d.cost_total || 0, - requests: requestsMap.get(d.bucket) || 0, - budget: 66.67, // Default daily budget (~$2000/month / 30 days) - })) -} - -/** - * Transform token timeline to stacked bar chart data (flattened) - */ -function transformTokenUsage( - timeline: TimelineData | undefined, - resolution: 'day' | 'hour' -): TokenUsageData[] { - if (!timeline?.tokens || !Array.isArray(timeline.tokens)) { - return [] - } - - return timeline.tokens.flatMap((d: TimelineTokenItem) => [ - { - date: formatBucketLabel(d.bucket, resolution), - type: 'Input' as const, - tokens: d.input_tokens || 0, - }, - { - date: formatBucketLabel(d.bucket, resolution), - type: 'Output' as const, - tokens: d.output_tokens || 0, - }, - ]) -} - -/** - * Transform cost by model to pie/donut chart data - */ -function transformCostByModel(costByModel: CostByModelResponse | undefined): CostByModelData[] { - if (!costByModel?.models || !Array.isArray(costByModel.models)) { - return [] - } - - return costByModel.models.map((m: 
CostByModelItem, i: number) => ({ - name: m.model?.split('/').pop() || m.model || 'Unknown', - cost: m.cost_total || 0, - value: Math.round((m.share || 0) * 100), - color: MODEL_COLORS[i % MODEL_COLORS.length], - })) -} - -/** - * Aggregate API latency buckets to UI buckets - * API: 0-1s, 1-2s, 2-5s, 5-10s, 10-20s, 20s+ - * UI: 0-2s, 2-5s, 5-10s, 10-20s, 20s+ - */ -function transformLatencyDistribution( - latencyDistribution: LatencyDistributionResponse | undefined -): LatencyDistributionData[] { - if (!latencyDistribution?.buckets || !Array.isArray(latencyDistribution.buckets)) { - return [] - } - - const aggregated: Record = { - '0-2s': 0, - '2-5s': 0, - '5-10s': 0, - '10-20s': 0, - '20s+': 0, - } - - latencyDistribution.buckets.forEach((b: LatencyBucketItem) => { - switch (b.bucket) { - case '0-1s': - case '1-2s': - aggregated['0-2s'] += b.count || 0 - break - case '2-5s': - aggregated['2-5s'] += b.count || 0 - break - case '5-10s': - aggregated['5-10s'] += b.count || 0 - break - case '10-20s': - aggregated['10-20s'] += b.count || 0 - break - case '20s+': - aggregated['20s+'] += b.count || 0 - break - } - }) - - // Only return data if there are actual counts - const totalCount = Object.values(aggregated).reduce((sum, count) => sum + count, 0) - if (totalCount === 0) { - return [] - } - - return Object.entries(aggregated).map(([range, count]) => ({ range, count })) -} - -/** - * Transform latency percentiles to multi-line chart data (flattened) - */ -function transformLatencyPercentiles( - timeline: TimelineData | undefined, - resolution: 'day' | 'hour' -): LatencyPercentilesData[] { - if (!timeline?.latency_percentiles || !Array.isArray(timeline.latency_percentiles)) { - return [] - } - - return timeline.latency_percentiles.flatMap((d: TimelineLatencyItem) => [ - { - date: formatBucketLabel(d.bucket, resolution), - percentile: 'P50' as const, - latency: d.p50_ms || 0, - }, - { - date: formatBucketLabel(d.bucket, resolution), - percentile: 'P95' as const, - 
latency: d.p95_ms || 0, - }, - { - date: formatBucketLabel(d.bucket, resolution), - percentile: 'P99' as const, - latency: d.p99_ms || d.p95_ms || 0, - }, - ]) -} - -/** - * Transform cost by agent to top agents list - */ -function transformTopAgents(costByAgent: CostByAgentResponse | undefined): TopAgentData[] { - if (!costByAgent?.agents || !Array.isArray(costByAgent.agents)) { - return [] - } - - return costByAgent.agents.map((a: CostByAgentItem) => { - const requests = a.requests || 0 - return { - name: a.agent || 'Unknown', - spend: a.cost_total || 0, - requests, - avgCost: requests > 0 ? (a.cost_total || 0) / requests : 0, - } - }) -} diff --git a/honeycomb/src/components/agent-control/index.ts b/honeycomb/src/components/agent-control/index.ts deleted file mode 100644 index a1671983..00000000 --- a/honeycomb/src/components/agent-control/index.ts +++ /dev/null @@ -1,24 +0,0 @@ -// Layout -export { AgentControlLayout } from './AgentControlLayout' - -// Main Panels -export { DataPanel } from './DataPanel' -export { AnalyticsPanel } from './AnalyticsPanel' -export { CostControls } from './CostControls' -export { WorkersPanel } from './WorkersPanel' - -// Shared components -export { LiveIndicator } from './shared/LiveIndicator' -export { KpiCard } from './shared/KpiCard' -export { BudgetCard } from './shared/BudgetCard' -export { NotificationBell } from './shared/NotificationBell' - -// Budget -export { AddBudgetDialog } from './budget/AddBudgetDialog' -export { BudgetDetailPanel } from './budget/BudgetDetailPanel' - -// Workers -export { WorkerProfilePanel } from './workers/WorkerProfilePanel' - -// Charts -export * from './charts' diff --git a/honeycomb/src/components/agent-control/shared/BudgetCard.tsx b/honeycomb/src/components/agent-control/shared/BudgetCard.tsx deleted file mode 100644 index 13dca7b4..00000000 --- a/honeycomb/src/components/agent-control/shared/BudgetCard.tsx +++ /dev/null @@ -1,172 +0,0 @@ -import { Progress } from 
'@/components/ui/progress' -import { Badge } from '@/components/ui/badge' -import { cn } from '@/lib/utils' -import { - DollarSign, - Bot, - User, - LayoutGrid, - Tag, - Ban, - Gauge, - ArrowDown, - Bell, - Mail, - Webhook, - ChevronRight, -} from 'lucide-react' -import type { BudgetConfig, BudgetType, LimitAction } from '@/types/agentControl' - -interface BudgetCardProps { - budget: BudgetConfig - onClick?: () => void - className?: string -} - -const typeIcons: Record = { - global: DollarSign, - agent: Bot, - customer: User, - feature: LayoutGrid, - tag: Tag, -} - -const typeColors: Record = { - global: 'bg-blue-100 text-blue-700', - agent: 'bg-red-100 text-red-700', - customer: 'bg-purple-100 text-purple-700', - feature: 'bg-orange-100 text-orange-700', - tag: 'bg-green-100 text-green-700', -} - -const actionIcons: Record = { - kill: Ban, - throttle: Gauge, - degrade: ArrowDown, - notify: Bell, -} - -const actionColors: Record = { - kill: 'bg-red-100 text-red-700', - throttle: 'bg-orange-100 text-orange-700', - degrade: 'bg-blue-100 text-blue-700', - notify: 'bg-green-100 text-green-700', -} - -const actionLabels: Record = { - kill: 'Block', - throttle: 'Throttle', - degrade: 'Degrade', - notify: 'Notify', -} - -/** - * Budget row with horizontal layout matching launchpad style. - */ -export function BudgetCard({ budget, onClick, className }: BudgetCardProps) { - const percentage = budget.limit > 0 ? (budget.spent / budget.limit) * 100 : 0 - const status = percentage >= 100 ? 'critical' : percentage >= 80 ? 'warning' : 'healthy' - - const TypeIcon = typeIcons[budget.type] || DollarSign - const ActionIcon = actionIcons[budget.limitAction] || Gauge - - const formatCurrency = (value: number) => - new Intl.NumberFormat('en-US', { - style: 'currency', - currency: 'USD', - minimumFractionDigits: 0, - maximumFractionDigits: 0, - }).format(value) - - return ( -
-
- {/* Left: Icon + Name + Badges */} -
-
- -
-
-

{budget.name}

-
- - {budget.type} - - {budget.tagCategory && ( - - {budget.tagCategory} - - )} -
-
-
- - {/* Middle: Progress */} -
-
- - {formatCurrency(budget.spent)} / {formatCurrency(budget.limit)} - - - {Math.round(percentage)}% - -
- div]:bg-green-500', - status === 'warning' && '[&>div]:bg-orange-500', - status === 'critical' && '[&>div]:bg-red-500' - )} - /> -
- - {/* Right: Actions + Notifications + Chevron */} -
- - - {actionLabels[budget.limitAction]} - - -
- {budget.notifications.inApp && ( - - )} - {budget.notifications.email && ( - - )} - {budget.notifications.webhook && ( - - )} -
- - -
-
-
- ) -} diff --git a/honeycomb/src/components/agent-control/shared/HelpDialog.tsx b/honeycomb/src/components/agent-control/shared/HelpDialog.tsx deleted file mode 100644 index eb17b84a..00000000 --- a/honeycomb/src/components/agent-control/shared/HelpDialog.tsx +++ /dev/null @@ -1,331 +0,0 @@ -import { useState } from 'react' -import { useNavigate, useLocation } from 'react-router-dom' -import { - KeyRound, - Code, - BarChart3, - AlertTriangle, - BookOpen, - Check, - Copy, - ChevronLeft, - ChevronRight, -} from 'lucide-react' -import { - Dialog, - DialogContent, - DialogTitle, - DialogDescription, -} from '@/components/ui/dialog' -import { Button } from '@/components/ui/button' -import { cn } from '@/lib/utils' - -interface HelpDialogProps { - open: boolean - onOpenChange: (open: boolean) => void -} - -const steps = [ - { - title: 'Get Your API Token', - description: 'Generate an API token to authenticate SDK requests and start tracking.', - icon: KeyRound, - }, - { - title: 'Complete SDK Quickstart', - description: 'Follow the SDK Quickstart to install, configure, and instrument your code.', - icon: Code, - }, - { - title: 'Verify Integration', - description: 'Confirm your setup is working and start monitoring your LLM usage.', - icon: BarChart3, - }, -] - -const envContent = `ADEN_API_KEY=your-api-token -ADEN_API_URL=https://kube.acho.io` - -export function HelpDialog({ open, onOpenChange }: HelpDialogProps) { - const [currentStep, setCurrentStep] = useState(0) - const [copied, setCopied] = useState(false) - const navigate = useNavigate() - const location = useLocation() - - const isFirstStep = currentStep === 0 - const isLastStep = currentStep === steps.length - 1 - const CurrentIcon = steps[currentStep].icon - - const goBack = () => { - if (!isFirstStep) { - setCurrentStep((prev) => prev - 1) - } - } - - const goNext = () => { - if (!isLastStep) { - setCurrentStep((prev) => prev + 1) - } - } - - const finish = () => { - onOpenChange(false) - setCurrentStep(0) - 
navigate(`${location.pathname}#settings/developers`) - } - - const copyEnvToClipboard = async () => { - try { - await navigator.clipboard.writeText(envContent) - setCopied(true) - setTimeout(() => setCopied(false), 2000) - } catch { - console.error('Failed to copy') - } - } - - const handleOpenChange = (open: boolean) => { - if (!open) { - setCurrentStep(0) - } - onOpenChange(open) - } - - return ( - - - {/* Header */} -
-
- -
-
- - Step {currentStep + 1} of {steps.length} - - - {steps[currentStep].title} - -
-
- - {/* Scrollable Content */} -
- - SDK onboarding walkthrough - -

- {steps[currentStep].description} -

- - {/* Step 1: Get Your API Token */} - {currentStep === 0 && ( -
- {/* Warning box */} -
- - - Keep your API token secure. Never commit it to version control or expose it in client-side code. - -
- - {/* Numbered steps */} -
- - - -
- - {/* Code block */} -
-

Add to your .env file:

-
-
-                    {envContent}
-                  
- -
-
-
- )} - - {/* Step 2: Complete SDK Quickstart */} - {currentStep === 1 && ( -
- {/* Info box */} -
-
- -
-
-

SDK Quickstart Guide

-

- Navigate to Settings → Developers → SDK Quickstart for complete setup instructions. -

-
-
- - {/* Checklist */} -
-

The quickstart covers:

-
    - - - - -
-
- - {/* Tip box */} -
- Tip: The SDK Quickstart includes copy-paste code snippets tailored to your account. -
-
- )} - - {/* Step 3: Verify Integration */} - {currentStep === 2 && ( -
- {/* Numbered steps */} -
- -
-
- 2 -
-
-

Check your dashboard

-

- Within 30 seconds, you should see data appear in: -

-
    -
  • - - Analytics tab — Request counts and cost trends -
  • -
  • - - Data tab — Detailed request logs -
  • -
-
-
-
- - {/* Warning/troubleshooting box */} -
- -
-

Not seeing data?

-
    -
  • Verify your API key is correct in .env
  • -
  • Ensure the SDK is initialized before LLM calls
  • -
  • Check for network/firewall issues
  • -
  • Review the troubleshooting section in Documentation
  • -
-
-
- - {/* Help text */} -

- Need more help? Return to Settings → Developers → Documentation for detailed troubleshooting. -

-
- )} -
- - {/* Progress Dots */} -
- {steps.map((_, index) => ( -
- - {/* Footer */} -
- {!isFirstStep ? ( - - ) : ( -
- )} - {!isLastStep ? ( - - ) : ( - - )} -
- -
- ) -} - -function NumberedStep({ - number, - title, - subtitle, -}: { - number: number - title: string - subtitle: string -}) { - return ( -
-
- {number} -
-
-

{title}

-

{subtitle}

-
-
- ) -} - -function ChecklistItem({ text }: { text: string }) { - return ( -
  • - - {text} -
  • - ) -} diff --git a/honeycomb/src/components/agent-control/shared/KpiCard.tsx b/honeycomb/src/components/agent-control/shared/KpiCard.tsx deleted file mode 100644 index b01c1c2c..00000000 --- a/honeycomb/src/components/agent-control/shared/KpiCard.tsx +++ /dev/null @@ -1,67 +0,0 @@ -import { Card, CardContent } from '@/components/ui/card' -import { Skeleton } from '@/components/ui/skeleton' -import { cn } from '@/lib/utils' - -interface KpiCardProps { - label: string - value: string | number - icon?: React.ReactNode - trend?: { value: number; direction: 'up' | 'down' } - highlight?: boolean - loading?: boolean - className?: string -} - -/** - * Real-time KPI display card with optional trend indicator. - */ -export function KpiCard({ - label, - value, - icon, - trend, - highlight, - loading, - className, -}: KpiCardProps) { - if (loading) { - return ( - - - - - - - ) - } - - return ( - - -
    - {label} - {icon && {icon}} -
    -
    - {value} - {trend && ( - - {trend.direction === 'up' ? '↑' : '↓'} {Math.abs(trend.value)}% - - )} -
    -
    -
    - ) -} diff --git a/honeycomb/src/components/agent-control/shared/LiveIndicator.tsx b/honeycomb/src/components/agent-control/shared/LiveIndicator.tsx deleted file mode 100644 index 9cf2314d..00000000 --- a/honeycomb/src/components/agent-control/shared/LiveIndicator.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import { cn } from '@/lib/utils' - -interface LiveIndicatorProps { - isLive?: boolean - className?: string -} - -/** - * Pulsing dot indicator for live/active status. - */ -export function LiveIndicator({ isLive = true, className }: LiveIndicatorProps) { - if (!isLive) return null - - return ( -
    - - - - - Live -
    - ) -} diff --git a/honeycomb/src/components/agent-control/shared/NotificationBell.tsx b/honeycomb/src/components/agent-control/shared/NotificationBell.tsx deleted file mode 100644 index 022ec1da..00000000 --- a/honeycomb/src/components/agent-control/shared/NotificationBell.tsx +++ /dev/null @@ -1,137 +0,0 @@ -import { useNavigate } from 'react-router-dom' -import { Button } from '@/components/ui/button' -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuSeparator, - DropdownMenuTrigger, -} from '@/components/ui/dropdown-menu' -import { ScrollArea } from '@/components/ui/scroll-area' -import { useNotificationStore } from '@/stores/notificationStore' -import { cn } from '@/lib/utils' - -/** - * Header notification bell with dropdown list. - */ -export function NotificationBell() { - const navigate = useNavigate() - const { notifications, unreadCount, markAsRead, markAllAsRead } = useNotificationStore() - - const handleNotificationClick = (notification: (typeof notifications)[0]) => { - markAsRead(notification.id) - - // Navigate to relevant panel based on notification type - if (notification.type === 'budget') { - navigate('/cost-control') - } - } - - const formatTime = (timestamp: string) => { - const date = new Date(timestamp) - const now = new Date() - const diffMs = now.getTime() - date.getTime() - const diffMins = Math.floor(diffMs / 60000) - const diffHours = Math.floor(diffMs / 3600000) - const diffDays = Math.floor(diffMs / 86400000) - - if (diffMins < 1) return 'Just now' - if (diffMins < 60) return `${diffMins}m ago` - if (diffHours < 24) return `${diffHours}h ago` - return `${diffDays}d ago` - } - - const typeIcons: Record = { - info: 'text-blue-500', - success: 'text-green-500', - warning: 'text-yellow-500', - error: 'text-red-500', - budget: 'text-purple-500', - } - - return ( - - - - - - -
    - Notifications - {unreadCount > 0 && ( - - )} -
    - - - - {notifications.length === 0 ? ( -
    - No notifications -
    - ) : ( - - {notifications.map((notification) => ( - handleNotificationClick(notification)} - > -
    - -
    -
    - - {notification.title} - - - {formatTime(notification.timestamp)} - -
    -

    - {notification.message} -

    -
    -
    -
    - ))} -
    - )} -
    -
    - ) -} diff --git a/honeycomb/src/components/agent-control/workers/WorkerProfilePanel.tsx b/honeycomb/src/components/agent-control/workers/WorkerProfilePanel.tsx deleted file mode 100644 index cb4d4106..00000000 --- a/honeycomb/src/components/agent-control/workers/WorkerProfilePanel.tsx +++ /dev/null @@ -1,171 +0,0 @@ -import { - Sheet, - SheetContent, - SheetDescription, - SheetHeader, - SheetTitle, -} from '@/components/ui/sheet' -import { Avatar, AvatarFallback } from '@/components/ui/avatar' -import { Badge } from '@/components/ui/badge' -import { Separator } from '@/components/ui/separator' -import { cn } from '@/lib/utils' -import type { AgentInfo } from '@/types/agentControl' - -interface WorkerProfilePanelProps { - worker: AgentInfo | null - open: boolean - onOpenChange: (open: boolean) => void -} - -/** - * Sidebar sheet showing detailed worker/agent information. - */ -export function WorkerProfilePanel({ - worker, - open, - onOpenChange, -}: WorkerProfilePanelProps) { - if (!worker) return null - - const isOnline = worker.status === 'connected' - - const formatDate = (dateString: string) => { - return new Date(dateString).toLocaleString(undefined, { - dateStyle: 'medium', - timeStyle: 'short', - }) - } - - const formatCurrency = (value: number) => - new Intl.NumberFormat('en-US', { - style: 'currency', - currency: 'USD', - minimumFractionDigits: 2, - }).format(value) - - const stats = [ - { - label: 'Total Requests', - value: worker.total_requests.toLocaleString(), - }, - { - label: 'Total Cost', - value: formatCurrency(worker.total_cost), - }, - { - label: 'First Seen', - value: formatDate(worker.first_seen), - }, - { - label: 'Last Seen', - value: formatDate(worker.last_seen), - }, - ] - - return ( - - - -
    - - - {(worker.agent_name || worker.agent).slice(0, 2).toUpperCase()} - - -
    - - {worker.agent_name || worker.agent} - - - {worker.agent} - -
    -
    -
    - -
    - {/* Status */} -
    - Status - - - {isOnline ? 'Online' : 'Offline'} - -
    - - {/* Connection Type */} - {worker.connection_type && ( -
    - Connection - {worker.connection_type} -
    - )} - - {/* Instance ID */} - {worker.instance_id && ( -
    - Instance ID - - {worker.instance_id.slice(0, 8)}... - -
    - )} - - - - {/* Stats */} -
    -

    Statistics

    -
    - {stats.map((stat) => ( -
    -
    {stat.label}
    -
    {stat.value}
    -
    - ))} -
    -
    - - - - {/* Activity Timeline (placeholder) */} -
    -

    Recent Activity

    -
    -
    -
    -
    -
    Connected
    -
    - {formatDate(worker.last_seen)} -
    -
    -
    -
    -
    -
    -
    First request
    -
    - {formatDate(worker.first_seen)} -
    -
    -
    -
    -
    -
    - - - ) -} diff --git a/honeycomb/src/components/auth/LoginForm.tsx b/honeycomb/src/components/auth/LoginForm.tsx deleted file mode 100644 index 4fdad098..00000000 --- a/honeycomb/src/components/auth/LoginForm.tsx +++ /dev/null @@ -1,127 +0,0 @@ -import { useForm } from 'react-hook-form' -import { zodResolver } from '@hookform/resolvers/zod' -import * as z from 'zod/v3' -import { Button } from '@/components/ui/button' -import { Input } from '@/components/ui/input' -import { useState } from 'react' -import { submitLogin } from '@/services/authApi' -import { useUserStore } from '@/stores/userStore' -import { useNavigate, useSearchParams, Link } from 'react-router-dom' - -const loginSchema = z.object({ - email: z.string().email('Please enter a valid email'), - password: z.string().min(1, 'Please enter your password'), -}) - -type LoginFormData = z.infer - -interface LoginFormProps { - orgPath?: string - orgName?: string - showSignup?: boolean -} - -export function LoginForm({ orgPath, orgName, showSignup = true }: LoginFormProps) { - const [error, setError] = useState('') - const [isSubmitting, setIsSubmitting] = useState(false) - const navigate = useNavigate() - const [searchParams] = useSearchParams() - const initUserProfile = useUserStore((s) => s.initUserProfile) - - const { - register, - handleSubmit, - formState: { errors }, - } = useForm({ - resolver: zodResolver(loginSchema), - }) - - const handleRedirect = () => { - const redirect = searchParams.get('redirect') - navigate(redirect ? 
decodeURIComponent(redirect) : '/') - } - - const handleLogin = async (data: LoginFormData) => { - setError('') - setIsSubmitting(true) - - try { - const res = await submitLogin(data) - - localStorage.removeItem('context_session_id') - localStorage.setItem('token', `jwt ${res.token}`) - - if (res.mustResetPassword) { - navigate(`/reset-password?token=${res.token}`) - return - } - - await initUserProfile() - handleRedirect() - } catch (err) { - setError((err as Error)?.message || 'Failed to login. Please check your credentials.') - } finally { - setIsSubmitting(false) - } - } - - return ( -
    - {orgName && ( -

    - Welcome to {orgName}'s ARP Platform -

    - )} - - {error &&

    {error}

    } - -
    -
    - - {errors.email && ( -

    {errors.email.message}

    - )} -
    - -
    - - {errors.password && ( -

    {errors.password.message}

    - )} -
    - - -
    - -
    - - Forgot password? - - - {showSignup && ( - - Don't have an account?{' '} - - Sign up - - - )} -
    -
    - ) -} diff --git a/honeycomb/src/components/auth/ProtectedRoute.tsx b/honeycomb/src/components/auth/ProtectedRoute.tsx deleted file mode 100644 index 1bcb45c2..00000000 --- a/honeycomb/src/components/auth/ProtectedRoute.tsx +++ /dev/null @@ -1,52 +0,0 @@ -import { Navigate, useLocation } from 'react-router-dom' -import { useUserStore } from '@/stores/userStore' -import { useEffect, useState } from 'react' - -interface ProtectedRouteProps { - children: React.ReactNode -} - -export function ProtectedRoute({ children }: ProtectedRouteProps) { - const location = useLocation() - const [isChecking, setIsChecking] = useState(true) - const [isAuthenticated, setIsAuthenticated] = useState(false) - const initUserProfile = useUserStore((s) => s.initUserProfile) - const user = useUserStore((s) => s.user) - - useEffect(() => { - const checkAuth = async () => { - const token = localStorage.getItem('token') - if (!token) { - setIsChecking(false) - return - } - - if (user) { - setIsAuthenticated(true) - setIsChecking(false) - return - } - - const result = await initUserProfile() - setIsAuthenticated(!!result) - setIsChecking(false) - } - - checkAuth() - }, [initUserProfile, user]) - - if (isChecking) { - return ( -
    -
    -
    - ) - } - - if (!isAuthenticated) { - const returnUrl = encodeURIComponent(location.pathname + location.search) - return - } - - return <>{children} -} diff --git a/honeycomb/src/components/auth/RegisterForm.tsx b/honeycomb/src/components/auth/RegisterForm.tsx deleted file mode 100644 index 2942f950..00000000 --- a/honeycomb/src/components/auth/RegisterForm.tsx +++ /dev/null @@ -1,205 +0,0 @@ -import { useForm } from 'react-hook-form' -import { zodResolver } from '@hookform/resolvers/zod' -import * as z from 'zod/v3' -import { Button } from '@/components/ui/button' -import { Input } from '@/components/ui/input' -import { useState } from 'react' -import { submitRegister } from '@/services/authApi' -import { useUserStore } from '@/stores/userStore' -import { useNavigate, useSearchParams, Link } from 'react-router-dom' -import { Eye, EyeOff } from 'lucide-react' - -const registerSchema = z - .object({ - firstname: z.string().min(1, 'First name is required'), - lastname: z.string().min(1, 'Last name is required'), - email: z.string().email('Please enter a valid email'), - password: z.string().min(8, 'Password must be at least 8 characters'), - confirmPassword: z.string().min(1, 'Please confirm your password'), - }) - .refine((data) => data.password === data.confirmPassword, { - message: 'Passwords do not match', - path: ['confirmPassword'], - }) - -type RegisterFormData = z.infer - -interface RegisterFormProps { - orgPath?: string - orgName?: string -} - -export function RegisterForm({ orgPath, orgName }: RegisterFormProps) { - const [error, setError] = useState('') - const [isSubmitting, setIsSubmitting] = useState(false) - const [showPassword, setShowPassword] = useState(false) - const [showConfirmPassword, setShowConfirmPassword] = useState(false) - const navigate = useNavigate() - const [searchParams] = useSearchParams() - const initUserProfile = useUserStore((s) => s.initUserProfile) - - const { - register, - handleSubmit, - watch, - formState: { errors, 
isValid }, - } = useForm({ - resolver: zodResolver(registerSchema), - mode: 'onChange', - }) - - const password = watch('password') - const confirmPassword = watch('confirmPassword') - const passwordsMatch = !confirmPassword || password === confirmPassword - - const handleRedirect = () => { - const redirect = searchParams.get('redirect') - navigate(redirect ? decodeURIComponent(redirect) : '/') - } - - const handleRegister = async (data: RegisterFormData) => { - setError('') - setIsSubmitting(true) - - try { - const res = await submitRegister({ - email: data.email, - password: data.password, - firstname: data.firstname, - lastname: data.lastname, - }) - - localStorage.removeItem('context_session_id') - localStorage.setItem('token', `jwt ${res.token}`) - - await initUserProfile() - handleRedirect() - } catch (err) { - setError((err as Error)?.message || 'Failed to register. Please try again.') - } finally { - setIsSubmitting(false) - } - } - - return ( -
    - {orgName && ( -

    - Join {orgName}'s ARP Platform -

    - )} - - {!orgName && ( -

    Create your account

    - )} - - {error &&

    {error}

    } - -
    -
    -
    - - {errors.firstname && ( -

    {errors.firstname.message}

    - )} -
    -
    - - {errors.lastname && ( -

    {errors.lastname.message}

    - )} -
    -
    - -
    - - {errors.email && ( -

    {errors.email.message}

    - )} -
    - -
    -
    - - -
    - {errors.password && ( -

    {errors.password.message}

    - )} -
    - -
    -
    - - -
    - {errors.confirmPassword && ( -

    {errors.confirmPassword.message}

    - )} - {!errors.confirmPassword && confirmPassword && !passwordsMatch && ( -

    Passwords do not match

    - )} -
    - - -
    - -
    - Already have an account? - - Sign in - -
    -
    - ) -} diff --git a/honeycomb/src/components/quickstart/AgentStatusIndicator.tsx b/honeycomb/src/components/quickstart/AgentStatusIndicator.tsx deleted file mode 100644 index 474b0728..00000000 --- a/honeycomb/src/components/quickstart/AgentStatusIndicator.tsx +++ /dev/null @@ -1,91 +0,0 @@ -import { cn } from '@/lib/utils' -import { - Tooltip, - TooltipContent, - TooltipProvider, - TooltipTrigger, -} from '@/components/ui/tooltip' -import { useAgentStatus } from '@/hooks/useAgentStatus' - -interface AgentStatusIndicatorProps { - className?: string - showDetails?: boolean -} - -export function AgentStatusIndicator({ - className, - showDetails = true, -}: AgentStatusIndicatorProps) { - const { status, isConnected, error, hasActiveAgents, agentCount } = - useAgentStatus({ autoConnect: true, autoReconnect: true }) - - const formatTime = (timestamp: string) => { - return new Date(timestamp).toLocaleTimeString() - } - - const tooltipContent = () => { - if (error) { - return {error} - } - - if (!isConnected) { - return Connecting... - } - - if (!hasActiveAgents) { - return No agents connected - } - - if (showDetails && status?.instances?.length) { - return ( -
    -
    - {agentCount} agent{agentCount !== 1 ? 's' : ''} connected -
    -
    - {status.instances.slice(0, 5).map((instance) => ( -
    - {instance.instance_id.slice(0, 8)}... -{' '} - {formatTime(instance.connected_at)} -
    - ))} - {status.instances.length > 5 && ( -
    +{status.instances.length - 5} more
    - )} -
    -
    - ) - } - - return `${agentCount} agent${agentCount !== 1 ? 's' : ''} connected` - } - - const indicator = ( -
    - - {hasActiveAgents ? ( - <> - - - - ) : ( - - )} - - - {hasActiveAgents ? `${agentCount} connected` : 'No agents'} - -
    - ) - - return ( - - - {indicator} - - {tooltipContent()} - - - - ) -} diff --git a/honeycomb/src/components/quickstart/CodeBlock.tsx b/honeycomb/src/components/quickstart/CodeBlock.tsx deleted file mode 100644 index 6ed8c3fd..00000000 --- a/honeycomb/src/components/quickstart/CodeBlock.tsx +++ /dev/null @@ -1,63 +0,0 @@ -import { useState, useCallback } from 'react' -import { Copy, Check } from 'lucide-react' -import { Button } from '@/components/ui/button' -import { cn } from '@/lib/utils' -import { copyToClipboard } from '@/lib/quickstart' -import { useNotificationStore } from '@/stores/notificationStore' - -interface CodeBlockProps { - code: string - language?: string - className?: string -} - -export function CodeBlock({ code, language, className }: CodeBlockProps) { - const [copied, setCopied] = useState(false) - const addNotification = useNotificationStore((s) => s.addNotification) - - const handleCopy = useCallback(async () => { - const success = await copyToClipboard(code) - if (success) { - setCopied(true) - setTimeout(() => setCopied(false), 2000) - } else { - addNotification({ - type: 'error', - title: 'Copy failed', - message: 'Failed to copy code to clipboard', - }) - } - }, [code, addNotification]) - - return ( -
    -
    - {language && ( - - {language} - - )} - -
    -
    -        {code.trimEnd()}
    -      
    -
    - ) -} diff --git a/honeycomb/src/components/quickstart/MarkdownRenderer.tsx b/honeycomb/src/components/quickstart/MarkdownRenderer.tsx deleted file mode 100644 index 20378971..00000000 --- a/honeycomb/src/components/quickstart/MarkdownRenderer.tsx +++ /dev/null @@ -1,72 +0,0 @@ -import ReactMarkdown from 'react-markdown' -import type { Components } from 'react-markdown' -import { CodeBlock } from './CodeBlock' - -interface MarkdownRendererProps { - content: string -} - -export function MarkdownRenderer({ content }: MarkdownRendererProps) { - const components: Components = { - // Handle fenced code blocks (wrapped in pre) - pre({ children }) { - return <>{children} - }, - // Handle all code elements - code({ className, children, node }) { - const match = /language-(\w+)/.exec(className || '') - const language = match ? match[1] : undefined - const codeContent = String(children).replace(/\n$/, '') - - // Check if this is inside a pre tag (block code) by looking at parent - const isBlock = node?.position && codeContent.includes('\n') || language - - if (isBlock) { - return - } - - // Inline code - return ( - - {children} - - ) - }, - h1: ({ children }) => ( -

    {children}

    - ), - h2: ({ children }) => ( -

    {children}

    - ), - h3: ({ children }) => ( -

    {children}

    - ), - p: ({ children }) =>

    {children}

    , - ul: ({ children }) =>
      {children}
    , - ol: ({ children }) => ( -
      {children}
    - ), - li: ({ children }) =>
  • {children}
  • , - a: ({ href, children }) => ( - - {children} - - ), - blockquote: ({ children }) => ( -
    - {children} -
    - ), - } - - return ( -
    - {content} -
    - ) -} diff --git a/honeycomb/src/components/quickstart/QuickstartToolbar.tsx b/honeycomb/src/components/quickstart/QuickstartToolbar.tsx deleted file mode 100644 index a813b076..00000000 --- a/honeycomb/src/components/quickstart/QuickstartToolbar.tsx +++ /dev/null @@ -1,89 +0,0 @@ -import { Copy, Download } from 'lucide-react' -import { Button } from '@/components/ui/button' -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from '@/components/ui/select' -import type { SdkLanguage, AgentFramework } from '@/types/quickstart' - -interface QuickstartToolbarProps { - languages: SdkLanguage[] - frameworks: AgentFramework[] - selectedLanguage: string - selectedFramework: string - onLanguageChange: (value: string) => void - onFrameworkChange: (value: string) => void - onCopyAll: () => void - onDownload: () => void - isCopyDisabled: boolean - isDownloadDisabled: boolean -} - -export function QuickstartToolbar({ - languages, - frameworks, - selectedLanguage, - selectedFramework, - onLanguageChange, - onFrameworkChange, - onCopyAll, - onDownload, - isCopyDisabled, - isDownloadDisabled, -}: QuickstartToolbarProps) { - return ( -
    -
    - - - -
    - -
    - - -
    -
    - ) -} diff --git a/honeycomb/src/components/quickstart/SDKQuickstart.tsx b/honeycomb/src/components/quickstart/SDKQuickstart.tsx deleted file mode 100644 index 5a45a6c0..00000000 --- a/honeycomb/src/components/quickstart/SDKQuickstart.tsx +++ /dev/null @@ -1,229 +0,0 @@ -import { useState, useEffect, useMemo, useCallback } from 'react' -import { Loader2, RefreshCw } from 'lucide-react' -import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' -import { Button } from '@/components/ui/button' -import { Skeleton } from '@/components/ui/skeleton' -import { - useQuickstartOptions, - useGenerateQuickstart, -} from '@/hooks/queries/useQuickstart' -import { useNotificationStore } from '@/stores/notificationStore' -import { - extractCodeBlocks, - copyToClipboard, - downloadAsFile, -} from '@/lib/quickstart' -import { QuickstartToolbar } from './QuickstartToolbar' -import { MarkdownRenderer } from './MarkdownRenderer' -import { AgentStatusIndicator } from './AgentStatusIndicator' -import type { AgentFramework } from '@/types/quickstart' - -export function SDKQuickstart() { - const [selectedLanguage, setSelectedLanguage] = useState('python') - const [selectedFramework, setSelectedFramework] = useState('') - - const addNotification = useNotificationStore((s) => s.addNotification) - - const { - data: options, - isLoading: optionsLoading, - error: optionsError, - } = useQuickstartOptions() - - const generateMutation = useGenerateQuickstart() - - // Filter frameworks by language support - const availableFrameworks = useMemo(() => { - if (!options?.agentFrameworks) return [] - return options.agentFrameworks.filter((fw) => - selectedLanguage === 'python' ? 
fw.pythonSupport : fw.typescriptSupport - ) - }, [options, selectedLanguage]) - - // Auto-select first framework when options load or language changes - useEffect(() => { - if (availableFrameworks.length > 0 && !selectedFramework) { - setSelectedFramework(availableFrameworks[0].id) - } - }, [availableFrameworks, selectedFramework]) - - // Generate docs - const generateDocs = useCallback(() => { - if (!selectedFramework) return - generateMutation.mutate({ - agentFramework: selectedFramework, - sdkLanguage: selectedLanguage, - }) - }, [selectedFramework, selectedLanguage, generateMutation]) - - // Auto-generate on initial load and when selections change - useEffect(() => { - if (selectedFramework && options) { - generateDocs() - } - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [selectedFramework, selectedLanguage]) - - // Handle language change - const handleLanguageChange = useCallback( - (newLanguage: string) => { - setSelectedLanguage(newLanguage) - - // Check if current framework supports new language - const newFrameworks = options?.agentFrameworks?.filter((fw) => - newLanguage === 'python' ? 
fw.pythonSupport : fw.typescriptSupport - ) - const frameworkStillValid = newFrameworks?.some( - (fw) => fw.id === selectedFramework - ) - - if (!frameworkStillValid) { - // Will auto-select via useEffect - setSelectedFramework('') - } - }, - [options, selectedFramework] - ) - - // Handle framework change - const handleFrameworkChange = useCallback((frameworkId: string) => { - setSelectedFramework(frameworkId) - }, []) - - // Copy all code blocks - const handleCopyAll = useCallback(async () => { - if (!generateMutation.data?.markdown) return - - const codeBlocks = extractCodeBlocks(generateMutation.data.markdown) - if (codeBlocks.length === 0) { - addNotification({ - type: 'warning', - title: 'No code to copy', - message: 'No code blocks found in the documentation', - }) - return - } - - const success = await copyToClipboard(codeBlocks.join('\n\n')) - if (success) { - addNotification({ - type: 'success', - title: 'Copied', - message: 'All code blocks copied to clipboard', - }) - } - }, [generateMutation.data, addNotification]) - - // Download markdown - const handleDownload = useCallback(() => { - if (!generateMutation.data?.markdown) return - - const filename = `aden-sdk-quickstart-${selectedFramework}-${selectedLanguage}.md` - downloadAsFile(generateMutation.data.markdown, filename) - }, [generateMutation.data, selectedFramework, selectedLanguage]) - - // Error state - if (optionsError) { - return ( - - - SDK Quickstart - - -
    -

    - Failed to load quickstart options -

    - -
    -
    -
    - ) - } - - // Loading state - if (optionsLoading) { - return ( - - - SDK Quickstart - - -
    -
    - - -
    - -
    -
    -
    - ) - } - - const markdown = generateMutation.data?.markdown - const tokenName = generateMutation.data?.metadata?.tokenName - const codeBlocks = markdown ? extractCodeBlocks(markdown) : [] - - return ( - - - SDK Quickstart - - - - {options && ( - - )} - - {/* Token info */} - {tokenName && ( -

    - Using API Key: {tokenName} -

    - )} - - {/* Generation error */} - {generateMutation.isError && ( -
    -

    Failed to generate documentation

    - -
    - )} - - {/* Loading overlay */} - {generateMutation.isPending && ( -
    - - - Generating documentation... - -
    - )} - - {/* Rendered markdown */} - {markdown && !generateMutation.isPending && ( - - )} -
    -
    - ) -} diff --git a/honeycomb/src/components/quickstart/index.ts b/honeycomb/src/components/quickstart/index.ts deleted file mode 100644 index 382a0631..00000000 --- a/honeycomb/src/components/quickstart/index.ts +++ /dev/null @@ -1,5 +0,0 @@ -export { SDKQuickstart } from './SDKQuickstart' -export { CodeBlock } from './CodeBlock' -export { MarkdownRenderer } from './MarkdownRenderer' -export { QuickstartToolbar } from './QuickstartToolbar' -export { AgentStatusIndicator } from './AgentStatusIndicator' diff --git a/honeycomb/src/components/settings/ChangePasswordDialog.tsx b/honeycomb/src/components/settings/ChangePasswordDialog.tsx deleted file mode 100644 index 3114b7df..00000000 --- a/honeycomb/src/components/settings/ChangePasswordDialog.tsx +++ /dev/null @@ -1,160 +0,0 @@ -import { useState, useEffect } from 'react' -import { - Dialog, - DialogContent, - DialogHeader, - DialogTitle, - DialogFooter, -} from '@/components/ui/dialog' -import { Button } from '@/components/ui/button' -import { Input } from '@/components/ui/input' -import { useUpdatePassword } from '@/hooks/queries/useUser' -import { useNotificationStore } from '@/stores/notificationStore' - -interface ChangePasswordDialogProps { - open: boolean - onOpenChange: (open: boolean) => void -} - -interface FormErrors { - oldPassword?: string - newPassword?: string - confirmPassword?: string -} - -export function ChangePasswordDialog({ - open, - onOpenChange, -}: ChangePasswordDialogProps) { - const [oldPassword, setOldPassword] = useState('') - const [newPassword, setNewPassword] = useState('') - const [confirmPassword, setConfirmPassword] = useState('') - const [errors, setErrors] = useState({}) - - const updatePassword = useUpdatePassword() - const addNotification = useNotificationStore((s) => s.addNotification) - - // Reset form when dialog opens/closes - useEffect(() => { - if (open) { - setOldPassword('') - setNewPassword('') - setConfirmPassword('') - setErrors({}) - } - }, [open]) - - const 
validate = (): boolean => { - const newErrors: FormErrors = {} - - if (!oldPassword) { - newErrors.oldPassword = 'Please enter your old password' - } else if (oldPassword.length < 10) { - newErrors.oldPassword = 'Password must be at least 10 characters' - } - - if (!newPassword) { - newErrors.newPassword = 'Please enter your new password' - } else if (newPassword.length < 10) { - newErrors.newPassword = 'Password must be at least 10 characters' - } - - if (!confirmPassword) { - newErrors.confirmPassword = 'Please confirm your new password' - } else if (newPassword !== confirmPassword) { - newErrors.confirmPassword = "Passwords don't match" - } - - setErrors(newErrors) - return Object.keys(newErrors).length === 0 - } - - const handleSubmit = async (e: React.FormEvent) => { - e.preventDefault() - - if (!validate()) return - - try { - await updatePassword.mutateAsync({ - oldPassword, - newPassword, - }) - addNotification({ - type: 'success', - title: 'Password updated', - message: 'Your password has been updated successfully.', - }) - onOpenChange(false) - } catch { - addNotification({ - type: 'error', - title: 'Update failed', - message: 'Failed to update password. Please check your old password.', - }) - } - } - - const handleClose = () => { - onOpenChange(false) - } - - return ( - - - - Change password - - -
    -
    - - setOldPassword(e.target.value)} - placeholder="Enter your current password" - /> - {errors.oldPassword && ( -

    {errors.oldPassword}

    - )} -
    - -
    - - setNewPassword(e.target.value)} - placeholder="Enter your new password" - /> - {errors.newPassword && ( -

    {errors.newPassword}

    - )} -
    - -
    - - setConfirmPassword(e.target.value)} - placeholder="Confirm your new password" - /> - {errors.confirmPassword && ( -

    {errors.confirmPassword}

    - )} -
    - - - - - -
    -
    -
    - ) -} diff --git a/honeycomb/src/components/settings/CreateAPIKeyDialog.tsx b/honeycomb/src/components/settings/CreateAPIKeyDialog.tsx deleted file mode 100644 index dfa5aff4..00000000 --- a/honeycomb/src/components/settings/CreateAPIKeyDialog.tsx +++ /dev/null @@ -1,165 +0,0 @@ -import { useState, useEffect } from 'react' -import { Copy, Check } from 'lucide-react' -import { - Dialog, - DialogContent, - DialogHeader, - DialogTitle, - DialogFooter, -} from '@/components/ui/dialog' -import { Button } from '@/components/ui/button' -import { Input } from '@/components/ui/input' -import { Textarea } from '@/components/ui/textarea' -import { useCreateAPIToken } from '@/hooks/queries/useUser' -import { useNotificationStore } from '@/stores/notificationStore' -import { isValidTokenLabel } from '@/lib/user' - -interface CreateAPIKeyDialogProps { - open: boolean - onOpenChange: (open: boolean) => void -} - -export function CreateAPIKeyDialog({ - open, - onOpenChange, -}: CreateAPIKeyDialogProps) { - const [tokenName, setTokenName] = useState('') - const [newToken, setNewToken] = useState('') - const [showToken, setShowToken] = useState(false) - const [error, setError] = useState('') - const [copied, setCopied] = useState(false) - - const createToken = useCreateAPIToken() - const addNotification = useNotificationStore((s) => s.addNotification) - - // Reset state when dialog opens/closes - useEffect(() => { - if (open) { - setTokenName('') - setNewToken('') - setShowToken(false) - setError('') - setCopied(false) - } - }, [open]) - - const handleCreate = async () => { - if (!tokenName.trim()) { - setError('Please enter a name for the API key') - return - } - - if (!isValidTokenLabel(tokenName)) { - setError('Please only use letters, numbers, and underscores') - return - } - - setError('') - - try { - const result = await createToken.mutateAsync(tokenName) - setNewToken(result.token) - setShowToken(true) - addNotification({ - type: 'success', - title: 'API key created', - 
message: 'Your new API key has been created successfully.', - }) - } catch { - addNotification({ - type: 'error', - title: 'Creation failed', - message: 'Failed to create API key. Please try again.', - }) - } - } - - const handleCopy = async () => { - try { - await navigator.clipboard.writeText(newToken) - setCopied(true) - setTimeout(() => setCopied(false), 2000) - } catch { - addNotification({ - type: 'error', - title: 'Copy failed', - message: 'Failed to copy to clipboard.', - }) - } - } - - const handleClose = () => { - onOpenChange(false) - } - - return ( - - - - Create an API Key - - -
    -

    - Please enter a name for the API key -

    - - setTokenName(e.target.value)} - placeholder="Enter letters, numbers, and underscores" - disabled={showToken} - onKeyDown={(e) => { - if (e.key === 'Enter' && !showToken) handleCreate() - }} - /> - - {error &&

    {error}

    } - - {!showToken && ( - - - - )} - - {showToken && ( -
    - -

    - Make sure to copy your API key now. You won't be able to see it again! -

    -
    -