From 689db5ab33833b67419266d15ca60a3260547881 Mon Sep 17 00:00:00 2001 From: Timothy Date: Tue, 13 Jan 2026 20:13:05 -0800 Subject: [PATCH] feat: initial open-source release Beeline - Open-source LLM observability and control platform Features: - Real-time agent monitoring dashboard - LLM metrics and analytics (TimescaleDB) - Cost tracking and budget controls - WebSocket event streaming - MCP (Model Context Protocol) server Apache 2.0 License --- .dockerignore | 35 + .editorconfig | 21 + .github/CODEOWNERS | 16 + .github/ISSUE_TEMPLATE/bug_report.md | 54 + .github/ISSUE_TEMPLATE/feature_request.md | 29 + .github/PULL_REQUEST_TEMPLATE.md | 43 + .github/workflows/ci.yml | 95 + .github/workflows/release.yml | 95 + .gitignore | 54 + CHANGELOG.md | 40 + CODE_OF_CONDUCT.md | 43 + CONTRIBUTING.md | 108 + DEVELOPER.md | 1198 ++ LICENSE | 190 + README.md | 150 + SECURITY.md | 53 + config.yaml.example | 118 + docker-compose.override.yml.example | 37 + docker-compose.yml | 138 + docs/architecture.md | 222 + docs/configuration.md | 189 + docs/getting-started.md | 143 + hive/.env.example | 28 + hive/Dockerfile | 66 + hive/Dockerfile.dev | 24 + .../config/agent-frameworks.json | 24 + .../config/llm-vendors.json | 14 + .../config/sdk-languages.json | 10 + .../python/quickstart-langflow.md | 191 + .../python/quickstart-langgraph.md | 164 + .../python/quickstart-livekit.md | 165 + .../templates/javascript/generic.md | 194 + .../templates/javascript/langgraph.md | 297 + .../templates/python/generic.md | 164 + .../templates/python/langflow.md | 191 + .../templates/python/langgraph.md | 164 + .../templates/python/livekit.md | 162 + hive/docs/api/user-authentication.md | 247 + hive/docs/sdk-event-specification.md | 703 + hive/k8s/base/deployment.yaml | 101 + hive/k8s/base/kustomization.yaml | 6 + hive/k8s/base/service.yaml | 15 + .../overlays/production/kustomization.yaml | 21 + hive/k8s/overlays/production/namespace.yaml | 6 + .../production/patches/deployment.yaml | 27 + 
hive/k8s/overlays/staging/kustomization.yaml | 21 + hive/k8s/overlays/staging/namespace.yaml | 6 + .../overlays/staging/patches/deployment.yaml | 27 + hive/package.json | 61 + hive/scripts/migrate-add-agent-name.ts | 129 + hive/scripts/test-mcp-curl.sh | 61 + hive/scripts/test-mcp.ts | 176 + hive/src/app.ts | 150 + hive/src/config/index.ts | 134 + hive/src/controllers/control.controller.ts | 1885 +++ hive/src/controllers/iam.controller.ts | 154 + hive/src/controllers/quickstart.controller.ts | 192 + hive/src/controllers/tsdb.controller.ts | 1161 ++ hive/src/controllers/user.controller.ts | 466 + hive/src/index.ts | 116 + hive/src/mcp/index.ts | 62 + hive/src/mcp/server.ts | 90 + hive/src/mcp/tools/agents.ts | 197 + hive/src/mcp/tools/analytics.ts | 169 + hive/src/mcp/tools/budget.ts | 335 + hive/src/mcp/tools/policies.ts | 225 + hive/src/mcp/transport/http.ts | 238 + hive/src/mcp/utils/api-client.ts | 610 + hive/src/mcp/utils/response-helpers.ts | 65 + hive/src/mcp/utils/schema-helpers.ts | 80 + .../middleware/error-handler.middleware.ts | 43 + hive/src/routes.ts | 43 + hive/src/services/control/control_service.ts | 2065 +++ hive/src/services/control/control_sockets.ts | 584 + .../src/services/control/llm_event_batcher.ts | 349 + hive/src/services/mongo/mongo_db.ts | 26 + .../services/quickstart/quickstart_service.ts | 227 + .../src/services/tsdb/00-init-timescaledb.sql | 11 + hive/src/services/tsdb/analytics_service.ts | 748 + hive/src/services/tsdb/pricing_service.ts | 743 + hive/src/services/tsdb/schema.sql | 358 + hive/src/services/tsdb/team_context.ts | 114 + hive/src/services/tsdb/tsdb_service.ts | 955 ++ hive/src/services/tsdb/users_schema.sql | 149 + hive/src/sockets/control.socket.ts | 98 + hive/src/types/acho-inc-administration.d.ts | 123 + hive/tsconfig.json | 26 + honeycomb/.env.example | 13 + honeycomb/Dockerfile | 35 + honeycomb/Dockerfile.dev | 19 + honeycomb/components.json | 20 + honeycomb/index.html | 17 + honeycomb/nginx.conf | 42 + 
honeycomb/package.json | 67 + honeycomb/postcss.config.js | 6 + honeycomb/public/favicon.svg | 4 + honeycomb/src/App.tsx | 40 + honeycomb/src/assets/aden-icon.png | Bin 0 -> 2027 bytes honeycomb/src/assets/aden-icon.svg | 5 + honeycomb/src/assets/aden-logo.svg | 15 + honeycomb/src/components/.gitkeep | 0 honeycomb/src/components/ErrorBoundary.tsx | 50 + .../agent-control/AgentControlLayout.tsx | 254 + .../agent-control/AnalyticsPanel.tsx | 335 + .../components/agent-control/CostControls.tsx | 266 + .../components/agent-control/DataPanel.tsx | 345 + .../components/agent-control/WorkersPanel.tsx | 276 + .../agent-control/budget/AddBudgetDialog.tsx | 209 + .../budget/BudgetDetailPanel.tsx | 480 + .../agent-control/charts/CostByModelChart.tsx | 110 + .../agent-control/charts/CostTrendChart.tsx | 109 + .../agent-control/charts/LatencyChart.tsx | 70 + .../agent-control/charts/ModelUsageChart.tsx | 81 + .../agent-control/charts/TokenUsageChart.tsx | 96 + .../agent-control/charts/TopAgentsChart.tsx | 98 + .../agent-control/charts/VegaLiteChart.tsx | 52 + .../components/agent-control/charts/index.ts | 6 + .../components/agent-control/charts/specs.ts | 257 + .../agent-control/charts/transformers.ts | 254 + .../src/components/agent-control/index.ts | 24 + .../agent-control/shared/BudgetCard.tsx | 172 + .../agent-control/shared/KpiCard.tsx | 67 + .../agent-control/shared/LiveIndicator.tsx | 23 + .../agent-control/shared/NotificationBell.tsx | 137 + .../workers/WorkerProfilePanel.tsx | 171 + honeycomb/src/components/auth/LoginForm.tsx | 127 + .../src/components/auth/ProtectedRoute.tsx | 52 + .../src/components/auth/RegisterForm.tsx | 205 + .../quickstart/AgentStatusIndicator.tsx | 91 + .../src/components/quickstart/CodeBlock.tsx | 63 + .../quickstart/MarkdownRenderer.tsx | 72 + .../quickstart/QuickstartToolbar.tsx | 89 + .../components/quickstart/SDKQuickstart.tsx | 229 + honeycomb/src/components/quickstart/index.ts | 5 + .../settings/ChangePasswordDialog.tsx | 160 + 
.../settings/CreateAPIKeyDialog.tsx | 165 + .../components/settings/DeveloperSettings.tsx | 128 + .../components/settings/ProfileSettings.tsx | 387 + .../src/components/settings/SettingsModal.tsx | 71 + honeycomb/src/components/ui/avatar.tsx | 48 + honeycomb/src/components/ui/badge.tsx | 36 + honeycomb/src/components/ui/button.tsx | 56 + honeycomb/src/components/ui/card.tsx | 79 + honeycomb/src/components/ui/dialog.tsx | 120 + honeycomb/src/components/ui/dropdown-menu.tsx | 200 + honeycomb/src/components/ui/input.tsx | 22 + honeycomb/src/components/ui/label.tsx | 24 + honeycomb/src/components/ui/popover.tsx | 29 + honeycomb/src/components/ui/progress.tsx | 26 + honeycomb/src/components/ui/scroll-area.tsx | 48 + honeycomb/src/components/ui/select.tsx | 158 + honeycomb/src/components/ui/separator.tsx | 29 + honeycomb/src/components/ui/sheet.tsx | 138 + honeycomb/src/components/ui/skeleton.tsx | 15 + honeycomb/src/components/ui/switch.tsx | 27 + honeycomb/src/components/ui/table.tsx | 117 + honeycomb/src/components/ui/tabs.tsx | 55 + honeycomb/src/components/ui/textarea.tsx | 22 + honeycomb/src/components/ui/tooltip.tsx | 30 + honeycomb/src/components/user/UserAvatar.tsx | 132 + honeycomb/src/hooks/index.ts | 7 + honeycomb/src/hooks/queries/index.ts | 25 + honeycomb/src/hooks/queries/useAnalytics.ts | 80 + honeycomb/src/hooks/queries/useBudgets.ts | 183 + honeycomb/src/hooks/queries/useLogs.ts | 61 + honeycomb/src/hooks/queries/useQuickstart.ts | 24 + honeycomb/src/hooks/queries/useUser.ts | 184 + honeycomb/src/hooks/useAgentStatus.ts | 170 + honeycomb/src/hooks/useApi.ts | 44 + honeycomb/src/hooks/useControlSocket.ts | 90 + honeycomb/src/lib/quickstart.ts | 55 + honeycomb/src/lib/user.ts | 89 + honeycomb/src/lib/utils.ts | 6 + honeycomb/src/main.tsx | 31 + honeycomb/src/pages/HomePage.tsx | 48 + honeycomb/src/pages/LoginPage.tsx | 45 + honeycomb/src/pages/NotFoundPage.tsx | 15 + honeycomb/src/pages/RegisterPage.tsx | 39 + honeycomb/src/services/agentControlApi.ts | 
241 + honeycomb/src/services/api.ts | 90 + honeycomb/src/services/authApi.ts | 17 + honeycomb/src/services/controlApi.ts | 27 + honeycomb/src/services/orgApi.ts | 25 + honeycomb/src/services/quickstartApi.ts | 18 + honeycomb/src/services/userApi.ts | 34 + honeycomb/src/stores/agentControlStore.ts | 58 + honeycomb/src/stores/notificationStore.ts | 89 + honeycomb/src/stores/userStore.ts | 117 + honeycomb/src/styles/index.css | 378 + honeycomb/src/types/agentControl.ts | 303 + honeycomb/src/types/auth.ts | 26 + honeycomb/src/types/index.ts | 25 + honeycomb/src/types/quickstart.ts | 64 + honeycomb/src/types/user.ts | 72 + honeycomb/src/utils/index.ts | 27 + honeycomb/src/vite-env.d.ts | 1 + honeycomb/tailwind.config.js | 91 + honeycomb/tsconfig.json | 27 + honeycomb/tsconfig.node.json | 18 + honeycomb/vite.config.ts | 34 + package-lock.json | 12235 ++++++++++++++++ package.json | 36 + scripts/generate-env.ts | 180 + scripts/setup.sh | 85 + tsconfig.base.json | 24 + 205 files changed, 42545 insertions(+) create mode 100644 .dockerignore create mode 100644 .editorconfig create mode 100644 .github/CODEOWNERS create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/release.yml create mode 100644 .gitignore create mode 100644 CHANGELOG.md create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 DEVELOPER.md create mode 100644 LICENSE create mode 100644 README.md create mode 100644 SECURITY.md create mode 100644 config.yaml.example create mode 100644 docker-compose.override.yml.example create mode 100644 docker-compose.yml create mode 100644 docs/architecture.md create mode 100644 docs/configuration.md create mode 100644 docs/getting-started.md create mode 100644 hive/.env.example create mode 100644 hive/Dockerfile create mode 100644 
hive/Dockerfile.dev create mode 100644 hive/docs/aden-sdk-documents/config/agent-frameworks.json create mode 100644 hive/docs/aden-sdk-documents/config/llm-vendors.json create mode 100644 hive/docs/aden-sdk-documents/config/sdk-languages.json create mode 100644 hive/docs/aden-sdk-documents/python/quickstart-langflow.md create mode 100644 hive/docs/aden-sdk-documents/python/quickstart-langgraph.md create mode 100644 hive/docs/aden-sdk-documents/python/quickstart-livekit.md create mode 100644 hive/docs/aden-sdk-documents/templates/javascript/generic.md create mode 100644 hive/docs/aden-sdk-documents/templates/javascript/langgraph.md create mode 100644 hive/docs/aden-sdk-documents/templates/python/generic.md create mode 100644 hive/docs/aden-sdk-documents/templates/python/langflow.md create mode 100644 hive/docs/aden-sdk-documents/templates/python/langgraph.md create mode 100644 hive/docs/aden-sdk-documents/templates/python/livekit.md create mode 100644 hive/docs/api/user-authentication.md create mode 100644 hive/docs/sdk-event-specification.md create mode 100644 hive/k8s/base/deployment.yaml create mode 100644 hive/k8s/base/kustomization.yaml create mode 100644 hive/k8s/base/service.yaml create mode 100644 hive/k8s/overlays/production/kustomization.yaml create mode 100644 hive/k8s/overlays/production/namespace.yaml create mode 100644 hive/k8s/overlays/production/patches/deployment.yaml create mode 100644 hive/k8s/overlays/staging/kustomization.yaml create mode 100644 hive/k8s/overlays/staging/namespace.yaml create mode 100644 hive/k8s/overlays/staging/patches/deployment.yaml create mode 100644 hive/package.json create mode 100644 hive/scripts/migrate-add-agent-name.ts create mode 100755 hive/scripts/test-mcp-curl.sh create mode 100644 hive/scripts/test-mcp.ts create mode 100644 hive/src/app.ts create mode 100644 hive/src/config/index.ts create mode 100644 hive/src/controllers/control.controller.ts create mode 100644 hive/src/controllers/iam.controller.ts create mode 
100644 hive/src/controllers/quickstart.controller.ts create mode 100644 hive/src/controllers/tsdb.controller.ts create mode 100644 hive/src/controllers/user.controller.ts create mode 100644 hive/src/index.ts create mode 100644 hive/src/mcp/index.ts create mode 100644 hive/src/mcp/server.ts create mode 100644 hive/src/mcp/tools/agents.ts create mode 100644 hive/src/mcp/tools/analytics.ts create mode 100644 hive/src/mcp/tools/budget.ts create mode 100644 hive/src/mcp/tools/policies.ts create mode 100644 hive/src/mcp/transport/http.ts create mode 100644 hive/src/mcp/utils/api-client.ts create mode 100644 hive/src/mcp/utils/response-helpers.ts create mode 100644 hive/src/mcp/utils/schema-helpers.ts create mode 100644 hive/src/middleware/error-handler.middleware.ts create mode 100644 hive/src/routes.ts create mode 100644 hive/src/services/control/control_service.ts create mode 100644 hive/src/services/control/control_sockets.ts create mode 100644 hive/src/services/control/llm_event_batcher.ts create mode 100644 hive/src/services/mongo/mongo_db.ts create mode 100644 hive/src/services/quickstart/quickstart_service.ts create mode 100644 hive/src/services/tsdb/00-init-timescaledb.sql create mode 100644 hive/src/services/tsdb/analytics_service.ts create mode 100644 hive/src/services/tsdb/pricing_service.ts create mode 100644 hive/src/services/tsdb/schema.sql create mode 100644 hive/src/services/tsdb/team_context.ts create mode 100644 hive/src/services/tsdb/tsdb_service.ts create mode 100644 hive/src/services/tsdb/users_schema.sql create mode 100644 hive/src/sockets/control.socket.ts create mode 100644 hive/src/types/acho-inc-administration.d.ts create mode 100644 hive/tsconfig.json create mode 100644 honeycomb/.env.example create mode 100644 honeycomb/Dockerfile create mode 100644 honeycomb/Dockerfile.dev create mode 100644 honeycomb/components.json create mode 100644 honeycomb/index.html create mode 100644 honeycomb/nginx.conf create mode 100644 honeycomb/package.json 
create mode 100644 honeycomb/postcss.config.js create mode 100644 honeycomb/public/favicon.svg create mode 100644 honeycomb/src/App.tsx create mode 100644 honeycomb/src/assets/aden-icon.png create mode 100644 honeycomb/src/assets/aden-icon.svg create mode 100644 honeycomb/src/assets/aden-logo.svg create mode 100644 honeycomb/src/components/.gitkeep create mode 100644 honeycomb/src/components/ErrorBoundary.tsx create mode 100644 honeycomb/src/components/agent-control/AgentControlLayout.tsx create mode 100644 honeycomb/src/components/agent-control/AnalyticsPanel.tsx create mode 100644 honeycomb/src/components/agent-control/CostControls.tsx create mode 100644 honeycomb/src/components/agent-control/DataPanel.tsx create mode 100644 honeycomb/src/components/agent-control/WorkersPanel.tsx create mode 100644 honeycomb/src/components/agent-control/budget/AddBudgetDialog.tsx create mode 100644 honeycomb/src/components/agent-control/budget/BudgetDetailPanel.tsx create mode 100644 honeycomb/src/components/agent-control/charts/CostByModelChart.tsx create mode 100644 honeycomb/src/components/agent-control/charts/CostTrendChart.tsx create mode 100644 honeycomb/src/components/agent-control/charts/LatencyChart.tsx create mode 100644 honeycomb/src/components/agent-control/charts/ModelUsageChart.tsx create mode 100644 honeycomb/src/components/agent-control/charts/TokenUsageChart.tsx create mode 100644 honeycomb/src/components/agent-control/charts/TopAgentsChart.tsx create mode 100644 honeycomb/src/components/agent-control/charts/VegaLiteChart.tsx create mode 100644 honeycomb/src/components/agent-control/charts/index.ts create mode 100644 honeycomb/src/components/agent-control/charts/specs.ts create mode 100644 honeycomb/src/components/agent-control/charts/transformers.ts create mode 100644 honeycomb/src/components/agent-control/index.ts create mode 100644 honeycomb/src/components/agent-control/shared/BudgetCard.tsx create mode 100644 
honeycomb/src/components/agent-control/shared/KpiCard.tsx create mode 100644 honeycomb/src/components/agent-control/shared/LiveIndicator.tsx create mode 100644 honeycomb/src/components/agent-control/shared/NotificationBell.tsx create mode 100644 honeycomb/src/components/agent-control/workers/WorkerProfilePanel.tsx create mode 100644 honeycomb/src/components/auth/LoginForm.tsx create mode 100644 honeycomb/src/components/auth/ProtectedRoute.tsx create mode 100644 honeycomb/src/components/auth/RegisterForm.tsx create mode 100644 honeycomb/src/components/quickstart/AgentStatusIndicator.tsx create mode 100644 honeycomb/src/components/quickstart/CodeBlock.tsx create mode 100644 honeycomb/src/components/quickstart/MarkdownRenderer.tsx create mode 100644 honeycomb/src/components/quickstart/QuickstartToolbar.tsx create mode 100644 honeycomb/src/components/quickstart/SDKQuickstart.tsx create mode 100644 honeycomb/src/components/quickstart/index.ts create mode 100644 honeycomb/src/components/settings/ChangePasswordDialog.tsx create mode 100644 honeycomb/src/components/settings/CreateAPIKeyDialog.tsx create mode 100644 honeycomb/src/components/settings/DeveloperSettings.tsx create mode 100644 honeycomb/src/components/settings/ProfileSettings.tsx create mode 100644 honeycomb/src/components/settings/SettingsModal.tsx create mode 100644 honeycomb/src/components/ui/avatar.tsx create mode 100644 honeycomb/src/components/ui/badge.tsx create mode 100644 honeycomb/src/components/ui/button.tsx create mode 100644 honeycomb/src/components/ui/card.tsx create mode 100644 honeycomb/src/components/ui/dialog.tsx create mode 100644 honeycomb/src/components/ui/dropdown-menu.tsx create mode 100644 honeycomb/src/components/ui/input.tsx create mode 100644 honeycomb/src/components/ui/label.tsx create mode 100644 honeycomb/src/components/ui/popover.tsx create mode 100644 honeycomb/src/components/ui/progress.tsx create mode 100644 honeycomb/src/components/ui/scroll-area.tsx create mode 100644 
honeycomb/src/components/ui/select.tsx create mode 100644 honeycomb/src/components/ui/separator.tsx create mode 100644 honeycomb/src/components/ui/sheet.tsx create mode 100644 honeycomb/src/components/ui/skeleton.tsx create mode 100644 honeycomb/src/components/ui/switch.tsx create mode 100644 honeycomb/src/components/ui/table.tsx create mode 100644 honeycomb/src/components/ui/tabs.tsx create mode 100644 honeycomb/src/components/ui/textarea.tsx create mode 100644 honeycomb/src/components/ui/tooltip.tsx create mode 100644 honeycomb/src/components/user/UserAvatar.tsx create mode 100644 honeycomb/src/hooks/index.ts create mode 100644 honeycomb/src/hooks/queries/index.ts create mode 100644 honeycomb/src/hooks/queries/useAnalytics.ts create mode 100644 honeycomb/src/hooks/queries/useBudgets.ts create mode 100644 honeycomb/src/hooks/queries/useLogs.ts create mode 100644 honeycomb/src/hooks/queries/useQuickstart.ts create mode 100644 honeycomb/src/hooks/queries/useUser.ts create mode 100644 honeycomb/src/hooks/useAgentStatus.ts create mode 100644 honeycomb/src/hooks/useApi.ts create mode 100644 honeycomb/src/hooks/useControlSocket.ts create mode 100644 honeycomb/src/lib/quickstart.ts create mode 100644 honeycomb/src/lib/user.ts create mode 100644 honeycomb/src/lib/utils.ts create mode 100644 honeycomb/src/main.tsx create mode 100644 honeycomb/src/pages/HomePage.tsx create mode 100644 honeycomb/src/pages/LoginPage.tsx create mode 100644 honeycomb/src/pages/NotFoundPage.tsx create mode 100644 honeycomb/src/pages/RegisterPage.tsx create mode 100644 honeycomb/src/services/agentControlApi.ts create mode 100644 honeycomb/src/services/api.ts create mode 100644 honeycomb/src/services/authApi.ts create mode 100644 honeycomb/src/services/controlApi.ts create mode 100644 honeycomb/src/services/orgApi.ts create mode 100644 honeycomb/src/services/quickstartApi.ts create mode 100644 honeycomb/src/services/userApi.ts create mode 100644 honeycomb/src/stores/agentControlStore.ts create 
mode 100644 honeycomb/src/stores/notificationStore.ts create mode 100644 honeycomb/src/stores/userStore.ts create mode 100644 honeycomb/src/styles/index.css create mode 100644 honeycomb/src/types/agentControl.ts create mode 100644 honeycomb/src/types/auth.ts create mode 100644 honeycomb/src/types/index.ts create mode 100644 honeycomb/src/types/quickstart.ts create mode 100644 honeycomb/src/types/user.ts create mode 100644 honeycomb/src/utils/index.ts create mode 100644 honeycomb/src/vite-env.d.ts create mode 100644 honeycomb/tailwind.config.js create mode 100644 honeycomb/tsconfig.json create mode 100644 honeycomb/tsconfig.node.json create mode 100644 honeycomb/vite.config.ts create mode 100644 package-lock.json create mode 100644 package.json create mode 100644 scripts/generate-env.ts create mode 100755 scripts/setup.sh create mode 100644 tsconfig.base.json diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..efb22d1c --- /dev/null +++ b/.dockerignore @@ -0,0 +1,35 @@ +# Git +.git/ +.gitignore + +# Documentation +*.md +docs/ +LICENSE + +# IDE +.idea/ +.vscode/ + +# Dependencies (rebuilt in container) +node_modules/ + +# Build artifacts +dist/ +build/ +coverage/ + +# Environment files +.env* +config.yaml + +# Logs +*.log +logs/ + +# OS +.DS_Store +Thumbs.db + +# GitHub +.github/ diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..51b5033c --- /dev/null +++ b/.editorconfig @@ -0,0 +1,21 @@ +# EditorConfig helps maintain consistent coding styles +# https://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true + +[*.md] +trim_trailing_whitespace = false + +[*.{yml,yaml}] +indent_size = 2 + +[Makefile] +indent_style = tab diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..71ab6b3f --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,16 @@ +# Default owners for 
everything in the repo +* @adenhq/maintainers + +# Frontend +/honeycomb/ @adenhq/maintainers + +# Backend +/hive/ @adenhq/maintainers + +# Infrastructure +/docker-compose*.yml @adenhq/maintainers +/.github/ @adenhq/maintainers + +# Documentation +/docs/ @adenhq/maintainers +*.md @adenhq/maintainers diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..6e6478f5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,54 @@ +--- +name: Bug Report +about: Report a bug to help us improve +title: '[Bug]: ' +labels: bug +assignees: '' +--- + +## Describe the Bug + +A clear and concise description of what the bug is. + +## To Reproduce + +Steps to reproduce the behavior: + +1. Go to '...' +2. Click on '...' +3. See error + +## Expected Behavior + +A clear and concise description of what you expected to happen. + +## Screenshots + +If applicable, add screenshots to help explain your problem. + +## Environment + +- OS: [e.g., Ubuntu 22.04, macOS 14] +- Docker version: [e.g., 24.0.0] +- Node version: [e.g., 20.10.0] +- Browser (if applicable): [e.g., Chrome 120] + +## Configuration + +Relevant parts of your `config.yaml` (remove any sensitive data): + +```yaml +# paste here +``` + +## Logs + +Relevant log output: + +``` +paste logs here +``` + +## Additional Context + +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..2781dd49 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,29 @@ +--- +name: Feature Request +about: Suggest a new feature or enhancement +title: '[Feature]: ' +labels: enhancement +assignees: '' +--- + +## Problem Statement + +A clear and concise description of what problem this feature would solve. + +Ex. I'm always frustrated when [...] + +## Proposed Solution + +A clear and concise description of what you want to happen. 
+ +## Alternatives Considered + +A description of any alternative solutions or features you've considered. + +## Additional Context + +Add any other context, mockups, or screenshots about the feature request here. + +## Implementation Ideas + +If you have ideas about how this could be implemented, share them here. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..d0b2ac24 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,43 @@ +## Description + +Brief description of the changes in this PR. + +## Type of Change + +- [ ] Bug fix (non-breaking change that fixes an issue) +- [ ] New feature (non-breaking change that adds functionality) +- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) +- [ ] Documentation update +- [ ] Refactoring (no functional changes) + +## Related Issues + +Fixes #(issue number) + +## Changes Made + +- Change 1 +- Change 2 +- Change 3 + +## Testing + +Describe the tests you ran to verify your changes: + +- [ ] Unit tests pass (`npm run test`) +- [ ] Lint passes (`npm run lint`) +- [ ] Manual testing performed + +## Checklist + +- [ ] My code follows the project's style guidelines +- [ ] I have performed a self-review of my code +- [ ] I have commented my code, particularly in hard-to-understand areas +- [ ] I have made corresponding changes to the documentation +- [ ] My changes generate no new warnings +- [ ] I have added tests that prove my fix is effective or that my feature works +- [ ] New and existing unit tests pass locally with my changes + +## Screenshots (if applicable) + +Add screenshots to demonstrate UI changes. 
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..6fca7376 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,95 @@ +name: CI + +on: + push: + branches: [main] + pull_request: + branches: [main] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Run linter + run: npm run lint + + test: + name: Test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Run tests + run: npm run test + + build: + name: Build + runs-on: ubuntu-latest + needs: [lint, test] + steps: + - uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Build packages + run: npm run build + + docker: + name: Docker Build + runs-on: ubuntu-latest + needs: [lint, test] + steps: + - uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build frontend image + uses: docker/build-push-action@v5 + with: + context: ./honeycomb + push: false + tags: honeycomb-frontend:test + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Build backend image + uses: docker/build-push-action@v5 + with: + context: ./hive + push: false + tags: honeycomb-backend:test + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..3452ce25 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,95 @@ +name: Release + 
+on: + push: + tags: + - 'v*' + +permissions: + contents: write + packages: write + +jobs: + release: + name: Create Release + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Build packages + run: npm run build + + - name: Run tests + run: npm run test + + - name: Generate changelog + id: changelog + run: | + # Extract version from tag + VERSION=${GITHUB_REF#refs/tags/v} + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Create GitHub Release + uses: softprops/action-gh-release@v1 + with: + generate_release_notes: true + draft: false + prerelease: ${{ contains(github.ref, '-') }} + + docker-publish: + name: Publish Docker Images + runs-on: ubuntu-latest + needs: release + steps: + - uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ github.repository }}/frontend + ghcr.io/${{ github.repository }}/backend + tags: | + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + + - name: Build and push frontend + uses: docker/build-push-action@v5 + with: + context: ./honeycomb + push: true + tags: ghcr.io/${{ github.repository }}/frontend:${{ github.ref_name }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Build and push backend + uses: docker/build-push-action@v5 + with: + context: ./hive + push: true + tags: ghcr.io/${{ github.repository }}/backend:${{ github.ref_name }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.gitignore b/.gitignore new file mode 
100644 index 00000000..a8c4967e --- /dev/null +++ b/.gitignore @@ -0,0 +1,54 @@ +# Dependencies +node_modules/ +.pnpm-store/ + +# Build outputs +dist/ +build/ +.next/ +out/ + +# Environment files (generated from config.yaml) +.env +.env.local +.env.*.local +honeycomb/.env +hive/.env + +# User configuration (copied from .example) +config.yaml +docker-compose.override.yml + +# IDE +.idea/ +.vscode/* +!.vscode/extensions.json +!.vscode/settings.json.example +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Logs +logs/ +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* + +# Testing +coverage/ +.nyc_output/ + +# TypeScript +*.tsbuildinfo + +# Misc +*.local +.cache/ +tmp/ +temp/ diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..2f405c47 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,40 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] + +### Added +- Initial project structure +- React frontend (honeycomb) with Vite and TypeScript +- Node.js backend (hive) with Express and TypeScript +- Docker Compose configuration for local development +- Configuration system via `config.yaml` +- GitHub Actions CI/CD workflows +- Comprehensive documentation + +### Changed +- N/A + +### Deprecated +- N/A + +### Removed +- N/A + +### Fixed +- N/A + +### Security +- N/A + +## [0.1.0] - 2026-01-13 + +### Added +- Initial release + +[Unreleased]: https://github.com/adenhq/beeline/compare/v0.1.0...HEAD +[0.1.0]: https://github.com/adenhq/beeline/releases/tag/v0.1.0 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..6f1e3ad0 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,43 @@ +# Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior: + +- The use of sexualized language or imagery and unwelcome sexual attention +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information without explicit permission +- Other conduct which could reasonably be considered inappropriate + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at contact@adenhq.com. + +All complaints will be reviewed and investigated promptly and fairly. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.0. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..07765383 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,108 @@ +# Contributing to Beeline + +Thank you for your interest in contributing to Beeline! This document provides guidelines and information for contributors. + +## Code of Conduct + +By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). + +## Getting Started + +1. 
Fork the repository +2. Clone your fork: `git clone https://github.com/YOUR_USERNAME/beeline.git` +3. Create a feature branch: `git checkout -b feature/your-feature-name` +4. Make your changes +5. Run tests: `npm run test` +6. Commit your changes following our commit conventions +7. Push to your fork and submit a Pull Request + +## Development Setup + +```bash +# Install dependencies +npm install + +# Copy configuration +cp config.yaml.example config.yaml + +# Generate environment files +npm run setup + +# Start development environment +docker compose up +``` + +## Commit Convention + +We follow [Conventional Commits](https://www.conventionalcommits.org/): + +``` +type(scope): description + +[optional body] + +[optional footer] +``` + +**Types:** +- `feat`: New feature +- `fix`: Bug fix +- `docs`: Documentation changes +- `style`: Code style changes (formatting, etc.) +- `refactor`: Code refactoring +- `test`: Adding or updating tests +- `chore`: Maintenance tasks + +**Examples:** +``` +feat(auth): add OAuth2 login support +fix(api): handle null response from external service +docs(readme): update installation instructions +``` + +## Pull Request Process + +1. Update documentation if needed +2. Add tests for new functionality +3. Ensure all tests pass +4. Update the CHANGELOG.md if applicable +5. 
Request review from maintainers + +### PR Title Format + +Follow the same convention as commits: +``` +feat(component): add new feature description +``` + +## Project Structure + +- `honeycomb/` - React frontend application +- `hive/` - Node.js backend API +- `docs/` - Documentation +- `scripts/` - Build and utility scripts + +## Code Style + +- Use TypeScript for all new code +- Follow existing code patterns +- Use meaningful variable and function names +- Add comments for complex logic +- Keep functions focused and small + +## Testing + +```bash +# Run all tests +npm run test + +# Run tests for a specific package +npm run test --workspace=honeycomb +npm run test --workspace=hive +``` + +## Questions? + +Feel free to open an issue for questions or join our [Discord community](https://discord.com/invite/MXE49hrKDk). + +Thank you for contributing! diff --git a/DEVELOPER.md b/DEVELOPER.md new file mode 100644 index 00000000..d8e4fb22 --- /dev/null +++ b/DEVELOPER.md @@ -0,0 +1,1198 @@ +# Developer Guide + +This comprehensive guide covers everything you need to know to work on the Beeline monorepo effectively. + +## Table of Contents + +1. [Repository Overview](#repository-overview) +2. [Initial Setup](#initial-setup) +3. [Project Structure](#project-structure) +4. [Configuration System](#configuration-system) +5. [Development Workflow](#development-workflow) +6. [Working with the Frontend (honeycomb)](#working-with-the-frontend-honeycomb) +7. [Working with the Backend (hive)](#working-with-the-backend-hive) +8. [Docker Development](#docker-development) +9. [Testing](#testing) +10. [Code Style & Conventions](#code-style--conventions) +11. [Git Workflow](#git-workflow) +12. [Debugging](#debugging) +13. [Common Tasks](#common-tasks) +14. 
[Troubleshooting](#troubleshooting) + +--- + +## Repository Overview + +Beeline is a monorepo containing two main packages: + +| Package | Directory | Description | Tech Stack | +|---------|-----------|-------------|------------| +| **honeycomb** | `/honeycomb` | Frontend web application | React 18, TypeScript, Vite | +| **hive** | `/hive` | Backend API server | Node.js, Express, TypeScript | + +The repository uses **npm workspaces** to manage dependencies across packages from a single root `package.json`. + +### Key Principles + +- **Single source of configuration**: Edit `config.yaml` once, environment files are auto-generated +- **Consistent tooling**: Both packages use TypeScript with strict mode +- **Docker-first**: Production deployments use containerized builds +- **Developer ergonomics**: Hot reload, clear error messages, minimal setup + +--- + +## Initial Setup + +### Prerequisites + +Ensure you have installed: + +- **Node.js v20+** - [Download](https://nodejs.org/) or use nvm: `nvm install 20` +- **npm v10+** - Comes with Node.js 20 +- **Docker v20.10+** - [Download](https://docs.docker.com/get-docker/) +- **Docker Compose v2+** - Included with Docker Desktop + +Verify installation: + +```bash +node --version # Should be v20.x.x +npm --version # Should be 10.x.x +docker --version # Should be 20.10+ +docker compose version # Should be v2.x.x +``` + +### Step-by-Step Setup + +```bash +# 1. Clone the repository +git clone https://github.com/adenhq/beeline.git +cd beeline + +# 2. Create your configuration file +cp config.yaml.example config.yaml + +# 3. (Optional) Edit config.yaml with your settings +# Most defaults work out of the box + +# 4. Run the automated setup +npm run setup +``` + +The `setup` script performs these actions: +1. Installs all dependencies for root, honeycomb, and hive +2. Generates `.env` files from your `config.yaml` +3. 
Reports any issues + +### Verify Setup + +```bash +# Build both packages to verify everything works +npm run build + +# Or run in development mode +npm run dev -w honeycomb # Terminal 1: Frontend at http://localhost:3000 +npm run dev -w hive # Terminal 2: Backend at http://localhost:4000 +``` + +--- + +## Project Structure + +``` +beeline/ # Repository root +│ +├── .github/ # GitHub configuration +│ ├── workflows/ +│ │ ├── ci.yml # Runs on every PR: lint, test, build +│ │ └── release.yml # Runs on tags: publish Docker images +│ ├── ISSUE_TEMPLATE/ # Bug report & feature request templates +│ ├── PULL_REQUEST_TEMPLATE.md # PR description template +│ └── CODEOWNERS # Auto-assign reviewers +│ +├── docs/ # Documentation +│ ├── getting-started.md # Quick start guide +│ ├── configuration.md # Configuration reference +│ └── architecture.md # System architecture +│ +├── honeycomb/ # FRONTEND PACKAGE +│ ├── src/ +│ │ ├── components/ # Reusable UI components +│ │ ├── hooks/ # Custom React hooks +│ │ │ └── useApi.ts # Hook for API calls +│ │ ├── pages/ # Route-level page components +│ │ │ ├── HomePage.tsx +│ │ │ └── NotFoundPage.tsx +│ │ ├── services/ # External service clients +│ │ │ └── api.ts # Backend API client +│ │ ├── styles/ # Global CSS +│ │ │ └── index.css +│ │ ├── types/ # TypeScript type definitions +│ │ │ └── index.ts +│ │ ├── utils/ # Utility functions +│ │ │ └── index.ts +│ │ ├── App.tsx # Root component with routing +│ │ ├── main.tsx # Application entry point +│ │ └── vite-env.d.ts # Vite type declarations +│ ├── public/ # Static assets (copied as-is) +│ │ └── favicon.svg +│ ├── index.html # HTML template +│ ├── nginx.conf # Production nginx config +│ ├── package.json # Package dependencies & scripts +│ ├── tsconfig.json # TypeScript configuration +│ ├── tsconfig.node.json # TypeScript config for Vite +│ ├── vite.config.ts # Vite bundler configuration +│ ├── Dockerfile # Production Docker build +│ ├── Dockerfile.dev # Development Docker build +│ └── 
.env.example # Environment variable template +│ +├── hive/ # BACKEND PACKAGE +│ ├── src/ +│ │ ├── config/ # Configuration loading +│ │ │ └── index.ts # Env var parsing & validation +│ │ ├── controllers/ # Request handlers (business logic) +│ │ ├── middleware/ # Express middleware +│ │ │ └── errorHandler.ts # Global error handling +│ │ ├── models/ # Data models / database schemas +│ │ ├── routes/ # API route definitions +│ │ │ ├── api.ts # /api/* routes +│ │ │ └── health.ts # Health check endpoints +│ │ ├── services/ # Business logic services +│ │ ├── types/ # TypeScript type definitions +│ │ │ └── index.ts +│ │ ├── utils/ # Utility functions +│ │ │ └── logger.ts # Structured logging +│ │ ├── index.ts # Application entry point +│ │ └── server.ts # Express server setup +│ ├── package.json # Package dependencies & scripts +│ ├── tsconfig.json # TypeScript configuration +│ ├── Dockerfile # Production Docker build +│ ├── Dockerfile.dev # Development Docker build +│ └── .env.example # Environment variable template +│ +├── scripts/ # Build & utility scripts +│ ├── setup.sh # First-time setup script +│ └── generate-env.ts # Generates .env from config.yaml +│ +├── config.yaml.example # Configuration template (copy to config.yaml) +├── config.yaml # Your local configuration (git-ignored) +├── docker-compose.yml # Production Docker Compose +├── docker-compose.override.yml.example # Dev overrides template +├── docker-compose.override.yml # Your local dev overrides (git-ignored) +│ +├── package.json # Root package.json (workspaces config) +├── package-lock.json # Dependency lock file +├── tsconfig.base.json # Shared TypeScript settings +│ +├── .gitignore # Git ignore rules +├── .editorconfig # Editor formatting rules +├── .dockerignore # Docker ignore rules +│ +├── README.md # Project overview +├── DEVELOPER.md # This file +├── CONTRIBUTING.md # Contribution guidelines +├── CHANGELOG.md # Version history +├── LICENSE # Apache 2.0 License +├── CODE_OF_CONDUCT.md # Community 
guidelines +└── SECURITY.md # Security policy +``` + +--- + +## Configuration System + +### How It Works + +Instead of managing multiple `.env` files, you edit a single `config.yaml`: + +``` +┌─────────────────┐ +│ config.yaml │ ← You edit this one file +└────────┬────────┘ + │ + ▼ +┌─────────────────┐ +│ generate-env.ts │ ← Script transforms YAML to .env +└────────┬────────┘ + │ + ├──────────────────┬──────────────────┐ + ▼ ▼ ▼ +┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ +│ /.env │ │ /honeycomb/.env │ │ /hive/.env │ +│ (Docker Compose)│ │ (Frontend) │ │ (Backend) │ +└─────────────────┘ └─────────────────┘ └─────────────────┘ +``` + +### Configuration Reference + +The `config.yaml` file structure: + +```yaml +# =========================================== +# Application Configuration +# =========================================== + +# Application metadata +app: + name: beeline # Used in logs and API responses + environment: development # development | staging | production + +# Server configuration +server: + frontend: + port: 3000 # Frontend port + host: "0.0.0.0" # Bind address + backend: + port: 4000 # Backend API port + host: "0.0.0.0" # Bind address + +# API configuration +api: + prefix: /api # API route prefix + cors: + origins: # Allowed CORS origins + - "http://localhost:3000" + - "http://localhost:4000" + +# Logging configuration +logging: + level: debug # debug | info | warn | error + format: pretty # pretty | json + +# Security settings +security: + jwt: + secret: "change-me-in-production-use-min-32-chars" + expiresIn: "7d" # Token expiration + +# Database configuration (when needed) +database: + host: localhost + port: 5432 + name: beeline + user: postgres + password: postgres + +# Feature flags (optional) +features: + enableMetrics: true + enableSwagger: true +``` + +### Regenerating Environment Files + +After editing `config.yaml`, regenerate the `.env` files: + +```bash +npm run generate:env +``` + +This is required because: +- Docker 
Compose reads from `.env` files +- Vite reads frontend env vars from `/honeycomb/.env` +- Node.js reads backend env vars from `/hive/.env` + +--- + +## Development Workflow + +### Option 1: Local Development (Recommended for Active Development) + +Best for rapid iteration with instant hot reload: + +```bash +# Terminal 1: Start frontend +npm run dev -w honeycomb + +# Terminal 2: Start backend +npm run dev -w hive +``` + +| Service | URL | Hot Reload | +|---------|-----|------------| +| Frontend | http://localhost:3000 | Yes (Vite HMR) | +| Backend | http://localhost:4000 | Yes (tsx watch) | +| API Health | http://localhost:4000/health | - | + +### Option 2: Docker Development + +Best for testing Docker builds or when you need consistent environments: + +```bash +# Copy development overrides +cp docker-compose.override.yml.example docker-compose.override.yml + +# Start containers with hot reload +docker compose up + +# Or in detached mode +docker compose up -d + +# View logs +docker compose logs -f + +# Stop containers +docker compose down +``` + +### Option 3: Mixed Mode + +Run backend in Docker, frontend locally (useful for frontend-focused work): + +```bash +# Start only backend in Docker +docker compose up hive -d + +# Run frontend locally +npm run dev -w honeycomb +``` + +### Available NPM Scripts + +**Root level** (run from repository root): + +| Command | Description | +|---------|-------------| +| `npm run setup` | First-time setup (install + generate env) | +| `npm run generate:env` | Regenerate .env files from config.yaml | +| `npm run build` | Build all packages | +| `npm run build -w honeycomb` | Build frontend only | +| `npm run build -w hive` | Build backend only | +| `npm run lint` | Lint all packages | +| `npm run test` | Run all tests | +| `npm run clean` | Remove node_modules and build artifacts | + +**Frontend** (`/honeycomb`): + +| Command | Description | +|---------|-------------| +| `npm run dev` | Start Vite dev server with HMR | +| `npm run 
build` | Type-check and build for production | +| `npm run preview` | Preview production build locally | +| `npm run lint` | Lint with ESLint | +| `npm run test` | Run tests with Vitest | +| `npm run test:coverage` | Run tests with coverage report | + +**Backend** (`/hive`): + +| Command | Description | +|---------|-------------| +| `npm run dev` | Start with hot reload (tsx watch) | +| `npm run build` | Compile TypeScript to JavaScript | +| `npm run start` | Run compiled JavaScript | +| `npm run lint` | Lint with ESLint | +| `npm run test` | Run tests with Vitest | +| `npm run test:coverage` | Run tests with coverage report | + +--- + +## Working with the Frontend (honeycomb) + +### Tech Stack + +- **React 18** - UI library with hooks +- **TypeScript** - Type safety +- **Vite** - Build tool with instant HMR +- **React Router v6** - Client-side routing +- **Vitest** - Testing framework + +### Adding a New Page + +1. Create the page component: + +```tsx +// honeycomb/src/pages/UsersPage.tsx +import { useEffect, useState } from 'react'; +import { useApi } from '../hooks/useApi'; + +export function UsersPage() { + const { data, loading, error } = useApi('/api/users'); + + if (loading) return
<div>Loading...</div>;
+  if (error) return <div>Error: {error.message}</div>;
+
+  return (
+    <div>
+      <h1>Users</h1>
+      <ul>
+        {data?.map(user => (
+          <li key={user.id}>{user.name}</li>
+        ))}
+      </ul>
+    </div>
+ ); +} +``` + +2. Add the route in `App.tsx`: + +```tsx +// honeycomb/src/App.tsx +import { UsersPage } from './pages/UsersPage'; + +// Inside Routes: +} /> +``` + +### Adding a New Component + +```tsx +// honeycomb/src/components/Button.tsx +interface ButtonProps { + children: React.ReactNode; + onClick?: () => void; + variant?: 'primary' | 'secondary'; +} + +export function Button({ children, onClick, variant = 'primary' }: ButtonProps) { + return ( + + ); +} +``` + +### Making API Calls + +Use the provided `useApi` hook or the `api` service: + +```tsx +// Using the hook (recommended for components) +import { useApi } from '../hooks/useApi'; + +function MyComponent() { + const { data, loading, error, refetch } = useApi('/api/endpoint'); + // ... +} + +// Using the service directly (for non-component code) +import { api } from '../services/api'; + +async function fetchData() { + const response = await api.get('/api/endpoint'); + return response.data; +} +``` + +### Environment Variables in Frontend + +Access environment variables using `import.meta.env`: + +```tsx +// Only VITE_* prefixed variables are exposed to the frontend +const apiUrl = import.meta.env.VITE_API_URL; +const appName = import.meta.env.VITE_APP_NAME; +``` + +**Important**: Never put secrets in frontend environment variables. They are bundled into the JavaScript and visible to users. + +### Path Aliases + +Use `@/` to import from the `src` directory: + +```tsx +// Instead of: +import { Button } from '../../../components/Button'; + +// Use: +import { Button } from '@/components/Button'; +``` + +--- + +## Working with the Backend (hive) + +### Tech Stack + +- **Node.js 20** - Runtime +- **Express** - Web framework +- **TypeScript** - Type safety +- **tsx** - TypeScript execution with hot reload +- **Zod** - Runtime validation (recommended) +- **Vitest** - Testing framework + +### Adding a New API Endpoint + +1. 
Create the route file: + +```typescript +// hive/src/routes/users.ts +import { Router } from 'express'; +import type { Request, Response } from 'express'; + +const router = Router(); + +// GET /api/users +router.get('/', async (req: Request, res: Response) => { + try { + const users = await getUsersFromDatabase(); + res.json(users); + } catch (error) { + res.status(500).json({ error: 'Failed to fetch users' }); + } +}); + +// GET /api/users/:id +router.get('/:id', async (req: Request, res: Response) => { + const { id } = req.params; + try { + const user = await getUserById(id); + if (!user) { + return res.status(404).json({ error: 'User not found' }); + } + res.json(user); + } catch (error) { + res.status(500).json({ error: 'Failed to fetch user' }); + } +}); + +// POST /api/users +router.post('/', async (req: Request, res: Response) => { + const { name, email } = req.body; + try { + const user = await createUser({ name, email }); + res.status(201).json(user); + } catch (error) { + res.status(500).json({ error: 'Failed to create user' }); + } +}); + +export default router; +``` + +2. Register the route in `api.ts`: + +```typescript +// hive/src/routes/api.ts +import usersRouter from './users'; + +// Add to the router: +router.use('/users', usersRouter); +``` + +### Request Validation with Zod + +```typescript +// hive/src/routes/users.ts +import { z } from 'zod'; + +const createUserSchema = z.object({ + name: z.string().min(1).max(100), + email: z.string().email(), + age: z.number().int().positive().optional(), +}); + +router.post('/', async (req: Request, res: Response) => { + const result = createUserSchema.safeParse(req.body); + + if (!result.success) { + return res.status(400).json({ + error: 'Validation failed', + details: result.error.issues + }); + } + + const { name, email, age } = result.data; + // ... 
create user +}); +``` + +### Adding Middleware + +```typescript +// hive/src/middleware/auth.ts +import type { Request, Response, NextFunction } from 'express'; + +export function requireAuth(req: Request, res: Response, next: NextFunction) { + const token = req.headers.authorization?.replace('Bearer ', ''); + + if (!token) { + return res.status(401).json({ error: 'Authentication required' }); + } + + try { + const decoded = verifyToken(token); + req.user = decoded; + next(); + } catch { + res.status(401).json({ error: 'Invalid token' }); + } +} + +// Usage in routes: +router.get('/protected', requireAuth, (req, res) => { + res.json({ user: req.user }); +}); +``` + +### Logging + +Use the built-in logger for consistent structured logging: + +```typescript +import { logger } from '../utils/logger'; + +// Different log levels +logger.debug('Detailed debug info', { userId: 123 }); +logger.info('User logged in', { userId: 123 }); +logger.warn('Rate limit approaching', { currentRate: 95 }); +logger.error('Database connection failed', { error: err.message }); +``` + +### Environment Variables in Backend + +Access via `process.env` or the config module: + +```typescript +// Direct access +const port = process.env.PORT || 4000; + +// Or via config (recommended - adds validation) +import { config } from '../config'; +const port = config.port; +``` + +--- + +## Docker Development + +### Docker Compose Files + +| File | Purpose | +|------|---------| +| `docker-compose.yml` | Base configuration (production-like) | +| `docker-compose.override.yml` | Development overrides (hot reload, debug ports) | + +When you run `docker compose up`, Docker automatically merges both files. 
+ +### Building Images + +```bash +# Build all images +docker compose build + +# Build specific service +docker compose build honeycomb +docker compose build hive + +# Build with no cache (fresh build) +docker compose build --no-cache +``` + +### Running Containers + +```bash +# Start all services +docker compose up + +# Start in background +docker compose up -d + +# Start specific service +docker compose up hive + +# View logs +docker compose logs -f +docker compose logs -f hive # Specific service + +# Stop all services +docker compose down + +# Stop and remove volumes +docker compose down -v +``` + +### Debugging in Docker + +The development override exposes debug ports: + +- **Backend debug port**: 9229 (Node.js inspector) + +To debug the backend in VS Code: + +1. Add to `.vscode/launch.json`: + +```json +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Attach to Docker", + "type": "node", + "request": "attach", + "port": 9229, + "address": "localhost", + "localRoot": "${workspaceFolder}/hive", + "remoteRoot": "/app", + "restart": true + } + ] +} +``` + +2. Start containers: `docker compose up` +3. 
In VS Code, press F5 or select "Attach to Docker" + +### Useful Docker Commands + +```bash +# Execute command in running container +docker compose exec hive sh +docker compose exec honeycomb sh + +# View container resource usage +docker stats + +# Remove all stopped containers +docker container prune + +# Remove unused images +docker image prune +``` + +--- + +## Testing + +### Running Tests + +```bash +# Run all tests +npm run test + +# Run tests for specific package +npm run test -w honeycomb +npm run test -w hive + +# Run with coverage +npm run test:coverage -w honeycomb +npm run test:coverage -w hive + +# Run in watch mode (re-runs on file changes) +cd honeycomb && npm run test -- --watch +cd hive && npm run test -- --watch +``` + +### Writing Frontend Tests + +```tsx +// honeycomb/src/components/Button.test.tsx +import { render, screen, fireEvent } from '@testing-library/react'; +import { describe, it, expect, vi } from 'vitest'; +import { Button } from './Button'; + +describe('Button', () => { + it('renders children', () => { + render(); + expect(screen.getByText('Click me')).toBeInTheDocument(); + }); + + it('calls onClick when clicked', () => { + const handleClick = vi.fn(); + render(); + + fireEvent.click(screen.getByText('Click me')); + + expect(handleClick).toHaveBeenCalledTimes(1); + }); +}); +``` + +### Writing Backend Tests + +```typescript +// hive/src/routes/health.test.ts +import { describe, it, expect } from 'vitest'; +import request from 'supertest'; +import { app } from '../server'; + +describe('Health Routes', () => { + it('GET /health returns healthy status', async () => { + const response = await request(app).get('/health'); + + expect(response.status).toBe(200); + expect(response.body).toMatchObject({ + status: 'healthy', + }); + }); + + it('GET /health/ready returns ready status', async () => { + const response = await request(app).get('/health/ready'); + + expect(response.status).toBe(200); + expect(response.body.ready).toBe(true); + }); 
+}); +``` + +--- + +## Code Style & Conventions + +### TypeScript + +- **Strict mode enabled** - No implicit any, strict null checks +- **Explicit return types** on exported functions +- **Interface over type** for object shapes (unless unions needed) +- **Readonly** where possible + +```typescript +// Good +interface User { + readonly id: string; + name: string; + email: string; +} + +export function getUser(id: string): Promise { + // ... +} + +// Avoid +export function getUser(id) { // Missing types + // ... +} +``` + +### React Components + +- **Functional components** only (no class components) +- **Named exports** for components +- **Props interface** defined above component + +```tsx +// Good +interface ButtonProps { + children: React.ReactNode; + onClick?: () => void; +} + +export function Button({ children, onClick }: ButtonProps) { + return ; +} + +// Avoid +export default function({ children, onClick }) { // Missing types, default export + return ; +} +``` + +### File Naming + +| Type | Convention | Example | +|------|------------|---------| +| Components | PascalCase | `UserCard.tsx` | +| Hooks | camelCase with `use` prefix | `useAuth.ts` | +| Utilities | camelCase | `formatDate.ts` | +| Types | PascalCase | `User.ts` or in `types/index.ts` | +| Tests | Same as file + `.test` | `UserCard.test.tsx` | +| Styles | Same as component | `UserCard.css` | + +### Import Order + +1. External packages +2. Internal absolute imports (`@/...`) +3. Relative imports +4. 
Style imports + +```tsx +// External +import { useState, useEffect } from 'react'; +import { useNavigate } from 'react-router-dom'; + +// Internal absolute +import { Button } from '@/components/Button'; +import { useApi } from '@/hooks/useApi'; + +// Relative +import { formatUserName } from './utils'; + +// Styles +import './UserCard.css'; +``` + +--- + +## Git Workflow + +### Branch Naming + +``` +feature/add-user-authentication +bugfix/fix-login-redirect +hotfix/security-patch +chore/update-dependencies +docs/improve-readme +``` + +### Commit Messages + +Follow [Conventional Commits](https://www.conventionalcommits.org/): + +``` +(): + +[optional body] + +[optional footer] +``` + +**Types:** +- `feat` - New feature +- `fix` - Bug fix +- `docs` - Documentation only +- `style` - Formatting, missing semicolons, etc. +- `refactor` - Code change that neither fixes a bug nor adds a feature +- `test` - Adding or updating tests +- `chore` - Maintenance tasks + +**Examples:** + +``` +feat(auth): add JWT authentication + +fix(api): handle null response from external service + +docs(readme): update installation instructions + +chore(deps): update React to 18.2.0 +``` + +### Pull Request Process + +1. Create a feature branch from `main` +2. Make your changes with clear commits +3. Run tests locally: `npm run test` +4. Run linting: `npm run lint` +5. Push and create a PR +6. Fill out the PR template +7. Request review from CODEOWNERS +8. Address feedback +9. Squash and merge when approved + +--- + +## Debugging + +### Frontend Debugging + +**React Developer Tools:** +1. Install the [React DevTools browser extension](https://react.dev/learn/react-developer-tools) +2. Open browser DevTools → React tab +3. Inspect component tree, props, state, and hooks + +**VS Code Debugging:** +1. 
Add Chrome debug configuration to `.vscode/launch.json`: + +```json +{ + "type": "chrome", + "request": "launch", + "name": "Debug Frontend", + "url": "http://localhost:3000", + "webRoot": "${workspaceFolder}/honeycomb/src" +} +``` + +2. Start the dev server: `npm run dev -w honeycomb` +3. Press F5 in VS Code + +### Backend Debugging + +**VS Code Debugging:** +1. Add Node debug configuration: + +```json +{ + "type": "node", + "request": "launch", + "name": "Debug Backend", + "runtimeExecutable": "npm", + "runtimeArgs": ["run", "dev"], + "cwd": "${workspaceFolder}/hive", + "console": "integratedTerminal" +} +``` + +2. Set breakpoints in your code +3. Press F5 to start debugging + +**Logging:** +```typescript +import { logger } from '../utils/logger'; + +// Add debug logs +logger.debug('Processing request', { + userId: req.user.id, + body: req.body +}); +``` + +--- + +## Common Tasks + +### Adding a New Dependency + +```bash +# Add to frontend +npm install -w honeycomb + +# Add to backend +npm install -w hive + +# Add dev dependency +npm install -D -w honeycomb + +# Add to root (shared tooling) +npm install -D -w . +``` + +### Updating Dependencies + +```bash +# Check for outdated packages +npm outdated + +# Update all to latest minor/patch +npm update + +# Update specific package +npm install @latest -w honeycomb +``` + +### Adding Environment Variables + +1. Add to `config.yaml.example` (template): + +```yaml +myService: + apiKey: "your-api-key-here" +``` + +2. Add to your local `config.yaml`: + +```yaml +myService: + apiKey: "actual-api-key" +``` + +3. Update `scripts/generate-env.ts` to output the new variable + +4. Regenerate env files: + +```bash +npm run generate:env +``` + +5. 
Access in code: + +```typescript +// Backend +const apiKey = process.env.MY_SERVICE_API_KEY; + +// Frontend (must be prefixed with VITE_) +const apiKey = import.meta.env.VITE_MY_SERVICE_API_KEY; +``` + +### Database Migrations (when added) + +```bash +# Create a new migration +npm run migration:create -w hive -- --name add-users-table + +# Run pending migrations +npm run migration:run -w hive + +# Rollback last migration +npm run migration:rollback -w hive +``` + +--- + +## Troubleshooting + +### Port Already in Use + +```bash +# Find process using port +lsof -i :3000 +lsof -i :4000 + +# Kill process +kill -9 + +# Or change ports in config.yaml and regenerate +``` + +### Node Modules Issues + +```bash +# Clean everything and reinstall +npm run clean +rm -rf node_modules package-lock.json +npm install +``` + +### Docker Issues + +```bash +# Reset Docker state +docker compose down -v +docker system prune -f +docker compose build --no-cache +docker compose up +``` + +### TypeScript Errors After Pull + +```bash +# Rebuild TypeScript +npm run build + +# Or restart TS server in VS Code +# Cmd/Ctrl + Shift + P → "TypeScript: Restart TS Server" +``` + +### Environment Variables Not Loading + +```bash +# Regenerate from config.yaml +npm run generate:env + +# Verify files exist +cat .env +cat honeycomb/.env +cat hive/.env + +# Restart dev servers after changing env +``` + +### Tests Failing + +```bash +# Run with verbose output +npm run test -w honeycomb -- --reporter=verbose + +# Run single test file +npm run test -w honeycomb -- src/components/Button.test.tsx + +# Clear test cache +npm run test -w honeycomb -- --clearCache +``` + +--- + +## Getting Help + +- **Documentation**: Check the `/docs` folder +- **Issues**: Search [existing issues](https://github.com/adenhq/beeline/issues) +- **Discord**: Join our [community](https://discord.com/invite/MXE49hrKDk) +- **Code Review**: Tag a maintainer on your PR + +--- + +*Happy coding!* 🐝 diff --git a/LICENSE b/LICENSE new file 
mode 100644 index 00000000..f72c3e53 --- /dev/null +++ b/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2024 Aden + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 00000000..0b3b4ae4 --- /dev/null +++ b/README.md @@ -0,0 +1,150 @@ +# Beeline + +Beeline Instrumentation for your AI agents + +

+ Beeline Banner +

+ +[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/adenhq/beeline/blob/main/LICENSE) +[![Y Combinator](https://img.shields.io/badge/Y%20Combinator-Aden-orange)](https://www.ycombinator.com/companies/aden) +[![Docker Pulls](https://img.shields.io/docker/pulls/adenhq/beeline?logo=Docker&labelColor=%23528bff)](https://hub.docker.com/u/adenhq) +[![Discord](https://img.shields.io/discord/1172610340073242735?logo=discord&labelColor=%235462eb&logoColor=%23f5f5f5&color=%235462eb)](https://discord.com/invite/MXE49hrKDk) +[![Twitter Follow](https://img.shields.io/twitter/follow/aden_hq?logo=X&color=%23f5f5f5)](https://x.com/aden_hq) +[![LinkedIn](https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff)](https://www.linkedin.com/company/teamaden/) + +## Overview + +Beeline provides advanced runtime control for your AI agents, enabling you to observe, intervene, and dynamically adjust agent behavior as it executes. By giving you real-time visibility and control, Beeline helps you build more reliable AI systems—catching and correcting issues during execution rather than reacting after failures occur. + +Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and guides. 
+ +## Quick Links + +- **[Documentation](https://docs.adenhq.com/)** - Complete guides and API reference +- **[Self-Hosting Guide](https://docs.adenhq.com/getting-started/quickstart)** - Deploy Beeline on your infrastructure +- **[Changelog](https://github.com/adenhq/beeline/releases)** - Latest updates and releases + +- **[Report Issues](https://github.com/adenhq/beeline/issues)** - Bug reports and feature requests + +## Quick Start + +### Prerequisites + +- [Docker](https://docs.docker.com/get-docker/) (v20.10+) +- [Docker Compose](https://docs.docker.com/compose/install/) (v2.0+) + +### Installation + +```bash +# Clone the repository +git clone https://github.com/adenhq/beeline.git +cd beeline + +# Copy and configure +cp config.yaml.example config.yaml + +# Run setup and start services +npm run setup +docker compose up +``` + +**Access the application:** + +- Dashboard: http://localhost:3000 +- API: http://localhost:4000 +- Health: http://localhost:4000/health + +## Features + +- **Observe** - Real-time visibility into agent execution, decisions, and performance +- **Metrics & Analytics** - Track costs, latency, and token usage with TimescaleDB +- **Cost Control** - Set budgets and policies to manage LLM spending +- **Real-time Events** - WebSocket streaming for live agent monitoring +- **Self-Hostable** - Deploy on your own infrastructure with full control +- **Production-Ready** - Built for scale and reliability + +## Project Structure + +``` +beeline/ +├── honeycomb/ # Frontend (React + TypeScript + Vite) +├── hive/ # Backend (Node.js + TypeScript + Express) +├── docs/ # Documentation +├── scripts/ # Build and utility scripts +├── config.yaml.example # Configuration template +└── docker-compose.yml # Container orchestration +``` + +## Development + +### Local Development with Hot Reload + +```bash +# Copy development overrides +cp docker-compose.override.yml.example docker-compose.override.yml + +# Start with hot reload enabled +docker compose up +``` + +### 
Running Without Docker + +```bash +# Install dependencies +npm install + +# Generate environment files +npm run generate:env + +# Start frontend (in honeycomb/) +cd honeycomb && npm run dev + +# Start backend (in hive/) +cd hive && npm run dev +``` + +## Documentation + +- **[Developer Guide](DEVELOPER.md)** - Comprehensive guide for developers +- [Getting Started](docs/getting-started.md) - Quick setup instructions +- [Configuration Guide](docs/configuration.md) - All configuration options +- [Architecture Overview](docs/architecture.md) - System design and structure + +## Community & Support + +We use [Discord](https://discord.com/invite/MXE49hrKDk) for support, feature requests, and community discussions. + +- Discord - [Join our community](https://discord.com/invite/MXE49hrKDk) +- Twitter/X - [@aden_hq](https://x.com/aden_hq) +- LinkedIn - [Company Page](https://www.linkedin.com/company/teamaden/) + +## Contributing + +We welcome contributions! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines. + +1. Fork the repository +2. Create your feature branch (`git checkout -b feature/amazing-feature`) +3. Commit your changes (`git commit -m 'Add amazing feature'`) +4. Push to the branch (`git push origin feature/amazing-feature`) +5. Open a Pull Request + +## Join Our Team + +**We're hiring!** Join us in engineering, research, and go-to-market roles. + +[View Open Positions](https://jobs.adenhq.com/a8cec478-cdbc-473c-bbd4-f4b7027ec193/applicant) + +## Security + +For security concerns, please see [SECURITY.md](SECURITY.md). + +## License + +This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details. + +--- + +

+ Made with care by the Aden team +

diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..184d80d0 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,53 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.x.x | :white_check_mark: | + +## Reporting a Vulnerability + +We take security vulnerabilities seriously. If you discover a security issue, please report it responsibly. + +### How to Report + +**Please do NOT report security vulnerabilities through public GitHub issues.** + +Instead, please send an email to contact@adenhq.com with: + +1. A description of the vulnerability +2. Steps to reproduce the issue +3. Potential impact of the vulnerability +4. Any possible mitigations you've identified + +### What to Expect + +- **Acknowledgment**: We will acknowledge receipt of your report within 48 hours +- **Communication**: We will keep you informed of our progress +- **Resolution**: We aim to resolve critical vulnerabilities within 7 days +- **Credit**: We will credit you in our security advisories (unless you prefer to remain anonymous) + +### Safe Harbor + +We consider security research conducted in accordance with this policy to be: + +- Authorized concerning any applicable anti-hacking laws +- Authorized concerning any relevant anti-circumvention laws +- Exempt from restrictions in our Terms of Service that would interfere with conducting security research + +## Security Best Practices for Users + +1. **Keep Updated**: Always run the latest version +2. **Secure Configuration**: Review `config.yaml` settings, especially in production +3. **Environment Variables**: Never commit `.env` files or `config.yaml` with secrets +4. **Network Security**: Use HTTPS in production, configure firewalls appropriately +5. 
**Database Security**: Use strong passwords, limit network access + +## Security Features + +- Environment-based configuration (no hardcoded secrets) +- Input validation on API endpoints +- Secure session handling +- CORS configuration +- Rate limiting (configurable) diff --git a/config.yaml.example b/config.yaml.example new file mode 100644 index 00000000..877007b9 --- /dev/null +++ b/config.yaml.example @@ -0,0 +1,118 @@ +# Beeline Configuration +# ====================== +# Copy this file to config.yaml and customize for your environment. +# Run `npm run setup` to generate .env files from this configuration. +# +# For detailed documentation, see: docs/configuration.md + +# ----------------------------------------------------------------------------- +# Application Settings +# ----------------------------------------------------------------------------- +app: + # Application name (displayed in UI and logs) + name: Beeline + + # Environment: development, production, or test + environment: development + + # Log level: debug, info, warn, error + log_level: info + +# ----------------------------------------------------------------------------- +# Server Configuration +# ----------------------------------------------------------------------------- +server: + # Frontend settings + frontend: + # Port for the frontend application + port: 3000 + + # Backend (Hive) settings + backend: + # Port for the backend API + port: 4000 + + # Host to bind to (0.0.0.0 for all interfaces) + host: 0.0.0.0 + +# ----------------------------------------------------------------------------- +# TimescaleDB Configuration (Time-series metrics storage) +# ----------------------------------------------------------------------------- +timescaledb: + # Connection URL for TimescaleDB + # Format: postgresql://user:password@host:port/database + url: postgresql://postgres:postgres@localhost:5432/aden_tsdb + + # External port mapping (for docker-compose) + port: 5432 + +# 
----------------------------------------------------------------------------- +# MongoDB Configuration (Policies, pricing, control config) +# ----------------------------------------------------------------------------- +mongodb: + # Connection URL for MongoDB + url: mongodb://localhost:27017 + + # Database name for main data + database: aden + + # Database name for ERP data + erp_database: erp + + # External port mapping (for docker-compose) + port: 27017 + +# ----------------------------------------------------------------------------- +# Redis Configuration (Caching and Socket.IO) +# ----------------------------------------------------------------------------- +redis: + # Connection URL for Redis + url: redis://localhost:6379 + + # External port mapping (for docker-compose) + port: 6379 + +# ----------------------------------------------------------------------------- +# Authentication & Security +# ----------------------------------------------------------------------------- +auth: + # JWT secret key - CHANGE THIS IN PRODUCTION! + # Generate with: openssl rand -base64 32 + jwt_secret: change-this-to-a-secure-random-string-min-32-chars + + # JWT token expiration (e.g., 1h, 7d, 30d) + jwt_expires_in: 7d + + # Passphrase for additional encryption - CHANGE THIS IN PRODUCTION! 
+ passphrase: change-this-to-a-secure-passphrase + +# ----------------------------------------------------------------------------- +# NPM Configuration +# ----------------------------------------------------------------------------- +npm: + # NPM token for private package access (if needed) + token: "" + +# ----------------------------------------------------------------------------- +# CORS Configuration +# ----------------------------------------------------------------------------- +cors: + # Allowed origin for CORS requests + # In production, set this to your frontend URL + origin: http://localhost:3000 + +# ----------------------------------------------------------------------------- +# Feature Flags +# ----------------------------------------------------------------------------- +features: + # Enable user registration + registration: true + + # Enable API rate limiting + rate_limiting: false + + # Enable request logging + request_logging: true + + # Enable MCP (Model Context Protocol) server + mcp_server: true diff --git a/docker-compose.override.yml.example b/docker-compose.override.yml.example new file mode 100644 index 00000000..caea0365 --- /dev/null +++ b/docker-compose.override.yml.example @@ -0,0 +1,37 @@ +# Development overrides +# Copy this file to docker-compose.override.yml for local development +# +# Usage: +# cp docker-compose.override.yml.example docker-compose.override.yml +# docker compose up +# +# This enables: +# - Hot reload for both frontend and backend +# - Source code mounted as volumes +# - Debug ports exposed +# - Development environment settings + +services: + honeycomb: + build: + context: ./honeycomb + dockerfile: Dockerfile.dev + volumes: + - ./honeycomb/src:/app/src:ro + - ./honeycomb/public:/app/public:ro + - ./honeycomb/index.html:/app/index.html:ro + environment: + - VITE_API_URL=http://localhost:4000 + + hive: + build: + context: ./hive + dockerfile: Dockerfile.dev + volumes: + - ./hive/src:/app/src:ro + environment: + - 
NODE_ENV=development + - LOG_LEVEL=debug + # Uncomment to enable Node.js debugging + # ports: + # - "9229:9229" diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..bc9552dc --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,138 @@ +services: + # Frontend - React application + honeycomb: + build: + context: ./honeycomb + target: production + args: + VITE_API_URL: ${VITE_API_URL:-http://localhost:4000} + container_name: honeycomb-frontend + ports: + - "${FRONTEND_PORT:-3000}:3000" + depends_on: + hive: + condition: service_healthy + restart: unless-stopped + networks: + - honeycomb-network + + # Backend - Hive API (LLM observability & control plane) + hive: + build: + context: ./hive + target: production + args: + NPM_TOKEN: ${NPM_TOKEN:-} + container_name: honeycomb-backend + ports: + - "${BACKEND_PORT:-4000}:4000" + environment: + - NODE_ENV=${NODE_ENV:-production} + - PORT=4000 + - LOG_LEVEL=${LOG_LEVEL:-info} + # PostgreSQL (TimescaleDB) + - TSDB_PG_URL=postgresql://postgres:postgres@timescaledb:5432/aden_tsdb + # MongoDB + - MONGODB_URL=mongodb://mongodb:27017 + - MONGODB_DBNAME=${MONGODB_DBNAME:-aden} + - MONGODB_ERP_DBNAME=${MONGODB_ERP_DBNAME:-erp} + # Redis + - REDIS_URL=redis://redis:6379 + # Authentication + - JWT_SECRET=${JWT_SECRET:-change-me-in-production-use-min-32-chars} + - PASSPHRASE=${PASSPHRASE:-change-me-in-production} + # Hive backend URL for SDK quickstart documents + - HIVE_HOST=${HIVE_HOST:-http://localhost:4000} + depends_on: + timescaledb: + condition: service_healthy + mongodb: + condition: service_healthy + redis: + condition: service_healthy + healthcheck: + test: + [ + "CMD", + "node", + "-e", + "fetch('http://localhost:4000/health').then(r => process.exit(r.ok ? 
0 : 1)).catch(() => process.exit(1))", + ] + interval: 10s + timeout: 5s + retries: 5 + start_period: 15s + restart: unless-stopped + networks: + - honeycomb-network + + # TimescaleDB - Time series database for LLM metrics + timescaledb: + image: timescale/timescaledb:latest-pg16 + container_name: honeycomb-timescaledb + ports: + - "${TSDB_PORT:-5432}:5432" + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=aden_tsdb + volumes: + - timescaledb_data:/var/lib/postgresql/data + # Auto-run schema files on first startup (alphabetical order) + - ./hive/src/services/tsdb/00-init-timescaledb.sql:/docker-entrypoint-initdb.d/00-init-timescaledb.sql:ro + - ./hive/src/services/tsdb/schema.sql:/docker-entrypoint-initdb.d/01-schema.sql:ro + - ./hive/src/services/tsdb/users_schema.sql:/docker-entrypoint-initdb.d/02-users.sql:ro + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres -d aden_tsdb"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 10s + restart: unless-stopped + networks: + - honeycomb-network + + # MongoDB - Policies, pricing, and control configuration + mongodb: + image: mongo:7 + container_name: honeycomb-mongodb + ports: + - "${MONGODB_PORT:-27017}:27017" + volumes: + - mongodb_data:/data/db + healthcheck: + test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 10s + restart: unless-stopped + networks: + - honeycomb-network + + # Redis - Caching and Socket.IO adapter + redis: + image: redis:7-alpine + container_name: honeycomb-redis + ports: + - "${REDIS_PORT:-6379}:6379" + volumes: + - redis_data:/data + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 5s + restart: unless-stopped + networks: + - honeycomb-network + +networks: + honeycomb-network: + driver: bridge + +volumes: + timescaledb_data: + mongodb_data: + redis_data: diff --git a/docs/architecture.md b/docs/architecture.md 
new file mode 100644 index 00000000..27cff67f --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,222 @@ +# Architecture Overview + +This document describes the high-level architecture of Beeline. + +## System Overview + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Client │ +│ (Web Browser) │ +└─────────────────────────┬───────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Docker Network │ +│ │ +│ ┌─────────────────────┐ ┌─────────────────────────┐ │ +│ │ honeycomb │ │ hive │ │ +│ │ (Frontend) │ ───▶ │ (Backend) │ │ +│ │ │ │ │ │ +│ │ React + Vite │ │ Express + TypeScript │ │ +│ │ Port: 3000 │ │ Port: 4000 │ │ +│ │ │ │ │ │ +│ │ ┌───────────────┐ │ │ ┌─────────────────┐ │ │ +│ │ │ Nginx │ │ │ │ Routes │ │ │ +│ │ │ (production) │ │ │ │ /api, /health │ │ │ +│ │ └───────────────┘ │ │ └────────┬────────┘ │ │ +│ └─────────────────────┘ │ │ │ │ +│ │ ▼ │ │ +│ │ ┌─────────────────┐ │ │ +│ │ │ Controllers │ │ │ +│ │ └────────┬────────┘ │ │ +│ │ │ │ │ +│ │ ▼ │ │ +│ │ ┌─────────────────┐ │ │ +│ │ │ Services │ │ │ +│ │ └────────┬────────┘ │ │ +│ │ │ │ │ +│ └───────────┼─────────────┘ │ +└───────────────────────────────────────────┼────────────────┘ + │ + ▼ + ┌─────────────────────────┐ + │ Database │ + │ (PostgreSQL/etc) │ + └─────────────────────────┘ +``` + +## Components + +### Frontend (honeycomb/) + +The frontend is a single-page application built with: + +- **React 18** - UI library +- **TypeScript** - Type safety +- **Vite** - Build tool and dev server +- **React Router** - Client-side routing + +**Key Directories:** + +| Directory | Purpose | +|-----------|---------| +| `src/components/` | Reusable UI components | +| `src/pages/` | Page-level components (routes) | +| `src/hooks/` | Custom React hooks | +| `src/services/` | API client and external services | +| `src/types/` | TypeScript type definitions | +| `src/utils/` | Utility functions | +| `src/styles/` | Global styles and CSS | + 
+**Production Build:** +- Vite builds static assets +- Nginx serves the built files +- API requests proxied to backend + +### Backend (hive/) + +The backend is a RESTful API built with: + +- **Express** - Web framework +- **TypeScript** - Type safety +- **Zod** - Runtime validation +- **Helmet** - Security headers + +**Key Directories:** + +| Directory | Purpose | +|-----------|---------| +| `src/routes/` | API route definitions | +| `src/controllers/` | Request handlers | +| `src/services/` | Business logic | +| `src/middleware/` | Express middleware | +| `src/models/` | Data models | +| `src/types/` | TypeScript types | +| `src/utils/` | Utility functions | +| `src/config/` | Configuration loading | + +**API Structure:** + +``` +GET /health # Health check endpoints +GET /health/ready # Readiness probe +GET /health/live # Liveness probe + +GET /api # API info +GET /api/users # Example resource +``` + +## Request Flow + +1. **Client** makes HTTP request +2. **Nginx** (production) or **Vite** (dev) receives request +3. Static assets served directly; API requests proxied +4. **Express** receives API request +5. **Middleware** processes (auth, logging, validation) +6. **Router** matches route to controller +7. **Controller** handles request, calls services +8. **Service** executes business logic +9. 
**Response** returned to client + +## Configuration System + +``` +config.yaml + │ + ▼ +generate-env.ts ──────────────────┐ + │ │ + ▼ ▼ +.env (root) honeycomb/.env + │ │ + ▼ ▼ +docker-compose.yml Vite (frontend) + │ + ▼ +hive/.env + │ + ▼ +Express (backend) +``` + +## Docker Architecture + +**Production:** +``` +docker-compose.yml +├── honeycomb (frontend) +│ └── Dockerfile (multi-stage: build → nginx) +└── hive (backend) + └── Dockerfile (multi-stage: build → node) +``` + +**Development:** +``` +docker-compose.yml + docker-compose.override.yml +├── honeycomb (frontend) +│ └── Dockerfile.dev (vite dev server) +└── hive (backend) + └── Dockerfile.dev (tsx watch) +``` + +## Scaling Considerations + +### Horizontal Scaling + +Both frontend and backend are stateless and can be scaled horizontally: + +```yaml +# docker-compose.yml +services: + hive: + deploy: + replicas: 3 +``` + +### Database + +- Use connection pooling +- Consider read replicas for heavy read loads +- Implement caching layer if needed + +### Caching + +Options for caching: +- Redis for session/cache storage +- CDN for static assets +- HTTP caching headers + +## Security + +### Frontend +- Served over HTTPS (configure in nginx/reverse proxy) +- CSP headers via nginx +- No sensitive data in client code + +### Backend +- Helmet.js for security headers +- CORS configured for specific origins +- Input validation with Zod +- JWT for authentication +- Rate limiting (configurable) + +## Monitoring + +### Health Checks +- `/health` - Overall health +- `/health/ready` - Ready to accept traffic +- `/health/live` - Process is alive + +### Logging +- Structured JSON logs in production +- Configurable log levels +- Request logging via Morgan + +## Development Workflow + +1. Edit code in `honeycomb/` or `hive/` +2. Hot reload updates automatically +3. Run tests: `npm run test` +4. Lint: `npm run lint` +5. 
Build: `npm run build` diff --git a/docs/configuration.md b/docs/configuration.md new file mode 100644 index 00000000..d15834b8 --- /dev/null +++ b/docs/configuration.md @@ -0,0 +1,189 @@ +# Configuration Guide + +Beeline uses a centralized configuration system based on a single `config.yaml` file. This makes it easy to configure the entire application from one place. + +## Configuration Flow + +``` +config.yaml --> generate-env.ts --> .env files + ├── .env (root, for Docker) + ├── honeycomb/.env (frontend) + └── hive/.env (backend) +``` + +## Getting Started + +1. Copy the example configuration: + ```bash + cp config.yaml.example config.yaml + ``` + +2. Edit `config.yaml` with your settings + +3. Generate environment files: + ```bash + npm run generate:env + ``` + +## Configuration Options + +### Application Settings + +```yaml +app: + # Application name - displayed in UI and logs + name: Beeline + + # Environment mode + # - development: enables debug features, verbose logging + # - production: optimized for performance, minimal logging + # - test: for running tests + environment: development + + # Log level: debug, info, warn, error + log_level: info +``` + +### Server Configuration + +```yaml +server: + frontend: + # Port for the React frontend + port: 3000 + + backend: + # Port for the Node.js API + port: 4000 + + # Host to bind (0.0.0.0 = all interfaces) + host: 0.0.0.0 +``` + +### Database Configuration + +```yaml +database: + # PostgreSQL connection URL + url: postgresql://user:password@localhost:5432/beeline + + # For SQLite (local development) + # url: sqlite:./data/beeline.db +``` + +**Connection URL Format:** +``` +postgresql://[user]:[password]@[host]:[port]/[database] +``` + +### Authentication + +```yaml +auth: + # JWT secret key for signing tokens + # IMPORTANT: Change this in production! 
+ # Generate with: openssl rand -base64 32 + jwt_secret: your-secret-key + + # Token expiration time + # Examples: 1h, 7d, 30d + jwt_expires_in: 7d +``` + +### CORS Configuration + +```yaml +cors: + # Allowed origin for cross-origin requests + # Set to your frontend URL in production + origin: http://localhost:3000 +``` + +### Feature Flags + +```yaml +features: + # Enable/disable user registration + registration: true + + # Enable API rate limiting + rate_limiting: false + + # Enable request logging + request_logging: true +``` + +## Environment-Specific Configuration + +You can create environment-specific config files: + +- `config.yaml` - Your main configuration (git-ignored) +- `config.yaml.example` - Template with safe defaults (committed) + +For different environments, you might want separate files: + +```bash +# Development +cp config.yaml.example config.yaml +# Edit for development settings + +# Production +cp config.yaml.example config.production.yaml +# Edit for production settings +``` + +## Docker Compose Integration + +The root `.env` file is used by Docker Compose. Key variables: + +| Variable | Description | Default | +|----------|-------------|---------| +| `FRONTEND_PORT` | Frontend container port | 3000 | +| `BACKEND_PORT` | Backend container port | 4000 | +| `NODE_ENV` | Node environment | production | +| `DATABASE_URL` | Database connection | - | +| `JWT_SECRET` | Auth secret key | - | + +## Security Best Practices + +1. **Never commit `config.yaml`** - It may contain secrets +2. **Use strong JWT secrets** - Generate with `openssl rand -base64 32` +3. **Restrict CORS in production** - Set to your exact frontend URL +4. 
**Use environment variables for CI/CD** - Override config in deployments + +## Updating Configuration + +After changing `config.yaml`: + +```bash +# Regenerate .env files +npm run generate:env + +# Restart services +docker compose restart +# or +docker compose up --build +``` + +## Troubleshooting + +### Changes Not Taking Effect + +1. Ensure you ran `npm run generate:env` +2. Restart the services +3. Check if the correct `.env` file is being loaded + +### Configuration Validation Errors + +The backend validates configuration on startup. Check logs for specific errors: + +```bash +docker compose logs hive +``` + +### Missing Environment Variables + +If a required variable is missing, add it to: +1. `config.yaml.example` (with safe default) +2. `config.yaml` (with your value) +3. `scripts/generate-env.ts` (to generate it) diff --git a/docs/getting-started.md b/docs/getting-started.md new file mode 100644 index 00000000..a8a815f4 --- /dev/null +++ b/docs/getting-started.md @@ -0,0 +1,143 @@ +# Getting Started + +This guide will help you get Beeline running on your local machine. + +## Prerequisites + +- **Docker** (v20.10+) and **Docker Compose** (v2.0+) - for containerized deployment +- **Node.js** (v20+) - for local development without Docker + +## Quick Start with Docker + +The fastest way to get started is using Docker Compose: + +```bash +# 1. Clone the repository +git clone https://github.com/adenhq/beeline.git +cd beeline + +# 2. Copy and configure +cp config.yaml.example config.yaml + +# 3. Run setup +npm run setup + +# 4. Start services +docker compose up +``` + +The application will be available at: +- **Frontend**: http://localhost:3000 +- **Backend API**: http://localhost:4000 +- **Health Check**: http://localhost:4000/health + +## Development Setup + +For local development with hot reload: + +```bash +# 1. Clone and configure (same as above) +git clone https://github.com/adenhq/beeline.git +cd beeline +cp config.yaml.example config.yaml + +# 2. 
Install dependencies +npm install + +# 3. Generate environment files +npm run generate:env + +# 4. Start frontend (terminal 1) +cd honeycomb +npm run dev + +# 5. Start backend (terminal 2) +cd hive +npm run dev +``` + +### Using Docker for Development + +You can also use Docker with hot reload enabled: + +```bash +# Copy development overrides +cp docker-compose.override.yml.example docker-compose.override.yml + +# Start with hot reload +docker compose up +``` + +## Project Structure + +``` +beeline/ +├── honeycomb/ # Frontend (React + TypeScript + Vite) +│ ├── src/ +│ │ ├── components/ # Reusable UI components +│ │ ├── pages/ # Page components +│ │ ├── hooks/ # Custom React hooks +│ │ ├── services/ # API client and services +│ │ ├── types/ # TypeScript type definitions +│ │ └── utils/ # Utility functions +│ └── public/ # Static assets +│ +├── hive/ # Backend (Node.js + TypeScript + Express) +│ └── src/ +│ ├── controllers/ # Request handlers +│ ├── middleware/ # Express middleware +│ ├── models/ # Data models +│ ├── routes/ # API routes +│ ├── services/ # Business logic +│ ├── types/ # TypeScript types +│ └── utils/ # Utility functions +│ +├── docs/ # Documentation +├── scripts/ # Build and utility scripts +└── config.yaml # Application configuration +``` + +## Next Steps + +1. **Configure the Application**: See [Configuration Guide](configuration.md) +2. **Understand the Architecture**: See [Architecture Overview](architecture.md) +3. 
**Start Building**: Add your own components and API endpoints + +## Troubleshooting + +### Port Already in Use + +If ports 3000 or 4000 are in use, update `config.yaml`: + +```yaml +server: + frontend: + port: 3001 # Change to available port + backend: + port: 4001 +``` + +Then regenerate environment files: + +```bash +npm run generate:env +``` + +### Docker Build Fails + +Clear Docker cache and rebuild: + +```bash +docker compose down +docker compose build --no-cache +docker compose up +``` + +### Dependencies Issues + +Clear node_modules and reinstall: + +```bash +npm run clean +npm install +``` diff --git a/hive/.env.example b/hive/.env.example new file mode 100644 index 00000000..330b93ba --- /dev/null +++ b/hive/.env.example @@ -0,0 +1,28 @@ +# Server Configuration +PORT=4000 +NODE_ENV=development + +# TSDB PostgreSQL (TimescaleDB) +TSDB_PG_URL=postgresql://user:password@localhost:5432/aden_tsdb + +# User Database (MySQL - read-only access) +MYSQL_HOST=localhost +MYSQL_PORT=3306 +MYSQL_USER=aden_reader +MYSQL_PASSWORD= +MYSQL_DATABASE=aden + +# MongoDB (policies and pricing data) +MONGODB_URL=mongodb://localhost:27017 +MONGODB_DBNAME=aden +MONGODB_ERP_DBNAME=erp + +# Redis (caching and socket.io adapter) +REDIS_URL=redis://localhost:6379 + +# JWT Authentication +JWT_SECRET=your-jwt-secret +PASSPHRASE=your-passphrase + +# Logging +LOG_LEVEL=info diff --git a/hive/Dockerfile b/hive/Dockerfile new file mode 100644 index 00000000..89819395 --- /dev/null +++ b/hive/Dockerfile @@ -0,0 +1,66 @@ +# Build stage +FROM node:20-alpine AS builder + +ARG NPM_TOKEN + +WORKDIR /app + +# Configure npm for private packages (@acho-inc/administration) +RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc + +# Copy package files +COPY package*.json ./ +COPY tsconfig.json ./ + +# Install all dependencies (including dev for TypeScript build) +RUN npm install + +# Copy source code +COPY src ./src + +# Copy docs for quickstart templates +COPY docs ./docs + +# Build 
TypeScript +RUN npm run build + +# Remove npmrc after build +RUN rm -f .npmrc + +# Production stage +FROM node:20-alpine AS production + +WORKDIR /app + +# Create non-root user +RUN addgroup -g 1001 -S nodejs && \ + adduser -S nodejs -u 1001 + +# Copy package files for production deps +COPY package*.json ./ + +# Configure npm for private packages (needed for production install) +ARG NPM_TOKEN +RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc && \ + npm install --omit=dev && \ + rm -f .npmrc && \ + npm cache clean --force + +# Copy compiled JavaScript from builder +COPY --from=builder --chown=nodejs:nodejs /app/dist ./dist + +# Copy docs directory for quickstart templates +COPY --from=builder --chown=nodejs:nodejs /app/docs ./docs + +USER nodejs + +# Default port (can be overridden via PORT env var) +EXPOSE 4000 + +ENV NODE_ENV=production + +# Health check +HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \ + CMD node -e "fetch('http://localhost:' + (process.env.PORT || 4000) + '/health').then(r => r.ok ? process.exit(0) : process.exit(1)).catch(() => process.exit(1))" + +CMD ["node", "dist/index.js"] diff --git a/hive/Dockerfile.dev b/hive/Dockerfile.dev new file mode 100644 index 00000000..0c77b34e --- /dev/null +++ b/hive/Dockerfile.dev @@ -0,0 +1,24 @@ +# Development Dockerfile with hot reload +FROM node:20-alpine + +ARG NPM_TOKEN + +WORKDIR /app + +# Configure npm for private packages (@acho-inc/administration) +RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc + +# Copy package files +COPY package*.json ./ + +# Install dependencies +RUN npm install && rm -f .npmrc + +# Copy source code +COPY . . 
+ +# Expose ports (app + debug) +EXPOSE 4000 9229 + +# Start development server with hot reload +CMD ["npm", "run", "dev"] diff --git a/hive/docs/aden-sdk-documents/config/agent-frameworks.json b/hive/docs/aden-sdk-documents/config/agent-frameworks.json new file mode 100644 index 00000000..7b9fbbdb --- /dev/null +++ b/hive/docs/aden-sdk-documents/config/agent-frameworks.json @@ -0,0 +1,24 @@ +{ + "generic": { + "name": "Generic", + "description": "Generic agent integration", + "pythonSupport": true, + "typescriptSupport": true, + "templateFile": "generic" + }, + "langgraph": { + "name": "LangGraph", + "description": "LangGraph agent integration", + "pythonSupport": true, + "typescriptSupport": true, + "templateFile": "langgraph" + }, + "livekit": { + "name": "LiveKit", + "description": "LiveKit voice agent integration", + "pythonSupport": true, + "typescriptSupport": false, + "adenPythonExtra": "livekit", + "templateFile": "livekit" + } +} diff --git a/hive/docs/aden-sdk-documents/config/llm-vendors.json b/hive/docs/aden-sdk-documents/config/llm-vendors.json new file mode 100644 index 00000000..d9cb26d5 --- /dev/null +++ b/hive/docs/aden-sdk-documents/config/llm-vendors.json @@ -0,0 +1,14 @@ +{ + "openai": { + "name": "OpenAI", + "envVarComment": "# or ANTHROPIC_API_KEY, GOOGLE_API_KEY" + }, + "anthropic": { + "name": "Anthropic", + "envVarComment": "# or OPENAI_API_KEY, GOOGLE_API_KEY" + }, + "google": { + "name": "Google", + "envVarComment": "# or OPENAI_API_KEY, ANTHROPIC_API_KEY" + } +} diff --git a/hive/docs/aden-sdk-documents/config/sdk-languages.json b/hive/docs/aden-sdk-documents/config/sdk-languages.json new file mode 100644 index 00000000..7efad1fc --- /dev/null +++ b/hive/docs/aden-sdk-documents/config/sdk-languages.json @@ -0,0 +1,10 @@ +{ + "python": { + "name": "Python", + "adenPackage": "aden-py" + }, + "javascript": { + "name": "JavaScript/TypeScript", + "adenPackage": "aden-ts" + } +} diff --git 
a/hive/docs/aden-sdk-documents/python/quickstart-langflow.md b/hive/docs/aden-sdk-documents/python/quickstart-langflow.md new file mode 100644 index 00000000..e3bd9292 --- /dev/null +++ b/hive/docs/aden-sdk-documents/python/quickstart-langflow.md @@ -0,0 +1,191 @@ +Quick reference for integrating Aden LLM observability & cost control into LangFlow applications. + +## Prerequisites + +`.env` file should contain: + +``` +OPENAI_API_KEY=sk-xxx # or ANTHROPIC_API_KEY, GOOGLE_API_KEY +ADEN_API_URL=https://hive.adenhq.com +ADEN_API_KEY=your-aden-api-key + +``` + +## Installation + +```bash +pip install aden-py langflow python-dotenv + +``` + +## Basic Setup (3 Steps) + +### 1. Import and Load Environment + +```python +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, + uninstrument, + MeterOptions, + create_console_emitter, + BeforeRequestResult, + RequestCancelledError, +) + +``` + +### 2. Define Budget Check Callback + +```python +def budget_check(params, context): + """Enforce budget limits before each LLM request.""" + budget_info = getattr(context, 'budget', None) + + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + + return BeforeRequestResult.proceed() + +``` + +### 3. Initialize Aden (at startup) + +```python +instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, +)) + +``` + +### 4. 
Use LangFlow Components + +```python +from langflow.components.models import LanguageModelComponent + +comp = LanguageModelComponent() +comp.set_attributes({ + "provider": "Google", # or "OpenAI" + "model_name": "gemini-2.0-flash", + "api_key": os.getenv("GOOGLE_API_KEY"), + "stream": False, +}) + +model = comp.build_model() + +try: + response = model.invoke("Hello!") + print(response.content) +except RequestCancelledError as e: + print(f"Budget exceeded: {e}") + +``` + +### 5. Cleanup (on exit) + +```python +uninstrument() + +``` + +## Complete Template + +```python +"""LangFlow with Aden instrumentation""" +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, uninstrument, MeterOptions, + create_console_emitter, BeforeRequestResult, RequestCancelledError, +) + +# Budget enforcement callback +def budget_check(params, context): + budget_info = getattr(context, 'budget', None) + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + return BeforeRequestResult.proceed() + +# Initialize Aden +instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, +)) + +# === YOUR LANGFLOW CODE HERE === + +from langflow.components.models import LanguageModelComponent + +def run_model(user_input: str): + try: + comp = LanguageModelComponent() + comp.set_attributes({ + "provider": "Google", + "model_name": "gemini-2.0-flash", + "api_key": os.getenv("GOOGLE_API_KEY"), + "stream": False, + }) + model = comp.build_model() 
+ return model.invoke(user_input).content + except RequestCancelledError as e: + return f"Sorry, you have used up your allowance. {e}" + +if __name__ == "__main__": + try: + print(run_model("Say hello!")) + finally: + uninstrument() + +``` + +## Supported Providers + +| Provider | Model Example | Notes | +| --------- | ------------------- | -------------------------------- | +| OpenAI | gpt-4o, gpt-4o-mini | Direct SDK instrumentation | +| Google | gemini-2.0-flash | Uses gRPC client instrumentation | +| Anthropic | claude-3-opus | Direct SDK instrumentation | + +## Budget Actions Reference + +| Action | When | Behavior | +| ----------------------------------------------- | ----------------- | ------------------------------ | +| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | +| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | +| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit | Delays request by N ms | +| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit | Switches to cheaper model | + +## Key Points + +- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev +- `before_request` callback enables budget enforcement +- Always wrap model calls in `try/except RequestCancelledError` +- Call `uninstrument()` on exit to flush remaining metrics +- Control agent connects automatically when `api_key` + `server_url` are provided +- Google Gemini support works automatically via gRPC client instrumentation + +## Documentation + +Full docs: [https://pypi.org/project/aden-py](https://pypi.org/project/aden-py/) diff --git a/hive/docs/aden-sdk-documents/python/quickstart-langgraph.md b/hive/docs/aden-sdk-documents/python/quickstart-langgraph.md new file mode 100644 index 00000000..7a413234 --- /dev/null +++ b/hive/docs/aden-sdk-documents/python/quickstart-langgraph.md @@ -0,0 +1,164 @@ +Quick reference for integrating Aden LLM observability & cost control into 
Python agents. + +## Prerequisites + +`.env` file should contain: + +``` +OPENAI_API_KEY=sk-xxx # or ANTHROPIC_API_KEY, GOOGLE_API_KEY +ADEN_API_URL=https://hive.adenhq.com +ADEN_API_KEY=your-aden-api-key + +``` + +## Installation + +```bash +pip install aden-py python-dotenv + +``` + +## Basic Setup (3 Steps) + +### 1. Import and Load Environment + +```python +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, + uninstrument, + MeterOptions, + create_console_emitter, + BeforeRequestResult, + RequestCancelledError, +) + +``` + +### 2. Define Budget Check Callback + +```python +def budget_check(params, context): + """Enforce budget limits before each LLM request.""" + budget_info = getattr(context, 'budget', None) + + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + + return BeforeRequestResult.proceed() + +``` + +### 3. Initialize Aden (at startup) + +```python +instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, +)) + +``` + +### 4. Handle Budget Errors in Your Agent + +```python +def run_agent(user_input: str): + try: + # Your agent logic here + result = graph.invoke({"messages": [{"role": "user", "content": user_input}]}) + return result["messages"][-1].content + except RequestCancelledError as e: + return f"Sorry, you have used up your allowance. {e}" + +``` + +### 5. 
Cleanup (on exit) + +```python +uninstrument() + +``` + +## Complete Template + +```python +"""Agent with Aden instrumentation""" +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, uninstrument, MeterOptions, + create_console_emitter, BeforeRequestResult, RequestCancelledError, +) + +# Budget enforcement callback +def budget_check(params, context): + budget_info = getattr(context, 'budget', None) + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + return BeforeRequestResult.proceed() + +# Initialize Aden +instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, +)) + +# === YOUR AGENT CODE HERE === + +def run_agent(user_input: str): + try: + # Your LLM calls here + pass + except RequestCancelledError as e: + return f"Sorry, you have used up your allowance. 
{e}" + +if __name__ == "__main__": + try: + # Your main loop + pass + finally: + uninstrument() + +``` + +## Budget Actions Reference + +| Action | When | Behavior | +| ----------------------------------------------- | ----------------- | ------------------------------ | +| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | +| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | +| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit | Delays request by N ms | +| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit | Switches to cheaper model | + +## Key Points + +- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev +- `before_request` callback enables budget enforcement +- Always wrap agent calls in `try/except RequestCancelledError` +- Call `uninstrument()` on exit to flush remaining metrics +- Control agent connects automatically when `api_key` + `server_url` are provided + +## Documentation + +Full docs: [https://pypi.org/project/aden-py](https://pypi.org/project/aden-py/json) diff --git a/hive/docs/aden-sdk-documents/python/quickstart-livekit.md b/hive/docs/aden-sdk-documents/python/quickstart-livekit.md new file mode 100644 index 00000000..35c0f9ab --- /dev/null +++ b/hive/docs/aden-sdk-documents/python/quickstart-livekit.md @@ -0,0 +1,165 @@ +# Aden-py LiveKit Integration Guide + +Quick reference for integrating Aden LLM observability & cost control into LiveKit voice agents. + +## Prerequisites + +`.env` file should contain: + +``` +OPENAI_API_KEY=sk-xxx +ADEN_API_URL=https://hive.adenhq.com +ADEN_API_KEY=your-aden-api-key +``` + +## Installation + +```bash +pip install 'aden-py[livekit]' python-dotenv +``` + +## Setup (4 Steps) + +### 1. 
Import and Load Environment + +```python +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, + MeterOptions, + create_console_emitter, + BeforeRequestResult, + RequestCancelledError, +) +``` + +### 2. Define Budget Check Callback + +```python +def budget_check(params, context): + """Enforce budget limits before each LLM request.""" + budget_info = getattr(context, 'budget', None) + + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + + return BeforeRequestResult.proceed() +``` + +### 3. Create Worker Prewarm Function + +**IMPORTANT:** LiveKit uses multiprocessing. Instrumentation must happen in each worker process, not the main process. + +```python +def initialize_aden_in_worker(proc): + """Initialize Aden instrumentation in each worker process.""" + instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, + )) +``` + +### 4. Pass Prewarm Function to WorkerOptions + +```python +if __name__ == "__main__": + agents.cli.run_app(agents.WorkerOptions( + entrypoint_fnc=entrypoint, + agent_name="my-agent", + prewarm_fnc=initialize_aden_in_worker, # <-- This is the key! 
+ )) +``` + +## Complete Template + +```python +"""LiveKit Voice Agent with Aden instrumentation""" +import os +from dotenv import load_dotenv +load_dotenv() + +from livekit import agents +from livekit.plugins import openai + +from aden import ( + instrument, MeterOptions, create_console_emitter, + BeforeRequestResult, RequestCancelledError, +) + +# Budget enforcement callback +def budget_check(params, context): + budget_info = getattr(context, 'budget', None) + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + return BeforeRequestResult.proceed() + +# Worker initialization - runs in each spawned process +def initialize_aden_in_worker(proc): + instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, + )) + +async def entrypoint(ctx: agents.JobContext): + # Your agent logic here + session = agents.AgentSession( + llm=openai.LLM(model="gpt-4o-mini"), + # ... 
+ ) + await session.start(ctx.room) + +if __name__ == "__main__": + agents.cli.run_app(agents.WorkerOptions( + entrypoint_fnc=entrypoint, + agent_name="my-agent", + prewarm_fnc=initialize_aden_in_worker, + )) +``` + +## Budget Actions Reference + +| Action | When | Behavior | +| ----------------------------------------------- | ------------------------ | ------------------------------ | +| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | +| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | +| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit (95%+) | Delays request by N ms | +| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit (80%+) | Switches to cheaper model | + +## Key Points + +- **Use `prewarm_fnc`** - LiveKit spawns worker processes; instrumentation must happen in each worker +- **Don't instrument in main process** - It won't affect the worker processes where LLM calls happen +- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev +- Control agent connects automatically when `api_key` + `server_url` are provided + +## Troubleshooting + +**No metrics showing?** + +- Ensure `prewarm_fnc` is set in `WorkerOptions` +- Check that `ADEN_API_KEY` and `ADEN_API_URL` are in your `.env` +- Verify you're using `aden-py[livekit]` (with the livekit extra) + +**Metrics in test but not in agent?** + +- LiveKit uses multiprocessing - the main process instrumentation doesn't carry over +- The `prewarm_fnc` runs in each worker before your `entrypoint` is called diff --git a/hive/docs/aden-sdk-documents/templates/javascript/generic.md b/hive/docs/aden-sdk-documents/templates/javascript/generic.md new file mode 100644 index 00000000..fe8dca0c --- /dev/null +++ b/hive/docs/aden-sdk-documents/templates/javascript/generic.md @@ -0,0 +1,194 @@ +Quick reference for integrating Aden LLM observability & cost control into TypeScript/JavaScript agents. 
+ +## Prerequisites + +`.env` file should contain: + +``` +OPENAI_API_KEY=sk-xxx {{envVarComment}} +ADEN_API_URL={{serverUrl}} +ADEN_API_KEY={{apiKey}} +``` + +## Installation + +```bash +npm install aden-ts dotenv + +# Install the LLM SDKs you use +npm install openai # For OpenAI +npm install @anthropic-ai/sdk # For Anthropic +npm install @google/generative-ai # For Google Gemini +``` + +## Basic Setup + +### 1. Import Aden and SDK (at top of file) + +```typescript +import "dotenv/config"; +import OpenAI from "openai"; +import { + instrument, + uninstrument, + createConsoleEmitter, + RequestCancelledError, +} from "aden-ts"; +import type { BeforeRequestContext, BeforeRequestResult } from "aden-ts"; +``` + +### 2. Define Before Request Callback (optional) + +```typescript +// Custom logic before each LLM request +// Budget enforcement is handled server-side by the control agent +function beforeRequest( + _params: Record, + context: BeforeRequestContext +): BeforeRequestResult { + console.log(`[Aden] Request to model: ${context.model}`); + return { action: "proceed" }; +} +``` + +### 3. Initialize Aden (at startup, BEFORE using SDK) + +```typescript +await instrument({ + apiKey: process.env.ADEN_API_KEY, + serverUrl: process.env.ADEN_API_URL, + emitMetric: createConsoleEmitter({ pretty: true }), + onAlert: (alert: { level: string; message: string }) => + console.log(`[Aden ${alert.level}] ${alert.message}`), + beforeRequest, + sdks: { OpenAI }, +}); +``` + +### 4. Handle Budget Errors in Your Agent + +```typescript +async function runAgent(userInput: string): Promise { + try { + const openai = new OpenAI(); + const response = await openai.chat.completions.create({ + model: "gpt-4o", + messages: [{ role: "user", content: userInput }], + }); + return response.choices[0]?.message?.content ?? ""; + } catch (e) { + if (e instanceof RequestCancelledError) { + return `Sorry, your budget has been exhausted. ${e.message}`; + } + throw e; + } +} +``` + +### 5. 
Cleanup (on exit) + +```typescript +await uninstrument(); +``` + +## Complete Template + +```typescript +/** + * Agent with Aden instrumentation + */ +import "dotenv/config"; +import OpenAI from "openai"; +import { + instrument, + uninstrument, + createConsoleEmitter, + RequestCancelledError, +} from "aden-ts"; +import type { BeforeRequestContext, BeforeRequestResult } from "aden-ts"; + +// Before request callback (optional) +function beforeRequest( + _params: Record, + context: BeforeRequestContext +): BeforeRequestResult { + console.log(`[Aden] Request to model: ${context.model}`); + return { action: "proceed" }; +} + +// Initialize Aden FIRST +await instrument({ + apiKey: process.env.ADEN_API_KEY, + serverUrl: process.env.ADEN_API_URL, + emitMetric: createConsoleEmitter({ pretty: true }), + onAlert: (alert: { level: string; message: string }) => + console.log(`[Aden ${alert.level}] ${alert.message}`), + beforeRequest, + sdks: { OpenAI }, +}); + +// === YOUR AGENT CODE HERE === + +async function runAgent(userInput: string): Promise { + try { + const openai = new OpenAI(); + const response = await openai.chat.completions.create({ + model: "gpt-4o", + messages: [{ role: "user", content: userInput }], + }); + return response.choices[0]?.message?.content ?? ""; + } catch (e) { + if (e instanceof RequestCancelledError) { + return `Sorry, your budget has been exhausted. 
${e.message}`; + } + throw e; + } +} + +// Main entry point +async function main() { + try { + const result = await runAgent("Hello, world!"); + console.log(result); + } finally { + await uninstrument(); + } +} + +main(); +``` + +## BeforeRequestContext Reference + +The `context` parameter in `beforeRequest` contains: + +| Field | Type | Description | +| --- | --- | --- | +| `model` | string | Model being used for this request | +| `stream` | boolean | Whether this is a streaming request | +| `spanId` | string | Generated span ID (OTel standard) | +| `traceId` | string | Trace ID grouping related operations | +| `timestamp` | Date | When the request was initiated | +| `metadata` | Record | Custom metadata (optional) | + +## BeforeRequestResult Actions + +| Action | Usage | Behavior | +| --- | --- | --- | +| `{ action: "proceed" }` | Allow request | Request continues normally | +| `{ action: "cancel", reason: "..." }` | Block request | Throws `RequestCancelledError` | +| `{ action: "throttle", delayMs: N }` | Rate limit | Delays request by N ms | +| `{ action: "degrade", toModel: "...", reason: "..." 
}` | Downgrade | Switches to specified model | + +## Key Points + +- Module name is `aden-ts` (not `aden`) +- `emitMetric` is **required** - use `createConsoleEmitter({ pretty: true })` for dev +- Budget enforcement is handled **server-side** by the control agent +- Always wrap agent calls in `try/catch` for `RequestCancelledError` +- Call `await uninstrument()` on exit to flush remaining metrics +- Control agent connects automatically when `apiKey` + `serverUrl` are provided + +## Documentation + +Full docs: [https://www.npmjs.com/package/aden-ts](https://www.npmjs.com/package/aden-ts) diff --git a/hive/docs/aden-sdk-documents/templates/javascript/langgraph.md b/hive/docs/aden-sdk-documents/templates/javascript/langgraph.md new file mode 100644 index 00000000..a911a7bd --- /dev/null +++ b/hive/docs/aden-sdk-documents/templates/javascript/langgraph.md @@ -0,0 +1,297 @@ +Quick reference for integrating Aden LLM observability & cost control into TypeScript/JavaScript agents. + +## Prerequisites + +`.env` file should contain: + +``` +OPENAI_API_KEY=sk-xxx {{envVarComment}} +ADEN_API_URL={{serverUrl}} +ADEN_API_KEY={{apiKey}} +``` + +## Installation + +```bash +npm install aden-ts dotenv + +# Install the LLM SDKs you use +npm install openai # For OpenAI +npm install @anthropic-ai/sdk # For Anthropic +npm install @google/generative-ai # For Google Gemini +``` + +## Basic Setup + +### 1. Import Aden and SDK (at top of file) + +```typescript +import "dotenv/config"; +import OpenAI from "openai"; +import { + instrument, + uninstrument, + createConsoleEmitter, + RequestCancelledError, +} from "aden-ts"; +import type { BeforeRequestContext, BeforeRequestResult } from "aden-ts"; +``` + +### 2. 
Define Before Request Callback (optional) + +```typescript +// Custom logic before each LLM request +// Budget enforcement is handled server-side by the control agent +function beforeRequest( + _params: Record, + context: BeforeRequestContext +): BeforeRequestResult { + console.log(`[Aden] Request to model: ${context.model}`); + return { action: "proceed" }; +} +``` + +### 3. Initialize Aden (at startup, BEFORE using SDK) + +```typescript +await instrument({ + apiKey: process.env.ADEN_API_KEY, + serverUrl: process.env.ADEN_API_URL, + emitMetric: createConsoleEmitter({ pretty: true }), + onAlert: (alert: { level: string; message: string }) => + console.log(`[Aden ${alert.level}] ${alert.message}`), + beforeRequest, + sdks: { OpenAI }, +}); +``` + +### 4. Handle Budget Errors in Your Agent + +```typescript +async function runAgent(userInput: string): Promise { + try { + const openai = new OpenAI(); + const response = await openai.chat.completions.create({ + model: "gpt-4o", + messages: [{ role: "user", content: userInput }], + }); + return response.choices[0]?.message?.content ?? ""; + } catch (e) { + if (e instanceof RequestCancelledError) { + return `Sorry, your budget has been exhausted. ${e.message}`; + } + throw e; + } +} +``` + +### 5. 
Cleanup (on exit) + +```typescript +await uninstrument(); +``` + +## Complete Template (Direct SDK Usage) + +```typescript +/** + * Agent with Aden instrumentation - Direct SDK usage + */ +import "dotenv/config"; +import OpenAI from "openai"; +import { + instrument, + uninstrument, + createConsoleEmitter, + RequestCancelledError, +} from "aden-ts"; +import type { BeforeRequestContext, BeforeRequestResult } from "aden-ts"; + +// Before request callback (optional) +function beforeRequest( + _params: Record, + context: BeforeRequestContext +): BeforeRequestResult { + console.log(`[Aden] Request to model: ${context.model}`); + return { action: "proceed" }; +} + +// Initialize Aden FIRST +await instrument({ + apiKey: process.env.ADEN_API_KEY, + serverUrl: process.env.ADEN_API_URL, + emitMetric: createConsoleEmitter({ pretty: true }), + onAlert: (alert: { level: string; message: string }) => + console.log(`[Aden ${alert.level}] ${alert.message}`), + beforeRequest, + sdks: { OpenAI }, +}); + +// === YOUR AGENT CODE HERE === + +async function runAgent(userInput: string): Promise { + try { + const openai = new OpenAI(); + const response = await openai.chat.completions.create({ + model: "gpt-4o", + messages: [{ role: "user", content: userInput }], + }); + return response.choices[0]?.message?.content ?? ""; + } catch (e) { + if (e instanceof RequestCancelledError) { + return `Sorry, your budget has been exhausted. ${e.message}`; + } + throw e; + } +} + +// Main entry point +async function main() { + try { + const result = await runAgent("Hello, world!"); + console.log(result); + } finally { + await uninstrument(); + } +} + +main(); +``` + +## LangChain / LangGraph Integration + +When using LangChain or LangGraph, you **MUST** use dynamic imports to ensure instrumentation is applied before LangChain loads the SDK. + +### Critical: SDK Version Matching + +LangChain bundles its own SDK dependencies. 
To ensure instrumentation works, your SDK version must match LangChain's: + +```bash +# Check what version LangChain uses +cat node_modules/@langchain/anthropic/node_modules/@anthropic-ai/sdk/package.json | grep version + +# Update your package.json to match that version +# e.g., "@anthropic-ai/sdk": "^0.65.0" + +# Reinstall to dedupe +rm -rf node_modules package-lock.json && npm install + +# Verify no nested SDK (should show "No such file") +ls node_modules/@langchain/anthropic/node_modules 2>/dev/null || echo "OK: SDK is shared" +``` + +### LangChain Template + +```typescript +/** + * LangGraph Agent with Aden instrumentation + * Key: Use dynamic imports AFTER instrument() + */ +import "dotenv/config"; +import Anthropic from "@anthropic-ai/sdk"; +import { + instrument, + uninstrument, + createConsoleEmitter, + RequestCancelledError, +} from "aden-ts"; +import type { BeforeRequestContext, BeforeRequestResult } from "aden-ts"; + +function beforeRequest( + _params: Record, + context: BeforeRequestContext +): BeforeRequestResult { + console.log(`[Aden] Request to model: ${context.model}`); + return { action: "proceed" }; +} + +async function main() { + // 1. Initialize Aden FIRST (before any LangChain imports) + await instrument({ + apiKey: process.env.ADEN_API_KEY, + serverUrl: process.env.ADEN_API_URL, + emitMetric: createConsoleEmitter({ pretty: true }), + onAlert: (alert: { level: string; message: string }) => + console.log(`[Aden ${alert.level}] ${alert.message}`), + beforeRequest, + sdks: { Anthropic }, + }); + + // 2. Dynamic imports AFTER instrumentation + const { ChatAnthropic } = await import("@langchain/anthropic"); + const { HumanMessage } = await import("@langchain/core/messages"); + // ... other LangChain imports + + // 3. 
Now create your LangChain components + const model = new ChatAnthropic({ + model: "claude-sonnet-4-20250514", + temperature: 0, + }); + + try { + // Your agent logic here + const response = await model.invoke([new HumanMessage("Hello!")]); + console.log(response.content); + } catch (error) { + if (error instanceof RequestCancelledError) { + console.log(`Budget exhausted: ${error.message}`); + } else { + throw error; + } + } finally { + await uninstrument(); + } +} + +main(); +``` + +## BeforeRequestContext Reference + +The `context` parameter in `beforeRequest` contains: + +| Field | Type | Description | +| --- | --- | --- | +| `model` | string | Model being used for this request | +| `stream` | boolean | Whether this is a streaming request | +| `spanId` | string | Generated span ID (OTel standard) | +| `traceId` | string | Trace ID grouping related operations | +| `timestamp` | Date | When the request was initiated | +| `metadata` | Record | Custom metadata (optional) | + +## BeforeRequestResult Actions + +| Action | Usage | Behavior | +| --- | --- | --- | +| `{ action: "proceed" }` | Allow request | Request continues normally | +| `{ action: "cancel", reason: "..." }` | Block request | Throws `RequestCancelledError` | +| `{ action: "throttle", delayMs: N }` | Rate limit | Delays request by N ms | +| `{ action: "degrade", toModel: "...", reason: "..." }` | Downgrade | Switches to specified model | + +## Key Points + +- Module name is `aden-ts` (not `aden`) +- `emitMetric` is **required** - use `createConsoleEmitter({ pretty: true })` for dev +- Budget enforcement is handled **server-side** by the control agent +- Always wrap agent calls in `try/catch` for `RequestCancelledError` +- Call `await uninstrument()` on exit to flush remaining metrics +- Control agent connects automatically when `apiKey` + `serverUrl` are provided +- **LangChain users**: Must use dynamic imports and match SDK versions + +## Troubleshooting + +### No metrics being captured + +1. 
**Check SDK version match**: Run `npm ls @anthropic-ai/sdk` - should show only ONE version +2. **Use dynamic imports**: Import LangChain modules AFTER `instrument()` is called +3. **Verify instrumentation**: Look for `[aden] Instrumented: anthropic + control agent` at startup + +### RequestCancelledError not thrown + +Budget enforcement is server-side. Ensure: +- `ADEN_API_KEY` and `ADEN_API_URL` are set correctly +- Control agent connection is established (check startup logs) + +## Documentation + +Full docs: [https://www.npmjs.com/package/aden-ts](https://www.npmjs.com/package/aden-ts) diff --git a/hive/docs/aden-sdk-documents/templates/python/generic.md b/hive/docs/aden-sdk-documents/templates/python/generic.md new file mode 100644 index 00000000..d5e6fe79 --- /dev/null +++ b/hive/docs/aden-sdk-documents/templates/python/generic.md @@ -0,0 +1,164 @@ +Quick reference for integrating Aden LLM observability & cost control into Python agents. + +## Prerequisites + +`.env` file should contain: + +``` +OPENAI_API_KEY=sk-xxx {{envVarComment}} +ADEN_API_URL={{serverUrl}} +ADEN_API_KEY={{apiKey}} + +``` + +## Installation + +```bash +pip install aden-py python-dotenv + +``` + +## Basic Setup (3 Steps) + +### 1. Import and Load Environment + +```python +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, + uninstrument, + MeterOptions, + create_console_emitter, + BeforeRequestResult, + RequestCancelledError, +) + +``` + +### 2. 
Define Budget Check Callback + +```python +def budget_check(params, context): + """Enforce budget limits before each LLM request.""" + budget_info = getattr(context, 'budget', None) + + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + + return BeforeRequestResult.proceed() + +``` + +### 3. Initialize Aden (at startup) + +```python +instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, +)) + +``` + +### 4. Handle Budget Errors in Your Agent + +```python +def run_agent(user_input: str): + try: + # Your agent logic here + result = graph.invoke({"messages": [{"role": "user", "content": user_input}]}) + return result["messages"][-1].content + except RequestCancelledError as e: + return f"Sorry, you have used up your allowance. {e}" + +``` + +### 5. 
Cleanup (on exit) + +```python +uninstrument() + +``` + +## Complete Template + +```python +"""Agent with Aden instrumentation""" +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, uninstrument, MeterOptions, + create_console_emitter, BeforeRequestResult, RequestCancelledError, +) + +# Budget enforcement callback +def budget_check(params, context): + budget_info = getattr(context, 'budget', None) + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + return BeforeRequestResult.proceed() + +# Initialize Aden +instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, +)) + +# === YOUR AGENT CODE HERE === + +def run_agent(user_input: str): + try: + # Your LLM calls here + pass + except RequestCancelledError as e: + return f"Sorry, you have used up your allowance. 
{e}" + +if __name__ == "__main__": + try: + # Your main loop + pass + finally: + uninstrument() + +``` + +## Budget Actions Reference + +| Action | When | Behavior | +| --- | --- | --- | +| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | +| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | +| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit | Delays request by N ms | +| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit | Switches to cheaper model | + +## Key Points + +- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev +- `before_request` callback enables budget enforcement +- Always wrap agent calls in `try/except RequestCancelledError` +- Call `uninstrument()` on exit to flush remaining metrics +- Control agent connects automatically when `api_key` + `server_url` are provided + +## Documentation + +Full docs: [https://pypi.org/project/aden-py](https://pypi.org/project/aden-py/) diff --git a/hive/docs/aden-sdk-documents/templates/python/langflow.md b/hive/docs/aden-sdk-documents/templates/python/langflow.md new file mode 100644 index 00000000..12e52166 --- /dev/null +++ b/hive/docs/aden-sdk-documents/templates/python/langflow.md @@ -0,0 +1,191 @@ +Quick reference for integrating Aden LLM observability & cost control into LangFlow applications. + +## Prerequisites + +`.env` file should contain: + +``` +OPENAI_API_KEY=sk-xxx {{envVarComment}} +ADEN_API_URL={{serverUrl}} +ADEN_API_KEY={{apiKey}} + +``` + +## Installation + +```bash +pip install aden-py langflow python-dotenv + +``` + +## Basic Setup (3 Steps) + +### 1. Import and Load Environment + +```python +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, + uninstrument, + MeterOptions, + create_console_emitter, + BeforeRequestResult, + RequestCancelledError, +) + +``` + +### 2. 
Define Budget Check Callback + +```python +def budget_check(params, context): + """Enforce budget limits before each LLM request.""" + budget_info = getattr(context, 'budget', None) + + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + + return BeforeRequestResult.proceed() + +``` + +### 3. Initialize Aden (at startup) + +```python +instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, +)) + +``` + +### 4. Use LangFlow Components + +```python +from langflow.components.models import LanguageModelComponent + +comp = LanguageModelComponent() +comp.set_attributes({ + "provider": "Google", # or "OpenAI" + "model_name": "gemini-2.0-flash", + "api_key": os.getenv("GOOGLE_API_KEY"), + "stream": False, +}) + +model = comp.build_model() + +try: + response = model.invoke("Hello!") + print(response.content) +except RequestCancelledError as e: + print(f"Budget exceeded: {e}") + +``` + +### 5. 
Cleanup (on exit) + +```python +uninstrument() + +``` + +## Complete Template + +```python +"""LangFlow with Aden instrumentation""" +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, uninstrument, MeterOptions, + create_console_emitter, BeforeRequestResult, RequestCancelledError, +) + +# Budget enforcement callback +def budget_check(params, context): + budget_info = getattr(context, 'budget', None) + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + return BeforeRequestResult.proceed() + +# Initialize Aden +instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, +)) + +# === YOUR LANGFLOW CODE HERE === + +from langflow.components.models import LanguageModelComponent + +def run_model(user_input: str): + try: + comp = LanguageModelComponent() + comp.set_attributes({ + "provider": "Google", + "model_name": "gemini-2.0-flash", + "api_key": os.getenv("GOOGLE_API_KEY"), + "stream": False, + }) + model = comp.build_model() + return model.invoke(user_input).content + except RequestCancelledError as e: + return f"Sorry, you have used up your allowance. 
{e}" + +if __name__ == "__main__": + try: + print(run_model("Say hello!")) + finally: + uninstrument() + +``` + +## Supported Providers + +| Provider | Model Example | Notes | +| --- | --- | --- | +| OpenAI | gpt-4o, gpt-4o-mini | Direct SDK instrumentation | +| Google | gemini-2.0-flash | Uses gRPC client instrumentation | +| Anthropic | claude-3-opus | Direct SDK instrumentation | + +## Budget Actions Reference + +| Action | When | Behavior | +| --- | --- | --- | +| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | +| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | +| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit | Delays request by N ms | +| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit | Switches to cheaper model | + +## Key Points + +- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev +- `before_request` callback enables budget enforcement +- Always wrap model calls in `try/except RequestCancelledError` +- Call `uninstrument()` on exit to flush remaining metrics +- Control agent connects automatically when `api_key` + `server_url` are provided +- Google Gemini support works automatically via gRPC client instrumentation + +## Documentation + +Full docs: [https://pypi.org/project/aden-py](https://pypi.org/project/aden-py/) diff --git a/hive/docs/aden-sdk-documents/templates/python/langgraph.md b/hive/docs/aden-sdk-documents/templates/python/langgraph.md new file mode 100644 index 00000000..d5e6fe79 --- /dev/null +++ b/hive/docs/aden-sdk-documents/templates/python/langgraph.md @@ -0,0 +1,164 @@ +Quick reference for integrating Aden LLM observability & cost control into Python agents. 
+ +## Prerequisites + +`.env` file should contain: + +``` +OPENAI_API_KEY=sk-xxx {{envVarComment}} +ADEN_API_URL={{serverUrl}} +ADEN_API_KEY={{apiKey}} + +``` + +## Installation + +```bash +pip install aden-py python-dotenv + +``` + +## Basic Setup (3 Steps) + +### 1. Import and Load Environment + +```python +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, + uninstrument, + MeterOptions, + create_console_emitter, + BeforeRequestResult, + RequestCancelledError, +) + +``` + +### 2. Define Budget Check Callback + +```python +def budget_check(params, context): + """Enforce budget limits before each LLM request.""" + budget_info = getattr(context, 'budget', None) + + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + + return BeforeRequestResult.proceed() + +``` + +### 3. Initialize Aden (at startup) + +```python +instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, +)) + +``` + +### 4. Handle Budget Errors in Your Agent + +```python +def run_agent(user_input: str): + try: + # Your agent logic here + result = graph.invoke({"messages": [{"role": "user", "content": user_input}]}) + return result["messages"][-1].content + except RequestCancelledError as e: + return f"Sorry, you have used up your allowance. {e}" + +``` + +### 5. 
Cleanup (on exit) + +```python +uninstrument() + +``` + +## Complete Template + +```python +"""Agent with Aden instrumentation""" +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, uninstrument, MeterOptions, + create_console_emitter, BeforeRequestResult, RequestCancelledError, +) + +# Budget enforcement callback +def budget_check(params, context): + budget_info = getattr(context, 'budget', None) + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + return BeforeRequestResult.proceed() + +# Initialize Aden +instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, +)) + +# === YOUR AGENT CODE HERE === + +def run_agent(user_input: str): + try: + # Your LLM calls here + pass + except RequestCancelledError as e: + return f"Sorry, you have used up your allowance. 
{e}" + +if __name__ == "__main__": + try: + # Your main loop + pass + finally: + uninstrument() + +``` + +## Budget Actions Reference + +| Action | When | Behavior | +| --- | --- | --- | +| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | +| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | +| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit | Delays request by N ms | +| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit | Switches to cheaper model | + +## Key Points + +- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev +- `before_request` callback enables budget enforcement +- Always wrap agent calls in `try/except RequestCancelledError` +- Call `uninstrument()` on exit to flush remaining metrics +- Control agent connects automatically when `api_key` + `server_url` are provided + +## Documentation + +Full docs: [https://pypi.org/project/aden-py](https://pypi.org/project/aden-py/) diff --git a/hive/docs/aden-sdk-documents/templates/python/livekit.md b/hive/docs/aden-sdk-documents/templates/python/livekit.md new file mode 100644 index 00000000..e0cc13f5 --- /dev/null +++ b/hive/docs/aden-sdk-documents/templates/python/livekit.md @@ -0,0 +1,162 @@ +# Aden-py LiveKit Integration Guide + +Quick reference for integrating Aden LLM observability & cost control into LiveKit voice agents. + +## Prerequisites + +`.env` file should contain: +``` +OPENAI_API_KEY=sk-xxx +ADEN_API_URL={{serverUrl}} +ADEN_API_KEY={{apiKey}} +``` + +## Installation + +```bash +pip install 'aden-py[livekit]' python-dotenv +``` + +## Setup (4 Steps) + +### 1. Import and Load Environment + +```python +import os +from dotenv import load_dotenv +load_dotenv() + +from aden import ( + instrument, + MeterOptions, + create_console_emitter, + BeforeRequestResult, + RequestCancelledError, +) +``` + +### 2. 
Define Budget Check Callback + +```python +def budget_check(params, context): + """Enforce budget limits before each LLM request.""" + budget_info = getattr(context, 'budget', None) + + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + + return BeforeRequestResult.proceed() +``` + +### 3. Create Worker Prewarm Function + +**IMPORTANT:** LiveKit uses multiprocessing. Instrumentation must happen in each worker process, not the main process. + +```python +def initialize_aden_in_worker(proc): + """Initialize Aden instrumentation in each worker process.""" + instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, + )) +``` + +### 4. Pass Prewarm Function to WorkerOptions + +```python +if __name__ == "__main__": + agents.cli.run_app(agents.WorkerOptions( + entrypoint_fnc=entrypoint, + agent_name="my-agent", + prewarm_fnc=initialize_aden_in_worker, # <-- This is the key! 
+ )) +``` + +## Complete Template + +```python +"""LiveKit Voice Agent with Aden instrumentation""" +import os +from dotenv import load_dotenv +load_dotenv() + +from livekit import agents +from livekit.plugins import openai + +from aden import ( + instrument, MeterOptions, create_console_emitter, + BeforeRequestResult, RequestCancelledError, +) + +# Budget enforcement callback +def budget_check(params, context): + budget_info = getattr(context, 'budget', None) + if budget_info and budget_info.get('exhausted', False): + return BeforeRequestResult.cancel("Budget exhausted") + if budget_info and budget_info.get('percent_used', 0) >= 95: + return BeforeRequestResult.throttle(delay_ms=2000) + if budget_info and budget_info.get('percent_used', 0) >= 80: + return BeforeRequestResult.degrade(to_model="gpt-4o-mini", reason="Approaching limit") + return BeforeRequestResult.proceed() + +# Worker initialization - runs in each spawned process +def initialize_aden_in_worker(proc): + instrument(MeterOptions( + api_key=os.environ.get("ADEN_API_KEY"), + server_url=os.environ.get("ADEN_API_URL"), + emit_metric=create_console_emitter(pretty=True), + on_alert=lambda alert: print(f"[Aden {alert.level}] {alert.message}"), + before_request=budget_check, + )) + +async def entrypoint(ctx: agents.JobContext): + # Your agent logic here + session = agents.AgentSession( + llm=openai.LLM(model="gpt-4o-mini"), + # ... 
+ ) + await session.start(ctx.room) + +if __name__ == "__main__": + agents.cli.run_app(agents.WorkerOptions( + entrypoint_fnc=entrypoint, + agent_name="my-agent", + prewarm_fnc=initialize_aden_in_worker, + )) +``` + +## Budget Actions Reference + +| Action | When | Behavior | +|--------|------|----------| +| `BeforeRequestResult.proceed()` | Within budget | Request continues normally | +| `BeforeRequestResult.cancel(msg)` | Budget exhausted | Raises `RequestCancelledError` | +| `BeforeRequestResult.throttle(delay_ms=N)` | Near limit (95%+) | Delays request by N ms | +| `BeforeRequestResult.degrade(to_model, reason)` | Approaching limit (80%+) | Switches to cheaper model | + +## Key Points + +- **Use `prewarm_fnc`** - LiveKit spawns worker processes; instrumentation must happen in each worker +- **Don't instrument in main process** - It won't affect the worker processes where LLM calls happen +- `emit_metric` is **required** - use `create_console_emitter(pretty=True)` for dev +- Control agent connects automatically when `api_key` + `server_url` are provided + +## Troubleshooting + +**No metrics showing?** +- Ensure `prewarm_fnc` is set in `WorkerOptions` +- Check that `ADEN_API_KEY` and `ADEN_API_URL` are in your `.env` +- Verify you're using `aden-py[livekit]` (with the livekit extra) + +**Metrics in test but not in agent?** +- LiveKit uses multiprocessing - the main process instrumentation doesn't carry over +- The `prewarm_fnc` runs in each worker before your `entrypoint` is called diff --git a/hive/docs/api/user-authentication.md b/hive/docs/api/user-authentication.md new file mode 100644 index 00000000..7d19ff31 --- /dev/null +++ b/hive/docs/api/user-authentication.md @@ -0,0 +1,247 @@ +# User Authentication API + +This document describes the user authentication endpoints available in the Hive backend. + +## Base URL + +``` +http://localhost:4000 +``` + +## Endpoints + +### Register a New User + +Create a new user account and receive an authentication token. 
+ +``` +POST /user/register +``` + +#### Request Headers + +| Header | Value | Required | +| ------------ | ---------------- | -------- | +| Content-Type | application/json | Yes | + +#### Request Body + +| Field | Type | Required | Description | +| --------- | ------ | -------- | ------------------------------- | +| email | string | Yes | User's email address | +| password | string | Yes | Password (minimum 8 characters) | +| name | string | No | Display name | +| firstname | string | No | First name | +| lastname | string | No | Last name | + +#### Example Request + +```bash +curl -X POST http://localhost:4000/user/register \ + -H "Content-Type: application/json" \ + -d '{ + "email": "user@example.com", + "password": "securepassword123", + "firstname": "John", + "lastname": "Doe" + }' +``` + +#### Success Response (201 Created) + +```json +{ + "success": true, + "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", + "email": "user@example.com", + "name": "John Doe", + "firstname": "John", + "lastname": "Doe", + "current_team_id": 1, + "create_time": "2026-01-13T01:52:56.604Z" +} +``` + +#### Error Responses + +| Status | Code | Message | +| ------ | --------------------- | -------------------------------------- | +| 400 | Bad Request | Email and password are required | +| 400 | Bad Request | Please enter a valid email | +| 400 | Bad Request | Password must be at least 8 characters | +| 409 | Conflict | Email already registered | +| 500 | Internal Server Error | Registration failed. Please try again. | + +--- + +### Login + +Authenticate an existing user and receive an authentication token. 
+ +``` +POST /user/login-v2 +``` + +#### Request Headers + +| Header | Value | Required | +| ------------ | ---------------- | -------- | +| Content-Type | application/json | Yes | + +#### Request Body + +| Field | Type | Required | Description | +| -------- | ------ | -------- | -------------------- | +| email | string | Yes | User's email address | +| password | string | Yes | User's password | + +#### Example Request + +```bash +curl -X POST http://localhost:4000/user/login-v2 \ + -H "Content-Type: application/json" \ + -d '{ + "email": "user@example.com", + "password": "securepassword123" + }' +``` + +#### Success Response (200 OK) + +```json +{ + "success": true, + "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", + "email": "user@example.com", + "firstname": "John", + "lastname": "Doe", + "name": "John Doe", + "current_team_id": 1, + "create_time": "2026-01-13T01:52:56.594Z" +} +``` + +#### Error Responses + +| Status | Code | Message | +| ------ | --------------------- | -------------------------------------- | +| 400 | Bad Request | Email and password are required | +| 400 | Bad Request | Please enter a valid email | +| 400 | Bad Request | Password must be at least 6 characters | +| 400 | Bad Request | Please sign in with OAuth | +| 401 | Unauthorized | Invalid email or password | +| 403 | Forbidden | Your account has been disabled | +| 500 | Internal Server Error | Login failed. Please try again. | + +--- + +### Get Current User + +Retrieve information about the currently authenticated user. + +``` +GET /user/me +``` + +#### Request Headers + +| Header | Value | Required | +| ------------- | ------- | -------- | +| Authorization | {token} | Yes | + +#### Example Request + +```bash +curl -X GET http://localhost:4000/user/me \ + -H "Authorization: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." 
+``` + +#### Success Response (200 OK) + +```json +{ + "success": true, + "user": { + "id": 1, + "email": "user@example.com", + "name": "John Doe", + "firstname": "John", + "lastname": "Doe", + "current_team_id": 1, + "avatar_url": null + } +} +``` + +#### Error Responses + +| Status | Code | Message | +| ------ | --------------------- | ----------------------- | +| 401 | Unauthorized | No token provided | +| 401 | Unauthorized | Invalid token | +| 500 | Internal Server Error | Failed to get user info | + +--- + +## Authentication + +After successful login or registration, the API returns a JWT token. Include this token in the `Authorization` header for authenticated requests: + +``` +Authorization: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... +``` + +### Token Structure + +The JWT token contains the following claims: + +| Claim | Description | +| --------------- | -------------------------------- | +| id | User ID | +| email | User email | +| firstname | User first name | +| lastname | User last name | +| current_team_id | User's current team ID | +| salt | Random salt for token validation | +| iat | Issued at timestamp | +| exp | Expiration timestamp | + +### Token Expiration + +By default, tokens expire after 7 days. This can be configured via the `JWT_EXPIRES_IN` environment variable. + +--- + +## Development Credentials + +For local development, the following default user is available: + +| Field | Value | +| -------- | ------------------- | +| Email | dev@honeycomb.local | +| Password | honeycomb123 | + +--- + +## Error Response Format + +All error responses follow this format: + +```json +{ + "success": false, + "msg": "Error message describing what went wrong" +} +``` + +--- + +## Rate Limiting + +Currently, rate limiting is not enabled by default. It can be enabled via the `features.rate_limiting` config option. + +--- + +## CORS + +The API supports CORS. Configure the allowed origin via the `cors.origin` config option (default: `http://localhost:3000`). 
diff --git a/hive/docs/sdk-event-specification.md b/hive/docs/sdk-event-specification.md new file mode 100644 index 00000000..78ccf07b --- /dev/null +++ b/hive/docs/sdk-event-specification.md @@ -0,0 +1,703 @@ +# Aden SDK Trace Event Specification + +**Version:** 2.0.0 +**Last Updated:** 2026-01-08 + +This document defines the authoritative specification for all events transmitted between the Aden SDK and the Aden Hive control server. + +--- + +## Table of Contents + +1. [Overview](#overview) +2. [Event Types](#event-types) +3. [MetricEvent](#metricevent) +4. [ContentCapture (Layer 0)](#contentcapture-layer-0) +5. [ToolCallCapture (Layer 6)](#toolcallcapture-layer-6) +6. [ControlEvent](#controlevent) +7. [HeartbeatEvent](#heartbeatevent) +8. [ErrorEvent](#errorevent) +9. [API Endpoints](#api-endpoints) +10. [Storage Architecture](#storage-architecture) + +--- + +## Overview + +The Aden SDK captures telemetry from LLM API calls and transmits events to the Aden Hive server for: +- **Observability**: Token usage, latency, cost tracking +- **Governance**: Content capture, tool call validation +- **Control**: Budget enforcement, rate limiting, model degradation + +### Providers Supported + +| Provider | Value | +|----------|-------| +| OpenAI | `openai` | +| Anthropic | `anthropic` | +| Google Gemini | `gemini` | + +### Transport + +Events are sent via: +- **HTTP POST** to `/v1/control/events` (batch) +- **WebSocket** for real-time policy sync + +--- + +## Event Types + +| Event Type | Description | Direction | +|------------|-------------|-----------| +| `metric` | LLM call telemetry | SDK → Server | +| `control` | Control action taken | SDK → Server | +| `heartbeat` | Health status | SDK → Server | +| `error` | Error report | SDK → Server | + +--- + +## MetricEvent + +The primary event emitted after each LLM API call. Contains flat fields for consistent cross-provider analytics. 
+ +### Envelope Structure + +```json +{ + "event_type": "metric", + "timestamp": "2026-01-08T12:00:00.000Z", + "sdk_instance_id": "uuid-v4", + "data": { /* MetricEvent fields */ } +} +``` + +### MetricEvent Fields + +#### Identity (OpenTelemetry-compatible) + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `trace_id` | string | **Yes** | Trace ID grouping related operations | +| `span_id` | string | Yes | Unique span ID for this operation | +| `parent_span_id` | string | No | Parent span for nested calls | +| `request_id` | string | No | Provider-specific request ID | +| `call_sequence` | integer | Yes | Sequence number within the trace | + +#### Provider & Model + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `provider` | string | **Yes** | `openai`, `anthropic`, `gemini` | +| `model` | string | **Yes** | Model identifier (e.g., `gpt-4o`, `claude-3-opus`) | +| `stream` | boolean | Yes | Whether streaming was enabled | +| `timestamp` | string | **Yes** | ISO 8601 timestamp of request start | + +#### Performance + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `latency_ms` | float | Yes | Request latency in milliseconds | +| `status_code` | integer | No | HTTP status code | +| `error` | string | No | Error message if request failed | + +#### Token Usage + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `input_tokens` | integer | Yes | Input/prompt tokens consumed | +| `output_tokens` | integer | Yes | Output/completion tokens consumed | +| `total_tokens` | integer | Yes | Total tokens (input + output) | +| `cached_tokens` | integer | No | Tokens served from cache | +| `reasoning_tokens` | integer | No | Reasoning tokens (o1/o3 models) | + +#### Rate Limits + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `rate_limit_remaining_requests` | integer | No | 
Remaining requests in window | +| `rate_limit_remaining_tokens` | integer | No | Remaining tokens in window | +| `rate_limit_reset_requests` | float | No | Seconds until request limit resets | +| `rate_limit_reset_tokens` | float | No | Seconds until token limit resets | + +#### Call Context + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `agent_stack` | string[] | No | Stack of agent names leading to this call | +| `call_site_file` | string | No | File path of immediate caller | +| `call_site_line` | integer | No | Line number | +| `call_site_column` | integer | No | Column number | +| `call_site_function` | string | No | Function name | +| `call_stack` | string[] | No | Full call stack (file:line:function) | + +#### Tool Usage (Summary) + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `tool_call_count` | integer | No | Number of tool calls made | +| `tool_names` | string | No | Tool names (comma-separated) | + +#### Provider-specific + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `service_tier` | string | No | Service tier (auto, default, flex, priority) | +| `metadata` | object | No | Custom metadata attached to request | + +#### Layer 0: Content Capture + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `content_capture` | ContentCapture | No | Full content capture (see below) | + +#### Layer 6: Tool Call Deep Inspection + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `tool_calls_captured` | ToolCallCapture[] | No | Detailed tool call captures | +| `tool_validation_errors_count` | integer | No | Count of validation errors | + +### Example MetricEvent + +```json +{ + "event_type": "metric", + "timestamp": "2026-01-08T12:00:00.000Z", + "sdk_instance_id": "abc123", + "data": { + "trace_id": "tr_abc123", + "span_id": "sp_def456", + "call_sequence": 1, + 
"provider": "openai", + "model": "gpt-4o", + "stream": false, + "latency_ms": 1234.5, + "input_tokens": 150, + "output_tokens": 50, + "total_tokens": 200, + "cached_tokens": 0, + "agent_stack": ["main_agent", "sub_agent"], + "tool_call_count": 2, + "tool_names": "search,calculate", + "metadata": { + "user_id": "user_123", + "session_id": "sess_456" + }, + "content_capture": { + "system_prompt": "You are a helpful assistant.", + "messages": [...], + "response_content": "Here is my response...", + "finish_reason": "stop" + } + } +} +``` + +--- + +## ContentCapture (Layer 0) + +Full content capture for request and response. Enables governance, debugging, and compliance. + +### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `system_prompt` | string \| ContentReference | System prompt | +| `messages` | MessageCapture[] \| ContentReference | Message history | +| `tools` | ToolSchemaCapture[] \| ContentReference | Tools schema | +| `params` | RequestParamsCapture | Request parameters | +| `response_content` | string \| ContentReference | Response text | +| `finish_reason` | string | Why response ended: `stop`, `length`, `tool_calls`, `content_filter` | +| `choice_count` | integer | Number of choices (for n > 1) | +| `has_images` | boolean | Whether request contained images | +| `image_urls` | string[] | Image URLs (never base64) | + +### ContentReference + +When content exceeds `max_content_bytes`, it's stored separately and referenced: + +```json +{ + "content_id": "uuid-v4", + "content_hash": "sha256-hex", + "byte_size": 12345, + "truncated_preview": "First 100 chars..." 
+} +``` + +### MessageCapture + +```json +{ + "role": "user|assistant|system|tool", + "content": "string or ContentReference", + "name": "optional name", + "tool_call_id": "for tool results" +} +``` + +### ToolSchemaCapture + +```json +{ + "name": "function_name", + "description": "Tool description", + "parameters_schema": { /* JSON Schema */ } +} +``` + +### RequestParamsCapture + +```json +{ + "temperature": 0.7, + "max_tokens": 1000, + "top_p": 1.0, + "frequency_penalty": 0, + "presence_penalty": 0, + "stop": ["STOP"], + "seed": 12345, + "top_k": 40 +} +``` + +--- + +## ToolCallCapture (Layer 6) + +Detailed tool call capture with validation results. + +### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `id` | string | Tool call ID for correlation | +| `name` | string | Tool/function name | +| `arguments` | object \| ContentReference | Parsed arguments | +| `arguments_raw` | string \| ContentReference | Raw JSON string | +| `validation_errors` | ValidationError[] | Schema validation errors | +| `is_valid` | boolean | Whether arguments passed validation | +| `index` | integer | Position in tool_calls array | + +### ValidationError + +```json +{ + "path": "properties.name", + "message": "Required property missing", + "expected_type": "string", + "actual_type": "undefined" +} +``` + +--- + +## ControlEvent + +Emitted when a control action is taken on a request. 
+ +### Fields + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `event_type` | string | Yes | Always `"control"` | +| `timestamp` | string | Yes | ISO 8601 timestamp | +| `sdk_instance_id` | string | Yes | SDK instance identifier | +| `trace_id` | string | Yes | Associated trace ID | +| `span_id` | string | Yes | Associated span ID | +| `provider` | string | Yes | Provider name | +| `original_model` | string | Yes | Originally requested model | +| `action` | string | Yes | Action taken (see below) | +| `reason` | string | No | Human-readable reason | +| `degraded_to` | string | No | Model switched to (if degraded) | +| `throttle_delay_ms` | integer | No | Delay applied (if throttled) | +| `estimated_cost` | float | No | Estimated cost that triggered decision | +| `policy_id` | string | Yes | Policy ID (default: `"default"`) | +| `budget_id` | string | No | Budget that triggered action | +| `context_id` | string | No | Context ID (user, session, etc.) | + +### Control Actions + +| Action | Description | +|--------|-------------| +| `allow` | Request proceeds normally | +| `block` | Request is rejected | +| `throttle` | Request is delayed before proceeding | +| `degrade` | Request uses a cheaper/fallback model | +| `alert` | Request proceeds but triggers alert | + +### Example ControlEvent + +```json +{ + "event_type": "control", + "timestamp": "2026-01-08T12:00:00.000Z", + "sdk_instance_id": "abc123", + "trace_id": "tr_abc123", + "span_id": "sp_def456", + "provider": "openai", + "original_model": "gpt-4o", + "action": "degrade", + "reason": "Budget limit exceeded", + "degraded_to": "gpt-4o-mini", + "estimated_cost": 0.05, + "policy_id": "default", + "budget_id": "budget_monthly" +} +``` + +--- + +## HeartbeatEvent + +Periodic health check sent by the SDK. 
+ +### Fields + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `event_type` | string | Yes | Always `"heartbeat"` | +| `timestamp` | string | Yes | ISO 8601 timestamp | +| `sdk_instance_id` | string | Yes | SDK instance identifier | +| `status` | string | Yes | `healthy`, `degraded`, `reconnecting` | +| `requests_since_last` | integer | Yes | Requests since last heartbeat | +| `errors_since_last` | integer | Yes | Errors since last heartbeat | +| `policy_cache_age_seconds` | integer | Yes | Policy cache age | +| `websocket_connected` | boolean | Yes | WebSocket connection status | +| `sdk_version` | string | Yes | SDK version | + +--- + +## ErrorEvent + +Emitted when an error occurs in the SDK. + +### Fields + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `event_type` | string | Yes | Always `"error"` | +| `timestamp` | string | Yes | ISO 8601 timestamp | +| `sdk_instance_id` | string | Yes | SDK instance identifier | +| `message` | string | Yes | Error message | +| `code` | string | No | Error code | +| `stack` | string | No | Stack trace | +| `trace_id` | string | No | Related trace ID | + +--- + +## API Endpoints + +### POST /v1/control/events + +Submit events batch. + +**Request:** +```json +{ + "events": [ + { "event_type": "metric", "timestamp": "...", "data": {...} }, + { "event_type": "control", "timestamp": "...", ... } + ] +} +``` + +**Response:** +```json +{ + "success": true, + "processed": 2 +} +``` + +### POST /v1/control/content + +Store large content items (MongoDB - for SDK content references). + +**Request:** +```json +{ + "items": [ + { + "content_id": "uuid", + "content_hash": "sha256-hex", + "content": "full content string", + "byte_size": 12345 + } + ] +} +``` + +**Response:** +```json +{ + "success": true, + "stored": 1 +} +``` + +### GET /v1/control/content/:contentId + +Retrieve stored content by ID (MongoDB). 
+ +**Response:** +```json +{ + "content_id": "uuid", + "content_hash": "sha256-hex", + "content": "full content string", + "byte_size": 12345 +} +``` + +### GET /v1/control/events/:traceId/:callSequence/content + +Retrieve content for a specific event from TSDB warm/cold storage. + +**Response:** +```json +{ + "trace_id": "tr_abc123", + "call_sequence": 1, + "content_items": [ + { + "content_type": "system_prompt", + "content_hash": "sha256-hex", + "byte_size": 256, + "truncated_preview": "You are a helpful...", + "content": "You are a helpful assistant..." + }, + { + "content_type": "messages", + "content_hash": "sha256-hex", + "byte_size": 4096, + "message_count": 5, + "truncated_preview": "[{\"role\":\"user\"...", + "content": "[{\"role\":\"user\",\"content\":\"Hello\"}...]" + }, + { + "content_type": "response", + "content_hash": "sha256-hex", + "byte_size": 512, + "truncated_preview": "Here is my response...", + "content": "Here is my response to your question..." + } + ], + "count": 3 +} +``` + +### GET /v1/control/content/hash/:contentHash + +Retrieve content from cold storage by SHA-256 hash. + +**Response:** +```json +{ + "content_hash": "sha256-hex", + "content": "full content string", + "byte_size": 12345 +} +``` + +### GET /v1/control/policy + +Fetch current control policy. + +### POST /v1/control/budget/validate + +Server-side budget validation (hybrid enforcement). + +--- + +## Storage Architecture + +The storage system uses a **hot/warm/cold** architecture optimized for time-series analytics with content deduplication. 
+ +``` +┌─────────────────────────────────────────────────────────────────────┐ +│ SDK Event Ingestion │ +└─────────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────────┐ +│ Event Normalization & Content Extraction │ +│ │ +│ • Extract content_capture fields │ +│ • Hash content with SHA-256 │ +│ • Create lightweight content flags for hot table │ +└─────────────────────────────────────────────────────────────────────┘ + │ + ┌───────────────┼───────────────┐ + ▼ ▼ ▼ + ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ + │ HOT TABLE │ │ WARM TABLE │ │ COLD TABLE │ + │ llm_events │ │llm_event_ │ │llm_content_ │ + │ │ │ content │ │ store │ + │ Metrics only │ │Content refs │ │ Deduplicated │ + │ Fast queries │ │ per event │ │ content │ + └──────────────┘ └──────────────┘ └──────────────┘ +``` + +### Design Principles + +1. **Hot/Cold Separation**: Metrics stay in the hot table for fast time-series queries; content is stored separately +2. **Content Deduplication**: Identical content (same SHA-256 hash) is stored once, regardless of how many events reference it +3. **Reference Counting**: Cold storage tracks how many events reference each piece of content +4. **Preview Without Fetch**: Warm table stores truncated previews for quick scanning without fetching full content + +### TSDB Hot Table: `llm_events` + +Stores metric events for fast time-series analytics. **Content is NOT stored here** (only lightweight flags). 
+ +| Column | Type | Description | +|--------|------|-------------| +| `timestamp` | timestamptz | Event timestamp (partition key) | +| `ingest_date` | date | Ingestion date | +| `team_id` | text | Team identifier | +| `user_id` | text | User identifier | +| `trace_id` | text | Trace ID | +| `span_id` | text | Span ID | +| `parent_span_id` | text | Parent span ID | +| `request_id` | text | Provider request ID | +| `provider` | text | Provider name | +| `call_sequence` | integer | Sequence within trace | +| `model` | text | Model identifier | +| `stream` | boolean | Streaming flag | +| `agent` | text | Primary agent name | +| `agent_stack` | jsonb | Full agent stack | +| `latency_ms` | double precision | Latency in ms | +| `usage_input_tokens` | double precision | Input tokens | +| `usage_output_tokens` | double precision | Output tokens | +| `usage_total_tokens` | double precision | Total tokens | +| `usage_cached_tokens` | double precision | Cached tokens | +| `usage_reasoning_tokens` | double precision | Reasoning tokens | +| `cost_total` | numeric | Calculated cost | +| `metadata` | jsonb | Custom metadata | +| `call_site` | jsonb | Call site info | +| `has_content` | boolean | Whether content was captured | +| `finish_reason` | text | Response finish reason | +| `tool_call_count` | integer | Number of tool calls | +| `created_at` | timestamptz | Record creation time | + +**Primary Key:** `(timestamp, trace_id, call_sequence)` + +**Indexes:** +- `idx_llm_events_ts` - timestamp DESC +- `idx_llm_events_team_ts` - team_id, timestamp DESC +- `idx_llm_events_model` - model +- `idx_llm_events_agent` - agent +- `idx_llm_events_trace` - trace_id + +### TSDB Warm Table: `llm_event_content` + +Links events to deduplicated content in cold storage. One row per content type per event. 
+ +| Column | Type | Description | +|--------|------|-------------| +| `id` | bigserial | Auto-increment ID | +| `timestamp` | timestamptz | Event timestamp | +| `trace_id` | text | Trace ID | +| `call_sequence` | integer | Sequence within trace | +| `team_id` | text | Team identifier | +| `content_type` | text | Type: `system_prompt`, `messages`, `response`, `tools`, `params` | +| `content_hash` | text | SHA-256 hash (FK to cold store) | +| `byte_size` | integer | Content size in bytes | +| `message_count` | integer | Number of messages (for `messages` type) | +| `truncated_preview` | text | First 200 chars for quick preview | +| `created_at` | timestamptz | Record creation time | + +**Primary Key:** `(id)` + +**Indexes:** +- `idx_llm_event_content_event` - trace_id, call_sequence, timestamp +- `idx_llm_event_content_type` - team_id, content_type, timestamp DESC +- `idx_llm_event_content_hash` - content_hash + +### TSDB Cold Table: `llm_content_store` + +Content-addressable storage with SHA-256 hashes. Deduplicated across all events. + +| Column | Type | Description | +|--------|------|-------------| +| `content_hash` | text | SHA-256 hash of content (PK) | +| `team_id` | text | Team identifier (PK) | +| `content` | text | Full content string | +| `byte_size` | integer | Content size in bytes | +| `ref_count` | integer | Number of events referencing this content | +| `first_seen_at` | timestamptz | When content was first stored | +| `last_seen_at` | timestamptz | When content was last referenced | + +**Primary Key:** `(content_hash, team_id)` + +**Indexes:** +- `idx_llm_content_store_refs` - team_id, ref_count, last_seen_at (for cleanup) + +### MongoDB: `aden_control_content` + +Stores large content items from SDK's content reference system (separate from TSDB storage). 
+ +| Field | Type | Description | +|-------|------|-------------| +| `content_id` | string | Unique content identifier | +| `team_id` | string | Team identifier | +| `content_hash` | string | SHA-256 hash | +| `content` | string | Full content | +| `byte_size` | number | Content size in bytes | +| `created_at` | string | Creation timestamp | +| `updated_at` | string | Last update timestamp | + +**Index:** `{ content_id: 1, team_id: 1 }` (unique) + +### MongoDB: `aden_control_policies` + +Stores control policies for teams. + +--- + +## Content Types + +The warm table stores references to different content types: + +| Type | Description | Example | +|------|-------------|---------| +| `system_prompt` | System/developer message | "You are a helpful assistant..." | +| `messages` | Full conversation history | JSON array of messages | +| `response` | Model's response content | "Here is my response..." | +| `tools` | Tool/function schemas | JSON array of tool definitions | +| `params` | Request parameters | `{"temperature": 0.7, "max_tokens": 1000}` | + +--- + +## Deduplication Example + +When the same system prompt is used across multiple requests: + +``` +Request 1: system_prompt = "You are a helpful assistant." + → Hash: abc123... + → Cold store: INSERT (ref_count = 1) + → Warm store: INSERT reference for event 1 + +Request 2: system_prompt = "You are a helpful assistant." (same) + → Hash: abc123... (same hash) + → Cold store: UPDATE ref_count = 2 + → Warm store: INSERT reference for event 2 + +Request 3: system_prompt = "You are a code reviewer." + → Hash: def456... (different) + → Cold store: INSERT (ref_count = 1) + → Warm store: INSERT reference for event 3 +``` + +This means the first system prompt is stored **once** but referenced by two events. 
+ +--- + +## Version History + +| Version | Date | Changes | +|---------|------|---------| +| 2.0.0 | 2026-01-08 | Hot/warm/cold storage architecture; content deduplication | +| 1.0.0 | 2026-01-08 | Initial specification | diff --git a/hive/k8s/base/deployment.yaml b/hive/k8s/base/deployment.yaml new file mode 100644 index 00000000..6023d634 --- /dev/null +++ b/hive/k8s/base/deployment.yaml @@ -0,0 +1,101 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: aden-hive + labels: + app: aden-hive + app.kubernetes.io/name: aden-hive +spec: + replicas: 1 + selector: + matchLabels: + app: aden-hive + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + template: + metadata: + labels: + app: aden-hive + app.kubernetes.io/name: aden-hive + spec: + containers: + - name: aden-hive + image: aden-hive + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 3001 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MYSQL_SSL_CA + value: /mnt/certs/mysql/server-ca.pem + - name: MYSQL_SSL_KEY + value: /mnt/certs/mysql/client-key.pem + - name: MYSQL_SSL_CERT + value: /mnt/certs/mysql/client-cert.pem + volumeMounts: + - name: mysql-ssl-certs + mountPath: /mnt/certs/mysql + readOnly: true + envFrom: + - configMapRef: + name: aden-hive-config + - secretRef: + name: aden-hive-secrets + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 1000m + memory: 512Mi + livenessProbe: + httpGet: + path: /health + port: 3001 + initialDelaySeconds: 60 + periodSeconds: 60 + timeoutSeconds: 15 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /health + port: 3001 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 5 + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: 
true + runAsUser: 1001 + capabilities: + drop: + - ALL + volumes: + - name: mysql-ssl-certs + secret: + secretName: mysql-ssl-certs + defaultMode: 0444 + items: + - key: server-ca.pem + path: server-ca.pem + - key: client-key.pem + path: client-key.pem + - key: client-cert.pem + path: client-cert.pem diff --git a/hive/k8s/base/kustomization.yaml b/hive/k8s/base/kustomization.yaml new file mode 100644 index 00000000..5b98e944 --- /dev/null +++ b/hive/k8s/base/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - deployment.yaml + - service.yaml diff --git a/hive/k8s/base/service.yaml b/hive/k8s/base/service.yaml new file mode 100644 index 00000000..3df25259 --- /dev/null +++ b/hive/k8s/base/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: aden-hive + labels: + app: aden-hive +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 3001 + protocol: TCP + name: http + selector: + app: aden-hive diff --git a/hive/k8s/overlays/production/kustomization.yaml b/hive/k8s/overlays/production/kustomization.yaml new file mode 100644 index 00000000..b426fcc9 --- /dev/null +++ b/hive/k8s/overlays/production/kustomization.yaml @@ -0,0 +1,21 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: production + +resources: + - ../../base + - namespace.yaml + +namePrefix: prod- + +commonLabels: + environment: production + +images: + - name: aden-hive + newName: gcr.io/tool-for-analyst/aden-hive + newTag: latest + +patches: + - path: patches/deployment.yaml diff --git a/hive/k8s/overlays/production/namespace.yaml b/hive/k8s/overlays/production/namespace.yaml new file mode 100644 index 00000000..43df7d33 --- /dev/null +++ b/hive/k8s/overlays/production/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: production + labels: + environment: production diff --git a/hive/k8s/overlays/production/patches/deployment.yaml 
b/hive/k8s/overlays/production/patches/deployment.yaml new file mode 100644 index 00000000..f1f2423c --- /dev/null +++ b/hive/k8s/overlays/production/patches/deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: aden-hive +spec: + replicas: 2 + template: + spec: + containers: + - name: aden-hive + env: + - name: NODE_ENV + value: production + envFrom: + - configMapRef: + name: aden-api-server-config + - secretRef: + name: aden-api-server-secrets + - secretRef: + name: database-secrets + resources: + requests: + cpu: 500m + memory: 512Mi + limits: + cpu: 1000m + memory: 1Gi diff --git a/hive/k8s/overlays/staging/kustomization.yaml b/hive/k8s/overlays/staging/kustomization.yaml new file mode 100644 index 00000000..4f83faf1 --- /dev/null +++ b/hive/k8s/overlays/staging/kustomization.yaml @@ -0,0 +1,21 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: staging + +resources: + - ../../base + - namespace.yaml + +namePrefix: staging- + +commonLabels: + environment: staging + +images: + - name: aden-hive + newName: gcr.io/acho-alpha-project/aden-hive + newTag: latest + +patches: + - path: patches/deployment.yaml diff --git a/hive/k8s/overlays/staging/namespace.yaml b/hive/k8s/overlays/staging/namespace.yaml new file mode 100644 index 00000000..11eb8621 --- /dev/null +++ b/hive/k8s/overlays/staging/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: staging + labels: + environment: staging diff --git a/hive/k8s/overlays/staging/patches/deployment.yaml b/hive/k8s/overlays/staging/patches/deployment.yaml new file mode 100644 index 00000000..962acbf8 --- /dev/null +++ b/hive/k8s/overlays/staging/patches/deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: aden-hive +spec: + replicas: 1 + template: + spec: + containers: + - name: aden-hive + env: + - name: NODE_ENV + value: staging + envFrom: + - configMapRef: + name: aden-api-server-config + 
- secretRef: + name: aden-api-server-secrets + - secretRef: + name: database-secrets + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 500m + memory: 512Mi diff --git a/hive/package.json b/hive/package.json new file mode 100644 index 00000000..e4c314b4 --- /dev/null +++ b/hive/package.json @@ -0,0 +1,61 @@ +{ + "name": "hive", + "version": "1.0.0", + "description": "Aden Hive - LLM observability and control plane backend", + "private": true, + "main": "dist/index.js", + "scripts": { + "dev": "ts-node-dev --respawn --transpile-only src/index.ts", + "build": "tsc && npm run build:copy-sql", + "build:copy-sql": "find src -name '*.sql' -exec sh -c 'mkdir -p dist/$(dirname ${1#src/}) && cp \"$1\" dist/${1#src/}' _ {} \\;", + "start": "node dist/index.js", + "test": "jest", + "test:mcp": "ts-node --transpile-only scripts/test-mcp.ts", + "test:mcp:quick": "./scripts/test-mcp-curl.sh", + "lint": "eslint src/", + "typecheck": "tsc --noEmit", + "clean": "rm -rf dist node_modules" + }, + "dependencies": { + "@acho-inc/administration": "^1.0.7", + "@modelcontextprotocol/sdk": "^1.25.2", + "@socket.io/redis-adapter": "^8.2.1", + "@socket.io/redis-emitter": "^5.1.0", + "compression": "^1.7.4", + "cors": "^2.8.5", + "dotenv": "^16.3.1", + "express": "^4.18.2", + "helmet": "^7.1.0", + "http-errors": "^2.0.0", + "ioredis": "^5.3.2", + "jsonwebtoken": "^9.0.2", + "mongodb": "^6.3.0", + "morgan": "^1.10.0", + "passport": "^0.7.0", + "passport-jwt": "^4.0.1", + "pg": "^8.11.3", + "socket.io": "^4.6.1", + "zod": "^4.3.5" + }, + "devDependencies": { + "@types/compression": "^1.7.5", + "@types/cors": "^2.8.17", + "@types/express": "^4.17.21", + "@types/jsonwebtoken": "^9.0.5", + "@types/morgan": "^1.9.9", + "@types/node": "^20.10.0", + "@types/passport": "^1.0.16", + "@types/passport-jwt": "^4.0.1", + "@types/pg": "^8.10.9", + "@typescript-eslint/eslint-plugin": "^6.14.0", + "@typescript-eslint/parser": "^6.14.0", + "eslint": "^8.56.0", + "jest": "^29.7.0", + 
"ts-node": "^10.9.2", + "ts-node-dev": "^2.0.0", + "typescript": "^5.3.0" + }, + "engines": { + "node": ">=18.0.0" + } +} diff --git a/hive/scripts/migrate-add-agent-name.ts b/hive/scripts/migrate-add-agent-name.ts new file mode 100644 index 00000000..f9d8f945 --- /dev/null +++ b/hive/scripts/migrate-add-agent-name.ts @@ -0,0 +1,129 @@ +/** + * Migration: Add agent_name column to llm_events table + * + * This script adds the `agent_name` column to all existing team schemas. + * Run with: npx ts-node scripts/migrate-add-agent-name.ts + * + * Environment variables required: + * - PGHOST, PGUSER, PGPASSWORD, PGDATABASE, PGPORT (or PG_CONNECTION_STRING) + */ + +import { Pool } from "pg"; + +const getPool = (): Pool => { + // Support multiple env var names + const connectionString = + process.env.TSDB_PG_URL || + process.env.PG_CONNECTION_STRING || + process.env.DATABASE_URL; + + if (connectionString) { + return new Pool({ connectionString }); + } + + return new Pool({ + host: process.env.PGHOST || "localhost", + user: process.env.PGUSER || "postgres", + password: process.env.PGPASSWORD || "postgres", + database: process.env.PGDATABASE || "aden", + port: parseInt(process.env.PGPORT || "5432", 10), + }); +}; + +async function migrate() { + const pool = getPool(); + + try { + console.log("[Migration] Starting agent_name column migration..."); + + // Find all team schemas (schemas starting with 'team_') + const schemasResult = await pool.query(` + SELECT schema_name + FROM information_schema.schemata + WHERE schema_name LIKE 'team_%' + ORDER BY schema_name + `); + + const schemas = schemasResult.rows.map((r) => r.schema_name as string); + console.log(`[Migration] Found ${schemas.length} team schemas`); + + if (schemas.length === 0) { + console.log("[Migration] No team schemas found. 
Nothing to migrate."); + return; + } + + let successCount = 0; + let skipCount = 0; + let errorCount = 0; + + for (const schema of schemas) { + try { + // Check if llm_events table exists in this schema + const tableExists = await pool.query( + ` + SELECT 1 + FROM information_schema.tables + WHERE table_schema = $1 AND table_name = 'llm_events' + `, + [schema] + ); + + if (tableExists.rows.length === 0) { + console.log(`[Migration] ${schema}: No llm_events table, skipping`); + skipCount++; + continue; + } + + // Check if agent_name column already exists + const columnExists = await pool.query( + ` + SELECT 1 + FROM information_schema.columns + WHERE table_schema = $1 + AND table_name = 'llm_events' + AND column_name = 'agent_name' + `, + [schema] + ); + + if (columnExists.rows.length > 0) { + console.log(`[Migration] ${schema}: agent_name column already exists, skipping`); + skipCount++; + continue; + } + + // Add the agent_name column after agent column + await pool.query(` + ALTER TABLE ${schema}.llm_events + ADD COLUMN agent_name text + `); + + console.log(`[Migration] ${schema}: Added agent_name column`); + successCount++; + } catch (err) { + console.error(`[Migration] ${schema}: Error - ${(err as Error).message}`); + errorCount++; + } + } + + console.log("\n[Migration] Summary:"); + console.log(` - Schemas processed: ${schemas.length}`); + console.log(` - Successfully migrated: ${successCount}`); + console.log(` - Skipped (already migrated or no table): ${skipCount}`); + console.log(` - Errors: ${errorCount}`); + + if (errorCount === 0) { + console.log("\n[Migration] Completed successfully!"); + } else { + console.log("\n[Migration] Completed with errors. 
Please review above."); + process.exit(1); + } + } catch (err) { + console.error("[Migration] Fatal error:", (err as Error).message); + process.exit(1); + } finally { + await pool.end(); + } +} + +migrate(); diff --git a/hive/scripts/test-mcp-curl.sh b/hive/scripts/test-mcp-curl.sh new file mode 100755 index 00000000..ed4d1f28 --- /dev/null +++ b/hive/scripts/test-mcp-curl.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# +# Quick MCP Server Test using curl +# +# Usage: +# ADEN_AUTH_TOKEN=your-jwt-token ./scripts/test-mcp-curl.sh +# +# The script tests basic connectivity and endpoints. + +set -e + +API_URL="${ADEN_API_URL:-http://localhost:3000}" +TOKEN="${ADEN_AUTH_TOKEN}" + +if [ -z "$TOKEN" ]; then + echo "Error: ADEN_AUTH_TOKEN environment variable is required" + echo "Usage: ADEN_AUTH_TOKEN=your-jwt-token ./scripts/test-mcp-curl.sh" + exit 1 +fi + +echo "============================================================" +echo "MCP Server Quick Test" +echo "============================================================" +echo "API URL: $API_URL" +echo "" + +# Test 1: Health check +echo "1. Health Check (GET /mcp/health)" +curl -s "$API_URL/mcp/health" | jq . +echo "" + +# Test 2: List sessions (should be empty or show existing) +echo "2. List Sessions (GET /mcp/sessions)" +curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/mcp/sessions" | jq . +echo "" + +# Test 3: Start SSE connection and capture session ID +echo "3. Testing SSE Connection (GET /mcp)" +echo " Starting connection (will timeout after 2s)..." + +# Use timeout to limit the SSE connection +SESSION_ID=$(timeout 2s curl -s -N \ + -H "Authorization: Bearer $TOKEN" \ + -H "Accept: text/event-stream" \ + "$API_URL/mcp" 2>&1 | head -5 || true) + +echo " Response (first 5 lines):" +echo "$SESSION_ID" | head -5 +echo "" + +# Test 4: Check sessions again +echo "4. Sessions After Connection (GET /mcp/sessions)" +curl -s -H "Authorization: Bearer $TOKEN" "$API_URL/mcp/sessions" | jq . 
+echo "" + +echo "============================================================" +echo "Quick test completed!" +echo "" +echo "For full tool testing, use the TypeScript test client:" +echo " ADEN_AUTH_TOKEN=\$TOKEN npx ts-node scripts/test-mcp.ts" +echo "============================================================" diff --git a/hive/scripts/test-mcp.ts b/hive/scripts/test-mcp.ts new file mode 100644 index 00000000..bb0be516 --- /dev/null +++ b/hive/scripts/test-mcp.ts @@ -0,0 +1,176 @@ +/** + * MCP Server Test Script + * + * Tests the MCP server by connecting via HTTP/SSE and invoking tools. + * + * Usage: + * npx ts-node scripts/test-mcp.ts + * + * Environment: + * ADEN_API_URL - Base URL (default: http://localhost:3000) + * ADEN_AUTH_TOKEN - JWT token for authentication + */ + +import { Client } from "@modelcontextprotocol/sdk/client/index.js"; +import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js"; + +const API_URL = process.env.ADEN_API_URL || "http://localhost:3000"; +const AUTH_TOKEN = process.env.ADEN_AUTH_TOKEN; + +if (!AUTH_TOKEN) { + console.error("Error: ADEN_AUTH_TOKEN environment variable is required"); + console.error("Usage: ADEN_AUTH_TOKEN=your-jwt-token npx ts-node scripts/test-mcp.ts"); + process.exit(1); +} + +async function main() { + console.log("=".repeat(60)); + console.log("MCP Server Test"); + console.log("=".repeat(60)); + console.log(`API URL: ${API_URL}`); + console.log(""); + + // Create MCP client + const client = new Client({ + name: "mcp-test-client", + version: "1.0.0", + }); + + // Create SSE transport with auth headers + const transport = new SSEClientTransport(new URL(`${API_URL}/mcp`), { + requestInit: { + headers: { + Authorization: `Bearer ${AUTH_TOKEN}`, + }, + }, + }); + + try { + // Connect to MCP server + console.log("Connecting to MCP server..."); + await client.connect(transport); + console.log("✓ Connected successfully\n"); + + // List available tools + console.log("Listing available tools..."); + 
const tools = await client.listTools(); + console.log(`✓ Found ${tools.tools.length} tools:\n`); + + // Group tools by category + const categories: Record<string, string[]> = { + budget: [], + agents: [], + analytics: [], + policies: [], + }; + + for (const tool of tools.tools) { + if (tool.name.includes("budget")) { + categories.budget.push(tool.name); + } else if (tool.name.includes("agent")) { + categories.agents.push(tool.name); + } else if ( + tool.name.includes("analytics") || + tool.name.includes("insights") || + tool.name.includes("metrics") || + tool.name.includes("logs") + ) { + categories.analytics.push(tool.name); + } else if (tool.name.includes("polic")) { + categories.policies.push(tool.name); + } + } + + for (const [category, toolNames] of Object.entries(categories)) { + console.log(` ${category.toUpperCase()} (${toolNames.length}):`); + for (const name of toolNames) { + console.log(` - ${name}`); + } + } + console.log(""); + + // Run test scenarios + console.log("=".repeat(60)); + console.log("Running Test Scenarios"); + console.log("=".repeat(60)); + console.log(""); + + // Test 1: Get policy + await runTest(client, "hive_policy_get", { policyId: "default" }, "Get default policy"); + + // Test 2: List agents + await runTest(client, "hive_agents_summary", {}, "Get agent fleet summary"); + + // Test 3: Get insights + await runTest(client, "hive_insights", { days: 7 }, "Get 7-day insights"); + + // Test 4: Get metrics + await runTest(client, "hive_metrics", { days: 30 }, "Get 30-day metrics"); + + // Test 5: Budget validation (dry run) + await runTest( + client, + "hive_budget_validate", + { + estimatedCost: 0.01, + context: { agent: "test-agent" }, + }, + "Validate budget (dry run)" + ); + + console.log("=".repeat(60)); + console.log("All tests completed!"); + console.log("=".repeat(60)); + } catch (error) { + console.error("Error:", error); + process.exit(1); + } finally { + await client.close(); + } +} + +async function runTest( + client: Client, + toolName: string, 
+ args: Record<string, unknown>, + description: string +) { + console.log(`Test: ${description}`); + console.log(` Tool: ${toolName}`); + console.log(` Args: ${JSON.stringify(args)}`); + + try { + const startTime = Date.now(); + const result = await client.callTool({ name: toolName, arguments: args }); + const duration = Date.now() - startTime; + + console.log(` Status: ✓ Success (${duration}ms)`); + + // Parse and display result + if (result.content && result.content.length > 0) { + const textContent = result.content.find((c) => c.type === "text"); + if (textContent && "text" in textContent) { + try { + const parsed = JSON.parse(textContent.text); + console.log(` Result: ${JSON.stringify(parsed, null, 2).split("\n").slice(0, 10).join("\n")}`); + if (JSON.stringify(parsed, null, 2).split("\n").length > 10) { + console.log(" ... (truncated)"); + } + } catch { + console.log(` Result: ${textContent.text.slice(0, 200)}...`); + } + } + } + + if (result.isError) { + console.log(` Warning: Tool returned isError=true`); + } + } catch (error) { + console.log(` Status: ✗ Failed`); + console.log(` Error: ${error instanceof Error ? error.message : error}`); + } + + console.log(""); +} + +main().catch(console.error); diff --git a/hive/src/app.ts b/hive/src/app.ts new file mode 100644 index 00000000..197dc493 --- /dev/null +++ b/hive/src/app.ts @@ -0,0 +1,150 @@ +/** + * Express App Configuration + * + * Sets up Express with middleware and routes. + * No global state - uses dependency injection. + * Supports both MySQL (production) and PostgreSQL (local development) for user auth. 
+ */ + +import express, { Request, Response } from 'express'; +import compression from 'compression'; +import cors from 'cors'; +import passport from 'passport'; +import { Pool } from 'pg'; + +import { auth, database, models } from '@acho-inc/administration'; +import config from './config'; +import routes from './routes'; +import { errorHandler } from './middleware/error-handler.middleware'; +import { createMcpRouter } from './mcp'; + +// Initialize Express app +const app = express(); + +// ============================================================================= +// Middleware +// ============================================================================= + +app.use(compression({ + filter: (req, res) => { + // Don't compress SSE responses - compression breaks streaming + if (req.headers.accept === 'text/event-stream' || + req.path.endsWith('/stream')) { + return false; + } + return compression.filter(req, res); + } +})); +app.use(cors()); + +// Skip body parsing for MCP message route (SDK's handlePostMessage reads raw body stream) +app.use((req, res, next) => { + if (req.path === '/mcp/message') { + return next(); + } + express.json({ limit: '10mb' })(req, res, next); +}); +app.use((req, res, next) => { + if (req.path === '/mcp/message') { + return next(); + } + express.urlencoded({ extended: true })(req, res, next); +}); + +// Disable x-powered-by header +app.disable('x-powered-by'); + +// ============================================================================= +// Database Connections +// ============================================================================= + +let userDbService: ReturnType<typeof models.createUserDbService>; + +if (config.userDbType === 'postgres') { + // PostgreSQL for local development + console.log('[App] Using PostgreSQL for user authentication'); + + const pgPool = new Pool({ + connectionString: config.userDb.url, + }); + + userDbService = models.createUserDbService({ + pgPool, + dbType: 'postgres', + tables: { + USER: 'users', + DEVELOPERS: 'developers', 
+ }, + }); + + app.locals.pgPool = pgPool; +} else { + // MySQL for production + console.log('[App] Using MySQL for user authentication'); + + const mysqlPool = database.createMySQLPool(config.mysql); + + userDbService = models.createUserDbService({ + mysqlPool, + tables: { + USER: 'user', + DEVELOPERS: 'developers', + }, + }); + + app.locals.mysqlPool = mysqlPool; +} + +// Store user service in app.locals for access in routes +app.locals.userDbService = userDbService; + +// ============================================================================= +// Passport Authentication +// ============================================================================= + +const passportStrategy = auth.createPassportStrategy({ + findSaltByToken: userDbService.findSaltByToken, + jwtSecret: config.jwt.secret, +}); + +passport.use(passportStrategy); +app.use(passport.initialize()); + +// ============================================================================= +// Routes +// ============================================================================= + +// Health check (unauthenticated) +app.get('/health', (req: Request, res: Response) => { + res.json({ + status: 'ok', + service: 'aden-hive', + timestamp: new Date().toISOString(), + userDbType: config.userDbType, + }); +}); + +// API routes +app.use('/', routes); + +// MCP Server routes (Model Context Protocol) +// The controlEmitter is set in index.ts after WebSocket initialization +const mcpRouter = createMcpRouter(() => app.locals.controlEmitter); +app.use('/mcp', mcpRouter); + +// ============================================================================= +// Error Handling +// ============================================================================= + +// 404 handler +app.use((req: Request, res: Response) => { + res.status(404).json({ + error: 'not_found', + message: `Route ${req.method} ${req.path} not found`, + }); +}); + +// Global error handler +app.use(errorHandler); + +export default app; diff --git 
a/hive/src/config/index.ts b/hive/src/config/index.ts new file mode 100644 index 00000000..6422efe2 --- /dev/null +++ b/hive/src/config/index.ts @@ -0,0 +1,134 @@ +/** + * Configuration Module + * + * Centralizes all configuration loading and validation. + * Supports both MySQL (production) and PostgreSQL (local development) for user database. + */ + +import fs from 'fs'; + +/** + * Helper function to safely read SSL certificates + * @param {string} envKey - Environment variable containing cert path + * @param {string} fallbackPath - Fallback path if env var not set + * @returns {Buffer|null} Certificate content or null + */ +function readCertificate(envKey: string, fallbackPath: string): Buffer | null { + const certPath = process.env[envKey]; + if (certPath && fs.existsSync(certPath)) { + return fs.readFileSync(certPath); + } + if (fallbackPath && fs.existsSync(fallbackPath)) { + return fs.readFileSync(fallbackPath); + } + return null; +} + +/** + * Load MySQL SSL certificates from environment or default paths + * @returns {Object|null} SSL config object or null if certs not found + */ +function loadMySQLSSL(): { ca: Buffer; key: Buffer; cert: Buffer } | null { + const ca = readCertificate('MYSQL_SSL_CA', '/mnt/certs/mysql/server-ca.pem'); + const key = readCertificate('MYSQL_SSL_KEY', '/mnt/certs/mysql/client-key.pem'); + const cert = readCertificate('MYSQL_SSL_CERT', '/mnt/certs/mysql/client-cert.pem'); + + return ca && key && cert ? { ca, key, cert } : null; +} + +/** + * Determine which database type to use for user authentication + * Priority: USER_DB_TYPE env var > MySQL if configured > PostgreSQL fallback + */ +function getUserDbType(): 'mysql' | 'postgres' { + const explicit = process.env.USER_DB_TYPE?.toLowerCase(); + if (explicit === 'mysql' || explicit === 'postgres') { + return explicit; + } + // Default to MySQL if MySQL host is configured, otherwise use PostgreSQL + return process.env.MYSQL_HOST ? 
'mysql' : 'postgres'; +} + +const config = { + // Server + port: parseInt(process.env.PORT as string, 10) || 4000, + nodeEnv: process.env.NODE_ENV || 'development', + + // TSDB PostgreSQL (metrics storage) + tsdb: { + url: process.env.TSDB_PG_URL, + }, + + // User Database Type ('mysql' or 'postgres') + userDbType: getUserDbType(), + + // User Database (MySQL) - for production + mysql: { + host: process.env.MYSQL_HOST, + port: parseInt(process.env.MYSQL_PORT as string, 10) || 3306, + user: process.env.MYSQL_USER, + password: process.env.MYSQL_PASSWORD, + database: process.env.MYSQL_DATABASE, + ssl: loadMySQLSSL(), + }, + + // User Database (PostgreSQL) - for local development + // Defaults to same DB as TSDB if not specified + userDb: { + url: process.env.USER_DB_PG_URL || process.env.TSDB_PG_URL, + }, + + // MongoDB + mongodb: { + url: process.env.MONGODB_URL, + dbName: process.env.MONGODB_DBNAME || 'aden', + erpDbName: process.env.MONGODB_ERP_DBNAME || 'erp', + }, + + // Redis + redis: { + url: process.env.REDIS_URL, + }, + + // JWT + jwt: { + secret: process.env.JWT_SECRET || 'dev-secret-change-in-production', + expiresIn: process.env.JWT_EXPIRES_IN || '7d', + passphrase: process.env.PASSPHRASE, + }, +}; + +/** + * Validates required configuration + * @throws {Error} If required config is missing + */ +function validateConfig(): void { + const required: [string, string | undefined][] = [ + ['TSDB_PG_URL', config.tsdb.url], + ]; + + // Add database-specific requirements + if (config.userDbType === 'mysql') { + required.push( + ['MYSQL_HOST', config.mysql.host], + ['MYSQL_USER', config.mysql.user], + ['MYSQL_DATABASE', config.mysql.database], + ); + } else { + required.push(['USER_DB_PG_URL or TSDB_PG_URL', config.userDb.url]); + } + + const missing = required.filter(([name, value]) => !value); + + if (missing.length > 0) { + const names = missing.map(([name]) => name).join(', '); + console.warn(`[Config] Warning: Missing environment variables: ${names}`); + } + + 
console.log(`[Config] User database type: ${config.userDbType}`); +} + +// Validate on load +validateConfig(); + +export default config; diff --git a/hive/src/controllers/control.controller.ts b/hive/src/controllers/control.controller.ts new file mode 100644 index 00000000..6c5b85a9 --- /dev/null +++ b/hive/src/controllers/control.controller.ts @@ -0,0 +1,1885 @@ +/** + * Aden Control Controller + * + * HTTP endpoints for Aden SDK control plane: + * - GET /v1/control/policy - Get current policy + * - POST /v1/control/events - Submit events (batch) + * - POST /v1/control/content - Store large content items + * - GET /v1/control/events - Get events (dashboard) + * - PUT /v1/control/policy - Update policy (dashboard) + */ + +import express, { Request, Response, NextFunction } from "express"; +import createError from "http-errors"; +import passport from "passport"; + +import controlService from "../services/control/control_service"; +import pricingService from "../services/tsdb/pricing_service"; +import * as tsdbService from "../services/tsdb/tsdb_service"; +import { getTeamPool, buildSchemaName } from "../services/tsdb/team_context"; + +const router = express.Router(); + +// Passport is initialized in app.js + +interface UserPayload { + id: string; + current_team_id: string; + [key: string]: unknown; +} + +interface UserContext { + user_id: string; + team_id: string; +} + +interface AuthenticatedRequest extends Request { + user?: UserPayload; +} + +interface BudgetAlert { + threshold: number; + enabled: boolean; +} + +interface BudgetNotifications { + inApp: boolean; + email: boolean; + emailRecipients: string[]; + webhook: boolean; +} + +interface BudgetRule { + id: string; + name: string; + type: string; + tags?: string[]; + limit: number; + spent: number; + limitAction: string; + degradeToModel?: string; + degradeToProvider?: string; + alerts: BudgetAlert[]; + notifications: BudgetNotifications; +} + +interface ValidationContext { + agent?: string; + tenant_id?: 
string; + customer_id?: string; + feature?: string; + tags?: string[]; +} + +declare const global: { + _ADEN_CONTROL_EMITTER?: { + emitPolicyUpdate: ( + teamId: string, + policyId: string | null, + policy: unknown + ) => void; + }; +}; + +/** + * Extract user context from JWT payload for audit/scoping + * @param req - Express request with req.user from passport + * @returns User context { user_id, team_id } + */ +function getUserContext(req: AuthenticatedRequest): UserContext | null { + if (!req.user) return null; + return { + user_id: req.user.id, + team_id: req.user.current_team_id, + }; +} + +/** + * Get policy ID from request (header or query param) + * Returns null if not specified (will use default policy) + */ +function getPolicyId(req: Request): string | null { + return ( + (req.headers["x-policy-id"] as string) || + (req.query.policy_id as string) || + null + ); +} + +/** + * Resolve policy ID - handles "default" as special value + * Returns null for "default" which tells service to use team's default policy + */ +function resolvePolicyId(policyId: string): string | null { + if (!policyId || policyId === "default") { + return null; + } + return policyId; +} + +// ============================================================================= +// SDK Endpoints (used by Aden SDK) +// ============================================================================= + +/** + * GET /v1/control/policy + * Get the current control policy for the SDK + * Optional X-Policy-ID header to specify policy (uses default if not specified) + */ +router.get( + "/policy", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const policyId = getPolicyId(req); + const policy = await controlService.getPolicy( + userContext.team_id, + policyId + ); + 
res.json(policy); + } catch (error) { + console.error("[Aden Control] Error getting policy:", error); + next(createError(500, "Failed to get policy")); + } + } +); + +/** + * POST /v1/control/events + * Submit events from the SDK (batch) + * Optional X-Policy-ID header to specify policy (uses default if not specified) + */ +router.post( + "/events", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { events } = req.body; + + if (!events || !Array.isArray(events)) { + return next(createError(400, "events array required")); + } + + const policyId = getPolicyId(req); + await controlService.processEvents( + userContext.team_id, + policyId, + events, + userContext + ); + + res.json({ success: true, processed: events.length }); + } catch (error) { + console.error("[Aden Control] Error processing events:", error); + next(createError(500, "Failed to process events")); + } + } +); + +/** + * POST /v1/control/content + * Store large content items from the SDK (Layer 0 content capture) + * Used for content that exceeds max_content_bytes threshold + * + * Body: { items: Array<{ content_id, content_hash, content, byte_size }> } + */ +router.post( + "/content", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + console.log("[Aden Control] Received content storage request"); + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { items } = req.body; + + if (!items || !Array.isArray(items)) { + return next(createError(400, "items array required")); + } + + // Validate each item has required fields + for (let i = 0; i < items.length; i++) { + const item = items[i]; + if 
(!item.content_id || typeof item.content_id !== "string") { + return next( + createError(400, `items[${i}].content_id (string) is required`) + ); + } + if (!item.content_hash || typeof item.content_hash !== "string") { + return next( + createError(400, `items[${i}].content_hash (string) is required`) + ); + } + if (item.content === undefined || item.content === null) { + return next(createError(400, `items[${i}].content is required`)); + } + if (typeof item.byte_size !== "number" || item.byte_size < 0) { + return next( + createError( + 400, + `items[${i}].byte_size must be a non-negative number` + ) + ); + } + } + + const result = await controlService.storeContent( + userContext.team_id, + items + ); + + res.json({ success: true, stored: result.stored }); + } catch (error) { + console.error("[Aden Control] Error storing content:", error); + next(createError(500, "Failed to store content")); + } + } +); + +/** + * GET /v1/control/content/:contentId + * Retrieve content by ID + */ +router.get( + "/content/:contentId", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { contentId } = req.params; + const content = await controlService.getContent( + userContext.team_id, + contentId + ); + + if (!content) { + return next(createError(404, "Content not found")); + } + + res.json(content); + } catch (error) { + console.error("[Aden Control] Error getting content:", error); + next(createError(500, "Failed to get content")); + } + } +); + +// ============================================================================= +// TSDB Content Retrieval Endpoints (warm/cold storage) +// ============================================================================= + +/** + * GET /v1/control/events/:traceId/:callSequence/content + * Get all content for a 
specific event from warm/cold storage + * Returns content references with full content from cold store + */ +router.get( + "/events/:traceId/:callSequence/content", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { traceId, callSequence } = req.params; + const callSeq = parseInt(callSequence); + + if (!traceId || isNaN(callSeq)) { + return next(createError(400, "Valid traceId and callSequence required")); + } + + // Get team pool and set schema + const pool = await getTeamPool(userContext.team_id); + const schema = buildSchemaName(userContext.team_id); + const client = await pool.connect(); + + try { + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + + const content = await tsdbService.getEventContent( + userContext.team_id, + traceId, + callSeq, + client + ); + + res.json({ + trace_id: traceId, + call_sequence: callSeq, + content_items: content, + count: content.length, + }); + } finally { + client.release(); + } + } catch (error) { + console.error("[Aden Control] Error getting event content:", error); + next(createError(500, "Failed to get event content")); + } + } +); + +/** + * GET /v1/control/content/hash/:contentHash + * Get content from cold storage by hash + * Useful for fetching deduplicated content directly + */ +router.get( + "/content/hash/:contentHash", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { contentHash } = req.params; + + if (!contentHash || contentHash.length !== 64) { + return next(createError(400, "Valid SHA-256 content hash 
required")); + } + + // Get team pool and set schema + const pool = await getTeamPool(userContext.team_id); + const schema = buildSchemaName(userContext.team_id); + const client = await pool.connect(); + + try { + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + + const content = await tsdbService.getContentByHash( + userContext.team_id, + contentHash, + client + ); + + if (!content) { + return next(createError(404, "Content not found")); + } + + res.json({ + content_hash: contentHash, + content, + byte_size: Buffer.byteLength(content, "utf8"), + }); + } finally { + client.release(); + } + } catch (error) { + console.error("[Aden Control] Error getting content by hash:", error); + next(createError(500, "Failed to get content")); + } + } +); + +// ============================================================================= +// Dashboard Endpoints (used by Aden Dashboard) +// ============================================================================= + +/** + * GET /v1/control/events + * Get events for the dashboard (queries TSDB) + * Query params: limit, offset, start_date, end_date, policy_id + */ +router.get( + "/events", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const { limit, offset, start_date, end_date, policy_id } = req.query; + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const events = await controlService.getEvents( + userContext.team_id, + (policy_id as string) || null, + { + limit: parseInt(limit as string) || 100, + offset: parseInt(offset as string) || 0, + start_date: start_date as string | undefined, + end_date: end_date as string | undefined, + } + ); + + res.json({ events, count: events.length }); + } catch (error) { + console.error("[Aden Control] Error getting events:", error); + next(createError(500, 
"Failed to get events")); + } + } +); + +/** + * PUT /v1/control/policies/:policyId + * Update the control policy (from dashboard) + * Use "default" as policyId to update the team's default policy + */ +router.put( + "/policies/:policyId", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const policyId = resolvePolicyId(req.params.policyId); + const policyUpdate = req.body; + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + // Validate policy structure + const validKeys = [ + "name", + "budgets", + "throttles", + "blocks", + "degradations", + "alerts", + ]; + const invalidKeys = Object.keys(policyUpdate).filter( + (k) => !validKeys.includes(k) + ); + if (invalidKeys.length > 0) { + return next( + createError(400, `Invalid policy keys: ${invalidKeys.join(", ")}`) + ); + } + + const policy = await controlService.updatePolicy( + userContext.team_id, + policyId, + policyUpdate, + userContext + ); + + // Notify connected SDK instances via WebSocket + if (global._ADEN_CONTROL_EMITTER) { + global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( + userContext.team_id, + policyId, + policy + ); + } + + res.json(policy); + } catch (error) { + console.error("[Aden Control] Error updating policy:", error); + next(createError(500, "Failed to update policy")); + } + } +); + +/** + * DELETE /v1/control/policies/:policyId/rules + * Clear all rules from the policy (keeps the policy itself) + * Use "default" as policyId to clear the team's default policy + */ +router.delete( + "/policies/:policyId/rules", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const policyId = resolvePolicyId(req.params.policyId); + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context 
required")); + } + + const policy = await controlService.clearPolicy( + userContext.team_id, + policyId, + userContext + ); + + // Notify connected SDK instances via WebSocket + if (global._ADEN_CONTROL_EMITTER) { + global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( + userContext.team_id, + policyId, + policy + ); + } + + res.json(policy); + } catch (error) { + console.error("[Aden Control] Error clearing policy:", error); + next(createError(500, "Failed to clear policy")); + } + } +); + +// ============================================================================= +// Rule Management Endpoints +// ============================================================================= + +/** + * Valid budget types matching frontend BudgetType enum + */ +const VALID_BUDGET_TYPES = [ + "global", + "agent", + "tenant", + "customer", + "feature", + "tag", +]; + +/** + * Valid limit actions matching frontend LimitAction enum + */ +const VALID_LIMIT_ACTIONS = ["kill", "throttle", "degrade"]; + +/** + * Validate BudgetAlert structure + */ +function isValidBudgetAlert(alert: unknown): alert is BudgetAlert { + return ( + alert !== null && + typeof alert === "object" && + typeof (alert as BudgetAlert).threshold === "number" && + (alert as BudgetAlert).threshold >= 0 && + (alert as BudgetAlert).threshold <= 100 && + typeof (alert as BudgetAlert).enabled === "boolean" + ); +} + +/** + * Validate BudgetNotifications structure + */ +function isValidBudgetNotifications( + notifications: unknown +): notifications is BudgetNotifications { + if (!notifications || typeof notifications !== "object") return false; + const n = notifications as BudgetNotifications; + if (typeof n.inApp !== "boolean") return false; + if (typeof n.email !== "boolean") return false; + if (!Array.isArray(n.emailRecipients)) return false; + if (typeof n.webhook !== "boolean") return false; + return true; +} + +/** + * POST /v1/control/policies/:policyId/budgets + * Add a budget rule + * + * Expected body (BudgetConfig): 
+ * { + * id: string, + * name: string, + * type: 'global' | 'agent' | 'tenant' | 'customer' | 'feature' | 'tag', + * tagCategory?: string, + * limit: number, + * spent: number, + * limitAction: 'kill' | 'throttle' | 'degrade', + * degradeToModel?: string, // required when limitAction is 'degrade' + * degradeToProvider?: string, // required when limitAction is 'degrade' + * alerts: Array<{ threshold: number, enabled: boolean }>, + * notifications: { inApp: boolean, email: boolean, emailRecipients: string[], webhook: boolean } + * } + */ +router.post( + "/policies/:policyId/budgets", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const policyId = resolvePolicyId(req.params.policyId); + const rule = req.body as BudgetRule; + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + // Validate required fields + if (!rule.id || typeof rule.id !== "string") { + return next(createError(400, "id (string) is required")); + } + + if (!rule.name || typeof rule.name !== "string") { + return next(createError(400, "name (string) is required")); + } + + if (!rule.type || !VALID_BUDGET_TYPES.includes(rule.type)) { + return next( + createError( + 400, + `type must be one of: ${VALID_BUDGET_TYPES.join(", ")}` + ) + ); + } + + // tags array is required when type is 'tag' + if (rule.type === "tag") { + if (!Array.isArray(rule.tags) || rule.tags.length === 0) { + return next( + createError( + 400, + "tags (non-empty array) is required when type is 'tag'" + ) + ); + } + // Validate each tag is a string + for (let i = 0; i < rule.tags.length; i++) { + if (typeof rule.tags[i] !== "string") { + return next(createError(400, `tags[${i}] must be a string`)); + } + } + } + + if (typeof rule.limit !== "number" || rule.limit < 0) { + return next(createError(400, "limit must be a non-negative number")); + } + + if (typeof 
rule.spent !== "number" || rule.spent < 0) { + return next(createError(400, "spent must be a non-negative number")); + } + + if ( + !rule.limitAction || + !VALID_LIMIT_ACTIONS.includes(rule.limitAction) + ) { + return next( + createError( + 400, + `limitAction must be one of: ${VALID_LIMIT_ACTIONS.join(", ")}` + ) + ); + } + + // degradeToModel and degradeToProvider are required when limitAction is 'degrade' + if (rule.limitAction === "degrade") { + if (!rule.degradeToModel || typeof rule.degradeToModel !== "string") { + return next( + createError( + 400, + "degradeToModel is required when limitAction is 'degrade'" + ) + ); + } + if ( + !rule.degradeToProvider || + typeof rule.degradeToProvider !== "string" + ) { + return next( + createError( + 400, + "degradeToProvider is required when limitAction is 'degrade'" + ) + ); + } + + // Validate model belongs to the specified provider + const targets = await pricingService.getDegradationTargets(); + const providerModels = targets.models[rule.degradeToProvider]; + + if (!providerModels) { + return next( + createError(400, `Unknown provider: ${rule.degradeToProvider}`) + ); + } + + const validModelNames = providerModels.map( + (m: { model: string }) => m.model + ); + if (!validModelNames.includes(rule.degradeToModel)) { + return next( + createError( + 400, + `degradeToModel "${rule.degradeToModel}" does not belong to provider "${rule.degradeToProvider}"` + ) + ); + } + } + + if (!Array.isArray(rule.alerts)) { + return next(createError(400, "alerts must be an array")); + } + for (let i = 0; i < rule.alerts.length; i++) { + if (!isValidBudgetAlert(rule.alerts[i])) { + return next( + createError( + 400, + `alerts[${i}] must have threshold (0-100) and enabled (boolean)` + ) + ); + } + } + + if (!isValidBudgetNotifications(rule.notifications)) { + return next( + createError( + 400, + "notifications must have inApp, email, emailRecipients[], and webhook fields" + ) + ); + } + + const policy = await controlService.addBudgetRule( 
+ userContext.team_id, + policyId, + rule, + userContext + ); + + if (global._ADEN_CONTROL_EMITTER) { + global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( + userContext.team_id, + policyId, + policy + ); + } + + res.json(policy); + } catch (error) { + console.error("[Aden Control] Error adding budget rule:", error); + next(createError(500, "Failed to add budget rule")); + } + } +); + +/** + * POST /v1/control/policies/:policyId/throttles + * Add a throttle rule + */ +router.post( + "/policies/:policyId/throttles", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const policyId = resolvePolicyId(req.params.policyId); + const rule = req.body; + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + if (!rule.requests_per_minute && !rule.delay_ms) { + return next( + createError(400, "requests_per_minute or delay_ms required") + ); + } + + const policy = await controlService.addThrottleRule( + userContext.team_id, + policyId, + rule, + userContext + ); + + if (global._ADEN_CONTROL_EMITTER) { + global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( + userContext.team_id, + policyId, + policy + ); + } + + res.json(policy); + } catch (error) { + console.error("[Aden Control] Error adding throttle rule:", error); + next(createError(500, "Failed to add throttle rule")); + } + } +); + +/** + * POST /v1/control/policies/:policyId/blocks + * Add a block rule + */ +router.post( + "/policies/:policyId/blocks", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const policyId = resolvePolicyId(req.params.policyId); + const rule = req.body; + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + if (!rule.reason) { + return next(createError(400, 
"reason required")); + } + + const policy = await controlService.addBlockRule( + userContext.team_id, + policyId, + rule, + userContext + ); + + if (global._ADEN_CONTROL_EMITTER) { + global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( + userContext.team_id, + policyId, + policy + ); + } + + res.json(policy); + } catch (error) { + console.error("[Aden Control] Error adding block rule:", error); + next(createError(500, "Failed to add block rule")); + } + } +); + +/** + * POST /v1/control/policies/:policyId/degradations + * Add a degradation rule (within same provider only - no cross-vendor degradation) + * + * Body: + * { + * provider: string, // e.g., "openai", "anthropic" + * from_model: string, // Model to degrade from, e.g., "gpt-4o" + * to_model: string, // Model to degrade to, e.g., "gpt-4o-mini" + * trigger: string // When to trigger: "budget_exceeded", "rate_limit", etc. + * } + */ +router.post( + "/policies/:policyId/degradations", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const policyId = resolvePolicyId(req.params.policyId); + const rule = req.body; + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + if (!rule.provider || typeof rule.provider !== "string") { + return next(createError(400, "provider (string) is required")); + } + + if (!rule.from_model || typeof rule.from_model !== "string") { + return next(createError(400, "from_model (string) is required")); + } + + if (!rule.to_model || typeof rule.to_model !== "string") { + return next(createError(400, "to_model (string) is required")); + } + + if (!rule.trigger || typeof rule.trigger !== "string") { + return next(createError(400, "trigger (string) is required")); + } + + // Validate models belong to the specified provider + const targets = await pricingService.getDegradationTargets(); + const providerModels = 
targets.models[rule.provider]; + + if (!providerModels) { + return next(createError(400, `Unknown provider: ${rule.provider}`)); + } + + const validModelNames = providerModels.map( + (m: { model: string }) => m.model + ); + + if (!validModelNames.includes(rule.from_model)) { + return next( + createError( + 400, + `from_model "${rule.from_model}" does not belong to provider "${rule.provider}"` + ) + ); + } + + if (!validModelNames.includes(rule.to_model)) { + return next( + createError( + 400, + `to_model "${rule.to_model}" does not belong to provider "${rule.provider}"` + ) + ); + } + + const policy = await controlService.addDegradeRule( + userContext.team_id, + policyId, + rule, + userContext + ); + + if (global._ADEN_CONTROL_EMITTER) { + global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( + userContext.team_id, + policyId, + policy + ); + } + + res.json(policy); + } catch (error) { + console.error("[Aden Control] Error adding degradation rule:", error); + next(createError(500, "Failed to add degradation rule")); + } + } +); + +/** + * POST /v1/control/policies/:policyId/alerts + * Add an alert rule + */ +router.post( + "/policies/:policyId/alerts", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const policyId = resolvePolicyId(req.params.policyId); + const rule = req.body; + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + if (!rule.trigger || !rule.level || !rule.message) { + return next(createError(400, "trigger, level, and message required")); + } + + // Validate level + if (!["info", "warning", "critical"].includes(rule.level)) { + return next( + createError(400, "level must be one of: info, warning, critical") + ); + } + + // Validate trigger + if ( + !["budget_threshold", "model_usage", "always"].includes(rule.trigger) + ) { + return next( + createError( + 400, + "trigger must be one 
of: budget_threshold, model_usage, always" + ) + ); + } + + const policy = await controlService.addAlertRule( + userContext.team_id, + policyId, + rule, + userContext + ); + + if (global._ADEN_CONTROL_EMITTER) { + global._ADEN_CONTROL_EMITTER.emitPolicyUpdate( + userContext.team_id, + policyId, + policy + ); + } + + res.json(policy); + } catch (error) { + console.error("[Aden Control] Error adding alert rule:", error); + next(createError(500, "Failed to add alert rule")); + } + } +); + +// ============================================================================= +// Budget Management Endpoints +// ============================================================================= + +/** + * GET /v1/control/budget/:budgetId + * Get budget status for a budget ID + */ +router.get( + "/budget/:budgetId", + passport.authenticate("jwt", { session: false }), + async (req: Request, res: Response, next: NextFunction) => { + try { + const { budgetId } = req.params; + const status = await controlService.getBudgetStatus(budgetId); + res.json(status); + } catch (error) { + console.error("[Aden Control] Error getting budget status:", error); + next(createError(500, "Failed to get budget status")); + } + } +); + +/** + * POST /v1/control/budget/:budgetId/reset + * Reset budget for a budget ID + */ +router.post( + "/budget/:budgetId/reset", + passport.authenticate("jwt", { session: false }), + async (req: Request, res: Response, next: NextFunction) => { + try { + const { budgetId } = req.params; + await controlService.resetBudget(budgetId); + res.json({ success: true, id: budgetId }); + } catch (error) { + console.error("[Aden Control] Error resetting budget:", error); + next(createError(500, "Failed to reset budget")); + } + } +); + +// ============================================================================= +// Team Policies & Metrics Endpoints +// ============================================================================= + +/** + * GET /v1/control/policies + * Get all 
policies for the current team (dashboard) + */ +router.get( + "/policies", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { limit, offset } = req.query; + const policies = await controlService.getPoliciesByTeam( + userContext.team_id, + { + limit: parseInt(limit as string) || 100, + offset: parseInt(offset as string) || 0, + } + ); + + res.json({ policies, count: policies.length }); + } catch (error) { + console.error("[Aden Control] Error getting team policies:", error); + next(createError(500, "Failed to get team policies")); + } + } +); + +/** + * POST /v1/control/policies + * Create a new policy for the team + */ +router.post( + "/policies", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { name } = req.body; + if (!name || typeof name !== "string") { + return next(createError(400, "name (string) is required")); + } + + // Create a new policy with the given name + const policy = await controlService.updatePolicy( + userContext.team_id, + null, // Will generate a new policy ID + { name }, + userContext + ); + + res.status(201).json(policy); + } catch (error) { + console.error("[Aden Control] Error creating policy:", error); + next(createError(500, "Failed to create policy")); + } + } +); + +/** + * GET /v1/control/policies/:policyId + * Get a specific policy by ID + * Use "default" as policyId to get the team's default policy + */ +router.get( + "/policies/:policyId", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { 
+ const policyId = resolvePolicyId(req.params.policyId); + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const policy = await controlService.getPolicy( + userContext.team_id, + policyId, + userContext + ); + + if (!policy) { + return next(createError(404, "Policy not found")); + } + + res.json(policy); + } catch (error) { + console.error("[Aden Control] Error getting policy:", error); + next(createError(500, "Failed to get policy")); + } + } +); + +/** + * DELETE /v1/control/policies/:policyId + * Delete a policy + * Note: "default" is NOT allowed here - must specify actual policy ID + */ +router.delete( + "/policies/:policyId", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const { policyId } = req.params; + + // Don't allow deleting "default" - must specify actual policy ID + if (policyId === "default") { + return next( + createError(400, "Cannot delete 'default' - specify actual policy ID") + ); + } + + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + await controlService.deletePolicy( + userContext.team_id, + policyId, + userContext + ); + + res.json({ success: true, id: policyId }); + } catch (error) { + console.error("[Aden Control] Error deleting policy:", error); + next(createError(500, "Failed to delete policy")); + } + } +); + +/** + * GET /v1/control/metrics + * Get metrics summary for the current team (dashboard analytics) + */ +router.get( + "/metrics", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { start_date, end_date } = req.query; + const 
summary = await controlService.getMetricsSummary( + userContext.team_id, + { + start_date: start_date as string | undefined, + end_date: end_date as string | undefined, + } + ); + + res.json(summary); + } catch (error) { + console.error("[Aden Control] Error getting metrics summary:", error); + next(createError(500, "Failed to get metrics summary")); + } + } +); + +// ============================================================================= +// Usage & Rate Analytics Endpoints +// ============================================================================= + +/** + * GET /v1/control/metrics/usage + * Get usage breakdown (daily, by model, by feature) + */ +router.get( + "/metrics/usage", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { days, context_id } = req.query; + const breakdown = await controlService.getUsageBreakdown( + userContext.team_id, + { + days: days ? parseInt(days as string) : 7, + context_id: context_id as string | undefined, + } + ); + + res.json(breakdown); + } catch (error) { + console.error("[Aden Control] Error getting usage breakdown:", error); + next(createError(500, "Failed to get usage breakdown")); + } + } +); + +/** + * GET /v1/control/metrics/rates + * Get rate metrics (peak, p95, avg, min, burst) + */ +router.get( + "/metrics/rates", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { days, context_id } = req.query; + const rates = await controlService.getRateMetrics(userContext.team_id, { + days: days ? 
parseInt(days as string) : 30, + context_id: context_id as string | undefined, + }); + + res.json(rates); + } catch (error) { + console.error("[Aden Control] Error getting rate metrics:", error); + next(createError(500, "Failed to get rate metrics")); + } + } +); + +/** + * GET /v1/control/policies/:policyId/budgets/:budgetId + * Get detailed budget info including usage stats + */ +router.get( + "/policies/:policyId/budgets/:budgetId", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const policyId = resolvePolicyId(req.params.policyId); + const { budgetId } = req.params; + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const budget = await controlService.getBudgetDetails( + userContext.team_id, + policyId, + budgetId + ); + + if (!budget) { + return next(createError(404, "Budget not found")); + } + + res.json(budget); + } catch (error) { + console.error("[Aden Control] Error getting budget details:", error); + next(createError(500, "Failed to get budget details")); + } + } +); + +/** + * GET /v1/control/policies/:policyId/budgets/:budgetId/usage + * Get usage breakdown for a specific budget + * Returns: { daily, by_model, by_feature } + */ +router.get( + "/policies/:policyId/budgets/:budgetId/usage", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const policyId = resolvePolicyId(req.params.policyId); + const { budgetId } = req.params; + const { days } = req.query; + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + // Get budget details for filtering + const budget = await controlService.getBudgetDetails( + userContext.team_id, + policyId, + budgetId + ); + + if (!budget) { + return next(createError(404, 
"Budget not found")); + } + + // Pass the budget object for type-aware filtering + const breakdown = await controlService.getUsageBreakdown( + userContext.team_id, + { + days: days ? parseInt(days as string) : 7, + budget, + } + ); + + res.json(breakdown); + } catch (error) { + console.error("[Aden Control] Error getting budget usage:", error); + next(createError(500, "Failed to get budget usage")); + } + } +); + +/** + * GET /v1/control/policies/:policyId/budgets/:budgetId/rates + * Get rate metrics for a specific budget + * Returns: { peak_rate, p95_rate, avg_rate, min_rate, max_burst } + */ +router.get( + "/policies/:policyId/budgets/:budgetId/rates", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const policyId = resolvePolicyId(req.params.policyId); + const { budgetId } = req.params; + const { days } = req.query; + const userContext = getUserContext(req); + + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + // Get budget details for filtering + const budget = await controlService.getBudgetDetails( + userContext.team_id, + policyId, + budgetId + ); + + if (!budget) { + return next(createError(404, "Budget not found")); + } + + // Pass the budget object for type-aware filtering + const rates = await controlService.getRateMetrics(userContext.team_id, { + days: days ? parseInt(days as string) : 30, + budget, + }); + + res.json(rates); + } catch (error) { + console.error("[Aden Control] Error getting budget rates:", error); + next(createError(500, "Failed to get budget rates")); + } + } +); + +// ============================================================================= +// Budget Validation Endpoint (for Hybrid Enforcement) +// ============================================================================= + +/** + * POST /v1/control/budget/validate + * Server-side budget validation for hybrid enforcement. 
+ * + * Called by SDK when local budget usage approaches threshold (e.g., 80%). + * Returns authoritative spend from TSDB and enforcement decision. + */ +router.post( + "/budget/validate", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const userContext = getUserContext(req); + if (!userContext?.team_id) { + return next(createError(400, "Team context required")); + } + + const { budget_id, estimated_cost, context, local_spend } = req.body as { + budget_id?: string; + estimated_cost: number; + context?: ValidationContext; + local_spend?: number; + }; + const policyId = getPolicyId(req); + + if (typeof estimated_cost !== "number" || estimated_cost < 0) { + return next( + createError(400, "estimated_cost must be a non-negative number") + ); + } + + // Get the policy with authoritative budget data from TSDB + const policy = await controlService.getPolicy( + userContext.team_id, + policyId, + userContext + ); + + if (!policy) { + return next(createError(404, "Policy not found")); + } + + // MULTI-BUDGET MODE: Use context to find all matching budgets + if (context && typeof context === "object") { + const matchingBudgets = controlService.findMatchingBudgetsForContext( + policy.budgets || [], + context + ); + + if (matchingBudgets.length === 0) { + // No budgets match this context - allow by default + return res.json({ + allowed: true, + action: "allow", + reason: "No budgets match the provided context", + authoritative_spend: 0, + budget_limit: 0, + usage_percent: 0, + projected_percent: 0, + policy_version: policy.version, + budgets_checked: [], + }); + } + + // Validate all matching budgets and get most restrictive result + const result = controlService.validateMultipleBudgets( + matchingBudgets, + estimated_cost, + local_spend + ); + + // Log the validation for audit + console.log( + `[Aden Control] Multi-budget validation: ` + + `checked ${result.budgets_checked.length} 
budgets, ` + + `action: ${result.action}` + + (result.restricting_budget_name + ? `, restricting: ${result.restricting_budget_name}` + : "") + ); + + return res.json({ + ...result, + policy_version: policy.version, + }); + } + + // SINGLE-BUDGET MODE (backward compatible): Use budget_id + if (!budget_id) { + return next(createError(400, "budget_id or context is required")); + } + + // Find the budget by ID + const budget = policy.budgets?.find( + (b: { id: string }) => b.id === budget_id + ); + if (!budget) { + // Budget not found - allow by default (budget may have been removed) + return res.json({ + allowed: true, + action: "allow", + reason: "Budget not found in policy", + authoritative_spend: 0, + budget_limit: 0, + usage_percent: 0, + projected_percent: 0, + policy_version: policy.version, + budgets_checked: [], + }); + } + + // Use the multi-budget validator for consistency (with single budget) + const result = controlService.validateMultipleBudgets( + [budget], + estimated_cost, + local_spend + ); + + // Log the validation for audit + console.log( + `[Aden Control] Budget validation: ${budget_id} - ` + + `spend: $${result.authoritative_spend.toFixed(4)}, ` + + `limit: $${budget.limit}, ` + + `action: ${result.action}` + ); + + res.json({ + ...result, + policy_version: policy.version, + // Keep backward-compatible fields + updated_spend: result.authoritative_spend, + }); + } catch (error) { + console.error("[Aden Control] Error validating budget:", error); + next(createError(500, "Failed to validate budget")); + } + } +); + +// ============================================================================= +// Model Options for Degradation +// ============================================================================= + +/** + * GET /v1/control/degradation-targets + * Get available target models for budget degradation mode, grouped by provider + * Models are sorted by cost (cheapest first) + * + * Query params: + * provider (optional) - Filter to specific 
provider (e.g., "openai", "anthropic") + * + * Response (no filter): + * { providers: [...], models: { openai: [...], anthropic: [...] } } + * + * Response (with provider filter): + * { provider: "openai", models: [...] } + */ +router.get( + "/degradation-targets", + passport.authenticate("jwt", { session: false }), + async (req: Request, res: Response, next: NextFunction) => { + try { + const { provider } = req.query; + const targets = await pricingService.getDegradationTargets(); + + // If provider specified, filter to that provider only + if (provider) { + const providerModels = targets.models[provider as string]; + if (!providerModels) { + return next(createError(400, `Unknown provider: ${provider}`)); + } + return res.json({ + provider, + models: providerModels, + }); + } + + res.json(targets); + } catch (error) { + console.error("[Aden Control] Error getting degradation targets:", error); + next(createError(500, "Failed to get degradation targets")); + } + } +); + +// ============================================================================= +// SSE - Real-time Agent Status Stream +// ============================================================================= + +interface ControlEmitter { + getConnectedCount: (teamId: string | number) => number; + getConnectedInstances: (teamId: string | number) => Array<{ + instance_id: string; + policy_id: string | null; + agent_name: string | null; + connected_at: string; + last_heartbeat: string; + connection_type: "websocket" | "http"; + status?: string; + }>; + getTotalConnectedCount: () => number; +} + +/** + * GET /v1/control/agent-status/stream + * SSE endpoint for real-time agent connection status + * + * Streams updates every 2 seconds with: + * - active: boolean indicating if any agents are connected + * - count: number of connected agents + * - instances: array of connected agent details + */ +router.get( + "/agent-status/stream", + passport.authenticate("jwt", { session: false }), + (req: 
AuthenticatedRequest, res: Response) => { + const teamId = req.user?.current_team_id; + + if (!teamId) { + res.status(401).json({ error: "Team ID required" }); + return; + } + + // Set SSE headers + res.setHeader("Content-Type", "text/event-stream"); + res.setHeader("Cache-Control", "no-cache"); + res.setHeader("Connection", "keep-alive"); + res.setHeader("X-Accel-Buffering", "no"); // Disable nginx buffering + res.flushHeaders(); + + const controlEmitter = req.app.locals.controlEmitter as + | ControlEmitter + | undefined; + + // Send initial status immediately + const sendStatus = () => { + if (!controlEmitter) { + const data = { + active: false, + count: 0, + instances: [], + timestamp: new Date().toISOString(), + error: "WebSocket not initialized", + }; + res.write(`data: ${JSON.stringify(data)}\n\n`); + return; + } + + const count = controlEmitter.getConnectedCount(teamId); + const instances = controlEmitter.getConnectedInstances(teamId); + + const data = { + active: count > 0, + count, + instances, + timestamp: new Date().toISOString(), + }; + + res.write(`data: ${JSON.stringify(data)}\n\n`); + }; + + // Send immediately + sendStatus(); + + // Send updates every 2 seconds + const intervalId = setInterval(sendStatus, 2000); + + // Cleanup on client disconnect + req.on("close", () => { + clearInterval(intervalId); + }); + } +); + +/** + * GET /v1/control/agent-status + * Get current agent connection status (non-streaming) + */ +router.get( + "/agent-status", + passport.authenticate("jwt", { session: false }), + (req: AuthenticatedRequest, res: Response) => { + const teamId = req.user?.current_team_id; + + if (!teamId) { + res.status(401).json({ error: "Team ID required" }); + return; + } + + const controlEmitter = req.app.locals.controlEmitter as + | ControlEmitter + | undefined; + + if (!controlEmitter) { + res.json({ + active: false, + count: 0, + instances: [], + timestamp: new Date().toISOString(), + error: "WebSocket not initialized", + }); + return; + } + 
+ const count = controlEmitter.getConnectedCount(teamId); + const instances = controlEmitter.getConnectedInstances(teamId); + + res.json({ + active: count > 0, + count, + instances, + timestamp: new Date().toISOString(), + }); + } +); + +// ============================================================================= +// Agent Discovery - Historical agents with availability +// ============================================================================= + +/** + * GET /v1/control/agents + * Get all agents from past events with their current availability status + * + * Query params: + * - since: ISO date string to filter events from (optional) + * - limit: Max number of agents to return (default: 100) + * + * Returns agents sorted by last_seen descending with: + * - agent: unique agent identifier + * - agent_name: human-readable name (if available) + * - status: "connected" | "disconnected" + * - connection_type: "websocket" | "http" | null (null if disconnected) + * - first_seen: when agent first appeared in events + * - last_seen: when agent last appeared in events + * - total_requests: total LLM requests made by this agent + * - total_cost: total cost incurred by this agent + */ +router.get( + "/agents", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const teamId = req.user?.current_team_id; + if (!teamId) { + throw createError(401, "Team ID required"); + } + + // Parse query params + const since = req.query.since ? new Date(req.query.since as string) : undefined; + const limit = req.query.limit ? 
parseInt(req.query.limit as string, 10) : 100; + + // Get team-specific pool/schema + const teamPool = await getTeamPool(teamId); + const schemaName = buildSchemaName(teamId); + const client = await teamPool.connect(); + + let historicalAgents; + try { + await client.query(`SET search_path TO ${schemaName}`); + await tsdbService.ensureSchema(client); + + // Get all distinct agents from TSDB + historicalAgents = await tsdbService.getDistinctAgents( + teamId, + { since, limit }, + client + ); + } finally { + client.release(); + } + + // Get currently connected instances + const controlEmitter = req.app.locals.controlEmitter as ControlEmitter | undefined; + const connectedInstances = controlEmitter?.getConnectedInstances(teamId) || []; + + // Build a map of connected agents (by instance_id and agent_name) + const connectedByInstanceId = new Map(); + const connectedByAgentName = new Map(); + + for (const instance of connectedInstances) { + connectedByInstanceId.set(instance.instance_id, instance); + if (instance.agent_name) { + connectedByAgentName.set(instance.agent_name, instance); + } + } + + // Merge historical agents with connection status + const agents = historicalAgents.map((agent) => { + // Try to match by agent ID (instance_id) or agent_name + const connectedInstance = + connectedByInstanceId.get(agent.agent) || + connectedByAgentName.get(agent.agent) || + (agent.agent_name ? connectedByAgentName.get(agent.agent_name) : null); + + return { + agent: agent.agent, + agent_name: agent.agent_name || connectedInstance?.agent_name || null, + status: connectedInstance ? 
"connected" : "disconnected", + connection_type: connectedInstance?.connection_type || null, + instance_id: connectedInstance?.instance_id || null, + first_seen: agent.first_seen.toISOString(), + last_seen: agent.last_seen.toISOString(), + total_requests: agent.total_requests, + total_cost: agent.total_cost, + }; + }); + + // Also add any connected agents that don't have historical events yet + const historicalAgentIds = new Set(historicalAgents.map((a) => a.agent)); + const historicalAgentNames = new Set( + historicalAgents.map((a) => a.agent_name).filter(Boolean) + ); + + for (const instance of connectedInstances) { + const isInHistory = + historicalAgentIds.has(instance.instance_id) || + (instance.agent_name && historicalAgentNames.has(instance.agent_name)); + + if (!isInHistory) { + agents.push({ + agent: instance.instance_id, + agent_name: instance.agent_name, + status: "connected", + connection_type: instance.connection_type, + instance_id: instance.instance_id, + first_seen: instance.connected_at, + last_seen: instance.last_heartbeat, + total_requests: 0, + total_cost: 0, + }); + } + } + + res.json({ + agents, + total: agents.length, + connected_count: agents.filter((a) => a.status === "connected").length, + timestamp: new Date().toISOString(), + }); + } catch (error) { + next(error); + } + } +); + +// ============================================================================= +// Health Check +// ============================================================================= + +/** + * GET /v1/control/health + * Health check endpoint + */ +router.get("/health", (_req: Request, res: Response) => { + res.json({ + status: "healthy", + timestamp: new Date().toISOString(), + websocket: !!global._ADEN_CONTROL_EMITTER, + }); +}); + +export default router; diff --git a/hive/src/controllers/iam.controller.ts b/hive/src/controllers/iam.controller.ts new file mode 100644 index 00000000..57fad222 --- /dev/null +++ b/hive/src/controllers/iam.controller.ts @@ -0,0 +1,154 
@@ +/** + * IAM Controller + * + * Handles Identity and Access Management endpoints. + */ + +import { Router, Request, Response } from 'express'; + +const router = Router(); + +/** + * Extract token from Authorization header + * Supports: "jwt ", "Bearer ", or raw "" + */ +function extractToken(authHeader: string): string { + if (authHeader.startsWith('jwt ')) { + return authHeader.slice(4); + } + if (authHeader.startsWith('Bearer ')) { + return authHeader.slice(7); + } + return authHeader; +} + +/** + * GET /iam/get-current-team + * + * Get the current team/organization for the authenticated user. + */ +router.get('/get-current-team', async (req: Request, res: Response) => { + try { + const authHeader = req.headers.authorization; + if (!authHeader) { + return res.status(401).json({ + success: false, + msg: 'No token provided', + }); + } + + const userDbService = req.app.locals.userDbService; + const user = await userDbService.findByToken(extractToken(authHeader)); + + if (!user) { + return res.status(401).json({ + success: false, + msg: 'Invalid token', + }); + } + + const pgPool = req.app.locals.pgPool; + if (!pgPool) { + // Return default team if no database + return res.json({ + orgId: user.current_team_id || 1, + orgName: 'Default Organization', + teamId: user.current_team_id || 1, + teamName: 'Default Team', + }); + } + + // Get team info from database + const result = await pgPool.query( + `SELECT id, name, slug FROM teams WHERE id = $1`, + [user.current_team_id || 1] + ); + + const team = result.rows[0]; + + if (!team) { + // Return default if team not found + return res.json({ + orgId: user.current_team_id || 1, + orgName: 'Default Organization', + teamId: user.current_team_id || 1, + teamName: 'Default Team', + }); + } + + res.json({ + orgId: team.id, + orgName: team.name, + teamId: team.id, + teamName: team.name, + }); + } catch (err: any) { + console.error('[IAMController] /get-current-team error:', err.message); + res.status(500).json({ + success: 
false, + msg: 'Failed to get current team', + }); + } +}); + +/** + * GET /iam/team/get-team-role-by-id/:teamId + * + * Get the user's role in a specific team. + */ +router.get('/team/get-team-role-by-id/:teamId', async (req: Request, res: Response) => { + try { + const authHeader = req.headers.authorization; + if (!authHeader) { + return res.status(401).json({ + success: false, + msg: 'No token provided', + }); + } + + const userDbService = req.app.locals.userDbService; + const user = await userDbService.findByToken(extractToken(authHeader)); + + if (!user) { + return res.status(401).json({ + success: false, + msg: 'Invalid token', + }); + } + + const teamId = parseInt(req.params.teamId, 10); + + const pgPool = req.app.locals.pgPool; + if (!pgPool) { + // Return default role if no database + return res.json({ roleId: 1 }); + } + + // Get user's role in this team + const result = await pgPool.query( + `SELECT role FROM team_members WHERE user_id = $1 AND team_id = $2`, + [user.id, teamId] + ); + + const membership = result.rows[0]; + + // Map role name to roleId (admin=1, member=2, viewer=3) + const roleMap: Record = { + admin: 1, + member: 2, + viewer: 3, + }; + + const roleId = membership ? 
(roleMap[membership.role] || 2) : 2; + + res.json({ roleId }); + } catch (err: any) { + console.error('[IAMController] /team/get-team-role-by-id error:', err.message); + res.status(500).json({ + success: false, + msg: 'Failed to get team role', + }); + } +}); + +export default router; diff --git a/hive/src/controllers/quickstart.controller.ts b/hive/src/controllers/quickstart.controller.ts new file mode 100644 index 00000000..1e66d58c --- /dev/null +++ b/hive/src/controllers/quickstart.controller.ts @@ -0,0 +1,192 @@ +/** + * Quickstart Documentation API Controller + * Generates SDK quickstart documentation based on agent framework + */ +import express, { Request, Response, NextFunction } from "express"; +import passport from "passport"; +// Passport is initialized in app.js + +import * as quickstartService from "../services/quickstart/quickstart_service"; + +const router = express.Router(); + +interface AuthenticatedUser { + id: number; + current_team_id: number; + [key: string]: unknown; +} + +interface AuthenticatedRequest extends Request { + user?: AuthenticatedUser; +} + +/** + * @swagger + * /quickstart/options: + * get: + * summary: Get available options for quickstart generation + * tags: + * - Quickstart + * responses: + * 200: + * description: Available options for quickstart document generation + */ +router.get("/options", async (req: Request, res: Response, next: NextFunction) => { + try { + const options = quickstartService.getQuickstartOptions(); + res.send(options); + } catch (error) { + next(error); + } +}); + +/** + * @swagger + * /quickstart/generate: + * post: + * summary: Generate quickstart documentation with user's system token + * tags: + * - Quickstart + * security: + * - jwtAuth: [] + * requestBody: + * required: true + * content: + * application/json: + * schema: + * type: object + * required: + * - agentFramework + * properties: + * agentFramework: + * type: string + * enum: [generic, langgraph, livekit] + * description: The agent 
framework to use + * responses: + * 200: + * description: Generated quickstart documentation + * 400: + * description: Invalid parameters + * 401: + * description: Unauthorized - JWT token required + */ +router.post( + "/generate", + passport.authenticate("jwt", { session: false }), + async (req: AuthenticatedRequest, res: Response, next: NextFunction) => { + try { + const { user, body } = req; + const { agentFramework, llmVendor, sdkLanguage } = body; + + // Get the user's latest non-system API key + const userDbService = req.app.locals.userDbService; + const tokenObj = user ? await userDbService.getLatestUserDevToken(user) : null; + + let apiKey: string; + let tokenName: string; + if (tokenObj) { + apiKey = tokenObj.token; + tokenName = tokenObj.label; + } else { + // No user API key - use placeholder + apiKey = "eyJ-xxx"; + tokenName = "No Key"; + } + + // Generate the quickstart document + const markdown = quickstartService.generateQuickstart({ + agentFramework, + llmVendor, + sdkLanguage, + apiKey, + }); + + res.send({ + markdown, + metadata: { + agentFramework, + llmVendor, + sdkLanguage, + tokenName, + generatedAt: new Date().toISOString(), + }, + }); + } catch (error) { + if ((error as Error).message.includes("Invalid")) { + return res.status(400).send({ error: (error as Error).message }); + } + next(error); + } + } +); + +/** + * @swagger + * /quickstart/generate-with-key: + * post: + * summary: Generate quickstart documentation with a provided API key + * description: Generate documentation without requiring authentication - API key is provided directly + * tags: + * - Quickstart + * requestBody: + * required: true + * content: + * application/json: + * schema: + * type: object + * required: + * - agentFramework + * - apiKey + * properties: + * agentFramework: + * type: string + * enum: [generic, livekit] + * apiKey: + * type: string + * description: The Aden API key to embed in the documentation + * responses: + * 200: + * description: Generated 
quickstart documentation + * 400: + * description: Invalid parameters + */ +router.post("/generate-with-key", async (req: Request, res: Response, next: NextFunction) => { + try { + const { agentFramework, llmVendor, sdkLanguage, apiKey } = req.body; + + if (!apiKey) { + return res.status(400).send({ + error: "API key is required", + message: "Please provide an apiKey in the request body", + }); + } + + // Generate the quickstart document + const markdown = quickstartService.generateQuickstart({ + agentFramework, + llmVendor, + sdkLanguage, + apiKey, + }); + + res.send({ + markdown, + metadata: { + agentFramework, + llmVendor, + sdkLanguage, + generatedAt: new Date().toISOString(), + }, + }); + } catch (error) { + if ( + (error as Error).message.includes("Invalid") || + (error as Error).message.includes("required") + ) { + return res.status(400).send({ error: (error as Error).message }); + } + next(error); + } +}); + +export default router; diff --git a/hive/src/controllers/tsdb.controller.ts b/hive/src/controllers/tsdb.controller.ts new file mode 100644 index 00000000..b0131a85 --- /dev/null +++ b/hive/src/controllers/tsdb.controller.ts @@ -0,0 +1,1161 @@ +/** + * TSDB ingestion and preview endpoints (protected) + */ +import express, { Request, Response } from "express"; +import passport from "passport"; +import type { PoolClient } from "pg"; + +import { + ensureSchema, + upsertEvents, +} from "../services/tsdb/tsdb_service"; +import pricingService from "../services/tsdb/pricing_service"; +import { parseToken, getTeamPool, buildSchemaName } from "../services/tsdb/team_context"; +import { buildAnalytics } from "../services/tsdb/analytics_service"; + +const router = express.Router(); + +const AUTH_MIDDLEWARE = passport.authenticate("jwt", { session: false }); + +interface TokenContext { + team_id: string; + user_id?: string; +} + +interface QueryRow { + [key: string]: unknown; +} + + +interface MetricRow { + period: string; + total_requests: string | number; + 
unique_traces: string | number; + unique_users: string | number; + total_input_tokens: string | number; + total_output_tokens: string | number; + total_tokens: string | number; + cached_tokens: string | number; + reasoning_tokens: string | number; + total_cost: string | number; + avg_latency_ms: string | number; + p50_latency_ms: string | number; + p95_latency_ms: string | number; + p99_latency_ms: string | number; + max_latency_ms: string | number; + streaming_requests: string | number; +} + +interface LLMEventRow { + timestamp: Date; + trace_id: string; + call_sequence: number; + model: string; + provider: string; + usage_input_tokens: number; + usage_output_tokens: number; + usage_cached_tokens: number; + cost_total: string | number; +} + +interface MergedRow { + request_count: number; + total_input_tokens: number; + total_output_tokens: number; + total_tokens: number; + total_cost: number; + avg_latency_ms: number; + latency_sum: number; + first_seen: Date; + last_seen: Date; + [key: string]: unknown; +} + +const getAuthorizationHeader = (req: Request): string | undefined => { + return req.headers.authorization || (req.headers as Record).Authorization; +}; + +const getTokenContext = (req: Request): TokenContext | null => { + return parseToken(getAuthorizationHeader(req)) as TokenContext | null; +}; + +const connectTeamClient = async (teamId: string | number): Promise => { + const pool = await getTeamPool(teamId, {}); + const schema = buildSchemaName(teamId); + const client = await pool.connect(); + await client.query(`CREATE SCHEMA IF NOT EXISTS ${schema}`); + await client.query(`SET search_path TO ${schema}, public`); + await ensureSchema(client); + return client; +}; + +router.post("/events", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { + let client: PoolClient | undefined; + try { + const ctx = getTokenContext(req); + if (!ctx || !ctx.team_id) { + return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); + } + 
const payload = Array.isArray(req.body) ? req.body : req.body?.events; + if (!Array.isArray(payload) || payload.length === 0) { + return res.status(400).json({ error: "events array required" }); + } + if (payload.length > 2000) { + return res.status(400).json({ error: "events array too large (max 2000)" }); + } + + client = await connectTeamClient(ctx.team_id); + const enriched = payload.map((e: Record) => ({ ...e, team_id: ctx.team_id, user_id: (ctx.user_id || e.user_id) as string | undefined })); + const result = await upsertEvents(enriched, client); + return res.json({ + message: "ingested", + rows_written: result.rowsWritten, + normalized: result.normalized, + }); + } catch (err) { + console.error("[tsdb] ingest error", err); + return res.status(500).json({ error: "ingest_failed", detail: (err as Error).message }); + } finally { + if (client) client.release(); + } +}); + +router.get("/sample", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { + let client: PoolClient | undefined; + try { + const ctx = getTokenContext(req); + if (!ctx || !ctx.team_id) { + return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); + } + client = await connectTeamClient(ctx.team_id); + const limit = Math.min(parseInt((req.query.limit as string) || "20", 10), 100); + const { rows } = await client.query( + 'SELECT * FROM llm_events ORDER BY "timestamp" DESC LIMIT $1', + [limit] + ); + return res.json({ rows }); + } catch (err) { + console.error("[tsdb] sample error", err); + return res.status(500).json({ error: "sample_failed", detail: (err as Error).message }); + } finally { + if (client) client.release(); + } +}); + +router.get("/counts", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { + let client: PoolClient | undefined; + try { + const ctx = getTokenContext(req); + if (!ctx || !ctx.team_id) { + return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); + } + client = await 
connectTeamClient(ctx.team_id); + const window = (req.query.window as string) || "1 day"; + const { rows } = await client.query( + 'SELECT COUNT(*)::bigint AS count FROM llm_events WHERE "timestamp" >= NOW() - $1::interval', + [window] + ); + return res.json({ window, count: Number(rows[0].count) }); + } catch (err) { + console.error("[tsdb] counts error", err); + return res.status(500).json({ error: "counts_failed", detail: (err as Error).message }); + } finally { + if (client) client.release(); + } +}); + +router.get("/health", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { + let client: PoolClient | undefined; + try { + const ctx = getTokenContext(req); + if (!ctx || !ctx.team_id) { + return res.status(401).json({ status: "error", detail: "Missing team_id in token" }); + } + client = await connectTeamClient(ctx.team_id); + const { rows } = await client.query("SELECT NOW() AS now"); + return res.json({ status: "ok", now: rows[0].now }); + } catch (err) { + console.error("[tsdb] health error", err); + return res.status(500).json({ status: "error", detail: (err as Error).message }); + } finally { + if (client) client.release(); + } +}); + +// GET /tsdb/logs?start=2025-01-01T00:00:00Z&end=2025-01-02T00:00:00Z&limit=500&offset=0 +// Optional: group_by=model|agent|model,agent for aggregation +router.get("/logs", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { + let poolClient: PoolClient | undefined; + try { + const ctx = getTokenContext(req); + if (!ctx || !ctx.team_id) { + return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); + } + const { start, end, group_by } = req.query as { start?: string; end?: string; group_by?: string }; + const startDate = start ? new Date(start) : null; + const endDate = end ? 
new Date(end) : null; + if (!startDate || Number.isNaN(startDate.getTime()) || !endDate || Number.isNaN(endDate.getTime())) { + return res.status(400).json({ error: "invalid_time_window", detail: "start and end must be valid ISO dates" }); + } + + const limit = Math.min(parseInt((req.query.limit as string) || "500", 10), 5000); + const offset = Math.max(parseInt((req.query.offset as string) || "0", 10), 0); + + poolClient = await connectTeamClient(ctx.team_id); + + // Handle aggregation if group_by is specified + if (group_by) { + const validGroupFields = ["model", "agent", "provider"]; + const groupFields = group_by.split(",").map((f) => f.trim()).filter((f) => validGroupFields.includes(f)); + + if (groupFields.length === 0) { + return res.status(400).json({ + error: "invalid_group_by", + detail: `group_by must be one or more of: ${validGroupFields.join(", ")}`, + }); + } + + // Try to use continuous aggregates for better performance + // Use CA when: single group field (model or agent) and provider not requested + // Hybrid approach: CA for completed days + base table for today's partial data + const useModelCA = groupFields.length === 1 && groupFields[0] === "model"; + const useModelProviderCA = groupFields.length === 2 && groupFields.includes("model") && groupFields.includes("provider"); + const useAgentCA = groupFields.length === 1 && groupFields[0] === "agent"; + + let rows: QueryRow[]; + let usedCA = false; + + const utcDayStart = (d: Date): Date => { + const x = new Date(d); + x.setUTCHours(0, 0, 0, 0); + return x; + }; + + const addUtcDays = (d: Date, days: number): Date => { + return new Date(d.getTime() + days * 24 * 60 * 60 * 1000); + }; + + const startDayStart = utcDayStart(startDate); + const endDayStart = utcDayStart(endDate); + + const fullBucketStart = startDate.getTime() === startDayStart.getTime() + ? 
startDayStart + : addUtcDays(startDayStart, 1); + + const fullBucketEnd = endDayStart; + + const hasFullBuckets = fullBucketStart < fullBucketEnd; + + const partialRanges: Array<{ start: Date; end: Date }> = []; + const pushRange = (rangeStart: Date, rangeEnd: Date): void => { + if (rangeEnd.getTime() <= rangeStart.getTime()) return; + partialRanges.push({ start: rangeStart, end: rangeEnd }); + }; + + pushRange(startDate, new Date(Math.min(endDate.getTime(), fullBucketStart.getTime()))); + pushRange(new Date(Math.max(startDate.getTime(), fullBucketEnd.getTime())), endDate); + + const mergeResults = (caRows: QueryRow[], baseRows: QueryRow[], keyFields: string[]): MergedRow[] => { + const merged = new Map(); + + const addRow = (row: QueryRow): void => { + const key = keyFields.map((f) => row[f]).join("|"); + const requestCount = parseInt(row.request_count as string) || 0; + const inputTokens = parseInt(row.total_input_tokens as string) || 0; + const outputTokens = parseInt(row.total_output_tokens as string) || 0; + const totalTokens = parseInt(row.total_tokens as string) || 0; + const totalCost = parseFloat(row.total_cost as string) || 0; + const avgLatency = parseFloat(row.avg_latency_ms as string) || 0; + const firstSeen = row.first_seen as Date; + const lastSeen = row.last_seen as Date; + + const existing = merged.get(key); + if (!existing) { + merged.set(key, { + ...Object.fromEntries(keyFields.map((f) => [f, row[f]])), + request_count: requestCount, + total_input_tokens: inputTokens, + total_output_tokens: outputTokens, + total_tokens: totalTokens, + total_cost: totalCost, + avg_latency_ms: avgLatency, + latency_sum: avgLatency * requestCount, + first_seen: firstSeen, + last_seen: lastSeen, + }); + return; + } + + const newCount = existing.request_count + requestCount; + const newLatencySum = existing.latency_sum + avgLatency * requestCount; + + merged.set(key, { + ...existing, + request_count: newCount, + total_input_tokens: existing.total_input_tokens + 
inputTokens, + total_output_tokens: existing.total_output_tokens + outputTokens, + total_tokens: existing.total_tokens + totalTokens, + total_cost: existing.total_cost + totalCost, + avg_latency_ms: newCount > 0 ? newLatencySum / newCount : 0, + latency_sum: newLatencySum, + first_seen: existing.first_seen < firstSeen ? existing.first_seen : firstSeen, + last_seen: existing.last_seen > lastSeen ? existing.last_seen : lastSeen, + }); + }; + + for (const row of caRows) addRow(row); + for (const row of baseRows) addRow(row); + + // Convert to array and sort by cost desc + return Array.from(merged.values()) + .map(({ latency_sum, ...rest }) => ({ ...rest, latency_sum: 0 })) + .sort((a, b) => b.total_cost - a.total_cost); + }; + + const getBaseAggData = async (rangeStart: Date, rangeEnd: Date, selectFields: string, groupByClause: string): Promise => { + if (rangeEnd.getTime() <= rangeStart.getTime()) return []; + + const baseSql = ` + SELECT + ${selectFields}, + COUNT(*) as request_count, + COALESCE(SUM(COALESCE(usage_input_tokens, 0)), 0) as total_input_tokens, + COALESCE(SUM(COALESCE(usage_output_tokens, 0)), 0) as total_output_tokens, + COALESCE(SUM(COALESCE(usage_total_tokens, COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0))), 0) as total_tokens, + COALESCE(SUM(cost_total), 0) as total_cost, + COALESCE(AVG(latency_ms), 0) as avg_latency_ms, + MIN("timestamp") as first_seen, + MAX("timestamp") as last_seen + FROM llm_events + WHERE "timestamp" >= $1 AND "timestamp" <= $2 AND team_id = $3 + GROUP BY ${groupByClause} + `; + + const result = await poolClient.query(baseSql, [rangeStart.toISOString(), rangeEnd.toISOString(), String(ctx.team_id)]); + return result.rows; + }; + + if (useModelCA || useModelProviderCA) { + // Try model CA - includes provider so works for both cases + try { + const keyFields = useModelProviderCA ? ["model", "provider"] : ["model"]; + + const selectFields = useModelProviderCA ? 
"model, provider" : "model"; + + const baseRows = (await Promise.all( + partialRanges.map((r) => getBaseAggData(r.start, r.end, selectFields, selectFields)) + )).flat(); + + let caRows: QueryRow[] = []; + if (hasFullBuckets) { + const caSql = ` + SELECT + model, + ${useModelProviderCA ? "provider," : ""} + SUM(requests) as request_count, + COALESCE(SUM(input_tokens), 0) as total_input_tokens, + COALESCE(SUM(output_tokens), 0) as total_output_tokens, + (COALESCE(SUM(input_tokens), 0) + COALESCE(SUM(output_tokens), 0)) as total_tokens, + COALESCE(SUM(cost_total), 0) as total_cost, + COALESCE(SUM(avg_latency_ms * requests) / NULLIF(SUM(requests), 0), 0) as avg_latency_ms, + MIN(bucket) as first_seen, + MAX(bucket) as last_seen + FROM llm_events_daily_by_model_ca + WHERE bucket >= $1 AND bucket < $2 + GROUP BY model${useModelProviderCA ? ", provider" : ""} + `; + const result = await poolClient.query(caSql, [fullBucketStart.toISOString(), fullBucketEnd.toISOString()]); + caRows = result.rows; + } + + rows = mergeResults(caRows, baseRows, keyFields).slice(offset, offset + limit) as unknown as QueryRow[]; + usedCA = hasFullBuckets; + } catch (err) { + // CA not available, fall through to base table query + } + } else if (useAgentCA) { + // Try agent CA + try { + const baseRows = (await Promise.all( + partialRanges.map((r) => getBaseAggData(r.start, r.end, "agent", "agent")) + )).flat(); + + let caRows: QueryRow[] = []; + if (hasFullBuckets) { + const caSql = ` + SELECT + agent, + SUM(requests) as request_count, + COALESCE(SUM(input_tokens), 0) as total_input_tokens, + COALESCE(SUM(output_tokens), 0) as total_output_tokens, + (COALESCE(SUM(input_tokens), 0) + COALESCE(SUM(output_tokens), 0)) as total_tokens, + COALESCE(SUM(cost_total), 0) as total_cost, + COALESCE(SUM(avg_latency_ms * requests) / NULLIF(SUM(requests), 0), 0) as avg_latency_ms, + MIN(bucket) as first_seen, + MAX(bucket) as last_seen + FROM llm_events_daily_by_agent_ca + WHERE bucket >= $1 AND bucket < $2 + 
GROUP BY agent + `; + const result = await poolClient.query(caSql, [fullBucketStart.toISOString(), fullBucketEnd.toISOString()]); + caRows = result.rows; + } + + rows = mergeResults(caRows, baseRows, ["agent"]).slice(offset, offset + limit) as unknown as QueryRow[]; + usedCA = hasFullBuckets; + } catch (err) { + // CA not available, fall through to base table query + } + } + + // Fallback to base table query if CA not used or failed + if (!usedCA) { + const groupByClause = groupFields.join(", "); + const selectFields = groupFields.map((f) => f).join(", "); + + const aggSql = ` + SELECT + ${selectFields}, + COUNT(*) as request_count, + COALESCE(SUM(COALESCE(usage_input_tokens, 0)), 0) as total_input_tokens, + COALESCE(SUM(COALESCE(usage_output_tokens, 0)), 0) as total_output_tokens, + COALESCE(SUM(COALESCE(usage_total_tokens, COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0))), 0) as total_tokens, + COALESCE(SUM(cost_total), 0) as total_cost, + COALESCE(AVG(latency_ms), 0) as avg_latency_ms, + MIN("timestamp") as first_seen, + MAX("timestamp") as last_seen + FROM llm_events + WHERE "timestamp" >= $1 AND "timestamp" <= $2 AND team_id = $3 + GROUP BY ${groupByClause} + ORDER BY total_cost DESC + LIMIT $4 OFFSET $5 + `; + const result = await poolClient.query(aggSql, [startDate.toISOString(), endDate.toISOString(), String(ctx.team_id), limit, offset]); + rows = result.rows; + } + + return res.json({ + window: { start: startDate.toISOString(), end: endDate.toISOString() }, + group_by: groupFields, + count: rows!.length, + source: usedCA ? 
"continuous_aggregate" : "base_table", + aggregations: rows!.map((row) => ({ + ...Object.fromEntries(groupFields.map((f) => [f, row[f]])), + request_count: parseInt(row.request_count as string) || 0, + total_input_tokens: parseInt(row.total_input_tokens as string) || 0, + total_output_tokens: parseInt(row.total_output_tokens as string) || 0, + total_tokens: parseInt(row.total_tokens as string) || 0, + total_cost: parseFloat(row.total_cost as string) || 0, + avg_latency_ms: parseFloat(row.avg_latency_ms as string) || 0, + first_seen: row.first_seen, + last_seen: row.last_seen, + })), + }); + } + + // Default: return raw rows + const sql = ` + SELECT * + FROM llm_events + WHERE "timestamp" >= $1 AND "timestamp" <= $2 AND team_id = $3 + ORDER BY "timestamp" DESC + LIMIT $4 OFFSET $5 + `; + const params = [startDate.toISOString(), endDate.toISOString(), String(ctx.team_id), limit, offset]; + const { rows } = await poolClient.query(sql, params); + return res.json({ + window: { start: startDate.toISOString(), end: endDate.toISOString() }, + count: rows.length, + rows, + }); + } catch (err) { + console.error("[tsdb] logs error", err); + return res.status(500).json({ error: "logs_failed", detail: (err as Error).message }); + } finally { + if (poolClient) poolClient.release(); + } +}); + +// GET /tsdb/metrics?days=30 +// Returns summary metrics with period-over-period % change +router.get("/metrics", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { + let client: PoolClient | undefined; + try { + const ctx = getTokenContext(req); + if (!ctx || !ctx.team_id) { + return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); + } + + const days = Math.min(parseInt((req.query.days as string) || "30", 10), 365); + + client = await connectTeamClient(ctx.team_id); + + // Calculate date ranges for current and previous periods + const now = new Date(); + const currentStart = new Date(now); + currentStart.setDate(currentStart.getDate() - days); + 
const previousStart = new Date(currentStart); + previousStart.setDate(previousStart.getDate() - days); + + // Query metrics for both periods in a single query using CASE statements + const metricsSql = ` + WITH period_data AS ( + SELECT + CASE + WHEN "timestamp" >= $2 THEN 'current' + ELSE 'previous' + END as period, + 1 as request, + COALESCE(usage_input_tokens, 0) as input_tokens, + COALESCE(usage_output_tokens, 0) as output_tokens, + COALESCE( + usage_total_tokens, + COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0), + 0 + ) as total_tokens, + COALESCE(usage_cached_tokens, 0) as cached_tokens, + COALESCE(usage_reasoning_tokens, 0) as reasoning_tokens, + COALESCE(cost_total, 0) as cost, + latency_ms, + trace_id, + user_id, + CASE WHEN stream = true THEN 1 ELSE 0 END as is_streaming + FROM llm_events + WHERE "timestamp" >= $1 AND "timestamp" <= $3 AND team_id = $4 + ), + aggregated AS ( + SELECT + period, + COUNT(*) as total_requests, + COUNT(DISTINCT trace_id) as unique_traces, + COUNT(DISTINCT user_id) as unique_users, + SUM(input_tokens) as total_input_tokens, + SUM(output_tokens) as total_output_tokens, + SUM(total_tokens) as total_tokens, + SUM(cached_tokens) as cached_tokens, + SUM(reasoning_tokens) as reasoning_tokens, + SUM(cost) as total_cost, + AVG(latency_ms) as avg_latency_ms, + PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY latency_ms) as p50_latency_ms, + PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY latency_ms) as p95_latency_ms, + PERCENTILE_CONT(0.99) WITHIN GROUP (ORDER BY latency_ms) as p99_latency_ms, + MAX(latency_ms) as max_latency_ms, + SUM(is_streaming) as streaming_requests + FROM period_data + GROUP BY period + ) + SELECT * FROM aggregated + `; + + const { rows } = await client.query(metricsSql, [ + previousStart.toISOString(), + currentStart.toISOString(), + now.toISOString(), + String(ctx.team_id), + ]); + + // Parse results into current and previous periods + const current = rows.find((r) => r.period === "current") || {} 
as Partial; + const previous = rows.find((r) => r.period === "previous") || {} as Partial; + + // Helper to calculate % change + const pctChange = (curr: string | number | undefined, prev: string | number | undefined): number => { + const c = parseFloat(curr as string) || 0; + const p = parseFloat(prev as string) || 0; + if (p === 0) return c > 0 ? 100 : 0; + return ((c - p) / p) * 100; + }; + + // Helper to safely parse numbers + const num = (val: string | number | undefined): number => parseFloat(val as string) || 0; + const int = (val: string | number | undefined): number => parseInt(val as string) || 0; + + // Calculate derived metrics + const totalRequests = int(current.total_requests); + const totalTokens = num(current.total_tokens); + const cachedTokens = num(current.cached_tokens); + const inputTokens = num(current.total_input_tokens); + const uniqueTraces = int(current.unique_traces); + const streamingRequests = int(current.streaming_requests); + + const cacheHitRate = inputTokens > 0 ? (cachedTokens / inputTokens) * 100 : 0; + const prevCacheHitRate = num(previous.total_input_tokens) > 0 + ? (num(previous.cached_tokens) / num(previous.total_input_tokens)) * 100 + : 0; + + const streamingRate = totalRequests > 0 ? (streamingRequests / totalRequests) * 100 : 0; + const prevStreamingRate = int(previous.total_requests) > 0 + ? (int(previous.streaming_requests) / int(previous.total_requests)) * 100 + : 0; + + const avgCallsPerTrace = uniqueTraces > 0 ? totalRequests / uniqueTraces : 0; + const prevAvgCallsPerTrace = int(previous.unique_traces) > 0 + ? int(previous.total_requests) / int(previous.unique_traces) + : 0; + + const totalCost = num(current.total_cost); + const costPer1kTokens = totalTokens > 0 ? (totalCost / (totalTokens / 1000)) : 0; + const prevTotalTokens = num(previous.total_tokens); + const prevCostPer1kTokens = prevTotalTokens > 0 + ? 
(num(previous.total_cost) / (prevTotalTokens / 1000)) + : 0; + + const metrics = { + period: { + days, + current: { start: currentStart.toISOString(), end: now.toISOString() }, + previous: { start: previousStart.toISOString(), end: currentStart.toISOString() }, + }, + volume: { + total_requests: { + value: totalRequests, + unit: "requests", + change_pct: pctChange(current.total_requests, previous.total_requests), + }, + unique_traces: { + value: uniqueTraces, + unit: "traces", + change_pct: pctChange(current.unique_traces, previous.unique_traces), + }, + unique_users: { + value: int(current.unique_users), + unit: "users", + change_pct: pctChange(current.unique_users, previous.unique_users), + }, + avg_calls_per_trace: { + value: Math.round(avgCallsPerTrace * 100) / 100, + unit: "calls/trace", + change_pct: pctChange(avgCallsPerTrace, prevAvgCallsPerTrace), + }, + }, + tokens: { + total_input_tokens: { + value: int(current.total_input_tokens), + unit: "tokens", + change_pct: pctChange(current.total_input_tokens, previous.total_input_tokens), + }, + total_output_tokens: { + value: int(current.total_output_tokens), + unit: "tokens", + change_pct: pctChange(current.total_output_tokens, previous.total_output_tokens), + }, + total_tokens: { + value: int(totalTokens), + unit: "tokens", + change_pct: pctChange(current.total_tokens, previous.total_tokens), + }, + cached_tokens: { + value: int(cachedTokens), + unit: "tokens", + change_pct: pctChange(current.cached_tokens, previous.cached_tokens), + }, + reasoning_tokens: { + value: int(current.reasoning_tokens), + unit: "tokens", + change_pct: pctChange(current.reasoning_tokens, previous.reasoning_tokens), + }, + cache_hit_rate: { + value: Math.round(cacheHitRate * 100) / 100, + unit: "%", + change_pct: pctChange(cacheHitRate, prevCacheHitRate), + }, + avg_tokens_per_request: { + value: totalRequests > 0 ? Math.round(totalTokens / totalRequests) : 0, + unit: "tokens/req", + change_pct: pctChange( + totalRequests > 0 ? 
totalTokens / totalRequests : 0, + int(previous.total_requests) > 0 ? prevTotalTokens / int(previous.total_requests) : 0 + ), + }, + }, + performance: { + avg_latency_ms: { + value: Math.round(num(current.avg_latency_ms) * 100) / 100, + unit: "ms", + change_pct: pctChange(current.avg_latency_ms, previous.avg_latency_ms), + }, + p50_latency_ms: { + value: Math.round(num(current.p50_latency_ms) * 100) / 100, + unit: "ms", + change_pct: pctChange(current.p50_latency_ms, previous.p50_latency_ms), + }, + p95_latency_ms: { + value: Math.round(num(current.p95_latency_ms) * 100) / 100, + unit: "ms", + change_pct: pctChange(current.p95_latency_ms, previous.p95_latency_ms), + }, + p99_latency_ms: { + value: Math.round(num(current.p99_latency_ms) * 100) / 100, + unit: "ms", + change_pct: pctChange(current.p99_latency_ms, previous.p99_latency_ms), + }, + max_latency_ms: { + value: Math.round(num(current.max_latency_ms) * 100) / 100, + unit: "ms", + change_pct: pctChange(current.max_latency_ms, previous.max_latency_ms), + }, + }, + cost: { + total_cost: { + value: Math.round(totalCost * 100) / 100, + unit: "USD", + change_pct: pctChange(current.total_cost, previous.total_cost), + }, + avg_cost_per_request: { + value: totalRequests > 0 ? Math.round((totalCost / totalRequests) * 10000) / 10000 : 0, + unit: "USD/req", + change_pct: pctChange( + totalRequests > 0 ? totalCost / totalRequests : 0, + int(previous.total_requests) > 0 ? 
num(previous.total_cost) / int(previous.total_requests) : 0 + ), + }, + cost_per_1k_tokens: { + value: Math.round(costPer1kTokens * 10000) / 10000, + unit: "USD/1k tokens", + change_pct: pctChange(costPer1kTokens, prevCostPer1kTokens), + }, + }, + usage_patterns: { + streaming_rate: { + value: Math.round(streamingRate * 100) / 100, + unit: "%", + change_pct: pctChange(streamingRate, prevStreamingRate), + }, + }, + }; + + return res.json(metrics); + } catch (err) { + console.error("[tsdb] metrics error", err); + return res.status(500).json({ error: "metrics_failed", detail: (err as Error).message }); + } finally { + if (client) client.release(); + } +}); + +// POST /tsdb/refresh-aggregates +// Manually refresh all continuous aggregates to ensure data is up-to-date +router.post("/refresh-aggregates", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { + let client: PoolClient | undefined; + try { + const ctx = getTokenContext(req); + if (!ctx || !ctx.team_id) { + return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); + } + + client = await connectTeamClient(ctx.team_id); + + const results: Array<{ ca: string; status: string; error?: string }> = []; + + // Refresh all CAs from beginning of time to now + const cas = [ + "llm_events_daily_ca", + "llm_events_daily_by_model_ca", + "llm_events_daily_by_agent_ca", + ]; + + for (const ca of cas) { + try { + await client.query(`CALL refresh_continuous_aggregate('${ca}', NULL, NOW())`); + results.push({ ca, status: "refreshed" }); + } catch (err) { + results.push({ ca, status: "error", error: (err as Error).message }); + } + } + + return res.json({ + message: "Continuous aggregates refresh completed", + results, + refreshed_at: new Date().toISOString(), + }); + } catch (err) { + console.error("[tsdb] refresh-aggregates error", err); + return res.status(500).json({ error: "refresh_failed", detail: (err as Error).message }); + } finally { + if (client) client.release(); + } +}); + +// 
==================== PRICING CRUD ENDPOINTS ====================

// GET /tsdb/pricing - List all pricing
// Optional: ?group_by=provider to group by provider
router.get("/pricing", AUTH_MIDDLEWARE, async (req: Request, res: Response) => {
  try {
    const { group_by } = req.query;

    if (group_by === "provider") {
      const pricing = await pricingService.getPricingByProvider();
      return res.json({ pricing, grouped_by: "provider" });
    }

    const pricing = await pricingService.getAllPricing();
    return res.json({ pricing, count: Object.keys(pricing).length });
  } catch (err) {
    console.error("[tsdb] pricing list error", err);
    return res.status(500).json({ error: "pricing_list_failed", detail: (err as Error).message });
  }
});

// GET /tsdb/pricing/:model - Get specific model pricing
// Optional: ?provider=... to disambiguate models shared across providers.
router.get("/pricing/:model", AUTH_MIDDLEWARE, async (req: Request, res: Response) => {
  try {
    const { model } = req.params;
    const { provider } = req.query;

    const pricing = await pricingService.getModelPricing(model, provider as string | undefined);
    return res.json({ pricing });
  } catch (err) {
    console.error("[tsdb] pricing get error", err);
    return res.status(500).json({ error: "pricing_get_failed", detail: (err as Error).message });
  }
});

// POST /tsdb/pricing - Add new model pricing
router.post("/pricing", AUTH_MIDDLEWARE, async (req: Request, res: Response) => {
  try {
    const ctx = getTokenContext(req);
    const { model, provider, input_per_1m, output_per_1m, cached_input_per_1m, aliases } = req.body;

    if (!model) {
      return res.status(400).json({ error: "model is required" });
    }
    if (input_per_1m === undefined || output_per_1m === undefined) {
      return res.status(400).json({ error: "input_per_1m and output_per_1m are required" });
    }

    // FIX: rates arrive from untrusted JSON; coerce and validate so a
    // non-numeric payload cannot persist NaN pricing into the store.
    const inputRate = Number(input_per_1m);
    const outputRate = Number(output_per_1m);
    if (!Number.isFinite(inputRate) || !Number.isFinite(outputRate)) {
      return res.status(400).json({ error: "input_per_1m and output_per_1m must be finite numbers" });
    }
    // Cached-input rate defaults to half the regular input rate (matches the
    // original `cached_input_per_1m ?? input_per_1m * 0.5` semantics).
    const cachedRate = cached_input_per_1m == null ? inputRate * 0.5 : Number(cached_input_per_1m);
    if (!Number.isFinite(cachedRate)) {
      return res.status(400).json({ error: "cached_input_per_1m must be a finite number" });
    }

    const result = await pricingService.upsertPricing(
      model,
      {
        provider,
        input_per_1m: inputRate,
        output_per_1m: outputRate,
        cached_input_per_1m: cachedRate,
        aliases: aliases || [],
      },
      ctx?.user_id
    );

    return res.json({ message: "pricing_created", pricing: result });
  } catch (err) {
    console.error("[tsdb] pricing create error", err);
    return res.status(500).json({ error: "pricing_create_failed", detail: (err as Error).message });
  }
});

// PUT /tsdb/pricing/:model - Update model pricing
router.put("/pricing/:model", AUTH_MIDDLEWARE, async (req: Request, res: Response) => {
  try {
    const ctx = getTokenContext(req);
    const { model } = req.params;
    const { provider, input_per_1m, output_per_1m, cached_input_per_1m, aliases } = req.body;

    const result = await pricingService.upsertPricing(
      model,
      {
        provider,
        input_per_1m,
        output_per_1m,
        cached_input_per_1m,
        aliases,
      },
      ctx?.user_id
    );

    return res.json({ message: "pricing_updated", pricing: result });
  } catch (err) {
    console.error("[tsdb] pricing update error", err);
    return res.status(500).json({ error: "pricing_update_failed", detail: (err as Error).message });
  }
});

// DELETE /tsdb/pricing/:model - Remove model pricing
router.delete("/pricing/:model", AUTH_MIDDLEWARE, async (req: Request, res: Response) => {
  try {
    const { model } = req.params;
    const deleted = await pricingService.deletePricing(model);

    if (!deleted) {
      return res.status(404).json({ error: "pricing_not_found", model });
    }

    return res.json({ message: "pricing_deleted", model });
  } catch (err) {
    console.error("[tsdb] pricing delete error", err);
    return res.status(500).json({ error: "pricing_delete_failed", detail: (err as Error).message });
  }
});

// POST /tsdb/pricing/seed - Seed default pricing to DB
router.post("/pricing/seed", AUTH_MIDDLEWARE, async (req: Request, res: Response) => {
  try {
    const ctx = getTokenContext(req);
    const { overwrite } = req.body;

    const result = await pricingService.seedDefaultPricing(ctx?.user_id, overwrite === true);

    return res.json({
      message:
"pricing_seeded", + ...result, + }); + } catch (err) { + console.error("[tsdb] pricing seed error", err); + return res.status(500).json({ error: "pricing_seed_failed", detail: (err as Error).message }); + } +}); + +// POST /tsdb/pricing/refresh - Force refresh pricing cache +router.post("/pricing/refresh", AUTH_MIDDLEWARE, async (_req: Request, res: Response) => { + try { + await pricingService.loadPricingFromDb(true); + const pricing = await pricingService.getAllPricing(); + + return res.json({ + message: "cache_refreshed", + count: Object.keys(pricing).length, + refreshed_at: new Date().toISOString(), + }); + } catch (err) { + console.error("[tsdb] pricing refresh error", err); + return res.status(500).json({ error: "pricing_refresh_failed", detail: (err as Error).message }); + } +}); + +router.get("/analytics-wide", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { + let client: PoolClient | undefined; + try { + const ctx = getTokenContext(req); + if (!ctx || !ctx.team_id) { + return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); + } + + const windowLabel = (req.query.window as string) || "this_month"; + + client = await connectTeamClient(ctx.team_id); + + const analytics = await buildAnalytics({ + windowLabel, + client, + resolution: "day", + }); + + return res.json({ analytics }); + } catch (err) { + console.error("[tsdb] analytics-wide error", err); + return res.status(500).json({ error: "analytics_failed", detail: (err as Error).message }); + } finally { + if (client) client.release(); + } +}); + +router.get("/analytics-narrow", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { + let client: PoolClient | undefined; + try { + const ctx = getTokenContext(req); + if (!ctx || !ctx.team_id) { + return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); + } + + client = await connectTeamClient(ctx.team_id); + + const analytics = await buildAnalytics({ + windowLabel: "today", + 
client, + resolution: "hour", + }); + + return res.json({ analytics }); + } catch (err) { + console.error("[tsdb] analytics-narrow error", err); + return res.status(500).json({ error: "analytics_failed", detail: (err as Error).message }); + } finally { + if (client) client.release(); + } +}); + +// POST /tsdb/recalculate-costs - Recalculate historical costs with current pricing +router.post("/recalculate-costs", AUTH_MIDDLEWARE, async (req: Request, res: Response) => { + let poolClient: PoolClient | undefined; + try { + const ctx = getTokenContext(req); + if (!ctx || !ctx.team_id) { + return res.status(401).json({ error: "invalid_token", detail: "Missing team_id in token" }); + } + + const { start, end, batch_size = 1000 } = req.body; + + if (!start || !end) { + return res.status(400).json({ error: "start and end dates are required" }); + } + + const startDate = new Date(start); + const endDate = new Date(end); + + if (isNaN(startDate.getTime()) || isNaN(endDate.getTime())) { + return res.status(400).json({ error: "invalid_dates", detail: "start and end must be valid ISO dates" }); + } + + if (endDate < startDate) { + return res.status(400).json({ error: "invalid_range", detail: "end must be after start" }); + } + + poolClient = await connectTeamClient(ctx.team_id); + + // Ensure pricing is loaded + await pricingService.loadPricingFromDb(true); + + const results: { + updated: number; + processed: number; + errors: Array<{ trace_id?: string; call_sequence?: number; batch?: number; error?: string; warning?: string }>; + batches: number; + } = { + updated: 0, + processed: 0, + errors: [], + batches: 0, + }; + + const startTime = Date.now(); + let offset = 0; + let hasMore = true; + + // Process in batches + while (hasMore) { + // Fetch batch of events + const selectSql = ` + SELECT + "timestamp", + trace_id, + call_sequence, + model, + provider, + usage_input_tokens, + usage_output_tokens, + usage_cached_tokens, + cost_total + FROM llm_events + WHERE "timestamp" >= $1 
AND "timestamp" <= $2 AND team_id = $3 + ORDER BY "timestamp" + LIMIT $4 OFFSET $5 + `; + + const { rows } = await poolClient.query(selectSql, [ + startDate.toISOString(), + endDate.toISOString(), + String(ctx.team_id), + batch_size, + offset, + ]); + + if (rows.length === 0) { + hasMore = false; + break; + } + + results.batches++; + + // Calculate new costs and prepare updates + const updates: Array<{ timestamp: Date; trace_id: string; call_sequence: number; new_cost: number }> = []; + for (const row of rows) { + try { + const costResult = pricingService.calculateCostSync({ + model: row.model || "", + provider: row.provider, + input_tokens: row.usage_input_tokens || 0, + output_tokens: row.usage_output_tokens || 0, + cached_tokens: row.usage_cached_tokens || 0, + }); + + // Only update if cost changed + const oldCost = parseFloat(row.cost_total as string) || 0; + const newCost = costResult.total; + + if (Math.abs(newCost - oldCost) > 0.000001) { + updates.push({ + timestamp: row.timestamp, + trace_id: row.trace_id, + call_sequence: row.call_sequence, + new_cost: newCost, + }); + } + + results.processed++; + } catch (err) { + results.errors.push({ + trace_id: row.trace_id, + call_sequence: row.call_sequence, + error: (err as Error).message, + }); + } + } + + // Apply batch updates + if (updates.length > 0) { + // Use a single UPDATE with CASE for efficiency + const updateSql = ` + UPDATE llm_events + SET cost_total = updates.new_cost + FROM (VALUES ${updates.map((_, i) => `($${i * 4 + 1}::timestamptz, $${i * 4 + 2}::text, $${i * 4 + 3}::integer, $${i * 4 + 4}::numeric)`).join(", ")}) AS updates(ts, tid, cs, new_cost) + WHERE llm_events."timestamp" = updates.ts + AND llm_events.trace_id = updates.tid + AND llm_events.call_sequence = updates.cs + `; + + const updateValues = updates.flatMap((u) => [u.timestamp, u.trace_id, u.call_sequence, u.new_cost]); + + try { + await poolClient.query(updateSql, updateValues); + results.updated += updates.length; + } catch (err) { 
+ results.errors.push({ batch: results.batches, error: (err as Error).message }); + } + } + + offset += batch_size; + + // Safety check - stop if taking too long (5 minutes) + if (Date.now() - startTime > 5 * 60 * 1000) { + results.errors.push({ warning: "Timeout reached after 5 minutes. Partial recalculation completed." }); + hasMore = false; + } + } + + // Refresh continuous aggregates after recalculation + const caRefreshResults: Array<{ ca: string; status: string; error?: string }> = []; + const cas = ["llm_events_daily_ca", "llm_events_daily_by_model_ca", "llm_events_daily_by_agent_ca"]; + + for (const ca of cas) { + try { + await poolClient.query(`CALL refresh_continuous_aggregate('${ca}', $1::timestamptz, $2::timestamptz)`, [ + startDate.toISOString(), + endDate.toISOString(), + ]); + caRefreshResults.push({ ca, status: "refreshed" }); + } catch (err) { + caRefreshResults.push({ ca, status: "error", error: (err as Error).message }); + } + } + + return res.json({ + message: "recalculation_complete", + period: { start: startDate.toISOString(), end: endDate.toISOString() }, + stats: { + processed: results.processed, + updated: results.updated, + batches: results.batches, + duration_ms: Date.now() - startTime, + }, + continuous_aggregates: caRefreshResults, + errors: results.errors.slice(0, 10), // Limit error output + error_count: results.errors.length, + }); + } catch (err) { + console.error("[tsdb] recalculate-costs error", err); + return res.status(500).json({ error: "recalculate_failed", detail: (err as Error).message }); + } finally { + if (poolClient) poolClient.release(); + } +}); + +export default router; diff --git a/hive/src/controllers/user.controller.ts b/hive/src/controllers/user.controller.ts new file mode 100644 index 00000000..8d61166d --- /dev/null +++ b/hive/src/controllers/user.controller.ts @@ -0,0 +1,466 @@ +/** + * User Controller + * + * Handles user authentication endpoints including login-v2. 
+ */ + +import { Router, Request, Response, NextFunction } from "express"; +import config from "../config"; + +const router = Router(); + +/** + * Extract token from Authorization header + * Supports: "jwt ", "Bearer ", or raw "" + */ +function extractToken(authHeader: string): string { + if (authHeader.startsWith("jwt ")) { + return authHeader.slice(4); + } + if (authHeader.startsWith("Bearer ")) { + return authHeader.slice(7); + } + return authHeader; +} + +// Email validation regex +const EMAIL_REGEX = + /[\w!#$%&'*+/=?^_`{|}~-]+(?:\.[\w!#$%&'*+/=?^_`{|}~-]+)*@(?:[\w](?:[\w-]*[\w])?\.)+[\w](?:[\w-]*[\w])?/; + +/** + * POST /user/login-v2 + * + * Authenticate a user with email and password. + * Returns a JWT token on success. + */ +router.post( + "/login-v2", + async (req: Request, res: Response, next: NextFunction) => { + try { + let { email, password } = req.body; + + // Validate required fields + if ( + !email || + typeof email !== "string" || + !password || + typeof password !== "string" + ) { + return res.status(400).json({ + success: false, + msg: "Email and password are required", + }); + } + + // Validate email format + if (!EMAIL_REGEX.test(email)) { + return res.status(400).json({ + success: false, + msg: "Please enter a valid email", + }); + } + + // Trim email + email = email.trim().toLowerCase(); + + // Validate password length + if (password.length < 6) { + return res.status(400).json({ + success: false, + msg: "Password must be at least 6 characters", + }); + } + + // Get userDbService from app.locals + const userDbService = req.app.locals.userDbService; + if (!userDbService) { + console.error("[UserController] userDbService not found in app.locals"); + return res.status(500).json({ + success: false, + msg: "Internal server error", + }); + } + + // Attempt login + const result = await userDbService.login(email, password, { + jwtSecret: config.jwt.secret, + expiresIn: config.jwt.expiresIn, + }); + + console.log( + `[UserController] login-v2: User 
${email} logged in successfully`
      );

      // Return success response
      res.json({
        success: true,
        token: result.token,
        email: result.email,
        firstname: result.firstname,
        lastname: result.lastname,
        name: result.name,
        current_team_id: result.current_team_id,
        create_time: result.created_at,
      });
    } catch (err: any) {
      console.error("[UserController] login-v2 error:", err.message);

      // Handle specific error codes
      if (err.code === "USER_NOT_FOUND" || err.code === "INVALID_CREDENTIALS") {
        return res.status(401).json({
          success: false,
          msg: "Invalid email or password",
        });
      }

      if (err.code === "OAUTH_REQUIRED") {
        return res.status(400).json({
          success: false,
          msg: err.message,
        });
      }

      if (err.code === "ACCOUNT_DISABLED") {
        return res.status(403).json({
          success: false,
          msg: "Your account has been disabled",
        });
      }

      // Generic error
      return res.status(500).json({
        success: false,
        msg: "Login failed. Please try again.",
      });
    }
  }
);

/**
 * POST /user/register
 *
 * Create a new user account and return a signed JWT.
 */
router.post("/register", async (req: Request, res: Response) => {
  try {
    const { password, name, firstname, lastname } = req.body;
    let { email } = req.body;

    // Both credentials must be present, non-empty strings.
    const missingEmail = typeof email !== "string" || email.length === 0;
    const missingPassword = typeof password !== "string" || password.length === 0;
    if (missingEmail || missingPassword) {
      return res.status(400).json({
        success: false,
        msg: "Email and password are required",
      });
    }

    // Reject anything that is not a well-formed address.
    if (!EMAIL_REGEX.test(email)) {
      return res.status(400).json({
        success: false,
        msg: "Please enter a valid email",
      });
    }

    // Canonicalize the address.
    email = email.trim().toLowerCase();

    // Registration demands a stronger password than login (8+ chars).
    if (password.length < 8) {
      return res.status(400).json({
        success: false,
        msg: "Password must be at least 8 characters",
      });
    }

    // The user service is injected at app startup via app.locals.
    const userDbService = req.app.locals.userDbService;
    if (!userDbService) {
      console.error("[UserController] userDbService not found in app.locals");
      return res.status(500).json({
        success: false,
        msg: "Internal server error",
      });
    }

    const created = await userDbService.register(
      { email, password, name, firstname, lastname },
      {
        jwtSecret: config.jwt.secret,
        expiresIn: config.jwt.expiresIn,
        defaultTeamId: 1, // Default to team 1 for local dev
      }
    );

    console.log(
      `[UserController] register: User ${email} registered successfully`
    );

    res.status(201).json({
      success: true,
      token: created.token,
      email: created.email,
      name: created.name,
      firstname: created.firstname,
      lastname: created.lastname,
      current_team_id: created.current_team_id,
      create_time: created.created_at,
    });
  } catch (err: any) {
    console.error("[UserController] register error:", err.message);

    // Duplicate address maps to HTTP 409.
    if (err.code === "EMAIL_EXISTS") {
      return res.status(409).json({
        success: false,
        msg: "Email already registered",
      });
    }

    // Generic error
    return 
res.status(500).json({ + success: false, + msg: "Registration failed. Please try again.", + }); + } +}); + +/** + * GET /user/profile + * + * Get current user profile. + * Requires authentication. + */ +router.get("/profile", async (req: Request, res: Response) => { + try { + const authHeader = req.headers.authorization; + if (!authHeader) { + return res.status(401).json({ + success: false, + msg: "No token provided", + }); + } + + const userDbService = req.app.locals.userDbService; + const user = await userDbService.findByToken(extractToken(authHeader)); + + if (!user) { + return res.status(401).json({ + success: false, + msg: "Invalid token", + }); + } + + // Return in format expected by frontend + res.json({ + data: { + firstname: user.firstname || "", + lastname: user.lastname || "", + email: user.email, + company_name: user.company_name || null, + profile_img_url: user.avatar_url || null, + roleId: user.role_id || 1, + user_id: String(user.id), + team_id: String(user.current_team_id || 1), + roles: user.roles || ["user"], + }, + }); + } catch (err: any) { + console.error("[UserController] /profile error:", err.message); + res.status(500).json({ + success: false, + msg: "Failed to get user profile", + }); + } +}); + +/** + * PUT /user/profile + * + * Update current user profile. + * Requires authentication. 
+ */ +router.put("/profile", async (req: Request, res: Response) => { + try { + const authHeader = req.headers.authorization; + if (!authHeader) { + return res.status(401).json({ + success: false, + msg: "No token provided", + }); + } + + const userDbService = req.app.locals.userDbService; + const user = await userDbService.findByToken(extractToken(authHeader)); + + if (!user) { + return res.status(401).json({ + success: false, + msg: "Invalid token", + }); + } + + const { firstname, lastname } = req.body; + + // Update user profile (basic implementation) + if (userDbService.updateProfile) { + await userDbService.updateProfile(user.id, { firstname, lastname }); + } + + res.json({ message: "Profile updated successfully" }); + } catch (err: any) { + console.error("[UserController] PUT /profile error:", err.message); + res.status(500).json({ + success: false, + msg: "Failed to update profile", + }); + } +}); + +/** + * GET /user/me + * + * Get current user info from token. + * Requires authentication. + */ +router.get("/me", async (req: Request, res: Response) => { + try { + const authHeader = req.headers.authorization; + if (!authHeader) { + return res.status(401).json({ + success: false, + msg: "No token provided", + }); + } + + const userDbService = req.app.locals.userDbService; + const user = await userDbService.findByToken(extractToken(authHeader)); + + if (!user) { + return res.status(401).json({ + success: false, + msg: "Invalid token", + }); + } + + res.json({ + success: true, + user: { + id: user.id, + email: user.email, + name: user.name, + firstname: user.firstname, + lastname: user.lastname, + current_team_id: user.current_team_id, + avatar_url: user.avatar_url, + }, + }); + } catch (err: any) { + console.error("[UserController] /me error:", err.message); + res.status(500).json({ + success: false, + msg: "Failed to get user info", + }); + } +}); + +/** + * GET /user/get-dev-tokens + * + * Get all developer API tokens for the current user. 
+ * Requires authentication. + */ +router.get("/get-dev-tokens", async (req: Request, res: Response) => { + try { + const authHeader = req.headers.authorization; + if (!authHeader) { + return res.status(401).json({ + success: false, + msg: "No token provided", + }); + } + + const userDbService = req.app.locals.userDbService; + const user = await userDbService.findByToken(extractToken(authHeader)); + + if (!user) { + return res.status(401).json({ + success: false, + msg: "Invalid token", + }); + } + + const tokens = await userDbService.getDevTokens(user); + + res.json({ + success: true, + data: tokens, + }); + } catch (err: any) { + console.error("[UserController] /get-dev-tokens error:", err.message); + res.status(500).json({ + success: false, + msg: "Failed to get API tokens", + }); + } +}); + +/** + * POST /user/generate-dev-token + * + * Generate a new developer API token. + * Requires authentication. + */ +router.post("/generate-dev-token", async (req: Request, res: Response) => { + try { + const authHeader = req.headers.authorization; + if (!authHeader) { + return res.status(401).json({ + success: false, + msg: "No token provided", + }); + } + + const userDbService = req.app.locals.userDbService; + const user = await userDbService.findByToken(extractToken(authHeader)); + + if (!user) { + return res.status(401).json({ + success: false, + msg: "Invalid token", + }); + } + + const { label, ttl } = req.body; + + const tokenResult = await userDbService.generateDevToken(user, { + label, + ttl, + jwtSecret: config.jwt.secret, + }); + + console.log( + `[UserController] generate-dev-token: Created token for user ${user.id}` + ); + + res.status(201).json({ + success: true, + data: tokenResult, + }); + } catch (err: any) { + console.error("[UserController] /generate-dev-token error:", err.message); + res.status(500).json({ + success: false, + msg: "Failed to generate API token", + }); + } +}); + +export default router; diff --git a/hive/src/index.ts b/hive/src/index.ts 
new file mode 100644 index 00000000..df960e08 --- /dev/null +++ b/hive/src/index.ts @@ -0,0 +1,116 @@ +/** + * Aden Hive - DevTool Backend Entry Point + * + * LLM observability and control plane service. + */ + +import "dotenv/config"; + +import http from "http"; +import { MongoClient } from "mongodb"; +import app from "./app"; +import config from "./config"; +import { initializeSockets, setUserDbService } from "./sockets/control.socket"; + +const PORT = process.env.PORT || 4000; + +// Declare globals for MongoDB (used by services) +declare global { + var _ACHO_MG_DB: MongoClient; + var _ACHO_MDB_CONFIG: { ERP_DBNAME: string; DBNAME: string }; + var _ACHO_MDB_COLLECTIONS: { + ADEN_CONTROL_POLICIES: string; + ADEN_CONTROL_CONTENT: string; + LLM_PRICING: string; + }; +} + +/** + * Initialize MongoDB connection + */ +async function initMongoDB(): Promise { + if (!config.mongodb.url) { + console.warn( + "[MongoDB] No MONGODB_URL configured, skipping MongoDB initialization" + ); + return; + } + + try { + const client = new MongoClient(config.mongodb.url); + await client.connect(); + + // Set global MongoDB client and config + global._ACHO_MG_DB = client; + global._ACHO_MDB_CONFIG = { + ERP_DBNAME: config.mongodb.erpDbName, + DBNAME: config.mongodb.dbName, + }; + global._ACHO_MDB_COLLECTIONS = { + ADEN_CONTROL_POLICIES: "aden_control_policies", + ADEN_CONTROL_CONTENT: "aden_control_content", + LLM_PRICING: "llm_pricing", + }; + + console.log("[MongoDB] Connected successfully"); + } catch (error) { + console.error("[MongoDB] Connection error:", error); + throw error; + } +} + +// Create HTTP server +const server = http.createServer(app); + +/** + * Start the server + */ +async function start(): Promise { + // Initialize MongoDB + await initMongoDB(); + + // Pass userDbService to socket layer for JWT verification + if (app.locals.userDbService) { + setUserDbService(app.locals.userDbService, config.jwt.secret); + } + + // Initialize WebSockets + const { controlEmitter } = 
await initializeSockets(server); + + // Make control emitter available for policy updates + app.locals.controlEmitter = controlEmitter; + console.log("[Aden Hive] WebSocket initialized"); + + // Start server + server.listen(PORT, () => { + console.log(`[Aden Hive] Server running on port ${PORT}`); + console.log( + `[Aden Hive] Environment: ${process.env.NODE_ENV || "development"}` + ); + }); +} + +// Start the application +start().catch((error) => { + console.error("[Aden Hive] Failed to start:", error); + process.exit(1); +}); + +// Graceful shutdown +process.on("SIGTERM", () => { + console.log("[Aden Hive] SIGTERM received, shutting down gracefully"); + server.close(() => { + console.log("[Aden Hive] Server closed"); + process.exit(0); + }); +}); + +process.on("SIGINT", () => { + console.log("[Aden Hive] SIGINT received, shutting down gracefully"); + server.close(() => { + console.log("[Aden Hive] Server closed"); + process.exit(0); + }); +}); + +export default server; diff --git a/hive/src/mcp/index.ts b/hive/src/mcp/index.ts new file mode 100644 index 00000000..97674d46 --- /dev/null +++ b/hive/src/mcp/index.ts @@ -0,0 +1,62 @@ +/** + * Aden Hive MCP Server + * + * Model Context Protocol server for LLM governance. 
+ * Exposes 19 tools: + * + * Budget Tools (6): + * - hive_budget_get, hive_budget_reset, hive_budget_validate + * - hive_budget_rule_create, hive_budget_rule_update, hive_budget_rule_delete + * + * Agent Status Tools (3): + * - hive_agents_list, hive_agent_health_check, hive_agents_summary + * + * Analytics Tools (5): + * - hive_analytics_wide, hive_analytics_narrow, hive_insights + * - hive_metrics, hive_logs + * + * Policy Tools (5): + * - hive_policies_list, hive_policy_get, hive_policy_create + * - hive_policy_update, hive_policy_clear + * + * Usage: + * import { createMcpRouter } from './mcp'; + * app.use('/mcp', createMcpRouter(getControlEmitter)); + */ + +// Server creation +export { createHiveMcpServer, TOOL_CATALOG } from "./server"; +export type { HiveMcpServerOptions } from "./server"; + +// HTTP transport +export { + createMcpRouter, + getActiveMcpSessionCount, + getTeamMcpSessions, +} from "./transport/http"; + +// API client for direct usage +export { createApiClient } from "./utils/api-client"; +export type { ApiClient, ApiContext } from "./utils/api-client"; + +// Response helpers +export { + createSuccessResponse, + createErrorResponse, + handleToolError, +} from "./utils/response-helpers"; + +// Schema helpers +export { + idSchema, + dateSchema, + dateTimeSchema, + amountSchema, + budgetTypeSchema, + limitActionSchema, + analyticsWindowSchema, + validationContextSchema, + budgetAlertSchema, + budgetNotificationsSchema, + paginationSchema, +} from "./utils/schema-helpers"; diff --git a/hive/src/mcp/server.ts b/hive/src/mcp/server.ts new file mode 100644 index 00000000..cc2e6d6e --- /dev/null +++ b/hive/src/mcp/server.ts @@ -0,0 +1,90 @@ +/** + * Aden Hive MCP Server + * + * MCP server with tools for: + * - Cost control (budget management) + * - Agent status (fleet monitoring) + * - Analytics (insights, metrics, logs) + * - Policy management + */ +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; +import { createApiClient, type 
ApiContext } from "./utils/api-client"; +import { registerBudgetTools } from "./tools/budget"; +import { registerAgentTools, type ControlEmitter } from "./tools/agents"; +import { registerAnalyticsTools } from "./tools/analytics"; +import { registerPolicyTools } from "./tools/policies"; + +export interface HiveMcpServerOptions { + context: ApiContext; + getControlEmitter?: () => ControlEmitter | undefined; +} + +/** + * Create and configure the Aden Hive MCP server + */ +export function createHiveMcpServer(options: HiveMcpServerOptions): McpServer { + const { context, getControlEmitter } = options; + + // Create MCP server + const server = new McpServer({ + name: "aden-hive", + version: "1.0.0", + }); + + // Create API client bound to team context + const api = createApiClient(context); + + // Register all tool categories + registerBudgetTools(server, api); + registerAgentTools(server, api, getControlEmitter || (() => undefined)); + registerAnalyticsTools(server, api); + registerPolicyTools(server, api); + + console.log( + `[MCP] Aden Hive server created with ${19} tools for team ${context.teamId}` + ); + + return server; +} + +/** + * Tool categories and counts for reference + */ +export const TOOL_CATALOG = { + budget: { + count: 6, + tools: [ + "hive_budget_get", + "hive_budget_reset", + "hive_budget_validate", + "hive_budget_rule_create", + "hive_budget_rule_update", + "hive_budget_rule_delete", + ], + }, + agents: { + count: 3, + tools: ["hive_agents_list", "hive_agent_health_check", "hive_agents_summary"], + }, + analytics: { + count: 5, + tools: [ + "hive_analytics_wide", + "hive_analytics_narrow", + "hive_insights", + "hive_metrics", + "hive_logs", + ], + }, + policies: { + count: 5, + tools: [ + "hive_policies_list", + "hive_policy_get", + "hive_policy_create", + "hive_policy_update", + "hive_policy_clear", + ], + }, + total: 19, +}; diff --git a/hive/src/mcp/tools/agents.ts b/hive/src/mcp/tools/agents.ts new file mode 100644 index 00000000..a6bbe36f --- 
/dev/null +++ b/hive/src/mcp/tools/agents.ts @@ -0,0 +1,197 @@ +/** + * Agent Status MCP Tools + * + * Tools for monitoring connected SDK agent instances: + * - hive_agents_list: List all connected SDK instances + * - hive_agent_health_check: Check health of specific agent + * - hive_agents_summary: Get fleet health overview + */ +import { z } from "zod"; +import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; +import type { ApiClient } from "../utils/api-client"; +import { + createSuccessResponse, + handleToolError, +} from "../utils/response-helpers"; + +export interface ControlEmitter { + getConnectedCount: (teamId: string) => number; + getConnectedInstances: (teamId: string) => Array<{ + instance_id: string; + agent?: string; + policy_id?: string | null; + connected_at: string; + last_heartbeat: string; + }>; +} + +export function registerAgentTools( + server: McpServer, + api: ApiClient, + getControlEmitter: () => ControlEmitter | undefined +) { + // ==================== hive_agents_list ==================== + server.tool( + "hive_agents_list", + "Get list of all connected SDK agent instances with health status and connection details", + { + includeMetrics: z + .boolean() + .default(false) + .describe("Include per-agent metrics (connection duration, heartbeat lag)"), + }, + async (params) => { + try { + const controlEmitter = getControlEmitter(); + const result = api.agents.getList(controlEmitter); + + if (params.includeMetrics && result.instances) { + const now = Date.now(); + const enrichedInstances = (result.instances as Array<{ + instance_id: string; + connected_at: string; + last_heartbeat: string; + }>).map((instance) => { + const connectedAt = new Date(instance.connected_at).getTime(); + const lastHeartbeat = new Date(instance.last_heartbeat).getTime(); + + return { + ...instance, + metrics: { + connection_duration_ms: now - connectedAt, + connection_duration_seconds: Math.round((now - connectedAt) / 1000), + heartbeat_lag_ms: now - 
lastHeartbeat, + heartbeat_lag_seconds: Math.round((now - lastHeartbeat) / 1000), + is_healthy: now - lastHeartbeat < 60000, + }, + }; + }); + + return createSuccessResponse({ + ...result, + instances: enrichedInstances, + }); + } + + return createSuccessResponse(result); + } catch (error) { + return handleToolError(error, "hive_agents_list"); + } + } + ); + + // ==================== hive_agent_health_check ==================== + server.tool( + "hive_agent_health_check", + "Check health of a specific agent by instance ID or agent name. Returns health status, last heartbeat, and connection details.", + { + instanceId: z + .string() + .optional() + .describe("SDK instance ID to check"), + agentName: z + .string() + .optional() + .describe("Agent name to filter (returns all instances with this name)"), + }, + async (params) => { + try { + if (!params.instanceId && !params.agentName) { + return handleToolError( + new Error("Either instanceId or agentName is required"), + "hive_agent_health_check" + ); + } + + const controlEmitter = getControlEmitter(); + const result = api.agents.getList(controlEmitter); + + if (!result.instances || result.instances.length === 0) { + return createSuccessResponse({ + found: false, + message: "No agents connected", + query: params, + }); + } + + const now = Date.now(); + const STALE_THRESHOLD_MS = 60000; // 60 seconds + + // Filter instances based on query + const instances = (result.instances as Array<{ + instance_id: string; + agent?: string; + connected_at: string; + last_heartbeat: string; + }>).filter((instance) => { + if (params.instanceId && instance.instance_id === params.instanceId) { + return true; + } + if (params.agentName && instance.agent === params.agentName) { + return true; + } + return false; + }); + + if (instances.length === 0) { + return createSuccessResponse({ + found: false, + message: params.instanceId + ? 
`Instance ${params.instanceId} not found` + : `No instances found for agent ${params.agentName}`, + query: params, + total_connected: result.count, + }); + } + + // Enrich with health status + const healthResults = instances.map((instance) => { + const lastHeartbeat = new Date(instance.last_heartbeat).getTime(); + const heartbeatLag = now - lastHeartbeat; + const isHealthy = heartbeatLag < STALE_THRESHOLD_MS; + + return { + instance_id: instance.instance_id, + agent_name: instance.agent || "unknown", + status: isHealthy ? "healthy" : "unhealthy", + last_heartbeat: instance.last_heartbeat, + last_heartbeat_ago_seconds: Math.round(heartbeatLag / 1000), + connected_at: instance.connected_at, + connection_duration_seconds: Math.round( + (now - new Date(instance.connected_at).getTime()) / 1000 + ), + health_threshold_seconds: STALE_THRESHOLD_MS / 1000, + }; + }); + + return createSuccessResponse({ + found: true, + count: healthResults.length, + instances: healthResults, + summary: { + healthy: healthResults.filter((h) => h.status === "healthy").length, + unhealthy: healthResults.filter((h) => h.status === "unhealthy").length, + }, + }); + } catch (error) { + return handleToolError(error, "hive_agent_health_check"); + } + } + ); + + // ==================== hive_agents_summary ==================== + server.tool( + "hive_agents_summary", + "Get summary of agent fleet health: total active, healthy count, unhealthy count, and breakdown by agent name", + {}, + async () => { + try { + const controlEmitter = getControlEmitter(); + const result = api.agents.getSummary(controlEmitter); + return createSuccessResponse(result); + } catch (error) { + return handleToolError(error, "hive_agents_summary"); + } + } + ); +} diff --git a/hive/src/mcp/tools/analytics.ts b/hive/src/mcp/tools/analytics.ts new file mode 100644 index 00000000..7dc38dab --- /dev/null +++ b/hive/src/mcp/tools/analytics.ts @@ -0,0 +1,169 @@ +/** + * Analytics MCP Tools + * + * Tools for querying analytics and 
insights:
 * - hive_analytics_wide: Dashboard analytics with daily resolution
 * - hive_analytics_narrow: Hourly analytics for today
 * - hive_insights: Actionable insights and anomalies
 * - hive_metrics: Summary metrics with period-over-period change
 * - hive_logs: Raw or aggregated event logs
 */
import { z } from "zod";
import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import type { ApiClient } from "../utils/api-client";
import {
  createSuccessResponse,
  handleToolError,
} from "../utils/response-helpers";
import { analyticsWindowSchema, dateTimeSchema } from "../utils/schema-helpers";

/**
 * Register the five analytics tools on the given MCP server. All data access
 * goes through the team-scoped `api.analytics` client; each handler wraps
 * failures with handleToolError so errors surface as MCP tool errors rather
 * than thrown exceptions.
 */
export function registerAnalyticsTools(server: McpServer, api: ApiClient) {
  // ==================== hive_analytics_wide ====================
  server.tool(
    "hive_analytics_wide",
    "Get dashboard analytics with daily resolution. Use for trend analysis over days/weeks/months. Returns volume, cost, tokens, and performance data points by day.",
    {
      window: analyticsWindowSchema.describe(
        "Time window: all_time, this_month, this_week, last_2_weeks, or today"
      ),
    },
    async (params) => {
      try {
        const result = await api.analytics.getWide(params.window);
        return createSuccessResponse(result);
      } catch (error) {
        return handleToolError(error, "hive_analytics_wide");
      }
    }
  );

  // ==================== hive_analytics_narrow ====================
  server.tool(
    "hive_analytics_narrow",
    "Get hourly analytics for today. Use for intraday monitoring, detecting recent spikes, and real-time cost tracking.",
    {},
    async () => {
      try {
        const result = await api.analytics.getNarrow();
        return createSuccessResponse(result);
      } catch (error) {
        return handleToolError(error, "hive_analytics_narrow");
      }
    }
  );

  // ==================== hive_insights ====================
  server.tool(
    "hive_insights",
    "Get actionable insights: cost spikes, anomalies, trends, cache efficiency, and recommendations. Critical for autonomous monitoring and cost control.",
    {
      days: z
        .number()
        .min(1)
        .max(90)
        .default(30)
        .describe("Analysis period in days (1-90)"),
    },
    async (params) => {
      try {
        const result = await api.analytics.getInsights(params.days);
        return createSuccessResponse(result);
      } catch (error) {
        return handleToolError(error, "hive_insights");
      }
    }
  );

  // ==================== hive_metrics ====================
  server.tool(
    "hive_metrics",
    "Get summary metrics with period-over-period percentage change. Good for quick health checks and comparing current vs previous period.",
    {
      days: z
        .number()
        .min(1)
        .max(365)
        .default(30)
        .describe("Period in days for current window and comparison"),
    },
    async (params) => {
      try {
        const result = await api.analytics.getMetrics(params.days);
        return createSuccessResponse(result);
      } catch (error) {
        return handleToolError(error, "hive_metrics");
      }
    }
  );

  // ==================== hive_logs ====================
  server.tool(
    "hive_logs",
    "Query raw or aggregated event logs. Use for investigation, drill-down, and detailed analysis. Supports grouping by model, agent, or provider.",
    {
      start: dateTimeSchema.describe("Start time (ISO 8601 format)"),
      end: dateTimeSchema.describe("End time (ISO 8601 format)"),
      groupBy: z
        .enum(["model", "agent", "provider", "model,agent", "model,provider"])
        .optional()
        .describe(
          "Aggregate by field(s). If not specified, returns raw log rows."
        ),
      limit: z
        .number()
        .min(1)
        .max(5000)
        .default(500)
        .describe("Maximum rows/aggregations to return"),
      offset: z
        .number()
        .min(0)
        .default(0)
        .describe("Number of rows to skip (for pagination)"),
    },
    async (params) => {
      try {
        // Validate date range before hitting the store. This re-checks
        // parseability even though the params went through dateTimeSchema —
        // NOTE(review): presumably a defense-in-depth choice; confirm whether
        // dateTimeSchema already guarantees parseable timestamps.
        const startDate = new Date(params.start);
        const endDate = new Date(params.end);

        if (isNaN(startDate.getTime()) || isNaN(endDate.getTime())) {
          return handleToolError(
            new Error("Invalid date format. Use ISO 8601 format."),
            "hive_logs"
          );
        }

        if (endDate < startDate) {
          return handleToolError(
            new Error("End date must be after start date"),
            "hive_logs"
          );
        }

        // Warn (but do not reject) if the range is too large: an
        // unaggregated scan of >90 days of raw rows may be slow.
        const rangeDays =
          (endDate.getTime() - startDate.getTime()) / (1000 * 60 * 60 * 24);
        if (rangeDays > 90 && !params.groupBy) {
          console.warn(
            `[MCP] hive_logs: Large date range (${rangeDays.toFixed(
              0
            )} days) without aggregation may be slow`
          );
        }

        const result = await api.analytics.getLogs({
          start: params.start,
          end: params.end,
          groupBy: params.groupBy,
          limit: params.limit,
          offset: params.offset,
        });

        return createSuccessResponse(result);
      } catch (error) {
        return handleToolError(error, "hive_logs");
      }
    }
  );
}

/**
 * Budget MCP Tools
 *
 * Tools for cost control and budget management:
 * - hive_budget_get: Get budget status
 * - hive_budget_reset: Reset budget spend
 * - hive_budget_validate: Validate request against budgets
 * - hive_budget_rule_create: Create budget rule
 * - hive_budget_rule_update: Update budget rule
 * - hive_budget_rule_delete: Delete budget rule
 */
import { z } from "zod";
import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import type { ApiClient } from "../utils/api-client";
import {
createSuccessResponse, + handleToolError, +} from "../utils/response-helpers"; +import { + idSchema, + budgetTypeSchema, + limitActionSchema, + validationContextSchema, + budgetAlertSchema, + budgetNotificationsSchema, +} from "../utils/schema-helpers"; + +export function registerBudgetTools(server: McpServer, api: ApiClient) { + // ==================== hive_budget_get ==================== + server.tool( + "hive_budget_get", + "Get budget status including spend, limit, burn rate, and projected spend for a specific budget ID", + { + budgetId: idSchema.describe("Budget ID to query"), + }, + async (params) => { + try { + const result = await api.budget.getStatus(params.budgetId); + return createSuccessResponse(result); + } catch (error) { + return handleToolError(error, "hive_budget_get"); + } + } + ); + + // ==================== hive_budget_reset ==================== + server.tool( + "hive_budget_reset", + "Reset a budget spend counter to zero. Use when starting new billing cycle or after resolving overage.", + { + budgetId: idSchema.describe("Budget ID to reset"), + reason: z + .string() + .optional() + .describe("Reason for reset (for audit trail)"), + }, + async (params) => { + try { + const result = await api.budget.reset(params.budgetId); + return createSuccessResponse({ + ...result, + reason: params.reason, + reset_at: new Date().toISOString(), + }); + } catch (error) { + return handleToolError(error, "hive_budget_reset"); + } + } + ); + + // ==================== hive_budget_validate ==================== + server.tool( + "hive_budget_validate", + "Validate if a request should be allowed based on budget constraints. 
Returns allow/throttle/degrade/block decision with authoritative spend data.", + { + budgetId: z + .string() + .optional() + .describe("Specific budget ID to validate against"), + estimatedCost: z + .number() + .min(0) + .describe("Estimated cost of the request in USD"), + context: validationContextSchema + .optional() + .describe( + "Context for multi-budget matching (agent, tenant_id, customer_id, feature, tags)" + ), + localSpend: z + .number() + .optional() + .describe("Local spend tracked by SDK (for drift detection)"), + }, + async (params) => { + try { + const result = await api.budget.validate({ + budgetId: params.budgetId, + estimatedCost: params.estimatedCost, + context: params.context, + localSpend: params.localSpend, + }); + return createSuccessResponse(result); + } catch (error) { + return handleToolError(error, "hive_budget_validate"); + } + } + ); + + // ==================== hive_budget_rule_create ==================== + server.tool( + "hive_budget_rule_create", + "Create a new budget rule within a policy. 
Budget rules define spending limits and actions when exceeded.", + { + policyId: z + .string() + .default("default") + .describe('Policy ID (use "default" for default policy)'), + id: idSchema.describe("Unique budget rule ID"), + name: z.string().min(1).describe("Human-readable budget name"), + type: budgetTypeSchema.describe("Budget scope type"), + limit: z.number().min(0).describe("Budget limit in USD"), + limitAction: limitActionSchema + .default("kill") + .describe("Action when limit exceeded"), + degradeToModel: z + .string() + .optional() + .describe('Target model for degradation (required when limitAction is "degrade")'), + degradeToProvider: z + .string() + .optional() + .describe('Target provider for degradation (required when limitAction is "degrade")'), + tags: z + .array(z.string()) + .optional() + .describe('Tags for tag-type budgets (required when type is "tag")'), + alerts: z + .array(budgetAlertSchema) + .default([ + { threshold: 80, enabled: true }, + { threshold: 95, enabled: true }, + ]) + .describe("Alert thresholds as percentage of limit"), + notifications: budgetNotificationsSchema + .default({ + inApp: true, + email: false, + emailRecipients: [], + webhook: false, + }) + .describe("Notification settings"), + }, + async (params) => { + try { + // Validate degradation requirements + if (params.limitAction === "degrade") { + if (!params.degradeToModel || !params.degradeToProvider) { + return handleToolError( + new Error( + "degradeToModel and degradeToProvider are required when limitAction is 'degrade'" + ), + "hive_budget_rule_create" + ); + } + } + + // Validate tag requirements + if (params.type === "tag") { + if (!params.tags || params.tags.length === 0) { + return handleToolError( + new Error("tags array is required when type is 'tag'"), + "hive_budget_rule_create" + ); + } + } + + const result = await api.policy.addBudgetRule(params.policyId, { + id: params.id, + name: params.name, + type: params.type, + limit: params.limit, + spent: 0, + 
limitAction: params.limitAction, + degradeToModel: params.degradeToModel, + degradeToProvider: params.degradeToProvider, + tags: params.tags, + alerts: params.alerts, + notifications: params.notifications, + }); + + return createSuccessResponse({ + success: true, + budget_id: params.id, + policy: result, + }); + } catch (error) { + return handleToolError(error, "hive_budget_rule_create"); + } + } + ); + + // ==================== hive_budget_rule_update ==================== + server.tool( + "hive_budget_rule_update", + "Update an existing budget rule. Only provided fields will be updated.", + { + policyId: z + .string() + .default("default") + .describe('Policy ID (use "default" for default policy)'), + budgetId: idSchema.describe("Budget rule ID to update"), + name: z.string().optional().describe("New budget name"), + limit: z.number().min(0).optional().describe("New budget limit in USD"), + limitAction: limitActionSchema.optional().describe("New action when limit exceeded"), + degradeToModel: z + .string() + .optional() + .describe("New target model for degradation"), + degradeToProvider: z + .string() + .optional() + .describe("New target provider for degradation"), + alerts: z + .array(budgetAlertSchema) + .optional() + .describe("New alert thresholds"), + }, + async (params) => { + try { + // Get current policy to find and update the budget + const policy = await api.policy.get(params.policyId); + + if (!policy) { + return handleToolError( + new Error("Policy not found"), + "hive_budget_rule_update" + ); + } + + const budgets = policy.budgets || []; + const budgetIndex = budgets.findIndex( + (b: { id: string }) => b.id === params.budgetId + ); + + if (budgetIndex === -1) { + return handleToolError( + new Error(`Budget ${params.budgetId} not found in policy`), + "hive_budget_rule_update" + ); + } + + // Update the budget with new values + const updatedBudget = { + ...budgets[budgetIndex], + ...(params.name && { name: params.name }), + ...(params.limit !== 
undefined && { limit: params.limit }), + ...(params.limitAction && { limitAction: params.limitAction }), + ...(params.degradeToModel && { degradeToModel: params.degradeToModel }), + ...(params.degradeToProvider && { degradeToProvider: params.degradeToProvider }), + ...(params.alerts && { alerts: params.alerts }), + }; + + budgets[budgetIndex] = updatedBudget; + + const result = await api.policy.update(params.policyId, { budgets }); + + return createSuccessResponse({ + success: true, + budget_id: params.budgetId, + updated_fields: Object.keys(params).filter( + (k) => + k !== "policyId" && + k !== "budgetId" && + params[k as keyof typeof params] !== undefined + ), + policy: result, + }); + } catch (error) { + return handleToolError(error, "hive_budget_rule_update"); + } + } + ); + + // ==================== hive_budget_rule_delete ==================== + server.tool( + "hive_budget_rule_delete", + "Delete a budget rule from a policy", + { + policyId: z + .string() + .default("default") + .describe('Policy ID (use "default" for default policy)'), + budgetId: idSchema.describe("Budget rule ID to delete"), + }, + async (params) => { + try { + // Get current policy to remove the budget + const policy = await api.policy.get(params.policyId); + + if (!policy) { + return handleToolError( + new Error("Policy not found"), + "hive_budget_rule_delete" + ); + } + + const budgets = policy.budgets || []; + const budgetIndex = budgets.findIndex( + (b: { id: string }) => b.id === params.budgetId + ); + + if (budgetIndex === -1) { + return handleToolError( + new Error(`Budget ${params.budgetId} not found in policy`), + "hive_budget_rule_delete" + ); + } + + // Remove the budget + budgets.splice(budgetIndex, 1); + + const result = await api.policy.update(params.policyId, { budgets }); + + return createSuccessResponse({ + success: true, + deleted_budget_id: params.budgetId, + remaining_budgets: budgets.length, + policy: result, + }); + } catch (error) { + return handleToolError(error, 
"hive_budget_rule_delete"); + } + } + ); +} diff --git a/hive/src/mcp/tools/policies.ts b/hive/src/mcp/tools/policies.ts new file mode 100644 index 00000000..d2cc4e06 --- /dev/null +++ b/hive/src/mcp/tools/policies.ts @@ -0,0 +1,225 @@ +/** + * Policy Management MCP Tools + * + * Tools for managing control policies: + * - hive_policies_list: List all policies + * - hive_policy_get: Get specific policy with rules + * - hive_policy_create: Create new policy + * - hive_policy_update: Update policy + * - hive_policy_clear: Clear all rules from policy + */ +import { z } from "zod"; +import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; +import type { ApiClient } from "../utils/api-client"; +import { + createSuccessResponse, + handleToolError, +} from "../utils/response-helpers"; +import { idSchema, paginationSchema } from "../utils/schema-helpers"; + +export function registerPolicyTools(server: McpServer, api: ApiClient) { + // ==================== hive_policies_list ==================== + server.tool( + "hive_policies_list", + "List all policies for the team. 
Returns policy IDs, names, and rule counts.", + { + limit: z + .number() + .min(1) + .max(100) + .default(100) + .describe("Maximum policies to return"), + offset: z + .number() + .min(0) + .default(0) + .describe("Number of policies to skip"), + }, + async (params) => { + try { + const policies = await api.policy.list({ + limit: params.limit, + offset: params.offset, + }); + + // Summarize policies + const summary = (policies as unknown as Array<{ + _id?: string; + id?: string; + name?: string; + budgets?: unknown[]; + throttles?: unknown[]; + blocks?: unknown[]; + degradations?: unknown[]; + }>).map((p) => ({ + id: p._id || p.id || "unknown", + name: p.name || "Unnamed Policy", + rule_counts: { + budgets: p.budgets?.length || 0, + throttles: p.throttles?.length || 0, + blocks: p.blocks?.length || 0, + degradations: p.degradations?.length || 0, + }, + })); + + return createSuccessResponse({ + count: policies.length, + policies: summary, + }); + } catch (error) { + return handleToolError(error, "hive_policies_list"); + } + } + ); + + // ==================== hive_policy_get ==================== + server.tool( + "hive_policy_get", + 'Get a specific policy with all rules (budgets, throttles, blocks, degradations). Use "default" to get the team\'s default policy.', + { + policyId: z + .string() + .default("default") + .describe('Policy ID or "default" for team default'), + }, + async (params) => { + try { + const policy = await api.policy.get(params.policyId); + + if (!policy) { + return handleToolError( + new Error(`Policy ${params.policyId} not found`), + "hive_policy_get" + ); + } + + return createSuccessResponse(policy); + } catch (error) { + return handleToolError(error, "hive_policy_get"); + } + } + ); + + // ==================== hive_policy_create ==================== + server.tool( + "hive_policy_create", + "Create a new policy for the team. 
New policies start empty (no rules).", + { + name: z.string().min(1).describe("Policy name"), + }, + async (params) => { + try { + const policy = await api.policy.create(params.name); + + return createSuccessResponse({ + success: true, + message: "Policy created", + policy, + }); + } catch (error) { + return handleToolError(error, "hive_policy_create"); + } + } + ); + + // ==================== hive_policy_update ==================== + server.tool( + "hive_policy_update", + "Update a policy's name or replace all rules. For individual rule changes, use budget/throttle/block rule tools.", + { + policyId: z + .string() + .default("default") + .describe('Policy ID or "default" for team default'), + name: z.string().optional().describe("New policy name"), + budgets: z + .array(z.any()) + .optional() + .describe("Complete budgets array (replaces all budgets)"), + throttles: z + .array(z.any()) + .optional() + .describe("Complete throttles array (replaces all throttles)"), + blocks: z + .array(z.any()) + .optional() + .describe("Complete blocks array (replaces all blocks)"), + degradations: z + .array(z.any()) + .optional() + .describe("Complete degradations array (replaces all degradations)"), + }, + async (params) => { + try { + // Only pass defined fields + const updates: { + name?: string; + budgets?: unknown[]; + throttles?: unknown[]; + blocks?: unknown[]; + degradations?: unknown[]; + } = {}; + + if (params.name !== undefined) updates.name = params.name; + if (params.budgets !== undefined) updates.budgets = params.budgets; + if (params.throttles !== undefined) updates.throttles = params.throttles; + if (params.blocks !== undefined) updates.blocks = params.blocks; + if (params.degradations !== undefined) + updates.degradations = params.degradations; + + if (Object.keys(updates).length === 0) { + return handleToolError( + new Error("No updates provided"), + "hive_policy_update" + ); + } + + const policy = await api.policy.update(params.policyId, updates); + + return 
createSuccessResponse({ + success: true, + updated_fields: Object.keys(updates), + policy, + }); + } catch (error) { + return handleToolError(error, "hive_policy_update"); + } + } + ); + + // ==================== hive_policy_clear ==================== + server.tool( + "hive_policy_clear", + "Clear all rules from a policy (budgets, throttles, blocks, degradations). The policy itself is preserved.", + { + policyId: z + .string() + .default("default") + .describe('Policy ID or "default" for team default'), + confirm: z + .boolean() + .describe("Set to true to confirm clearing all rules"), + }, + async (params) => { + try { + if (!params.confirm) { + return createSuccessResponse({ + warning: + "This will delete ALL rules from the policy. Set confirm=true to proceed.", + policy_id: params.policyId, + }); + } + + const policy = await api.policy.clear(params.policyId); + + return createSuccessResponse({ + success: true, + message: "All rules cleared from policy", + policy, + }); + } catch (error) { + return handleToolError(error, "hive_policy_clear"); + } + } + ); +} diff --git a/hive/src/mcp/transport/http.ts b/hive/src/mcp/transport/http.ts new file mode 100644 index 00000000..977761c3 --- /dev/null +++ b/hive/src/mcp/transport/http.ts @@ -0,0 +1,238 @@ +/** + * HTTP/SSE Transport for Aden Hive MCP Server + * + * Provides HTTP-based transport for autonomous LLM agents: + * - GET /mcp - SSE stream for server-to-client messages + * - POST /mcp/message - Client-to-server messages + */ +import express, { Request, Response, NextFunction, Router } from "express"; +import passport from "passport"; +import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js"; +import { createHiveMcpServer, type HiveMcpServerOptions } from "../server"; +import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; + +interface AuthenticatedRequest extends Request { + user?: { + id: string; + current_team_id: string; + }; +} + +interface McpSession { + server: 
McpServer; + transport: SSEServerTransport; + teamId: string; + userId: string; + createdAt: Date; +} + +// Active MCP sessions by session ID +const sessions = new Map(); + +/** + * Create MCP HTTP router + */ +export function createMcpRouter( + getControlEmitter?: HiveMcpServerOptions["getControlEmitter"] +): Router { + const router = express.Router(); + + // All MCP routes require authentication + const authMiddleware = passport.authenticate("jwt", { session: false }); + + /** + * GET /mcp + * SSE endpoint - establishes persistent connection for server-to-client messages + */ + router.get( + "/", + authMiddleware, + async (req: AuthenticatedRequest, res: Response) => { + const teamId = req.user?.current_team_id; + const userId = req.user?.id; + + if (!teamId) { + res.status(401).json({ error: "Team ID required" }); + return; + } + + // Set custom headers (SSE headers are set by the transport) + res.setHeader("X-Accel-Buffering", "no"); + + // Create MCP server for this session + const server = createHiveMcpServer({ + context: { + teamId, + userId, + }, + getControlEmitter, + }); + + // Create SSE transport - it generates its own sessionId internally + const transport = new SSEServerTransport("/mcp/message", res); + + // Get the SDK's session ID (used in query params for POST requests) + const sdkSessionId = transport.sessionId; + + console.log(`[MCP] New SSE connection: session=${sdkSessionId}, team=${teamId}`); + + // Store session by the SDK's session ID + sessions.set(sdkSessionId, { + server, + transport, + teamId, + userId: userId || "unknown", + createdAt: new Date(), + }); + + // Connect server to transport + await server.connect(transport); + + // Handle client disconnect + req.on("close", () => { + console.log(`[MCP] SSE connection closed: session=${sdkSessionId}`); + sessions.delete(sdkSessionId); + server.close(); + }); + } + ); + + /** + * POST /mcp/message + * Receives messages from client + */ + // Note: Do NOT use express.json() here - 
handlePostMessage reads the raw body stream + router.post( + "/message", + authMiddleware, + async (req: AuthenticatedRequest, res: Response) => { + // SDK passes session ID as query parameter: /mcp/message?sessionId=xxx + const sessionId = req.query.sessionId as string; + + if (!sessionId) { + res.status(400).json({ error: "sessionId query parameter required" }); + return; + } + + const session = sessions.get(sessionId); + + if (!session) { + res.status(404).json({ + error: "Session not found", + hint: "Establish SSE connection first via GET /mcp", + }); + return; + } + + // Verify team ID matches + if (session.teamId !== req.user?.current_team_id) { + res.status(403).json({ error: "Session team mismatch" }); + return; + } + + try { + // Handle the message through the transport + await session.transport.handlePostMessage(req, res); + } catch (error) { + console.error(`[MCP] Error handling message:`, error); + res.status(500).json({ + error: "Failed to process message", + details: error instanceof Error ? 
error.message : "Unknown error", + }); + } + } + ); + + /** + * GET /mcp/sessions + * List active MCP sessions (admin/debug endpoint) + */ + router.get( + "/sessions", + authMiddleware, + (req: AuthenticatedRequest, res: Response) => { + const teamId = req.user?.current_team_id; + + // Only show sessions for the requesting team + const teamSessions = Array.from(sessions.entries()) + .filter(([_, session]) => session.teamId === teamId) + .map(([id, session]) => ({ + session_id: id, + team_id: session.teamId, + user_id: session.userId, + created_at: session.createdAt.toISOString(), + age_seconds: Math.round( + (Date.now() - session.createdAt.getTime()) / 1000 + ), + })); + + res.json({ + count: teamSessions.length, + sessions: teamSessions, + }); + } + ); + + /** + * DELETE /mcp/sessions/:sessionId + * Close a specific MCP session + */ + router.delete( + "/sessions/:sessionId", + authMiddleware, + (req: AuthenticatedRequest, res: Response) => { + const { sessionId } = req.params; + const teamId = req.user?.current_team_id; + + const session = sessions.get(sessionId); + + if (!session) { + res.status(404).json({ error: "Session not found" }); + return; + } + + // Verify team ID matches + if (session.teamId !== teamId) { + res.status(403).json({ error: "Cannot close session from another team" }); + return; + } + + // Close the session + session.server.close(); + sessions.delete(sessionId); + + res.json({ + success: true, + message: `Session ${sessionId} closed`, + }); + } + ); + + /** + * GET /mcp/health + * Health check endpoint + */ + router.get("/health", (_req: Request, res: Response) => { + res.json({ + status: "healthy", + active_sessions: sessions.size, + timestamp: new Date().toISOString(), + }); + }); + + return router; +} + +/** + * Get count of active MCP sessions + */ +export function getActiveMcpSessionCount(): number { + return sessions.size; +} + +/** + * Get active sessions for a specific team + */ +export function getTeamMcpSessions(teamId: string): 
McpSession[] { + return Array.from(sessions.values()).filter((s) => s.teamId === teamId); +} diff --git a/hive/src/mcp/utils/api-client.ts b/hive/src/mcp/utils/api-client.ts new file mode 100644 index 00000000..04953c58 --- /dev/null +++ b/hive/src/mcp/utils/api-client.ts @@ -0,0 +1,610 @@ +/** + * Internal API client for MCP tools + * + * This client makes direct calls to the control and tsdb services + * rather than HTTP calls, since we're running inside the same process. + */ +import controlService from "../../services/control/control_service"; +import * as tsdbService from "../../services/tsdb/tsdb_service"; +import { buildAnalytics } from "../../services/tsdb/analytics_service"; +import { getTeamPool, buildSchemaName } from "../../services/tsdb/team_context"; +import type { PoolClient } from "pg"; + +export interface ApiContext { + teamId: string; + userId?: string; +} + +export interface BudgetRule { + id: string; + name?: string; + type?: string; + tags?: string[]; + limit?: number; + spent?: number; + limitAction?: string; + degradeToModel?: string; + degradeToProvider?: string; + alerts?: Array<{ threshold: number; enabled: boolean }>; + notifications?: { + inApp: boolean; + email: boolean; + emailRecipients: string[]; + webhook: boolean; + }; +} + +export interface ValidationContext { + agent?: string; + tenant_id?: string; + customer_id?: string; + feature?: string; + tags?: string[]; +} + +/** + * Create an API client bound to a specific team context + */ +export function createApiClient(context: ApiContext) { + const userContext = { + user_id: context.userId || "mcp-agent", + team_id: context.teamId, + }; + + return { + // ==================== Budget Operations ==================== + budget: { + /** + * Get budget status by ID + */ + async getStatus(budgetId: string) { + return controlService.getBudgetStatus(budgetId); + }, + + /** + * Reset budget spend to zero + */ + async reset(budgetId: string) { + await controlService.resetBudget(budgetId); + 
return { success: true, id: budgetId }; + }, + + /** + * Validate a request against budgets + */ + async validate(params: { + budgetId?: string; + estimatedCost: number; + context?: ValidationContext; + localSpend?: number; + }) { + // Get the policy to validate against + const policy = await controlService.getPolicy( + context.teamId, + null, + userContext + ); + + if (!policy) { + return { + allowed: true, + action: "allow", + reason: "No policy found", + budgets_checked: [], + }; + } + + // Multi-budget validation using context + if (params.context && typeof params.context === "object") { + const matchingBudgets = controlService.findMatchingBudgetsForContext( + policy.budgets || [], + params.context + ); + + if (matchingBudgets.length === 0) { + return { + allowed: true, + action: "allow", + reason: "No budgets match the provided context", + authoritative_spend: 0, + budget_limit: 0, + usage_percent: 0, + projected_percent: 0, + budgets_checked: [], + }; + } + + return controlService.validateMultipleBudgets( + matchingBudgets, + params.estimatedCost, + params.localSpend + ); + } + + // Single budget validation + if (params.budgetId) { + const budget = policy.budgets?.find( + (b: { id: string }) => b.id === params.budgetId + ); + if (!budget) { + return { + allowed: true, + action: "allow", + reason: "Budget not found in policy", + budgets_checked: [], + }; + } + + return controlService.validateMultipleBudgets( + [budget], + params.estimatedCost, + params.localSpend + ); + } + + return { + allowed: true, + action: "allow", + reason: "No budget_id or context provided", + budgets_checked: [], + }; + }, + }, + + // ==================== Policy Operations ==================== + policy: { + /** + * Get all policies for the team + */ + async list(pagination?: { limit?: number; offset?: number }) { + return controlService.getPoliciesByTeam(context.teamId, { + limit: pagination?.limit || 100, + offset: pagination?.offset || 0, + }); + }, + + /** + * Get a specific policy 
+ */ + async get(policyId: string | null) { + const resolvedId = + policyId === "default" || !policyId ? null : policyId; + return controlService.getPolicy(context.teamId, resolvedId, userContext); + }, + + /** + * Create a new policy + */ + async create(name: string) { + return controlService.updatePolicy( + context.teamId, + null, + { name }, + userContext + ); + }, + + /** + * Update a policy + */ + async update( + policyId: string | null, + updates: { + name?: string; + budgets?: unknown[]; + throttles?: unknown[]; + blocks?: unknown[]; + degradations?: unknown[]; + } + ) { + const resolvedId = + policyId === "default" || !policyId ? null : policyId; + return controlService.updatePolicy( + context.teamId, + resolvedId, + updates as Record, + userContext + ); + }, + + /** + * Clear all rules from a policy + */ + async clear(policyId: string | null) { + const resolvedId = + policyId === "default" || !policyId ? null : policyId; + return controlService.clearPolicy( + context.teamId, + resolvedId, + userContext + ); + }, + + /** + * Delete a policy + */ + async delete(policyId: string) { + return controlService.deletePolicy( + context.teamId, + policyId, + userContext + ); + }, + + /** + * Add a budget rule to a policy + */ + async addBudgetRule(policyId: string | null, rule: BudgetRule) { + const resolvedId = + policyId === "default" || !policyId ? 
null : policyId; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return controlService.addBudgetRule( + context.teamId, + resolvedId, + rule as any, + userContext + ); + }, + }, + + // ==================== Analytics Operations ==================== + analytics: { + /** + * Get wide analytics (daily resolution) + */ + async getWide(window: string = "this_month") { + const pool = await getTeamPool(context.teamId); + const schema = buildSchemaName(context.teamId); + const client = await pool.connect(); + + try { + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + + return buildAnalytics({ + windowLabel: window, + client, + resolution: "day", + }); + } finally { + client.release(); + } + }, + + /** + * Get narrow analytics (hourly resolution for today) + */ + async getNarrow() { + const pool = await getTeamPool(context.teamId); + const schema = buildSchemaName(context.teamId); + const client = await pool.connect(); + + try { + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + + return buildAnalytics({ + windowLabel: "today", + client, + resolution: "hour", + }); + } finally { + client.release(); + } + }, + + /** + * Get actionable insights + */ + async getInsights(days: number = 30) { + const pool = await getTeamPool(context.teamId); + const schema = buildSchemaName(context.teamId); + const client = await pool.connect(); + + try { + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + + // Use the insights generation logic from tsdb controller + // This is a simplified version - full implementation would mirror the controller + return this._generateInsights(client, days); + } finally { + client.release(); + } + }, + + /** + * Get summary metrics with period-over-period change + */ + async getMetrics(days: number = 30) { + const pool = await getTeamPool(context.teamId); + const schema = 
buildSchemaName(context.teamId); + const client = await pool.connect(); + + try { + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + + return this._generateMetrics(client, days); + } finally { + client.release(); + } + }, + + /** + * Get logs (raw or aggregated) + */ + async getLogs(params: { + start: string; + end: string; + groupBy?: string; + limit?: number; + offset?: number; + }) { + const pool = await getTeamPool(context.teamId); + const schema = buildSchemaName(context.teamId); + const client = await pool.connect(); + + try { + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + + return this._getLogs(client, params); + } finally { + client.release(); + } + }, + + // Internal helper methods + async _generateInsights(client: PoolClient, days: number) { + // Simplified insights generation + const now = new Date(); + const periodStart = new Date(now); + periodStart.setDate(periodStart.getDate() - days); + + const { rows } = await client.query( + ` + SELECT + COUNT(*) as total_requests, + COALESCE(SUM(cost_total), 0) as total_cost, + COALESCE(AVG(latency_ms), 0) as avg_latency + FROM llm_events + WHERE "timestamp" >= $1 AND "timestamp" <= $2 + `, + [periodStart.toISOString(), now.toISOString()] + ); + + const stats = rows[0]; + const insights = []; + + // Basic usage summary insight + insights.push({ + id: "usage_snapshot", + severity: "summary", + title: "Period usage summary", + description: `${parseInt(stats.total_requests).toLocaleString()} requests totaling $${parseFloat(stats.total_cost).toFixed(2)} over the last ${days} days.`, + metric: { + total_requests: parseInt(stats.total_requests), + total_cost: parseFloat(stats.total_cost), + }, + }); + + return { + period: { days, start: periodStart.toISOString(), end: now.toISOString() }, + insights, + summary: { + total: insights.length, + critical: insights.filter((i) => i.severity === "critical").length, 
+ warning: insights.filter((i) => i.severity === "warning").length, + info: insights.filter((i) => i.severity === "info").length, + }, + }; + }, + + async _generateMetrics(client: PoolClient, days: number) { + const now = new Date(); + const currentStart = new Date(now); + currentStart.setDate(currentStart.getDate() - days); + + const { rows } = await client.query( + ` + SELECT + COUNT(*) as total_requests, + COUNT(DISTINCT trace_id) as unique_traces, + COALESCE(SUM(usage_input_tokens), 0) as total_input_tokens, + COALESCE(SUM(usage_output_tokens), 0) as total_output_tokens, + COALESCE(SUM(cost_total), 0) as total_cost, + COALESCE(AVG(latency_ms), 0) as avg_latency_ms + FROM llm_events + WHERE "timestamp" >= $1 AND "timestamp" <= $2 + `, + [currentStart.toISOString(), now.toISOString()] + ); + + const stats = rows[0]; + + return { + period: { days, start: currentStart.toISOString(), end: now.toISOString() }, + volume: { + total_requests: parseInt(stats.total_requests), + unique_traces: parseInt(stats.unique_traces), + }, + tokens: { + total_input_tokens: parseInt(stats.total_input_tokens), + total_output_tokens: parseInt(stats.total_output_tokens), + }, + cost: { + total_cost: parseFloat(stats.total_cost), + }, + performance: { + avg_latency_ms: parseFloat(stats.avg_latency_ms), + }, + }; + }, + + async _getLogs( + client: PoolClient, + params: { + start: string; + end: string; + groupBy?: string; + limit?: number; + offset?: number; + } + ) { + const { start, end, groupBy, limit = 500, offset = 0 } = params; + + if (groupBy) { + const validFields = ["model", "agent", "provider"]; + const groupFields = groupBy + .split(",") + .map((f) => f.trim()) + .filter((f) => validFields.includes(f)); + + if (groupFields.length > 0) { + const selectFields = groupFields.join(", "); + const { rows } = await client.query( + ` + SELECT + ${selectFields}, + COUNT(*) as request_count, + COALESCE(SUM(cost_total), 0) as total_cost + FROM llm_events + WHERE "timestamp" >= $1 AND 
"timestamp" <= $2 + GROUP BY ${selectFields} + ORDER BY total_cost DESC + LIMIT $3 OFFSET $4 + `, + [start, end, limit, offset] + ); + + return { + window: { start, end }, + group_by: groupFields, + count: rows.length, + aggregations: rows, + }; + } + } + + // Raw logs + const { rows } = await client.query( + ` + SELECT * + FROM llm_events + WHERE "timestamp" >= $1 AND "timestamp" <= $2 + ORDER BY "timestamp" DESC + LIMIT $3 OFFSET $4 + `, + [start, end, limit, offset] + ); + + return { + window: { start, end }, + count: rows.length, + rows, + }; + }, + }, + + // ==================== Agent Status Operations ==================== + agents: { + /** + * Get connected agent instances + * This requires access to the controlEmitter which is set on the Express app + */ + getList(controlEmitter?: { + getConnectedCount: (teamId: string) => number; + getConnectedInstances: (teamId: string) => unknown[]; + }) { + if (!controlEmitter) { + return { + active: false, + count: 0, + instances: [], + timestamp: new Date().toISOString(), + error: "WebSocket not initialized", + }; + } + + const count = controlEmitter.getConnectedCount(context.teamId); + const instances = controlEmitter.getConnectedInstances(context.teamId); + + return { + active: count > 0, + count, + instances, + timestamp: new Date().toISOString(), + }; + }, + + /** + * Get agent fleet summary + */ + getSummary(controlEmitter?: { + getConnectedCount: (teamId: string) => number; + getConnectedInstances: (teamId: string) => Array<{ + instance_id: string; + agent?: string; + last_heartbeat: string; + }>; + }) { + if (!controlEmitter) { + return { + total_active: 0, + healthy: 0, + unhealthy: 0, + stale_connections: 0, + by_agent_name: {}, + timestamp: new Date().toISOString(), + error: "WebSocket not initialized", + }; + } + + const instances = controlEmitter.getConnectedInstances(context.teamId); + const now = Date.now(); + const STALE_THRESHOLD_MS = 60000; // 60 seconds + + let healthy = 0; + let unhealthy = 0; + 
const byAgentName: Record< + string, + { count: number; healthy: number; unhealthy: number } + > = {}; + + for (const instance of instances) { + const lastHeartbeat = new Date(instance.last_heartbeat).getTime(); + const isHealthy = now - lastHeartbeat < STALE_THRESHOLD_MS; + + if (isHealthy) { + healthy++; + } else { + unhealthy++; + } + + const agentName = instance.agent || "unknown"; + if (!byAgentName[agentName]) { + byAgentName[agentName] = { count: 0, healthy: 0, unhealthy: 0 }; + } + byAgentName[agentName].count++; + if (isHealthy) { + byAgentName[agentName].healthy++; + } else { + byAgentName[agentName].unhealthy++; + } + } + + return { + total_active: instances.length, + healthy, + unhealthy, + stale_connections: unhealthy, + by_agent_name: byAgentName, + timestamp: new Date().toISOString(), + }; + }, + }, + }; +} + +export type ApiClient = ReturnType; diff --git a/hive/src/mcp/utils/response-helpers.ts b/hive/src/mcp/utils/response-helpers.ts new file mode 100644 index 00000000..1ef37fa4 --- /dev/null +++ b/hive/src/mcp/utils/response-helpers.ts @@ -0,0 +1,65 @@ +/** + * MCP response formatting helpers + */ + +export interface MCPResponse { + [key: string]: unknown; + content: Array<{ + type: "text"; + text: string; + }>; + isError?: boolean; +} + +/** + * Create a successful MCP response + */ +export function createSuccessResponse(data: unknown): MCPResponse { + return { + content: [ + { + type: "text", + text: JSON.stringify(data, null, 2), + }, + ], + }; +} + +/** + * Create an error MCP response + */ +export function createErrorResponse( + error: string, + details?: unknown +): MCPResponse { + const errorData = { + error, + ...(details && { details }), + }; + + return { + content: [ + { + type: "text", + text: JSON.stringify(errorData, null, 2), + }, + ], + isError: true, + }; +} + +/** + * Handle tool errors consistently + */ +export function handleToolError(error: unknown, toolName: string): MCPResponse { + console.error(`[MCP] Error in 
${toolName}:`, error); + + if (error instanceof Error) { + return createErrorResponse(error.message, { + tool: toolName, + stack: process.env.NODE_ENV === "development" ? error.stack : undefined, + }); + } + + return createErrorResponse("Unknown error occurred", { tool: toolName }); +} diff --git a/hive/src/mcp/utils/schema-helpers.ts b/hive/src/mcp/utils/schema-helpers.ts new file mode 100644 index 00000000..3a6b1d38 --- /dev/null +++ b/hive/src/mcp/utils/schema-helpers.ts @@ -0,0 +1,80 @@ +/** + * Zod schema helpers for MCP tools + */ +import { z } from "zod"; + +// Basic types +export const idSchema = z.string().min(1).describe("Unique identifier"); +export const dateSchema = z + .string() + .regex(/^\d{4}-\d{2}-\d{2}$/) + .describe("Date in YYYY-MM-DD format"); +export const dateTimeSchema = z + .string() + .datetime() + .describe("ISO 8601 datetime string"); +export const amountSchema = z.number().describe("Monetary amount in USD"); + +// Budget types +export const budgetTypeSchema = z + .enum(["global", "agent", "tenant", "customer", "feature", "tag"]) + .describe("Type of budget scope"); + +export const limitActionSchema = z + .enum(["kill", "throttle", "degrade"]) + .describe("Action when budget limit exceeded"); + +// Pagination +export const paginationSchema = z.object({ + limit: z + .number() + .min(1) + .max(1000) + .default(100) + .describe("Max items to return"), + offset: z.number().min(0).default(0).describe("Number of items to skip"), +}); + +// Analytics window +export const analyticsWindowSchema = z + .enum(["all_time", "this_month", "this_week", "last_2_weeks", "today"]) + .default("this_month") + .describe("Time window for analytics data"); + +// Budget validation context +export const validationContextSchema = z + .object({ + agent: z.string().optional().describe("Agent name for agent-type budgets"), + tenant_id: z + .string() + .optional() + .describe("Tenant ID for tenant-type budgets"), + customer_id: z + .string() + .optional() + 
.describe("Customer ID for customer-type budgets"), + feature: z.string().optional().describe("Feature name for feature-type budgets"), + tags: z.array(z.string()).optional().describe("Tags for tag-type budgets"), + }) + .describe("Context for multi-budget matching"); + +// Budget alert configuration +export const budgetAlertSchema = z.object({ + threshold: z + .number() + .min(0) + .max(100) + .describe("Alert threshold as percentage of limit"), + enabled: z.boolean().describe("Whether alert is enabled"), +}); + +// Budget notifications configuration +export const budgetNotificationsSchema = z.object({ + inApp: z.boolean().default(true).describe("Enable in-app notifications"), + email: z.boolean().default(false).describe("Enable email notifications"), + emailRecipients: z + .array(z.string().email()) + .default([]) + .describe("Email recipients"), + webhook: z.boolean().default(false).describe("Enable webhook notifications"), +}); diff --git a/hive/src/middleware/error-handler.middleware.ts b/hive/src/middleware/error-handler.middleware.ts new file mode 100644 index 00000000..8acf5c78 --- /dev/null +++ b/hive/src/middleware/error-handler.middleware.ts @@ -0,0 +1,43 @@ +/** + * Global Error Handler Middleware + * + * Handles all errors and sends consistent JSON responses. + */ + +import { Request, Response, NextFunction } from 'express'; + +interface HttpError extends Error { + status?: number; + statusCode?: number; +} + +/** + * Error handler middleware + * @param {Error} err - Error object + * @param {Object} req - Express request + * @param {Object} res - Express response + * @param {Function} next - Next middleware + */ +function errorHandler(err: HttpError, req: Request, res: Response, next: NextFunction): void { + // Log error + console.error('[Error]', { + message: err.message, + status: err.status || err.statusCode || 500, + path: req.path, + method: req.method, + stack: process.env.NODE_ENV === 'development' ? 
err.stack : undefined, + }); + + // Get status code + const status = err.status || err.statusCode || 500; + + // Send error response + res.status(status).json({ + error: err.name || 'Error', + message: err.message || 'An unexpected error occurred', + status, + ...(process.env.NODE_ENV === 'development' && { stack: err.stack }), + }); +} + +export { errorHandler }; diff --git a/hive/src/routes.ts b/hive/src/routes.ts new file mode 100644 index 00000000..eaed4283 --- /dev/null +++ b/hive/src/routes.ts @@ -0,0 +1,43 @@ +/** + * Route Definitions + * + * Central route registration for all DevTool APIs. + */ + +import express from 'express'; + +// Controllers +import tsdbController from './controllers/tsdb.controller'; +import controlController from './controllers/control.controller'; +import quickstartController from './controllers/quickstart.controller'; +import userController from './controllers/user.controller'; +import iamController from './controllers/iam.controller'; + +const router = express.Router(); + +// ============================================================================= +// User Routes - Authentication and user management +// ============================================================================= +router.use('/user', userController); + +// ============================================================================= +// IAM Routes - Identity and Access Management +// ============================================================================= +router.use('/iam', iamController); + +// ============================================================================= +// TSDB Routes - Time Series Database for LLM metrics +// ============================================================================= +router.use('/tsdb', tsdbController); + +// ============================================================================= +// Control Routes - SDK control plane +// ============================================================================= 
+router.use('/v1/control', controlController); + +// ============================================================================= +// Quickstart Routes - SDK documentation generation +// ============================================================================= +router.use('/quickstart', quickstartController); + +export default router; diff --git a/hive/src/services/control/control_service.ts b/hive/src/services/control/control_service.ts new file mode 100644 index 00000000..e90bed5e --- /dev/null +++ b/hive/src/services/control/control_service.ts @@ -0,0 +1,2065 @@ +/** + * Aden Control Service + * + * Manages control policies and events for the Aden SDK. + * Provides policy management, event storage, and budget tracking. + */ + +import { randomUUID } from "crypto"; +import * as tsdbService from "../tsdb/tsdb_service"; +import pricingService from "../tsdb/pricing_service"; +import { getTeamPool, buildSchemaName } from "../tsdb/team_context"; +// TODO: Integrate mail service from @aden/administration +// import mailService from "../mail_service/mail_service"; +import llmEventBatcher from "./llm_event_batcher"; +import { registerHttpAgent } from "./control_sockets"; + +// In-memory budget tracking (could be moved to Redis for distributed tracking) +// Map: budget_id -> { spent: number, lastReset: Date } +const budgetTracker = new Map(); + +// Notification cooldown tracking to prevent spam +// Map: "budget_id:alert_type:threshold" -> timestamp +const notificationCooldowns = new Map(); +const NOTIFICATION_COOLDOWN_MS = 15 * 60 * 1000; // 15 minutes + +interface MongoCollection { + find: (query: Record) => { toArray: () => Promise; sort: (sort: Record) => { skip: (n: number) => { limit: (n: number) => { toArray: () => Promise } } } }; + findOne: (query: Record) => Promise; + insertOne: (doc: Record) => Promise; + updateOne: (query: Record, update: Record, options?: Record) => Promise; + deleteOne: (query: Record) => Promise<{ deletedCount: number }>; +} + +declare 
const _ACHO_MG_DB: { db: (name: string) => { collection: (name: string) => MongoCollection } }; +declare const _ACHO_MDB_CONFIG: { ERP_DBNAME: string }; +declare const _ACHO_MDB_COLLECTIONS: { ADEN_CONTROL_POLICIES: string; ADEN_CONTROL_CONTENT: string }; +declare const _GLOBAL_CONST: { ARP_URL: string }; + +interface UserContext { + user_id?: string; + team_id?: string | number; +} + +interface Budget { + id: string; + name: string; + type: string; + limit: number; + spent?: number; + limitAction?: string; + degradeToModel?: string; + degradeToProvider?: string; + tagCategory?: string; + tags?: string[]; + alerts?: Array<{ threshold: number; enabled: boolean }>; + notifications?: { + email?: boolean; + emailRecipients?: string[]; + webhook?: boolean; + webhookUrl?: string; + }; + analytics?: { + burnRate: number; + projectedSpend: number; + daysUntilLimit: number | null; + usagePercent: number; + projectedPercent: number; + status: string; + period: { + daysInMonth: number; + daysElapsed: number; + daysRemaining: number; + startOfMonth: string; + endOfMonth: string; + }; + }; +} + +interface Policy { + id: string; + team_id: string | number; + name: string; + version: string; + budgets: Budget[]; + throttles: unknown[]; + blocks: unknown[]; + degradations: unknown[]; + alerts: unknown[]; + created_at: string; + updated_at: string; + created_by?: string; + updated_by?: string; +} + +interface ContentCapture { + system_prompt?: string; + messages?: unknown[]; + tools?: unknown[]; + params?: Record; + response_content?: string; + finish_reason?: string; + choice_count?: number; + has_images?: boolean; + image_urls?: string[]; +} + +interface MetricData { + provider?: string; + model?: string; + total_tokens?: number; + input_tokens?: number; + output_tokens?: number; + cached_tokens?: number; + reasoning_tokens?: number; + agent?: string; + metadata?: Record; + trace_id?: string; + span_id?: string; + request_id?: string; + call_sequence?: number; + stream?: boolean; 
+ agent_stack?: string[]; + latency_ms?: number; + content_capture?: ContentCapture; +} + +interface Event { + event_type: string; + timestamp?: string; + trace_id?: string; + data?: MetricData; + action?: string; + original_model?: string; + provider?: string; + reason?: string; + budget_id?: string; + policy_id?: string; + agent?: string; + agent_name?: string; + sdk_instance_id?: string; + status?: string; + requests_since_last?: number; + message?: string; + stack?: string; +} + +/** + * Get the MongoDB collection for control policies + * @returns MongoDB collection + */ +function getPolicyCollection(): MongoCollection { + return _ACHO_MG_DB + .db(_ACHO_MDB_CONFIG.ERP_DBNAME) + .collection(_ACHO_MDB_COLLECTIONS.ADEN_CONTROL_POLICIES); +} + +/** + * Calculate actual spend and burn rate analytics for a budget from TSDB data + * Uses hybrid CA + base table approach for lowest latency + */ +async function calculateBudgetAnalyticsFromTsdb(teamId: string | number, budget: Budget): Promise<{ + spent: number; + burnRate: number; + projectedSpend: number; + daysUntilLimit: number | null; + usagePercent: number; + projectedPercent: number; + status: string; + source: string; + period: { + daysInMonth: number; + daysElapsed: number; + daysRemaining: number; + startOfMonth: string; + endOfMonth: string; + }; +}> { + const now = new Date(); + const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1); + const endOfMonth = new Date(now.getFullYear(), now.getMonth() + 1, 0); + const daysInMonth = endOfMonth.getDate(); + const daysElapsed = Math.max( + 1, + Math.floor((now.getTime() - startOfMonth.getTime()) / (1000 * 60 * 60 * 24)) + 1 + ); + const daysRemaining = daysInMonth - daysElapsed + 1; + + // Today's midnight for CA vs base table split + const todayMidnight = new Date(); + todayMidnight.setUTCHours(0, 0, 0, 0); + + try { + const pool = await getTeamPool(teamId); + const schema = buildSchemaName(teamId); + const client = await pool.connect(); + + try { + // 
Explicitly set search_path to team schema before querying + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + + let spent = 0; + const usedCA = false; + + // Determine which CA to use based on budget type + // Note: CA (continuous aggregates) are disabled for now because: + // 1. Team-specific schemas don't have CA tables populated + // 2. CA tables need to be refreshed periodically + // TODO: Enable CA once aggregation is set up per-team + const canUseGlobalCA = false; // Disabled: CA not populated in team schemas + const canUseAgentCA = false; // Disabled: agent may be in metadata->>'agent' + + // --- Query CA for historical data (before today) --- + if (startOfMonth < todayMidnight) { + if (canUseGlobalCA) { + // Use daily CA for global budgets + try { + const caSql = ` + SELECT COALESCE(SUM(cost_total), 0) as total_cost + FROM llm_events_daily_ca + WHERE bucket >= $1 AND bucket < $2 + `; + const caResult = await client.query(caSql, [ + startOfMonth.toISOString(), + todayMidnight.toISOString(), + ]); + spent += parseFloat(caResult.rows[0]?.total_cost) || 0; + } catch (caErr) { + // CA not available, will fall back to base table + } + } else if (canUseAgentCA) { + // Use agent CA for agent budgets + try { + const caSql = ` + SELECT COALESCE(SUM(cost_total), 0) as total_cost + FROM llm_events_daily_by_agent_ca + WHERE bucket >= $1 AND bucket < $2 AND agent = $3 + `; + const caResult = await client.query(caSql, [ + startOfMonth.toISOString(), + todayMidnight.toISOString(), + budget.name, + ]); + spent += parseFloat(caResult.rows[0]?.total_cost) || 0; + } catch (caErr) { + // CA not available, will fall back to base table + } + } + } + + // --- Query base table for today's data (always) + historical if CA failed --- + const baseTableStart = usedCA ? 
todayMidnight : startOfMonth; + + const conditions = [`team_id = $1`, `"timestamp" >= $2`, `"timestamp" <= $3`]; + const values: unknown[] = [String(teamId), baseTableStart, now]; + let paramIndex = 4; + + // Apply budget-specific filter based on budget type + const budgetFilter = getBudgetFilter(budget, paramIndex); + if (budgetFilter) { + conditions.push(budgetFilter.condition); + values.push(budgetFilter.value); + } + + const baseSql = ` + SELECT COALESCE(SUM(cost_total), 0) as total_cost + FROM llm_events + WHERE ${conditions.join(" AND ")} + `; + + console.log(`[Aden Control] Budget analytics query for team ${teamId}, schema ${schema}:`); + console.log(`[Aden Control] SQL: ${baseSql}`); + console.log(`[Aden Control] Values:`, values); + + // Debug: check row count and cost + const countResult = await client.query(`SELECT COUNT(*) as cnt FROM llm_events WHERE team_id = $1`, [String(teamId)]); + console.log(`[Aden Control] Total rows in llm_events for team ${teamId}: ${countResult.rows[0]?.cnt}`); + + // Debug: check total cost regardless of timestamp filter + const debugResult = await client.query(`SELECT SUM(cost_total) as total, MIN("timestamp") as min_ts, MAX("timestamp") as max_ts FROM llm_events WHERE team_id = $1`, [String(teamId)]); + console.log(`[Aden Control] All-time cost: $${debugResult.rows[0]?.total}, timestamps: ${debugResult.rows[0]?.min_ts} to ${debugResult.rows[0]?.max_ts}`); + + const baseResult = await client.query(baseSql, values); + console.log(`[Aden Control] Result:`, baseResult.rows[0]); + + spent += parseFloat(baseResult.rows[0]?.total_cost) || 0; + console.log(`[Aden Control] Total spent for budget ${budget.name}: $${spent}`); + + // Calculate burn rate analytics + const burnRate = daysElapsed > 0 ? spent / daysElapsed : 0; + const projectedSpend = burnRate * daysInMonth; + const remaining = Math.max(0, budget.limit - spent); + const daysUntilLimit = burnRate > 0 ? 
remaining / burnRate : Infinity; + const usagePercent = budget.limit > 0 ? (spent / budget.limit) * 100 : 0; + const projectedPercent = + budget.limit > 0 ? (projectedSpend / budget.limit) * 100 : 0; + + // Determine status based on projected spend and current usage + let status = "healthy"; + if (usagePercent >= 100) { + status = "exceeded"; + } else if (projectedPercent >= 100 || daysUntilLimit <= daysRemaining) { + status = "at_risk"; + } else if (usagePercent >= 80 || projectedPercent >= 80) { + status = "warning"; + } + + return { + spent, + burnRate, + projectedSpend, + daysUntilLimit: daysUntilLimit === Infinity ? null : daysUntilLimit, + usagePercent, + projectedPercent, + status, + source: usedCA ? "hybrid_ca" : "base_table", + period: { + daysInMonth, + daysElapsed, + daysRemaining, + startOfMonth: startOfMonth.toISOString(), + endOfMonth: endOfMonth.toISOString(), + }, + }; + } finally { + client.release(); + } + } catch (error) { + console.error( + `[Aden Control] Failed to calculate budget analytics from TSDB:`, + (error as Error).message + ); + // Fall back to in-memory tracker with minimal analytics + const tracker = budgetTracker.get(budget.id); + const spent = tracker?.spent ?? budget.spent ?? 0; + const burnRate = daysElapsed > 0 ? spent / daysElapsed : 0; + const projectedSpend = burnRate * daysInMonth; + + return { + spent, + burnRate, + projectedSpend, + daysUntilLimit: burnRate > 0 ? Math.max(0, budget.limit - spent) / burnRate : null, + usagePercent: budget.limit > 0 ? (spent / budget.limit) * 100 : 0, + projectedPercent: budget.limit > 0 ? 
(projectedSpend / budget.limit) * 100 : 0, + status: "unknown", + source: "fallback", + period: { + daysInMonth, + daysElapsed, + daysRemaining, + startOfMonth: startOfMonth.toISOString(), + endOfMonth: endOfMonth.toISOString(), + }, + }; + } +} + +/** + * Get policy for a team by policy ID + */ +async function getPolicy(teamId: string | number | null, policyId: string | null = null, userContext: UserContext | null = null): Promise { + if (!teamId) { + teamId = userContext?.team_id ?? null; + } + if (!teamId) { + throw new Error("team_id is required to get policy"); + } + + // Use "default" as the actual policy ID when not specified + const actualPolicyId = policyId || "default"; + + const collection = getPolicyCollection(); + let policyDoc = await collection.findOne({ team_id: teamId, id: actualPolicyId }) as Policy & { _id?: unknown } | null; + + if (!policyDoc) { + // Create empty policy with the specified ID + const newPolicy: Policy & { _id?: unknown } = { + id: actualPolicyId, + team_id: teamId, + name: actualPolicyId === "default" ? 
"Default Policy" : "New Policy", + version: randomUUID().slice(0, 8), + budgets: [], + throttles: [], + blocks: [], + degradations: [], + alerts: [], + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + ...(userContext?.user_id && { created_by: userContext.user_id }), + }; + await collection.insertOne(newPolicy as unknown as Record); + policyDoc = newPolicy; + } + + // Remove MongoDB _id from response + const { _id, ...policy } = policyDoc; + + // Enrich budget rules with actual spend and analytics from TSDB + if (policy.budgets && policy.budgets.length > 0) { + policy.budgets = await Promise.all( + policy.budgets.map(async (budget) => { + const analytics = await calculateBudgetAnalyticsFromTsdb(teamId!, budget); + return { + ...budget, + spent: analytics.spent, + analytics: { + burnRate: analytics.burnRate, + projectedSpend: analytics.projectedSpend, + daysUntilLimit: analytics.daysUntilLimit, + usagePercent: analytics.usagePercent, + projectedPercent: analytics.projectedPercent, + status: analytics.status, + period: analytics.period, + }, + }; + }) + ); + } + + return policy as Policy; +} + +/** + * Update policy for a team (or create new if policyId is null) + */ +async function updatePolicy(teamId: string | number | null, policyId: string | null, policyUpdate: Partial, userContext: UserContext | null = null): Promise { + if (!teamId) { + teamId = userContext?.team_id ?? 
null; + } + if (!teamId) { + throw new Error("team_id is required to update policy"); + } + + // Use "default" as the actual policy ID when not specified + const actualPolicyId = policyId || "default"; + + const collection = getPolicyCollection(); + + const updateFields = { + ...policyUpdate, + version: randomUUID().slice(0, 8), + updated_at: new Date().toISOString(), + ...(userContext?.user_id && { updated_by: userContext.user_id }), + }; + + // Build setOnInsert with only fields NOT in policyUpdate to avoid MongoDB conflicts + // Fields in both $set and $setOnInsert cause "would create a conflict" errors + const defaultName = actualPolicyId === "default" ? "Default Policy" : "New Policy"; + const setOnInsert: Record = { + id: actualPolicyId, + team_id: teamId, + ...(!policyUpdate.name && { name: defaultName }), + ...(!("budgets" in policyUpdate) && { budgets: [] }), + ...(!("throttles" in policyUpdate) && { throttles: [] }), + ...(!("blocks" in policyUpdate) && { blocks: [] }), + ...(!("degradations" in policyUpdate) && { degradations: [] }), + ...(!("alerts" in policyUpdate) && { alerts: [] }), + created_at: new Date().toISOString(), + ...(userContext?.user_id && { created_by: userContext.user_id }), + }; + + await collection.updateOne( + { team_id: teamId, id: actualPolicyId }, + { + $set: updateFields, + $setOnInsert: setOnInsert, + }, + { upsert: true } + ); + + // Return the updated policy + return getPolicy(teamId, actualPolicyId); +} + +/** + * Transform a metric event to TSDB format + */ +function transformMetricToTsdbEvent(event: Event, teamId: string | number, policyId: string | null): Record { + const data = event.data || {}; + const now = new Date(); + // Extract agent - metadata.agent takes precedence over top-level agent + const effectiveAgent = (data.metadata?.agent as string) || data.agent || null; + + // Calculate cost for real-time streaming + const cost = pricingService.calculateCostSync({ + model: data.model || "", + provider: data.provider, + 
input_tokens: data.input_tokens || 0, + output_tokens: data.output_tokens || 0, + cached_tokens: data.cached_tokens || 0, + }).total; + + return { + timestamp: event.timestamp || now.toISOString(), + team_id: String(teamId), + user_id: (data.metadata?.user_id as string) || null, + trace_id: data.trace_id || event.trace_id || randomUUID(), + span_id: data.span_id || null, + request_id: data.request_id || null, + provider: data.provider || null, + call_sequence: data.call_sequence ?? 0, + model: data.model || "", + stream: Boolean(data.stream), + agent: effectiveAgent, + agent_name: event.agent_name || null, + agent_stack: data.agent_stack || [], + latency_ms: data.latency_ms || null, + usage: { + input_tokens: data.input_tokens || 0, + output_tokens: data.output_tokens || 0, + total_tokens: data.total_tokens || 0, + cached_tokens: data.cached_tokens || 0, + reasoning_tokens: data.reasoning_tokens || 0, + }, + cost_total: cost, + metadata: { + ...data.metadata, + policy_id: policyId, + event_type: event.event_type, + }, + // Layer 0 content capture (if enabled in SDK) + content_capture: data.content_capture || null, + }; +} + +/** + * Process incoming events from SDK + */ +async function processEvents(teamId: string | number | null, policyId: string | null, events: Event[], userContext: UserContext | null = null): Promise { + if (!teamId) { + teamId = userContext?.team_id ?? 
null; + } + if (!teamId) { + throw new Error("team_id is required to process events"); + } + + const tsdbEvents: Record[] = []; + + for (const event of events) { + // Process specific event types + switch (event.event_type) { + case "metric": + await processMetricEvent(teamId, policyId, event, userContext); + // Transform and collect metric events for TSDB + tsdbEvents.push(transformMetricToTsdbEvent(event, teamId, policyId)); + break; + case "control": + await processControlEvent(teamId, event, policyId); + break; + case "heartbeat": + await processHeartbeatEvent(teamId, policyId, event); + break; + case "error": + await processErrorEvent(teamId, event); + break; + } + } + + // Store metric events in TSDB if we have team context + if (tsdbEvents.length > 0) { + try { + const pool = await getTeamPool(teamId); + const schema = buildSchemaName(teamId); + const client = await pool.connect(); + try { + // Explicitly set search_path to team schema before inserting + await client.query(`CREATE SCHEMA IF NOT EXISTS ${schema}`); + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + const result = await tsdbService.upsertEvents(tsdbEvents as unknown[], client); + console.log( + `[Aden Control] Stored ${result.rowsWritten} events in TSDB for team ${teamId}` + ); + + // Push to real-time WebSocket stream + if (result.rowsWritten > 0) { + llmEventBatcher.add(teamId, tsdbEvents as unknown[]); + } + } finally { + client.release(); + } + } catch (error) { + console.error(`[Aden Control] Failed to store events in TSDB:`, (error as Error).message); + } + } +} + +/** + * Process a metric event - update budget tracking + * Updates spend for all matching budgets based on their type + */ +async function processMetricEvent(teamId: string | number, policyId: string | null, event: Event, userContext: UserContext | null = null): Promise { + const metricData = event.data; + if (!metricData) return; + + // Calculate cost from tokens 
(simplified pricing) + const cost = estimateCost(metricData); + + // Get the policy to find matching budgets + const policy = await getPolicy(teamId, policyId, userContext); + let budgetUpdated = false; + + if (policy.budgets && policy.budgets.length > 0) { + for (const budget of policy.budgets) { + // Determine if this metric applies to this budget based on type + const shouldApply = matchesBudgetType(budget, metricData); + + if (shouldApply) { + const tracker = budgetTracker.get(budget.id) || { + spent: 0, + lastReset: new Date(), + }; + tracker.spent += cost; + budgetTracker.set(budget.id, tracker); + budgetUpdated = true; + + // Check if budget alerts should be triggered + checkBudgetAlerts(budget, tracker.spent, teamId, policyId); + } + } + } + + // Push updated policy with new spend to SDK via WebSocket + if (budgetUpdated && (global as unknown as Record)._ADEN_CONTROL_EMITTER) { + const updatedPolicy = await getPolicy(teamId, policyId); + ((global as unknown as Record)._ADEN_CONTROL_EMITTER as { emitPolicyUpdate: (teamId: string | number, policyId: string | null, policy: Policy) => void }).emitPolicyUpdate(teamId, policyId, updatedPolicy); + } + + console.log( + `[Aden Control] Metric: ${metricData.provider}/${metricData.model} - ${ + metricData.total_tokens + } tokens, $${cost.toFixed(6)}` + ); +} + +/** + * Check if a metric event matches a budget's type criteria + */ +function matchesBudgetType(budget: Budget, metricData: MetricData): boolean { + const metadata = metricData.metadata || {}; + // metadata.agent takes precedence over top-level agent + const effectiveAgent = (metadata.agent as string) || metricData.agent; + + switch (budget.type) { + case "global": + // Global budgets apply to all metrics + return true; + + case "agent": + // Agent budgets apply when agent name matches (from top-level or metadata) + return !!effectiveAgent && budget.name === effectiveAgent; + + case "tenant": + // Tenant budgets apply when tenant_id matches + return 
!!metadata.tenant_id && budget.name === metadata.tenant_id; + + case "customer": + // Customer budgets apply when customer_id matches + return !!metadata.customer_id && budget.name === metadata.customer_id; + + case "feature": + // Feature budgets apply when feature name matches + return !!metadata.feature && budget.name === metadata.feature; + + case "tag": + // Tag budgets apply when the tagCategory value matches budget name + if (!budget.tagCategory || !metadata.tags) return false; + const tagValue = (metadata.tags as Record)[budget.tagCategory]; + return !!tagValue && budget.name === tagValue; + + default: + return false; + } +} + +/** + * Send budget notifications via configured channels (email, webhook) + * Includes cooldown logic to prevent notification spam. + */ +async function sendBudgetNotifications(budget: Budget, alertData: Record, alertType: string = "threshold"): Promise { + const notifications = budget.notifications; + if (!notifications) { + console.log( + `[Aden Control] No notifications configured for budget ${budget.name} (${budget.id})` + ); + return false; + } + + // Check if any notification channel is enabled + if (!notifications.email && !notifications.webhook) { + console.log( + `[Aden Control] Notifications disabled for budget ${budget.name} (email: ${notifications.email}, webhook: ${notifications.webhook})` + ); + return false; + } + + // Check cooldown to prevent spam + const cooldownKey = `${budget.id}:${alertType}:${ + alertData.threshold || alertData.action || "default" + }`; + const lastSent = notificationCooldowns.get(cooldownKey); + const now = Date.now(); + + if (lastSent && now - lastSent < NOTIFICATION_COOLDOWN_MS) { + console.log( + `[Aden Control] Notification for budget ${budget.name} (${alertType}) skipped - cooldown active` + ); + return false; + } + + const { spent, limit, threshold, action } = alertData as { spent: number; limit: number; threshold?: number; action?: string }; + const spentPercentage = limit > 0 ? 
((spent / limit) * 100).toFixed(1) : "0"; + + // Determine alert severity color + const isLimitAction = alertType === "limit_action"; + const alertColor = isLimitAction + ? "#dc2626" + : parseFloat(spentPercentage) >= 90 + ? "#f59e0b" + : "#3b82f6"; + const alertBgColor = isLimitAction + ? "#fef2f2" + : parseFloat(spentPercentage) >= 90 + ? "#fffbeb" + : "#eff6ff"; + + // Build notification content + let subject: string, title: string, description: string; + if (isLimitAction) { + subject = `[Aden] Budget "${budget.name}" - ${(action || "").toUpperCase()}`; + title = "Budget Limit Triggered"; + description = `The budget ${budget.name} has exceeded its limit and triggered a control action.`; + } else { + subject = `[Aden] Budget "${budget.name}" at ${spentPercentage}%`; + title = "Budget Threshold Alert"; + description = `The budget ${budget.name} has reached ${threshold}% of its limit.`; + } + + const htmlContent = ` + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + ${isLimitAction ? action : "Alert"} + +
+

${title}

+
+

${description}

+
+
+ + + + + + + +
+ ${spentPercentage}% + of budget used +
+
+
+
+
+
+ + + + + + + + + + + ${alertData.model ? ` + + + + ` : ""} +
+ + + + + +
Spent$${(spent || 0).toFixed(4)}
+
+ + + + + +
Limit$${(limit || 0).toFixed(2)}
+
+ + + + + +
Budget Type${budget.type}
+
+ + + + + +
Model${alertData.model}
+
+
+ + + + +
+ + View Cost Control Center + +
+
+

+ Sent by Aden Cost Control +

+
+
+ +`; + + // Send email notifications + if (notifications.email) { + if (!notifications.emailRecipients?.length) { + console.log( + `[Aden Control] Email enabled but no recipients configured for budget ${budget.name}` + ); + } else { + // TODO: Re-enable when mailService is integrated from @aden/administration + console.log( + `[Aden Control] Email notification skipped (mail service not configured) for budget ${budget.name}` + ); + } + } + + // Send webhook notifications + if (notifications.webhook && notifications.webhookUrl) { + try { + const response = await fetch(notifications.webhookUrl, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + type: "budget_alert", + alert_type: alertType, + budget_id: budget.id, + budget_name: budget.name, + budget_type: budget.type, + ...alertData, + timestamp: new Date().toISOString(), + }), + }); + if (!response.ok) { + console.error(`[Aden Control] Webhook returned ${response.status}`); + } else { + console.log(`[Aden Control] Sent webhook notification for budget ${budget.name}`); + } + } catch (err) { + console.error(`[Aden Control] Failed to send webhook notification:`, (err as Error).message); + } + } + + // Record cooldown timestamp + notificationCooldowns.set(cooldownKey, now); + return true; +} + +/** + * Check budget alerts and emit notifications if thresholds are crossed + */ +async function checkBudgetAlerts(budget: Budget, currentSpent: number, teamId: string | number, policyId: string | null): Promise { + if (!budget.alerts || !budget.alerts.length || !budget.limit) return; + + const spentPercentage = (currentSpent / budget.limit) * 100; + + for (const alert of budget.alerts) { + if (!alert.enabled) continue; + if (spentPercentage >= alert.threshold) { + const alertData = { + budget_id: budget.id, + budget_name: budget.name, + threshold: alert.threshold, + current_percentage: spentPercentage, + spent: currentSpent, + limit: budget.limit, + }; + + // Emit alert event via 
WebSocket (inApp notification) + if ((global as unknown as Record)._ADEN_CONTROL_EMITTER) { + ((global as unknown as Record)._ADEN_CONTROL_EMITTER as { emitAlert: (teamId: string | number, policyId: string | null, alert: Record) => void }).emitAlert(teamId, policyId, { + ...alertData, + notifications: budget.notifications, + }); + } + + // Send email/webhook notifications + await sendBudgetNotifications(budget, alertData, "threshold"); + } + } +} + +/** + * Estimate cost from metric data using unified pricing service + */ +function estimateCost(metricData: MetricData): number { + const result = pricingService.calculateCostSync({ + model: metricData.model || "", + provider: metricData.provider, + input_tokens: metricData.input_tokens || 0, + output_tokens: metricData.output_tokens || 0, + cached_tokens: metricData.cached_tokens || 0, + }); + return result.total; +} + +/** + * Process a control event - log control decisions and send notifications + */ +async function processControlEvent(teamId: string | number, event: Event, policyId: string | null = null): Promise { + console.log( + `[Aden Control] Control action: ${event.action} on ${event.provider}/${ + event.original_model + }${event.reason ? 
` - ${event.reason}` : ""}` + ); + + // Check if this is a budget-related control action + const isBudgetAction = + event.budget_id || + event.reason?.includes("budget") || + ["kill", "throttle", "degrade", "block"].includes(event.action || ""); + + // Fall back to default policy if not provided + const effectivePolicyId = policyId || event.policy_id || "default"; + + console.log( + `[Aden Control] Control event notification check: isBudgetAction=${isBudgetAction}, policyId=${effectivePolicyId}, budget_id=${event.budget_id}` + ); + + if (isBudgetAction) { + try { + // Get the policy to find the budget + const policy = await getPolicy(teamId, effectivePolicyId); + console.log( + `[Aden Control] Found policy with ${policy?.budgets?.length || 0} budgets` + ); + + if (policy?.budgets?.length) { + // Find matching budget by ID or by type/name + let budget = event.budget_id + ? policy.budgets.find((b) => b.id === event.budget_id) + : null; + + // If no budget_id, try to find by context (agent, etc.) + if (!budget && event.agent) { + budget = policy.budgets.find( + (b) => b.type === "agent" && b.name === event.agent + ); + } + + // Fallback to global budget + if (!budget) { + budget = policy.budgets.find((b) => b.type === "global"); + } + + console.log( + `[Aden Control] Budget lookup result: ${ + budget ? 
`found "${budget.name}" (${budget.id})` : "not found" + }` + ); + + if (budget) { + const alertData = { + action: event.action, + reason: event.reason, + model: event.original_model, + provider: event.provider, + spent: budget.spent || 0, + limit: budget.limit || 0, + }; + + console.log( + `[Aden Control] Sending notification for budget "${ + budget.name + }", notifications: ${JSON.stringify(budget.notifications)}` + ); + await sendBudgetNotifications(budget, alertData, "limit_action"); + } + } + } catch (err) { + console.error( + `[Aden Control] Failed to send control event notifications:`, + (err as Error).message, + (err as Error).stack + ); + } + } else { + console.log( + `[Aden Control] Skipping notification: isBudgetAction=${isBudgetAction}, policyId=${ + policyId || "null" + }` + ); + } +} + +/** + * Process a heartbeat event - track SDK health + */ +async function processHeartbeatEvent( + teamId: string | number, + policyId: string | null, + event: Event +): Promise { + console.log( + `[Aden Control] Heartbeat from ${event.agent_name || event.sdk_instance_id}: ${event.status}, ${event.requests_since_last} requests` + ); + + // Register/update HTTP agent tracking + if (event.sdk_instance_id) { + registerHttpAgent( + teamId, + event.sdk_instance_id, + event.policy_id || policyId, + event.agent_name || null, + event.status || "unknown" + ); + } +} + +/** + * Process an error event + */ +async function processErrorEvent(teamId: string | number, event: Event): Promise { + console.error(`[Aden Control] Error from SDK: ${event.message}`, event.stack); +} + +/** + * Get events for a team (for dashboard) + */ +async function getEvents(teamId: string | number, policyId: string | null = null, options: { limit?: number; offset?: number; start_date?: string; end_date?: string } = {}): Promise { + const { limit = 100, offset = 0, start_date, end_date } = options; + + if (!teamId) { + console.warn(`[Aden Control] No team_id provided, returning empty events`); + return []; 
+ } + + try { + const pool = await getTeamPool(teamId); + const schema = buildSchemaName(teamId); + const client = await pool.connect(); + + try { + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + + // Build query with filters + const conditions = [`team_id = $1`]; + const values: unknown[] = [String(teamId)]; + let paramIndex = 2; + + // Filter by policy_id in metadata if provided + if (policyId) { + conditions.push(`metadata->>'policy_id' = $${paramIndex}`); + values.push(policyId); + paramIndex++; + } + + if (start_date) { + conditions.push(`"timestamp" >= $${paramIndex}`); + values.push(new Date(start_date)); + paramIndex++; + } + + if (end_date) { + conditions.push(`"timestamp" <= $${paramIndex}`); + values.push(new Date(end_date)); + paramIndex++; + } + + const sql = ` + SELECT + "timestamp", + trace_id, + span_id, + provider, + model, + agent, + latency_ms, + usage_input_tokens as input_tokens, + usage_output_tokens as output_tokens, + usage_total_tokens as total_tokens, + cost_total, + metadata + FROM llm_events + WHERE ${conditions.join(" AND ")} + ORDER BY "timestamp" DESC + LIMIT $${paramIndex} OFFSET $${paramIndex + 1} + `; + + values.push(limit, offset); + + const result = await client.query(sql, values); + + return result.rows.map((row: Record) => ({ + timestamp: row.timestamp, + trace_id: row.trace_id, + span_id: row.span_id, + provider: row.provider, + model: row.model, + agent: row.agent, + latency_ms: row.latency_ms, + input_tokens: row.input_tokens, + output_tokens: row.output_tokens, + total_tokens: row.total_tokens, + cost_usd: row.cost_total, + metadata: row.metadata, + })); + } finally { + client.release(); + } + } catch (error) { + console.error(`[Aden Control] Failed to get events from TSDB:`, (error as Error).message); + return []; + } +} + +/** + * Get metrics summary for a team (for dashboard analytics) + */ +async function getMetricsSummary(teamId: string | number, options: { 
start_date?: string; end_date?: string; group_by?: string } = {}): Promise<{ + total_requests: number; + total_cost: number; + total_input_tokens: number; + total_output_tokens: number; + total_tokens: number; + breakdown_by_model: Array<{ model: string; provider: string; requests: number; cost: number; tokens: number }>; +}> { + const { start_date, end_date } = options; + + if (!teamId) { + return { total_requests: 0, total_cost: 0, total_input_tokens: 0, total_output_tokens: 0, total_tokens: 0, breakdown_by_model: [] }; + } + + try { + const pool = await getTeamPool(teamId); + const schema = buildSchemaName(teamId); + const client = await pool.connect(); + + try { + await client.query(`SET search_path TO ${schema}, public`); + await tsdbService.ensureSchema(client); + + const conditions = [`team_id = $1`]; + const values: unknown[] = [String(teamId)]; + let paramIndex = 2; + + if (start_date) { + conditions.push(`"timestamp" >= $${paramIndex}`); + values.push(new Date(start_date)); + paramIndex++; + } + + if (end_date) { + conditions.push(`"timestamp" <= $${paramIndex}`); + values.push(new Date(end_date)); + paramIndex++; + } + + // Get totals + const totalsSql = ` + SELECT + COUNT(*) as total_requests, + COALESCE(SUM(cost_total), 0) as total_cost, + COALESCE(SUM(usage_input_tokens), 0) as total_input_tokens, + COALESCE(SUM(usage_output_tokens), 0) as total_output_tokens, + COALESCE(SUM(usage_total_tokens), 0) as total_tokens + FROM llm_events + WHERE ${conditions.join(" AND ")} + `; + + const totalsResult = await client.query(totalsSql, values); + const totals = totalsResult.rows[0] || {}; + + // Get breakdown by model + const breakdownSql = ` + SELECT + model, + provider, + COUNT(*) as requests, + COALESCE(SUM(cost_total), 0) as cost, + COALESCE(SUM(usage_total_tokens), 0) as tokens + FROM llm_events + WHERE ${conditions.join(" AND ")} + GROUP BY model, provider + ORDER BY cost DESC + LIMIT 20 + `; + + const breakdownResult = await client.query(breakdownSql, 
values); + + return { + total_requests: parseInt(totals.total_requests) || 0, + total_cost: parseFloat(totals.total_cost) || 0, + total_input_tokens: parseInt(totals.total_input_tokens) || 0, + total_output_tokens: parseInt(totals.total_output_tokens) || 0, + total_tokens: parseInt(totals.total_tokens) || 0, + breakdown_by_model: breakdownResult.rows.map((row: Record) => ({ + model: row.model as string, + provider: row.provider as string, + requests: parseInt(row.requests as string) || 0, + cost: parseFloat(row.cost as string) || 0, + tokens: parseInt(row.tokens as string) || 0, + })), + }; + } finally { + client.release(); + } + } catch (error) { + console.error(`[Aden Control] Failed to get metrics summary:`, (error as Error).message); + return { total_requests: 0, total_cost: 0, total_input_tokens: 0, total_output_tokens: 0, total_tokens: 0, breakdown_by_model: [] }; + } +} + +/** + * Get budget status for a budget ID + */ +async function getBudgetStatus(budgetId: string): Promise<{ id: string; spent: number; last_reset: string | null }> { + const tracker = budgetTracker.get(budgetId); + return { + id: budgetId, + spent: tracker?.spent || 0, + last_reset: tracker?.lastReset?.toISOString() || null, + }; +} + +/** + * Reset budget for a budget ID + */ +async function resetBudget(budgetId: string): Promise { + budgetTracker.set(budgetId, { spent: 0, lastReset: new Date() }); +} + +/** + * Add a budget rule to a policy + */ +async function addBudgetRule(teamId: string | number, policyId: string | null, rule: Budget, userContext: UserContext | null = null): Promise { + const policy = await getPolicy(teamId, policyId, userContext); + policy.budgets = policy.budgets || []; + policy.budgets.push(rule); + return updatePolicy(teamId, policyId, { budgets: policy.budgets }, userContext); +} + +/** + * Add a throttle rule to a policy + */ +async function addThrottleRule(teamId: string | number, policyId: string | null, rule: unknown, userContext: UserContext | null = null): 
Promise {
  const policy = await getPolicy(teamId, policyId, userContext);
  if (!policy.throttles) {
    policy.throttles = [];
  }
  policy.throttles.push(rule);
  return updatePolicy(teamId, policyId, { throttles: policy.throttles }, userContext);
}

/**
 * Add a block rule to a policy
 */
async function addBlockRule(teamId: string | number, policyId: string | null, rule: unknown, userContext: UserContext | null = null): Promise {
  const policy = await getPolicy(teamId, policyId, userContext);
  if (!policy.blocks) {
    policy.blocks = [];
  }
  policy.blocks.push(rule);
  return updatePolicy(teamId, policyId, { blocks: policy.blocks }, userContext);
}

/**
 * Add a degradation rule to a policy
 */
async function addDegradeRule(teamId: string | number, policyId: string | null, rule: unknown, userContext: UserContext | null = null): Promise {
  const policy = await getPolicy(teamId, policyId, userContext);
  if (!policy.degradations) {
    policy.degradations = [];
  }
  policy.degradations.push(rule);
  return updatePolicy(teamId, policyId, { degradations: policy.degradations }, userContext);
}

/**
 * Add an alert rule to a policy
 */
async function addAlertRule(teamId: string | number, policyId: string | null, rule: unknown, userContext: UserContext | null = null): Promise {
  const policy = await getPolicy(teamId, policyId, userContext);
  if (!policy.alerts) {
    policy.alerts = [];
  }
  policy.alerts.push(rule);
  return updatePolicy(teamId, policyId, { alerts: policy.alerts }, userContext);
}

/**
 * Remove every rule list (budgets, throttles, blocks, degradations, alerts)
 * from a policy in a single update.
 */
async function clearPolicy(teamId: string | number, policyId: string | null, userContext: UserContext | null = null): Promise {
  const emptiedRules = {
    budgets: [],
    throttles: [],
    blocks: [],
    degradations: [],
    alerts: [],
  };
  return updatePolicy(teamId, policyId, emptiedRules, userContext);
}

/**
 * Delete a policy
 */
async function deletePolicy(teamId: string | number | null, policyId: string | null, userContext: UserContext | null = null):
Promise {
  // Fall back to the caller's team when none was given explicitly.
  if (!teamId) {
    teamId = userContext?.team_id ?? null;
  }
  if (!teamId) {
    throw new Error("team_id is required to delete policy");
  }
  if (!policyId) {
    throw new Error("policy_id is required to delete policy");
  }

  const result = await getPolicyCollection().deleteOne({ team_id: teamId, id: policyId });
  if (result.deletedCount === 0) {
    throw new Error("Policy not found");
  }

  return true;
}

/**
 * List a team's policies (most recently updated first), enriching each
 * budget with live spend and analytics pulled from TSDB.
 */
async function getPoliciesByTeam(teamId: string | number, options: { limit?: number; offset?: number } = {}): Promise {
  const { limit = 100, offset = 0 } = options;

  const docs = await getPolicyCollection()
    .find({ team_id: teamId })
    .sort({ updated_at: -1 })
    .skip(offset)
    .limit(limit)
    .toArray() as (Policy & { _id?: unknown })[];

  // Attach authoritative spend + analytics from TSDB to one budget rule.
  const enrichBudget = async (budget: Budget) => {
    const analytics = await calculateBudgetAnalyticsFromTsdb(teamId, budget);
    return {
      ...budget,
      spent: analytics.spent,
      analytics: {
        burnRate: analytics.burnRate,
        projectedSpend: analytics.projectedSpend,
        daysUntilLimit: analytics.daysUntilLimit,
        usagePercent: analytics.usagePercent,
        projectedPercent: analytics.projectedPercent,
        status: analytics.status,
        period: analytics.period,
      },
    };
  };

  // Strip Mongo's _id and enrich budgets, all policies in parallel.
  return Promise.all(
    docs.map(async ({ _id, ...policy }) => {
      if (policy.budgets && policy.budgets.length > 0) {
        policy.budgets = await Promise.all(policy.budgets.map(enrichBudget));
      }
      return policy as Policy;
    })
  );
}

/**
 * Get usage breakdown for dashboard analytics
 */
async function getUsageBreakdown(teamId: string | number, options: { days?: number; context_id?: string; budget?: Budget } = {}): Promise<{
  daily: Array<{ date: Date; cost: number; requests: number; tokens: number }>;
  by_model: Array<{
model: string; provider: string; cost: number; requests: number; tokens: number }>;
  by_feature: Array<{ feature: string; cost: number; requests: number; tokens: number; percentage: number }>;
}> {
  const { days = 7, context_id, budget } = options;

  // Without a team there is no schema to query; return an empty breakdown.
  if (!teamId) {
    return { daily: [], by_model: [], by_feature: [] };
  }

  try {
    const pool = await getTeamPool(teamId);
    const schema = buildSchemaName(teamId);
    const client = await pool.connect();

    try {
      // Scope all queries to the team's schema and ensure tables exist.
      await client.query(`SET search_path TO ${schema}, public`);
      await tsdbService.ensureSchema(client);

      // Look back `days` days from now.
      const startDate = new Date();
      startDate.setDate(startDate.getDate() - days);

      const conditions = [`team_id = $1`, `"timestamp" >= $2`];
      const values: unknown[] = [String(teamId), startDate];
      let paramIndex = 3;

      // Apply budget-specific filter based on budget type
      if (budget) {
        const budgetFilter = getBudgetFilter(budget, paramIndex);
        if (budgetFilter) {
          conditions.push(budgetFilter.condition);
          values.push(budgetFilter.value);
          paramIndex++;
        }
      } else if (context_id) {
        // Fallback to context_id filter
        conditions.push(`metadata->>'context_id' = $${paramIndex}`);
        values.push(context_id);
        paramIndex++;
      }

      // Daily usage breakdown
      // NOTE(review): ${days} is interpolated directly into the SQL (LIMIT);
      // it defaults to a number here, but confirm callers validate/coerce it.
      const dailySql = `
        SELECT
          DATE_TRUNC('day', "timestamp") as date,
          COALESCE(SUM(cost_total), 0) as cost,
          COUNT(*) as requests,
          COALESCE(SUM(usage_total_tokens), 0) as tokens
        FROM llm_events
        WHERE ${conditions.join(" AND ")}
        GROUP BY DATE_TRUNC('day', "timestamp")
        ORDER BY date DESC
        LIMIT ${days}
      `;
      const dailyResult = await client.query(dailySql, values);

      // Usage by model
      const byModelSql = `
        SELECT
          model,
          provider,
          COALESCE(SUM(cost_total), 0) as cost,
          COUNT(*) as requests,
          COALESCE(SUM(usage_total_tokens), 0) as tokens
        FROM llm_events
        WHERE ${conditions.join(" AND ")}
        GROUP BY model, provider
        ORDER BY cost DESC
        LIMIT 10
      `;
      const byModelResult = await client.query(byModelSql, values);

      // Usage by feature (from metadata); falls back to agent, then 'unknown'.
      const byFeatureSql = `
        SELECT
          COALESCE(metadata->>'feature', agent, 'unknown') as feature,
          COALESCE(SUM(cost_total), 0) as cost,
          COUNT(*) as requests,
          COALESCE(SUM(usage_total_tokens), 0) as tokens
        FROM llm_events
        WHERE ${conditions.join(" AND ")}
        GROUP BY COALESCE(metadata->>'feature', agent, 'unknown')
        ORDER BY cost DESC
        LIMIT 10
      `;
      const byFeatureResult = await client.query(byFeatureSql, values);

      // Calculate totals for percentages (pg returns numerics as strings).
      const totalCost = byFeatureResult.rows.reduce(
        (sum: number, row: Record) => sum + parseFloat((row.cost as string) || "0"),
        0
      );

      return {
        // Daily rows come back newest-first; reverse to chronological order.
        daily: dailyResult.rows
          .map((row: Record) => ({
            date: row.date as Date,
            cost: parseFloat(row.cost as string) || 0,
            requests: parseInt(row.requests as string) || 0,
            tokens: parseInt(row.tokens as string) || 0,
          }))
          .reverse(),
        by_model: byModelResult.rows.map((row: Record) => ({
          model: row.model as string,
          provider: row.provider as string,
          cost: parseFloat(row.cost as string) || 0,
          requests: parseInt(row.requests as string) || 0,
          tokens: parseInt(row.tokens as string) || 0,
        })),
        by_feature: byFeatureResult.rows.map((row: Record) => ({
          feature: row.feature as string,
          cost: parseFloat(row.cost as string) || 0,
          requests: parseInt(row.requests as string) || 0,
          tokens: parseInt(row.tokens as string) || 0,
          // Share of the (top-10) feature cost; 0 when nothing was spent.
          percentage: totalCost > 0 ?
((parseFloat(row.cost as string) || 0) / totalCost) * 100 : 0,
        })),
      };
    } finally {
      client.release();
    }
  } catch (error) {
    console.error(`[Aden Control] Failed to get usage breakdown:`, (error as Error).message);
    return { daily: [], by_model: [], by_feature: [] };
  }
}

/**
 * Translate a budget rule into a SQL predicate over llm_events.
 *
 * Returns a { condition, value } pair whose condition references the
 * placeholder $<paramIndex>, or null when no filter applies (global
 * budgets, tag budgets without tags, unknown types).
 */
function getBudgetFilter(budget: Budget, paramIndex: number): { condition: string; value: unknown } | null {
  const p = `$${paramIndex}`;

  if (budget.type === "global") {
    // Global budgets cover every event — nothing to filter.
    return null;
  }
  if (budget.type === "agent") {
    // Match the agent column, or metadata.agent for legacy rows.
    return { condition: `(agent = ${p} OR metadata->>'agent' = ${p})`, value: budget.name };
  }
  if (budget.type === "tenant") {
    // Tenant budgets key off tenant_id in metadata.
    return { condition: `metadata->>'tenant_id' = ${p}`, value: budget.name };
  }
  if (budget.type === "customer") {
    // Customer budgets key off customer_id in metadata.
    return { condition: `metadata->>'customer_id' = ${p}`, value: budget.name };
  }
  if (budget.type === "feature") {
    // Feature budgets match metadata.feature or the agent column.
    return { condition: `(metadata->>'feature' = ${p} OR agent = ${p})`, value: budget.name };
  }
  if (budget.type === "tag") {
    // Tag budgets match when any budget tag appears in the event's tags
    // array (jsonb ?| operator). No tags configured -> no filter.
    return budget.tags && budget.tags.length > 0
      ? { condition: `metadata->'tags' ?| ${p}`, value: budget.tags }
      : null;
  }
  return null;
}

/**
 * Get rate metrics for dashboard analytics
 */
async function getRateMetrics(teamId: string | number, options: { days?: number; context_id?: string; budget?: Budget } = {}): Promise<{
  peak_rate: number;
  p95_rate: number;
  avg_rate: number;
  min_rate: number;
  max_burst: number;
}> {
  const { days = 30, context_id, budget } =
options;

  // Without a team there is no schema to query; report all-zero rates.
  if (!teamId) {
    return {
      peak_rate: 0,
      p95_rate: 0,
      avg_rate: 0,
      min_rate: 0,
      max_burst: 0,
    };
  }

  try {
    const pool = await getTeamPool(teamId);
    const schema = buildSchemaName(teamId);
    const client = await pool.connect();

    try {
      // Scope all queries to the team's schema and ensure tables exist.
      await client.query(`SET search_path TO ${schema}, public`);
      await tsdbService.ensureSchema(client);

      // Look back `days` days from now.
      const startDate = new Date();
      startDate.setDate(startDate.getDate() - days);

      const conditions = [`team_id = $1`, `"timestamp" >= $2`];
      const values: unknown[] = [String(teamId), startDate];
      let paramIndex = 3;

      // Apply budget-specific filter based on budget type
      if (budget) {
        const budgetFilter = getBudgetFilter(budget, paramIndex);
        if (budgetFilter) {
          conditions.push(budgetFilter.condition);
          values.push(budgetFilter.value);
          paramIndex++;
        }
      } else if (context_id) {
        // Fallback to context_id filter when no budget is given.
        conditions.push(`metadata->>'context_id' = $${paramIndex}`);
        values.push(context_id);
        paramIndex++;
      }

      // Calculate requests per second in 1-minute buckets
      // (requests / 60.0 converts a per-minute count to a per-second rate).
      const ratesSql = `
        WITH minute_buckets AS (
          SELECT
            DATE_TRUNC('minute', "timestamp") as minute,
            COUNT(*) as requests
          FROM llm_events
          WHERE ${conditions.join(" AND ")}
          GROUP BY DATE_TRUNC('minute', "timestamp")
        )
        SELECT
          MAX(requests / 60.0) as peak_rate,
          PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY requests / 60.0) as p95_rate,
          AVG(requests / 60.0) as avg_rate,
          MIN(requests / 60.0) as min_rate
        FROM minute_buckets
      `;
      const ratesResult = await client.query(ratesSql, values);
      const rates = ratesResult.rows[0] || {};

      // Calculate max burst in 5-second windows
      // (timestamps are snapped down to the nearest 5-second boundary).
      const burstSql = `
        WITH five_second_buckets AS (
          SELECT
            DATE_TRUNC('second', "timestamp") -
            (EXTRACT(SECOND FROM "timestamp")::integer % 5) * INTERVAL '1 second' as bucket,
            COUNT(*) as requests
          FROM llm_events
          WHERE ${conditions.join(" AND ")}
          GROUP BY DATE_TRUNC('second', "timestamp") -
          (EXTRACT(SECOND FROM "timestamp")::integer %
5) * INTERVAL '1 second'
        )
        SELECT MAX(requests) as max_burst
        FROM five_second_buckets
      `;
      const burstResult = await client.query(burstSql, values);
      const maxBurst = burstResult.rows[0]?.max_burst || 0;

      // pg returns numerics as strings; coerce, defaulting to 0 on NaN/null.
      return {
        peak_rate: parseFloat(rates.peak_rate) || 0,
        p95_rate: parseFloat(rates.p95_rate) || 0,
        avg_rate: parseFloat(rates.avg_rate) || 0,
        min_rate: parseFloat(rates.min_rate) || 0,
        max_burst: parseInt(maxBurst) || 0,
      };
    } finally {
      client.release();
    }
  } catch (error) {
    // Analytics are best-effort: log and return zeros rather than failing.
    console.error(`[Aden Control] Failed to get rate metrics:`, (error as Error).message);
    return {
      peak_rate: 0,
      p95_rate: 0,
      avg_rate: 0,
      min_rate: 0,
      max_burst: 0,
    };
  }
}

/**
 * Get detailed budget info including spend tracking
 */
async function getBudgetDetails(teamId: string | number, policyId: string | null, budgetId: string): Promise {
  const policy = await getPolicy(teamId, policyId);
  const budget = policy.budgets?.find((b) => b.id === budgetId);

  if (!budget) {
    return null;
  }

  // Get real-time tracker status; in-memory tracker wins over stored spend.
  const tracker = budgetTracker.get(budgetId);
  const spent = tracker?.spent ?? budget.spent ?? 0;
  // NOTE(review): `remaining` and `usagePercent` are computed here but not
  // included in the returned object below — confirm whether getBudgetDetails
  // was meant to expose them.
  const remaining = Math.max(0, budget.limit - spent);
  const usagePercent = budget.limit > 0 ?
(spent / budget.limit) * 100 : 0; + + return { + ...budget, + spent, + }; +} + +interface BudgetContext { + agent?: string; + metadata?: Record; + tenant_id?: string; + customer_id?: string; + feature?: string; + tags?: string[]; +} + +/** + * Find all budgets that match a given context + * Used for multi-budget validation to check ALL applicable budgets + */ +function findMatchingBudgetsForContext(budgets: Budget[], context: BudgetContext = {}): Budget[] { + if (!budgets || !Array.isArray(budgets)) return []; + + // metadata.agent takes precedence over top-level agent + const metadata = context.metadata || {}; + const effectiveAgent = (metadata.agent as string) || context.agent; + + return budgets.filter((budget) => { + switch (budget.type) { + case "global": + // Global budgets always match + return true; + + case "agent": + // Agent budgets match when agent name matches (from top-level or metadata) + return !!effectiveAgent && budget.name === effectiveAgent; + + case "tenant": + // Tenant budgets match when tenant_id matches + return !!context.tenant_id && budget.name === context.tenant_id; + + case "customer": + // Customer budgets match when customer_id matches + return !!context.customer_id && budget.name === context.customer_id; + + case "feature": + // Feature budgets match when feature name matches + return !!context.feature && budget.name === context.feature; + + case "tag": + // Tag budgets match when any budget tag is in context tags + if (!budget.tags || !context.tags) return false; + return budget.tags.some((t) => context.tags!.includes(t)); + + default: + return false; + } + }); +} + +interface BudgetValidationResult { + budget_id: string; + budget_name: string; + budget_type: string; + allowed: boolean; + action: string; + reason: string | null; + authoritative_spend: number; + budget_limit: number; + usage_percent: number; + projected_percent: number; + degrade_to_model: string | null; + degrade_to_provider: string | null; +} + +interface 
MultiValidationResult { + allowed: boolean; + action: string; + reason: string | undefined; + authoritative_spend: number; + budget_limit: number; + usage_percent: number; + projected_percent: number; + degrade_to_model: string | undefined; + degrade_to_provider: string | undefined; + restricting_budget_id: string | undefined; + restricting_budget_name: string | undefined; + budgets_checked: BudgetValidationResult[]; +} + +/** + * Validate multiple budgets and return the most restrictive result + */ +function validateMultipleBudgets(budgets: Budget[], estimatedCost: number, localSpend: number | null = null): MultiValidationResult { + if (!budgets || budgets.length === 0) { + return { + allowed: true, + action: "allow", + reason: "No budgets to validate", + authoritative_spend: 0, + budget_limit: 0, + usage_percent: 0, + projected_percent: 0, + degrade_to_model: undefined, + degrade_to_provider: undefined, + restricting_budget_id: undefined, + restricting_budget_name: undefined, + budgets_checked: [], + }; + } + + // Action priority (higher = more restrictive) + const actionPriority: Record = { allow: 0, throttle: 1, degrade: 2, block: 3 }; + + let mostRestrictiveResult: BudgetValidationResult | null = null; + const budgetsChecked: BudgetValidationResult[] = []; + + for (const budget of budgets) { + // Calculate projected spend + const tsdbSpend = budget.spent || 0; + const authoritativeSpend = + typeof localSpend === "number" && localSpend > tsdbSpend ? localSpend : tsdbSpend; + const projectedSpend = authoritativeSpend + estimatedCost; + const usagePercent = budget.limit > 0 ? (authoritativeSpend / budget.limit) * 100 : 0; + const projectedPercent = budget.limit > 0 ? 
(projectedSpend / budget.limit) * 100 : 0; + + // Determine action for this budget + let allowed = true; + let action = "allow"; + let reason: string | null = null; + let degradeToModel: string | null = null; + let degradeToProvider: string | null = null; + + if (projectedPercent >= 100) { + const limitAction = budget.limitAction || "kill"; + + switch (limitAction) { + case "kill": + allowed = false; + action = "block"; + reason = `Budget "${budget.name}" exceeded: $${projectedSpend.toFixed(4)} > $${ + budget.limit + } (${projectedPercent.toFixed(1)}%)`; + break; + case "degrade": + allowed = true; + action = "degrade"; + reason = `Budget "${budget.name}" at limit, degrading model`; + degradeToModel = budget.degradeToModel || null; + degradeToProvider = budget.degradeToProvider || null; + break; + case "throttle": + allowed = true; + action = "throttle"; + reason = `Budget "${budget.name}" at limit, throttling`; + break; + default: + allowed = false; + action = "block"; + reason = `Budget "${budget.name}" exceeded with unknown action`; + } + } else if ( + projectedPercent >= 90 && + budget.limitAction === "degrade" && + budget.degradeToModel + ) { + allowed = true; + action = "degrade"; + reason = `Budget "${budget.name}" approaching limit (${projectedPercent.toFixed( + 1 + )}%), pre-emptive degradation`; + degradeToModel = budget.degradeToModel; + degradeToProvider = budget.degradeToProvider || null; + } + + const budgetResult: BudgetValidationResult = { + budget_id: budget.id, + budget_name: budget.name, + budget_type: budget.type, + allowed, + action, + reason, + authoritative_spend: authoritativeSpend, + budget_limit: budget.limit, + usage_percent: usagePercent, + projected_percent: projectedPercent, + degrade_to_model: degradeToModel, + degrade_to_provider: degradeToProvider, + }; + + budgetsChecked.push(budgetResult); + + // Track most restrictive result + if ( + !mostRestrictiveResult || + actionPriority[action] > actionPriority[mostRestrictiveResult.action] 
+ ) { + mostRestrictiveResult = budgetResult; + } + } + + return { + allowed: mostRestrictiveResult?.allowed ?? true, + action: mostRestrictiveResult?.action ?? "allow", + reason: mostRestrictiveResult?.reason ?? undefined, + authoritative_spend: mostRestrictiveResult?.authoritative_spend ?? 0, + budget_limit: mostRestrictiveResult?.budget_limit ?? 0, + usage_percent: mostRestrictiveResult?.usage_percent ?? 0, + projected_percent: mostRestrictiveResult?.projected_percent ?? 0, + degrade_to_model: mostRestrictiveResult?.degrade_to_model ?? undefined, + degrade_to_provider: mostRestrictiveResult?.degrade_to_provider ?? undefined, + restricting_budget_id: mostRestrictiveResult?.budget_id, + restricting_budget_name: mostRestrictiveResult?.budget_name, + budgets_checked: budgetsChecked, + }; +} + +// ============================================================================= +// Content Storage (for Layer 0 content capture) +// ============================================================================= + +interface ContentItem { + content_id: string; + content_hash: string; + content: string; + byte_size: number; +} + +/** + * Get the MongoDB collection for content storage + */ +function getContentCollection(): MongoCollection { + return _ACHO_MG_DB + .db(_ACHO_MDB_CONFIG.ERP_DBNAME) + .collection(_ACHO_MDB_COLLECTIONS.ADEN_CONTROL_CONTENT); +} + +/** + * Store large content items from SDK + * Used by Layer 0 content capture for storing content that exceeds max_content_bytes threshold + */ +async function storeContent(teamId: string | number, items: ContentItem[]): Promise<{ stored: number }> { + if (!items || items.length === 0) { + return { stored: 0 }; + } + + const collection = getContentCollection(); + const now = new Date().toISOString(); + + let stored = 0; + for (const item of items) { + try { + await collection.updateOne( + { content_id: item.content_id, team_id: teamId }, + { + $set: { + content_hash: item.content_hash, + content: item.content, + 
byte_size: item.byte_size,
            updated_at: now,
          },
          $setOnInsert: {
            content_id: item.content_id,
            team_id: teamId,
            created_at: now,
          },
        },
        { upsert: true }
      );
      stored++;
    } catch (error) {
      // Best-effort: a single failed item must not abort the whole batch.
      console.error(`[Aden Control] Failed to store content ${item.content_id}:`, (error as Error).message);
    }
  }

  console.log(`[Aden Control] Stored ${stored}/${items.length} content items for team ${teamId}`);
  return { stored };
}

/**
 * Retrieve a stored content item by ID, scoped to the team.
 * Returns null when no matching document exists; Mongo's _id is stripped.
 */
async function getContent(teamId: string | number, contentId: string): Promise {
  const doc = await getContentCollection().findOne({ content_id: contentId, team_id: teamId }) as (ContentItem & { _id?: unknown }) | null;
  if (!doc) {
    return null;
  }
  const { _id, ...content } = doc;
  return content as ContentItem;
}

export default {
  getPolicy,
  updatePolicy,
  deletePolicy,
  processEvents,
  getEvents,
  getMetricsSummary,
  getUsageBreakdown,
  getRateMetrics,
  getBudgetStatus,
  getBudgetDetails,
  resetBudget,
  addBudgetRule,
  addThrottleRule,
  addBlockRule,
  addDegradeRule,
  addAlertRule,
  clearPolicy,
  getPoliciesByTeam,
  findMatchingBudgetsForContext,
  validateMultipleBudgets,
  storeContent,
  getContent,
};
diff --git a/hive/src/services/control/control_sockets.ts b/hive/src/services/control/control_sockets.ts
new file mode 100644
index 00000000..f126f89f
--- /dev/null
+++ b/hive/src/services/control/control_sockets.ts
@@ -0,0 +1,584 @@
/**
 * Aden Control Sockets
 *
 * WebSocket namespace for real-time control plane communication.
+ * Handles: + * - SDK connections and authentication + * - Real-time policy updates + * - Event ingestion + * - Heartbeat monitoring + */ + +import jwt from "jsonwebtoken"; +// Note: userDB.findSaltByToken will be injected via initialization +import controlService from "./control_service"; +import llmEventBatcher from "./llm_event_batcher"; +import type { Server, Socket, Namespace } from "socket.io"; + +interface UserDbService { + findSaltByToken: (token: string) => Promise; +} + +let userDbService: UserDbService | null = null; +let jwtSecret: string = ""; + +/** + * Set user DB service for JWT verification + * @param service - User DB service with findSaltByToken method + * @param secret - JWT secret for token verification + */ +function setUserDbService(service: UserDbService, secret?: string): void { + userDbService = service; + if (secret) { + jwtSecret = secret; + } +} + +interface InstanceInfo { + socket: Socket; + instanceId: string; + policyId: string | null; + connectedAt: Date; + lastHeartbeat: Date; +} + +// HTTP-only agents (no socket connection) +interface HttpInstanceInfo { + instanceId: string; + policyId: string | null; + agentName: string | null; + status: string; + firstSeen: Date; + lastHeartbeat: Date; +} + +// Track connected SDK instances (WebSocket) +// teamId -> Map +const connectedInstances = new Map>(); + +// Track HTTP-only SDK instances (no WebSocket, identified by heartbeats) +// teamId -> Map +const httpInstances = new Map>(); + +// TTL for HTTP agents (remove if no heartbeat for this duration) +const HTTP_AGENT_TTL_MS = 60000; // 60 seconds + +/** + * Register or update an HTTP-only agent from heartbeat + * Called from control_service when processing heartbeat events + */ +function registerHttpAgent( + teamId: string | number, + instanceId: string, + policyId: string | null, + agentName: string | null, + status: string +): void { + const teamKey = String(teamId); + + // Check if this instance is already connected via WebSocket + 
const wsInstances = connectedInstances.get(teamKey); + if (wsInstances) { + for (const info of wsInstances.values()) { + if (info.instanceId === instanceId) { + // Already tracked via WebSocket, just update heartbeat there + info.lastHeartbeat = new Date(); + return; + } + } + } + + // Track as HTTP-only agent + if (!httpInstances.has(teamKey)) { + httpInstances.set(teamKey, new Map()); + } + + const existing = httpInstances.get(teamKey)!.get(instanceId); + if (existing) { + // Update existing + existing.lastHeartbeat = new Date(); + existing.status = status; + existing.policyId = policyId; + existing.agentName = agentName; + } else { + // New HTTP agent + httpInstances.get(teamKey)!.set(instanceId, { + instanceId, + policyId, + agentName, + status, + firstSeen: new Date(), + lastHeartbeat: new Date(), + }); + console.log( + `[Aden Control] HTTP agent registered: ${agentName || instanceId.slice(0, 8)}... (team: ${teamKey})` + ); + } +} + +/** + * Clean up stale HTTP agents that haven't sent heartbeats + */ +function cleanupStaleHttpAgents(): void { + const now = Date.now(); + + for (const [teamId, instances] of httpInstances) { + for (const [instanceId, info] of instances) { + if (now - info.lastHeartbeat.getTime() > HTTP_AGENT_TTL_MS) { + instances.delete(instanceId); + console.log( + `[Aden Control] HTTP agent expired: ${instanceId.slice(0, 8)}... 
(team: ${teamId})` + ); + } + } + + // Clean up empty team maps + if (instances.size === 0) { + httpInstances.delete(teamId); + } + } +} + +// Run cleanup every 30 seconds +setInterval(cleanupStaleHttpAgents, 30000); + +interface AdenSocket extends Socket { + user?: Record; + teamId?: string; + policyId?: string | null; + sdkInstanceId?: string; +} + +interface RedisEmitter { + of: (namespace: string) => ControlEmitterInner; +} + +interface ControlEmitterInner { + to: (room: string) => { emit: (event: string, payload: unknown) => void }; + emit: (event: string, payload: unknown) => void; +} + +interface MessageData { + event_type?: string; + [key: string]: unknown; +} + +interface ControlEmitter { + emitPolicyUpdate: (teamId: string | number, policyId: string | null, policy: unknown) => void; + emitCommand: (teamId: string | number, command: { action: string; [key: string]: unknown }) => void; + emitAlert: (teamId: string | number, policyId: string | null, alert: unknown) => void; + emitToInstance: (teamId: string | number, instanceId: string, message: unknown) => boolean; + getConnectedCount: (teamId: string | number) => number; + getConnectedInstances: (teamId: string | number) => Array<{ + instance_id: string; + policy_id: string | null; + agent_name: string | null; + connected_at: string; + last_heartbeat: string; + connection_type: "websocket" | "http"; + status?: string; + }>; + getTotalConnectedCount: () => number; +} + +/** + * Initialize Aden Control WebSocket namespace + * @param io - Socket.IO server instance + * @param rootEmitter - Redis emitter for cross-instance communication + * @returns Control emitter for sending updates + */ +function initAdenControlSockets(io: Server, rootEmitter: RedisEmitter): ControlEmitter { + // Create namespace for control plane + const controlNamespace: Namespace = io.of("/v1/control/ws"); + + // Create emitter for this namespace + const controlEmitter: ControlEmitterInner = rootEmitter.of("/v1/control/ws"); + + // 
Initialize LLM event batcher with emitter for real-time streaming + llmEventBatcher.setEmitter(controlEmitter as unknown as { to: (room: string) => { emit: (event: string, payload: unknown) => void } }); + + // Authentication middleware - verify JWT token + controlNamespace.use(async (socket: AdenSocket, next: (err?: Error) => void) => { + try { + let token: string | undefined = + socket.handshake.auth?.token || + socket.handshake.headers?.authorization || + (socket.handshake.query?.token as string | undefined); + + if (!token) { + console.error("[Aden Control WS] No authorization provided"); + return next(new Error("Authentication required")); + } + + // Extract token (support "Bearer " and "jwt " formats) + if (token.startsWith("Bearer ")) { + token = token.slice(7); + } else if (token.startsWith("jwt ")) { + token = token.slice(4); + } + + if (!token) { + return next(new Error("Invalid token")); + } + + // Verify JWT token using user's salt + if (!userDbService) { + console.error("[Aden Control WS] userDbService not initialized"); + return next(new Error("Server configuration error")); + } + const salt = await userDbService.findSaltByToken(token); + if (!salt) { + console.error("[Aden Control WS] No salt found for token"); + return next(new Error("Invalid token")); + } + // Token is signed with jwtSecret + salt + const verifySecret = jwtSecret ? 
jwtSecret + salt : salt; + const decoded = await new Promise>((resolve, reject) => { + jwt.verify(token!, verifySecret, (err, decoded) => { + if (err) reject(err); + else resolve(decoded as Record); + }); + }); + + // Store user info on socket + socket.user = decoded; + socket.teamId = decoded.current_team_id as string; + socket.policyId = + (socket.handshake.headers?.["x-policy-id"] as string) || + (socket.handshake.query?.policy_id as string) || + null; + socket.sdkInstanceId = + (socket.handshake.headers?.["x-sdk-instance-id"] as string) || + (socket.handshake.query?.instance_id as string) || + socket.id; + + console.log( + `[Aden Control WS] SDK connecting: ${socket.sdkInstanceId!.slice(0, 8)}... (team: ${socket.teamId})` + ); + + next(); + } catch (error) { + console.error("[Aden Control WS] Auth error:", (error as Error).message); + next(new Error("Authentication failed")); + } + }); + + // Handle connections + controlNamespace.on("connection", async (socket: AdenSocket) => { + const { teamId, policyId, sdkInstanceId } = socket; + + console.log( + `[Aden Control WS] SDK connected: ${sdkInstanceId!.slice(0, 8)}... 
(socket: ${socket.id}, team: ${teamId})` + ); + + // Track this instance by team + if (!connectedInstances.has(teamId!)) { + connectedInstances.set(teamId!, new Map()); + } + connectedInstances.get(teamId!)!.set(socket.id, { + socket, + instanceId: sdkInstanceId!, + policyId: policyId || null, + connectedAt: new Date(), + lastHeartbeat: new Date(), + }); + + // Join room for this team (for policy broadcasts) + socket.join(`team:${teamId}`); + // Also join policy-specific room if policy specified + if (policyId) { + socket.join(`team:${teamId}:policy:${policyId}`); + } + + // Send current policy immediately + try { + const policy = await controlService.getPolicy(teamId!, policyId || null); + socket.emit("message", { + type: "policy", + policy, + }); + } catch (error) { + console.error("[Aden Control WS] Error sending initial policy:", error); + } + + // Handle incoming messages from SDK + socket.on("message", async (data: MessageData | string) => { + try { + await handleSdkMessage(socket, data); + } catch (error) { + console.error("[Aden Control WS] Error handling message:", error); + socket.emit("message", { + type: "error", + error: (error as Error).message, + }); + } + }); + + // Handle direct event submission (alternative to message) + socket.on("event", async (event: Record) => { + try { + await controlService.processEvents(teamId!, policyId || null, [event as any]); + } catch (error) { + console.error("[Aden Control WS] Error processing event:", error); + } + }); + + // Handle disconnection + socket.on("disconnect", (reason: string) => { + console.log( + `[Aden Control WS] SDK disconnected: ${sdkInstanceId!.slice(0, 8)}... 
(reason: ${reason})` + ); + + // Remove from tracking + const instances = connectedInstances.get(teamId!); + if (instances) { + instances.delete(socket.id); + if (instances.size === 0) { + connectedInstances.delete(teamId!); + } + } + }); + + // Handle errors + socket.on("error", (error: Error) => { + console.error( + `[Aden Control WS] Socket error for ${sdkInstanceId!.slice(0, 8)}...:`, + error.message + ); + }); + + // Handle LLM events stream subscription (for dashboard real-time updates) + socket.on("subscribe-llm-events", () => { + const room = `team:${teamId}:llm-events`; + socket.join(room); + console.log(`[Aden Control WS] Socket ${socket.id} subscribed to ${room}`); + socket.emit("message", { + type: "subscribed", + stream: "llm-events", + teamId: teamId, + }); + }); + + socket.on("unsubscribe-llm-events", () => { + const room = `team:${teamId}:llm-events`; + socket.leave(room); + console.log(`[Aden Control WS] Socket ${socket.id} unsubscribed from ${room}`); + socket.emit("message", { + type: "unsubscribed", + stream: "llm-events", + teamId: teamId, + }); + }); + }); + + /** + * Handle incoming message from SDK + */ + async function handleSdkMessage(socket: AdenSocket, data: MessageData | string): Promise { + // Parse if string + let parsedData: MessageData; + if (typeof data === "string") { + parsedData = JSON.parse(data); + } else { + parsedData = data; + } + + const { teamId, policyId, sdkInstanceId } = socket; + + // Route based on event type + switch (parsedData.event_type) { + case "metric": + case "control": + case "heartbeat": + case "error": + // Process as event + await controlService.processEvents(teamId!, policyId || null, [parsedData as any]); + + // Update last heartbeat time + if (parsedData.event_type === "heartbeat") { + const instances = connectedInstances.get(teamId!); + const instance = instances?.get(socket.id); + if (instance) { + instance.lastHeartbeat = new Date(); + } + } + break; + + case "get_policy": + // Request for current 
policy + const policy = await controlService.getPolicy(teamId!, policyId || null); + socket.emit("message", { + type: "policy", + policy, + }); + break; + + default: + console.warn( + `[Aden Control WS] Unknown event type from ${sdkInstanceId!.slice(0, 8)}...: ${parsedData.event_type}` + ); + } + } + + /** + * Create emitter object for external use + */ + const emitter: ControlEmitter = { + /** + * Emit policy update to all SDK instances for a team/policy + * @param teamId - The team ID + * @param policyId - The policy ID (optional, broadcasts to all team instances if not specified) + * @param policy - The policy object + */ + emitPolicyUpdate(teamId: string | number, policyId: string | null, policy: unknown): void { + console.log(`[Aden Control WS] Broadcasting policy update for team ${teamId}`); + + // If policyId specified, emit only to instances using that policy + if (policyId) { + controlEmitter.to(`team:${teamId}:policy:${policyId}`).emit("message", { + type: "policy", + policy, + }); + } else { + // Broadcast to all team instances + controlEmitter.to(`team:${teamId}`).emit("message", { + type: "policy", + policy, + }); + } + }, + + /** + * Emit a command to all SDK instances for a team + */ + emitCommand(teamId: string | number, command: { action: string; [key: string]: unknown }): void { + console.log(`[Aden Control WS] Broadcasting command: ${command.action}`); + + controlEmitter.to(`team:${teamId}`).emit("message", { + type: "command", + command, + }); + }, + + /** + * Emit alert to team instances + */ + emitAlert(teamId: string | number, policyId: string | null, alert: unknown): void { + console.log(`[Aden Control WS] Broadcasting alert for team ${teamId}`); + + const room = policyId ? 
`team:${teamId}:policy:${policyId}` : `team:${teamId}`; + controlEmitter.to(room).emit("message", { + type: "alert", + alert, + }); + }, + + /** + * Emit to a specific SDK instance + */ + emitToInstance(teamId: string | number, instanceId: string, message: unknown): boolean { + const instances = connectedInstances.get(String(teamId)); + if (!instances) return false; + + for (const [, info] of instances) { + if (info.instanceId === instanceId) { + info.socket.emit("message", message); + return true; + } + } + return false; + }, + + /** + * Get connected instance count for a team (WebSocket + HTTP) + */ + getConnectedCount(teamId: string | number): number { + const teamKey = String(teamId); + const wsCount = connectedInstances.get(teamKey)?.size || 0; + const httpCount = httpInstances.get(teamKey)?.size || 0; + return wsCount + httpCount; + }, + + /** + * Get all connected instances info (for dashboard) + * Includes both WebSocket and HTTP-only agents + */ + getConnectedInstances(teamId: string | number): Array<{ + instance_id: string; + policy_id: string | null; + agent_name: string | null; + connected_at: string; + last_heartbeat: string; + connection_type: "websocket" | "http"; + status?: string; + }> { + const teamKey = String(teamId); + const results: Array<{ + instance_id: string; + policy_id: string | null; + agent_name: string | null; + connected_at: string; + last_heartbeat: string; + connection_type: "websocket" | "http"; + status?: string; + }> = []; + + // Add WebSocket-connected instances + const wsInstances = connectedInstances.get(teamKey); + if (wsInstances) { + for (const info of wsInstances.values()) { + results.push({ + instance_id: info.instanceId, + policy_id: info.policyId, + agent_name: null, // WebSocket connections don't have agent_name yet + connected_at: info.connectedAt.toISOString(), + last_heartbeat: info.lastHeartbeat.toISOString(), + connection_type: "websocket", + }); + } + } + + // Add HTTP-only instances + const httpInsts = 
httpInstances.get(teamKey); + if (httpInsts) { + for (const info of httpInsts.values()) { + results.push({ + instance_id: info.instanceId, + policy_id: info.policyId, + agent_name: info.agentName, + connected_at: info.firstSeen.toISOString(), + last_heartbeat: info.lastHeartbeat.toISOString(), + connection_type: "http", + status: info.status, + }); + } + } + + return results; + }, + + /** + * Get total connected SDK count across all teams (WebSocket + HTTP) + */ + getTotalConnectedCount(): number { + let total = 0; + for (const instances of connectedInstances.values()) { + total += instances.size; + } + for (const instances of httpInstances.values()) { + total += instances.size; + } + return total; + }, + }; + + // Note: Emitter is returned instead of stored globally + // Use app.locals.controlEmitter to access in routes + + console.log("[Aden Control WS] WebSocket namespace initialized at /v1/control/ws"); + + return emitter; +} + +export default initAdenControlSockets; +export { setUserDbService, registerHttpAgent }; diff --git a/hive/src/services/control/llm_event_batcher.ts b/hive/src/services/control/llm_event_batcher.ts new file mode 100644 index 00000000..5e1e247a --- /dev/null +++ b/hive/src/services/control/llm_event_batcher.ts @@ -0,0 +1,349 @@ +/** + * LLMEventBatcher - Batches LLM events for efficient WebSocket delivery + * + * Features: + * - Per-team in-memory buffers + * - 5-second flush interval (configurable) + * - Buffer size cap with graceful degradation (drop oldest) + * - Payload optimization (only essential fields) + * - Periodic cleanup for idle teams + */ + +const FLUSH_REASONS = { + TIMER: 1, + BUFFER_FULL: 2, + MANUAL: 3, +} as const; + +type FlushReason = typeof FLUSH_REASONS[keyof typeof FLUSH_REASONS]; + +interface TsdbEvent { + timestamp?: Date | string; + trace_id?: string; + model?: string; + provider?: string; + agent?: string; + cost_total?: number; + latency_ms?: number; + usage?: { + input_tokens?: number; + output_tokens?: 
number; + }; + usage_input_tokens?: number; + usage_output_tokens?: number; +} + +interface EventSummary { + timestamp: string | undefined; + trace_id: string | undefined; + model: string; + provider: string | null; + agent: string | null; + input_tokens: number; + output_tokens: number; + cost: number; + latency_ms: number | null; +} + +interface TeamBuffer { + teamId: string; + events: EventSummary[]; + flushTimer: ReturnType | null; + lastFlush: Date; + droppedCount: number; +} + +interface BatchPayload { + type: string; + teamId: string; + events: EventSummary[]; + meta: { + batchSize: number; + droppedCount: number; + windowStart: string | undefined; + windowEnd: string | undefined; + flushReason: FlushReason; + }; +} + +interface Emitter { + to: (room: string) => { emit: (event: string, payload: BatchPayload) => void }; +} + +interface BatcherOptions { + flushIntervalMs?: number; + maxBufferSize?: number; + maxEventsPerFlush?: number; +} + +class LLMEventBatcher { + private flushIntervalMs: number; + private maxBufferSize: number; + private maxEventsPerFlush: number; + private teamBuffers: Map; + private emitter: Emitter | null; + private totalEventsBuffered: number; + private totalBatchesSent: number; + private totalEventsDropped: number; + private _cleanupInterval: ReturnType; + + constructor(options: BatcherOptions = {}) { + // Configuration + this.flushIntervalMs = options.flushIntervalMs || 5000; // 5 seconds + this.maxBufferSize = options.maxBufferSize || 500; // Max events per team buffer + this.maxEventsPerFlush = options.maxEventsPerFlush || 100; // Max events per batch + + // State + this.teamBuffers = new Map(); // teamId -> TeamBuffer + this.emitter = null; // Set by setEmitter() + + // Metrics + this.totalEventsBuffered = 0; + this.totalBatchesSent = 0; + this.totalEventsDropped = 0; + + // Start periodic cleanup + this._cleanupInterval = setInterval(() => { + this.cleanup(); + }, 300000); // Every 5 minutes + } + + /** + * Set the Socket.IO 
emitter for broadcasting + * Called during control_sockets initialization + * @param {Object} controlEmitter - Socket.IO namespace emitter + */ + setEmitter(controlEmitter: Emitter): void { + this.emitter = controlEmitter; + console.log("[LLMEventBatcher] Emitter configured"); + } + + /** + * Add events to the buffer for a team + * Called from control_service.js after TSDB insert + * @param {string|number} teamId - Team identifier + * @param {Array} tsdbEvents - Array of TSDB events + */ + add(teamId: string | number, tsdbEvents: TsdbEvent[]): void { + if (!tsdbEvents || tsdbEvents.length === 0) return; + + const teamIdStr = String(teamId); + + // Transform to lightweight summaries + const summaries = tsdbEvents.map((e) => this._transformToSummary(e)); + + // Get or create buffer + let buffer = this.teamBuffers.get(teamIdStr); + if (!buffer) { + buffer = this._createBuffer(teamIdStr); + this.teamBuffers.set(teamIdStr, buffer); + } + + // Add events with overflow handling + this._addToBuffer(buffer, summaries); + + // Start/reset flush timer if not already running + this._scheduleFlush(teamIdStr, buffer); + } + + /** + * Transform full TSDB event to lightweight summary + * Only includes fields needed for dashboard display + * @param {Object} event - Full TSDB event + * @returns {Object} Lightweight event summary + */ + private _transformToSummary(event: TsdbEvent): EventSummary { + // Handle both nested usage object (from transformMetricToTsdbEvent) + // and flat fields (from TSDB query results) + const inputTokens = event.usage?.input_tokens ?? event.usage_input_tokens ?? 0; + const outputTokens = event.usage?.output_tokens ?? event.usage_output_tokens ?? 0; + + return { + timestamp: event.timestamp instanceof Date ? 
event.timestamp.toISOString() : event.timestamp, + trace_id: event.trace_id, + model: event.model || "", + provider: event.provider || null, + agent: event.agent || null, + input_tokens: inputTokens, + output_tokens: outputTokens, + cost: event.cost_total || 0, + latency_ms: event.latency_ms || null, + }; + } + + /** + * Add events to buffer with overflow handling + * @param {Object} buffer - Team buffer + * @param {Array} summaries - Event summaries to add + */ + private _addToBuffer(buffer: TeamBuffer, summaries: EventSummary[]): void { + for (const summary of summaries) { + if (buffer.events.length >= this.maxBufferSize) { + // Drop oldest event + buffer.events.shift(); + buffer.droppedCount++; + this.totalEventsDropped++; + } + buffer.events.push(summary); + this.totalEventsBuffered++; + } + + // Force flush if buffer is full + if (buffer.events.length >= this.maxBufferSize) { + this._flush(buffer.teamId, FLUSH_REASONS.BUFFER_FULL); + } + } + + /** + * Schedule flush timer for a team + * @param {string} teamId - Team identifier + * @param {Object} buffer - Team buffer + */ + private _scheduleFlush(teamId: string, buffer: TeamBuffer): void { + // Don't reschedule if timer already running + if (buffer.flushTimer) return; + + buffer.flushTimer = setTimeout(() => { + this._flush(teamId, FLUSH_REASONS.TIMER); + }, this.flushIntervalMs); + } + + /** + * Flush buffered events to WebSocket + * @param {string} teamId - Team identifier + * @param {number} flushReason - Reason for flush + */ + private _flush(teamId: string, flushReason: FlushReason): void { + const buffer = this.teamBuffers.get(teamId); + if (!buffer || buffer.events.length === 0) return; + + // Clear timer + if (buffer.flushTimer) { + clearTimeout(buffer.flushTimer); + buffer.flushTimer = null; + } + + // Extract batch (up to maxEventsPerFlush) + const batch = buffer.events.splice(0, this.maxEventsPerFlush); + const droppedCount = buffer.droppedCount; + buffer.droppedCount = 0; + buffer.lastFlush = new 
Date(); + + // Build payload + const payload: BatchPayload = { + type: "llm-events-batch", + teamId: teamId, + events: batch, + meta: { + batchSize: batch.length, + droppedCount: droppedCount, + windowStart: batch[0]?.timestamp, + windowEnd: batch[batch.length - 1]?.timestamp, + flushReason: flushReason, + }, + }; + + // Emit to team room + if (this.emitter) { + const room = `team:${teamId}:llm-events`; + this.emitter.to(room).emit("message", payload); + this.totalBatchesSent++; + + if (batch.length > 0) { + console.log( + `[LLMEventBatcher] Flushed ${batch.length} events to ${room} ` + + `(dropped: ${droppedCount}, reason: ${flushReason})` + ); + } + } + + // Schedule next flush if buffer still has events + if (buffer.events.length > 0) { + this._scheduleFlush(teamId, buffer); + } + } + + /** + * Create a new buffer for a team + * @param {string} teamId - Team identifier + * @returns {Object} New team buffer + */ + private _createBuffer(teamId: string): TeamBuffer { + return { + teamId: teamId, + events: [], + flushTimer: null, + lastFlush: new Date(), + droppedCount: 0, + }; + } + + /** + * Manually flush all buffers (useful for shutdown) + */ + flushAll(): void { + for (const [teamId] of this.teamBuffers) { + this._flush(teamId, FLUSH_REASONS.MANUAL); + } + } + + /** + * Get metrics for monitoring + * @returns {Object} Batcher metrics + */ + getMetrics(): { activeTeams: number; totalBuffered: number; totalEventsBuffered: number; totalBatchesSent: number; totalEventsDropped: number } { + const activeTeams = this.teamBuffers.size; + const totalBuffered = Array.from(this.teamBuffers.values()).reduce( + (sum, b) => sum + b.events.length, + 0 + ); + + return { + activeTeams, + totalBuffered, + totalEventsBuffered: this.totalEventsBuffered, + totalBatchesSent: this.totalBatchesSent, + totalEventsDropped: this.totalEventsDropped, + }; + } + + /** + * Cleanup buffers for teams with no recent activity + * Prevents memory leaks from inactive teams + * @param {number} 
maxIdleMs - Max idle time before cleanup (default: 5 minutes) + */ + cleanup(maxIdleMs = 300000): void { + const now = Date.now(); + let cleaned = 0; + + for (const [teamId, buffer] of this.teamBuffers.entries()) { + if (buffer.events.length === 0 && now - buffer.lastFlush.getTime() > maxIdleMs) { + if (buffer.flushTimer) { + clearTimeout(buffer.flushTimer); + } + this.teamBuffers.delete(teamId); + cleaned++; + } + } + + if (cleaned > 0) { + console.log(`[LLMEventBatcher] Cleaned up ${cleaned} idle team buffers`); + } + } + + /** + * Shutdown the batcher (cleanup intervals and flush remaining) + */ + shutdown(): void { + if (this._cleanupInterval) { + clearInterval(this._cleanupInterval); + } + this.flushAll(); + console.log("[LLMEventBatcher] Shutdown complete"); + } +} + +// Singleton instance +const llmEventBatcher = new LLMEventBatcher(); + +export default llmEventBatcher; diff --git a/hive/src/services/mongo/mongo_db.ts b/hive/src/services/mongo/mongo_db.ts new file mode 100644 index 00000000..dcecfd32 --- /dev/null +++ b/hive/src/services/mongo/mongo_db.ts @@ -0,0 +1,26 @@ +import config from "../../config"; +import { MongoClient } from "mongodb"; + +declare const _ACHO_MG_DB: undefined | { db: (name: string) => unknown }; + +let client: MongoClient | null = null; + +const getMongoClient = async (): Promise => { + if (client) return client; + if (!config.mongodb.url) { + throw new Error("Missing MONGODB_URL in environment"); + } + client = new MongoClient(config.mongodb.url); + await client.connect(); + return client; +}; + +const getMongoDb = async (dbName = config.mongodb.dbName): Promise => { + if (typeof _ACHO_MG_DB !== "undefined" && _ACHO_MG_DB && typeof _ACHO_MG_DB.db === "function") { + return _ACHO_MG_DB.db(dbName); + } + const c = await getMongoClient(); + return c.db(dbName); +}; + +export { getMongoDb }; diff --git a/hive/src/services/quickstart/quickstart_service.ts b/hive/src/services/quickstart/quickstart_service.ts new file mode 100644 index 
00000000..5f3b4a4e --- /dev/null +++ b/hive/src/services/quickstart/quickstart_service.ts @@ -0,0 +1,227 @@ +/** + * Quickstart Document Generation Service + * Template-based SDK quickstart documentation generator + * + * Structure: + * - docs/aden-sdk-documents/config/*.json - Configuration for vendors, languages, frameworks + * - docs/aden-sdk-documents/templates/{language}/*.md - Complete template files + */ + +import fs from "fs"; +import path from "path"; + +// Base paths +const DOCS_BASE = path.join(__dirname, "../../../docs/aden-sdk-documents"); +const CONFIG_PATH = path.join(DOCS_BASE, "config"); +const TEMPLATES_PATH = path.join(DOCS_BASE, "templates"); + +interface VendorConfig { + name: string; + envVarComment?: string; +} + +interface LanguageConfig { + name: string; +} + +interface FrameworkConfig { + name: string; + description: string; + templateFile: string; + pythonSupport: boolean; + typescriptSupport: boolean; +} + +interface ConfigCache { + vendors: Record; + languages: Record; + frameworks: Record; +} + +// Cache for configs and templates +let configCache: ConfigCache | null = null; +let templateCache: Record = {}; + +/** + * Load all configuration files + */ +function loadConfigs(): ConfigCache { + if (configCache) return configCache; + + configCache = { + vendors: JSON.parse( + fs.readFileSync(path.join(CONFIG_PATH, "llm-vendors.json"), "utf-8") + ), + languages: JSON.parse( + fs.readFileSync(path.join(CONFIG_PATH, "sdk-languages.json"), "utf-8") + ), + frameworks: JSON.parse( + fs.readFileSync(path.join(CONFIG_PATH, "agent-frameworks.json"), "utf-8") + ), + }; + + return configCache; +} + +/** + * Load a template file + */ +function loadTemplate(language: string, templateName: string): string | null { + const cacheKey = `${language}/${templateName}`; + if (templateCache[cacheKey]) return templateCache[cacheKey]; + + const templatePath = path.join( + TEMPLATES_PATH, + language, + `${templateName}.md` + ); + + if (!fs.existsSync(templatePath)) 
{ + return null; + } + + templateCache[cacheKey] = fs.readFileSync(templatePath, "utf-8"); + return templateCache[cacheKey]; +} + +/** + * Clear caches (useful for development/testing) + */ +function clearCaches(): void { + configCache = null; + templateCache = {}; +} + +/** + * Replace variables in template: {{variableName}} + */ +function replaceVariables( + template: string, + variables: Record +): string { + return template.replace(/\{\{(\w+)\}\}/g, (_match, key) => { + return variables[key] !== undefined ? variables[key] : ""; + }); +} + +interface GenerateQuickstartParams { + llmVendor?: string; + sdkLanguage?: string; + agentFramework: string; + apiKey: string; +} + +/** + * Generate quickstart document based on parameters + */ +function generateQuickstart({ + llmVendor = "openai", + sdkLanguage = "python", + agentFramework, + apiKey, +}: GenerateQuickstartParams): string { + const config = loadConfigs(); + + // Validate inputs + if (!config.vendors[llmVendor]) { + throw new Error( + `Invalid LLM vendor: ${llmVendor}. Valid options: ${Object.keys( + config.vendors + ).join(", ")}` + ); + } + if (!config.languages[sdkLanguage]) { + throw new Error( + `Invalid SDK language: ${sdkLanguage}. Valid options: ${Object.keys( + config.languages + ).join(", ")}` + ); + } + if (!config.frameworks[agentFramework]) { + throw new Error( + `Invalid agent framework: ${agentFramework}. 
Valid options: ${Object.keys( + config.frameworks + ).join(", ")}` + ); + } + if (!apiKey) { + throw new Error("API key is required"); + } + + const vendor = config.vendors[llmVendor]; + const framework = config.frameworks[agentFramework]; + + // Check language support + if (sdkLanguage === "python" && !framework.pythonSupport) { + throw new Error(`${framework.name} does not support Python`); + } + if (sdkLanguage !== "python" && !framework.typescriptSupport) { + throw new Error(`${framework.name} does not support ${sdkLanguage}`); + } + + // Load template + const template = loadTemplate(sdkLanguage, framework.templateFile); + + if (!template) { + throw new Error( + `Template not found: ${sdkLanguage}/${framework.templateFile}` + ); + } + + // Build variables + const variables: Record = { + apiKey, + serverUrl: process.env.HIVE_HOST || "https://hive.adenhq.com", + envVarComment: vendor.envVarComment || "", + }; + + // Replace variables and return + return replaceVariables(template, variables); +} + +interface QuickstartOptions { + llmVendors: Array<{ id: string; name: string }>; + sdkLanguages: Array<{ id: string; name: string }>; + agentFrameworks: Array<{ + id: string; + name: string; + description: string; + pythonSupport: boolean; + typescriptSupport: boolean; + }>; +} + +/** + * Get available options for quickstart generation + */ +function getQuickstartOptions(): QuickstartOptions { + const config = loadConfigs(); + + return { + llmVendors: Object.entries(config.vendors).map(([key, value]) => ({ + id: key, + name: value.name, + })), + sdkLanguages: Object.entries(config.languages).map(([key, value]) => ({ + id: key, + name: value.name, + })), + agentFrameworks: Object.entries(config.frameworks).map(([key, value]) => ({ + id: key, + name: value.name, + description: value.description, + pythonSupport: value.pythonSupport, + typescriptSupport: value.typescriptSupport, + })), + }; +} + +/** + * Reload configs (useful after updating config files) + */ +function 
reloadConfigs(): ConfigCache { + clearCaches(); + return loadConfigs(); +} + +export { generateQuickstart, getQuickstartOptions, reloadConfigs, clearCaches }; diff --git a/hive/src/services/tsdb/00-init-timescaledb.sql b/hive/src/services/tsdb/00-init-timescaledb.sql new file mode 100644 index 00000000..40e8046c --- /dev/null +++ b/hive/src/services/tsdb/00-init-timescaledb.sql @@ -0,0 +1,11 @@ +-- Initialize TimescaleDB extension +-- This must run BEFORE schema.sql to enable hypertables and continuous aggregates + +-- Create TimescaleDB extension +CREATE EXTENSION IF NOT EXISTS timescaledb; + +-- Log successful initialization +DO $$ +BEGIN + RAISE NOTICE 'TimescaleDB extension initialized successfully'; +END$$; diff --git a/hive/src/services/tsdb/analytics_service.ts b/hive/src/services/tsdb/analytics_service.ts new file mode 100644 index 00000000..ea52e330 --- /dev/null +++ b/hive/src/services/tsdb/analytics_service.ts @@ -0,0 +1,748 @@ +/** + * TSDB Analytics Service + * Computes windowed aggregations from llm_events for dashboard analytics. 
+ */
+
+import { PoolClient } from 'pg';
+import pricingService from './pricing_service';
+
+const BUCKETS = [
+  { label: '0-1s', min: 0, max: 1000 },
+  { label: '1-2s', min: 1000, max: 2000 },
+  { label: '2-5s', min: 2000, max: 5000 },
+  { label: '5-10s', min: 5000, max: 10000 },
+  { label: '10-20s', min: 10000, max: 20000 },
+  { label: '20s+', min: 20000, max: null as number | null }, // null max = open-ended top bucket
+];
+
+interface WindowDef {
+  label: string;
+  start: Date | null; // null start = unbounded (all_time)
+  end: Date;
+}
+
+interface DailyRow {
+  bucket: string;
+  requests: number;
+  cost_total: number;
+  tokens: {
+    total: number;
+    input: number;
+    output: number;
+    cached: number;
+  };
+}
+
+interface LatencyRow {
+  bucket: string;
+  count: number;
+  avg_ms: number | null;
+  p50_ms: number | null;
+  p95_ms: number | null;
+  p99_ms: number | null;
+}
+
+interface ModelCostRow {
+  model: string;
+  cost_total: number;
+  cached_tokens: number;
+}
+
+interface AgentCostRow {
+  agent: string;
+  requests: number;
+  cost_total: number;
+  input_tokens: number;
+  output_tokens: number;
+  avg_latency_ms: number | null;
+}
+
+// Coerce DB values (often strings from pg numerics) to finite numbers.
+const toNumber = (val: unknown, fallback = 0): number => {
+  const n = Number(val);
+  return Number.isFinite(n) ? n : fallback;
+};
+
+// Nearest-rank (floor) percentile over a copy of values; null for empty input.
+const percentile = (values: number[], pct: number): number | null => {
+  if (!values.length) return null;
+  const sorted = [...values].sort((a, b) => a - b);
+  if (sorted.length === 1) return sorted[0];
+  const idx = Math.max(0, Math.min(sorted.length - 1, Math.floor(pct * (sorted.length - 1))));
+  return sorted[idx];
+};
+
+// Monday 00:00 UTC of the week containing d (ISO-style week start).
+const startOfWeekUtc = (d: Date): Date => {
+  const day = d.getUTCDay();
+  const diff = (day + 6) % 7; // days elapsed since Monday (Sun=0 maps to 6)
+  const monday = new Date(Date.UTC(d.getUTCFullYear(), d.getUTCMonth(), d.getUTCDate(), 0, 0, 0, 0));
+  monday.setUTCDate(monday.getUTCDate() - diff);
+  return monday;
+};
+
+const startOfMonthUtc = (d: Date): Date =>
+  new Date(Date.UTC(d.getUTCFullYear(), d.getUTCMonth(), 1, 0, 0, 0, 0));
+
+const startOfDayUtc = (d: Date): Date =>
+  new Date(Date.UTC(d.getUTCFullYear(), d.getUTCMonth(), d.getUTCDate(), 0, 0, 0, 0));
+
+// Map a window label (case-insensitive, dash/underscore tolerant) to a [start, end) range.
+// Unknown labels fall back to 'this_month'.
+export const parseAnalyticsWindow = (label: string): WindowDef => {
+  const now = new Date();
+  switch ((label || '').toLowerCase()) {
+    case 'all_time':
+    case 'all-time':
+    case 'alltime':
+      return { label: 'all_time', start: null, end: now };
+    case 'today': {
+      const start = startOfDayUtc(now);
+      return { label: 'today', start, end: now };
+    }
+    case 'last_2_weeks':
+    case 'last-2-weeks':
+    case 'last2weeks': {
+      const start = new Date(now.getTime() - 14 * 24 * 3600 * 1000);
+      return { label: 'last_2_weeks', start, end: now };
+    }
+    case 'this_week': {
+      const start = startOfWeekUtc(now);
+      return { label: 'this_week', start, end: now };
+    }
+    case 'this_month':
+    default: {
+      const start = startOfMonthUtc(now);
+      return { label: 'this_month', start, end: now };
+    }
+  }
+};
+
+// Label for a latency value; buckets are [min, max) with a null max meaning open-ended.
+const bucketLatency = (latMs: number, buckets: typeof BUCKETS): string | null => {
+  if (latMs === null || latMs === undefined) return null; // defensive: callers may pass null despite the type
+  for (const b of buckets) {
+    if (b.max === null) {
+      if (latMs >= b.min) return b.label;
+    } else if (latMs >= b.min && latMs < b.max) {
+      return b.label;
+    }
+  }
+  return null;
+};
+
+// Normalize raw bucket counts into the fixed BUCKETS order with shares (null when no traffic).
+const buildLatencyDistribution = (rows: { bucket: string; count: number }[]) => {
+  const counts = new Map<string, number>(rows.map((r) => [r.bucket, r.count]));
+  const total = rows.reduce((acc, r) => acc + (r.count || 0), 0);
+  return BUCKETS.map((b) => {
+    const count = counts.get(b.label) || 0;
+    return {
+      bucket: b.label,
+      count,
+      share: total ? count / total : null,
+    };
+  });
+};
+
+// UTC bucket key: 'YYYY-MM-DDTHH:00:00Z' for hour resolution, 'YYYY-MM-DD' otherwise.
+const bucketLabel = (date: Date, resolution: string): string => {
+  if (resolution === 'hour') {
+    const h = new Date(
+      Date.UTC(date.getUTCFullYear(), date.getUTCMonth(), date.getUTCDate(), date.getUTCHours(), 0, 0, 0)
+    );
+    return h.toISOString().slice(0, 13) + ':00:00Z';
+  }
+  return date.toISOString().slice(0, 10);
+};
+
+// Read pre-aggregated daily rows from the continuous aggregate; [start, end) filter.
+const fetchDailyCA = async ({
+  client,
+  start,
+  end,
+}: {
+  client: PoolClient;
+  start: Date | null;
+  end: Date | null;
+}): Promise<DailyRow[]> => {
+  const params: (Date | null)[] = [];
+  const conds: string[] = [];
+  if (start) {
+    params.push(start);
+    conds.push(`bucket >= $${params.length}`);
+  }
+  if (end) {
+    params.push(end);
+    conds.push(`bucket < $${params.length}`);
+  }
+  const sql = `
+    SELECT bucket, requests, cost_total, input_tokens, output_tokens, total_tokens, cached_tokens
+    FROM llm_events_daily_ca
+    ${conds.length ? `WHERE ${conds.join(' AND ')}` : ''}
+    ORDER BY bucket ASC
+  `;
+  const { rows } = await client.query(sql, params);
+  return rows.map((r: any) => ({
+    bucket: r.bucket instanceof Date ? r.bucket.toISOString().slice(0, 10) : r.bucket,
+    requests: Number(r.requests) || 0,
+    cost_total: toNumber(r.cost_total, 0),
+    tokens: {
+      total: toNumber(r.total_tokens, 0),
+      input: toNumber(r.input_tokens, 0),
+      output: toNumber(r.output_tokens, 0),
+      cached: toNumber(r.cached_tokens, 0),
+    },
+  }));
+};
+
+// Aggregate today's partial day straight from the base table (the CA lags behind).
+// Returns null when there are no rows yet. NOTE(review): upper bound is inclusive
+// (<=) unlike the other fetchers — confirm intended.
+const fetchTodayFromBaseTable = async ({
+  client,
+  todayStart,
+  end,
+}: {
+  client: PoolClient;
+  todayStart: Date;
+  end: Date;
+}): Promise<DailyRow | null> => {
+  const sql = `
+    SELECT
+      $1::date as bucket,
+      COUNT(*) as requests,
+      COALESCE(SUM(cost_total), 0) as cost_total,
+      COALESCE(SUM(usage_input_tokens), 0) as input_tokens,
+      COALESCE(SUM(usage_output_tokens), 0) as output_tokens,
+      COALESCE(SUM(COALESCE(usage_total_tokens, usage_input_tokens + usage_output_tokens)), 0) as total_tokens,
+      COALESCE(SUM(usage_cached_tokens), 0) as cached_tokens
+    FROM llm_events
+    WHERE "timestamp" >= $1 AND "timestamp" <= $2
+  `;
+  const { rows } = await client.query(sql, [todayStart, end]);
+  if (!rows.length || rows[0].requests === 0 || rows[0].requests === '0') {
+    return null;
+  }
+  const r = rows[0];
+  return {
+    bucket: todayStart.toISOString().slice(0, 10),
+    requests: Number(r.requests) || 0,
+    cost_total: toNumber(r.cost_total, 0),
+    tokens: {
+      total: toNumber(r.total_tokens, 0),
+      input: toNumber(r.input_tokens, 0),
+      output: toNumber(r.output_tokens, 0),
+      cached: toNumber(r.cached_tokens, 0),
+    },
+  };
+};
+
+// Per-day latency percentiles computed server-side via time_bucket + percentile_cont.
+const fetchLatencyDaily = async ({
+  client,
+  start,
+  end,
+}: {
+  client: PoolClient;
+  start: Date | null;
+  end: Date | null;
+}): Promise<LatencyRow[]> => {
+  const params: (string | Date)[] = ['1 day'];
+  const conds = ['latency_ms IS NOT NULL'];
+  if (start) {
+    params.push(start);
+    conds.push(`"timestamp" >= $${params.length}`);
+  }
+  if (end) {
+    params.push(end);
+    conds.push(`"timestamp" < $${params.length}`);
+  }
+  const sql = `
+    SELECT
+      time_bucket($1::interval, "timestamp") AS bucket,
+      COUNT(latency_ms) AS count,
+      AVG(latency_ms) AS avg_ms,
+      percentile_cont(0.5) WITHIN GROUP (ORDER BY latency_ms) AS p50_ms,
+      percentile_cont(0.95) WITHIN GROUP (ORDER BY latency_ms) AS p95_ms,
+      percentile_cont(0.99) WITHIN GROUP (ORDER BY latency_ms) AS p99_ms
+    FROM llm_events
+    WHERE ${conds.join(' AND ')}
+    GROUP BY 1
+    ORDER BY 1 ASC
+  `;
+  const { rows } = await client.query(sql, params);
+  return rows.map((r: any) => ({
+    bucket: r.bucket instanceof Date ? r.bucket.toISOString().slice(0, 10) : r.bucket,
+    count: Number(r.count) || 0,
+    avg_ms: r.avg_ms === null ? null : Number(r.avg_ms),
+    p50_ms: r.p50_ms === null ? null : Number(r.p50_ms),
+    p95_ms: r.p95_ms === null ? null : Number(r.p95_ms),
+    p99_ms: r.p99_ms === null ? null : Number(r.p99_ms),
+  }));
+};
+
+// Histogram counts per latency bucket; CASE edges mirror BUCKETS exactly.
+const fetchLatencyDistributionDaily = async ({
+  client,
+  start,
+  end,
+}: {
+  client: PoolClient;
+  start: Date | null;
+  end: Date | null;
+}): Promise<{ bucket: string; count: number }[]> => {
+  const params: Date[] = [];
+  const conds = ['latency_ms IS NOT NULL'];
+  if (start) {
+    params.push(start);
+    conds.push(`"timestamp" >= $${params.length}`);
+  }
+  if (end) {
+    params.push(end);
+    conds.push(`"timestamp" < $${params.length}`);
+  }
+  const sql = `
+    SELECT
+      CASE
+        WHEN latency_ms < 1000 THEN '0-1s'
+        WHEN latency_ms < 2000 THEN '1-2s'
+        WHEN latency_ms < 5000 THEN '2-5s'
+        WHEN latency_ms < 10000 THEN '5-10s'
+        WHEN latency_ms < 20000 THEN '10-20s'
+        ELSE '20s+'
+      END AS bucket,
+      COUNT(*) AS count
+    FROM llm_events
+    WHERE ${conds.join(' AND ')}
+    GROUP BY 1
+  `;
+  const { rows } = await client.query(sql, params);
+  return rows.map((r: any) => ({ bucket: r.bucket, count: Number(r.count) || 0 }));
+};
+
+// Cost + cached-token totals per model; rows with a NULL model are dropped.
+const fetchModelCost = async ({
+  client,
+  start,
+  end,
+}: {
+  client: PoolClient;
+  start: Date | null;
+  end: Date | null;
+}): Promise<ModelCostRow[]> => {
+  const params: Date[] = [];
+  const conds: string[] = [];
+  if (start) {
+    params.push(start);
+    conds.push(`"timestamp" >= $${params.length}`);
+  }
+  if (end) {
+    params.push(end);
+    conds.push(`"timestamp" < $${params.length}`);
+  }
+  const sql = `
+    SELECT model,
+      SUM(cost_total) AS cost_total,
+      SUM(usage_cached_tokens) AS cached_tokens
+    FROM llm_events
+    ${conds.length ? `WHERE ${conds.join(' AND ')}` : ''}
+    GROUP BY model
+  `;
+  const { rows } = await client.query(sql, params);
+  return rows
+    .filter((r: any) => r.model)
+    .map((r: any) => ({
+      model: r.model,
+      cost_total: toNumber(r.cost_total, 0),
+      cached_tokens: toNumber(r.cached_tokens, 0),
+    }));
+};
+
+// Per-agent request/cost/token/latency rollup; rows with a NULL agent are dropped.
+const fetchAgentCost = async ({
+  client,
+  start,
+  end,
+}: {
+  client: PoolClient;
+  start: Date | null;
+  end: Date | null;
+}): Promise<AgentCostRow[]> => {
+  const params: Date[] = [];
+  const conds: string[] = [];
+  if (start) {
+    params.push(start);
+    conds.push(`"timestamp" >= $${params.length}`);
+  }
+  if (end) {
+    params.push(end);
+    conds.push(`"timestamp" < $${params.length}`);
+  }
+  const sql = `
+    SELECT agent,
+      COUNT(*) AS requests,
+      SUM(cost_total) AS cost_total,
+      SUM(usage_input_tokens) AS input_tokens,
+      SUM(usage_output_tokens) AS output_tokens,
+      AVG(latency_ms) AS avg_latency_ms
+    FROM llm_events
+    ${conds.length ? `WHERE ${conds.join(' AND ')}` : ''}
+    GROUP BY agent
+  `;
+  const { rows } = await client.query(sql, params);
+  return rows
+    .filter((r: any) => r.agent)
+    .map((r: any) => ({
+      agent: r.agent,
+      requests: Number(r.requests) || 0,
+      cost_total: toNumber(r.cost_total, 0),
+      input_tokens: toNumber(r.input_tokens, 0),
+      output_tokens: toNumber(r.output_tokens, 0),
+      avg_latency_ms: r.avg_latency_ms === null ? null : Number(r.avg_latency_ms),
+    }));
+};
+
+// Build the full analytics payload for a window. Day resolution prefers the
+// continuous aggregate (merging today's partial day from the base table); any
++// failure on that path falls through to a full base-table scan.
+export const buildAnalytics = async ({
+  windowLabel,
+  client,
+  resolution = 'day',
+}: {
+  windowLabel: string;
+  client: PoolClient;
+  resolution?: 'day' | 'hour';
+}) => {
+  const windowDef = parseAnalyticsWindow(windowLabel);
+
+  if (resolution === 'day') {
+    try {
+      const now = windowDef.end || new Date();
+      const todayMidnight = new Date(
+        Date.UTC(now.getUTCFullYear(), now.getUTCMonth(), now.getUTCDate(), 0, 0, 0, 0)
+      );
+
+      const caRows = await fetchDailyCA({ client, start: windowDef.start, end: todayMidnight });
+
+      let todayData: DailyRow | null = null;
+      if (now >= todayMidnight) {
+        try {
+          todayData = await fetchTodayFromBaseTable({ client, todayStart: todayMidnight, end: now });
+        } catch {
+          // Ignore errors fetching today's data
+        }
+      }
+
+      const allRows = [...(caRows || [])];
+      if (todayData) {
+        const todayBucket = todayData.bucket;
+        const existingIdx = allRows.findIndex((r) => r.bucket === todayBucket);
+        if (existingIdx >= 0) {
+          allRows[existingIdx] = todayData; // base table wins over a stale CA row for today
+        } else {
+          allRows.push(todayData);
+        }
+      }
+
+      if (allRows && allRows.length) {
+        const total_cost = allRows.reduce((acc, r) => acc + (r.cost_total || 0), 0);
+        const total_requests = allRows.reduce((acc, r) => acc + (r.requests || 0), 0);
+        const total_tokens = allRows.reduce((acc, r) => acc + (r.tokens.total || 0), 0);
+
+        const bucket_cost = allRows.map((r) => ({ bucket: r.bucket, cost_total: r.cost_total }));
+        const bucket_requests = allRows.map((r) => ({ bucket: r.bucket, requests: r.requests }));
+        const bucket_tokens = allRows.map((r) => ({
+          bucket: r.bucket,
+          total_tokens: r.tokens.total,
+          input_tokens: r.tokens.input,
+          output_tokens: r.tokens.output,
+          cached_tokens: r.tokens.cached,
+        }));
+
+        const latencyBuckets = await fetchLatencyDaily({
+          client,
+          start: windowDef.start,
+          end: windowDef.end,
+        });
+        const latencyDistributionRows = await fetchLatencyDistributionDaily({
+          client,
+          start: windowDef.start,
+          end: windowDef.end,
+        });
+        const latency_distribution = buildLatencyDistribution(latencyDistributionRows);
+        const latency_total = latencyDistributionRows.reduce((acc, r) => acc + (r.count || 0), 0);
+        const avg_latency_ms =
+          latencyBuckets.reduce(
+            (acc, r) => acc + (r.avg_ms !== null ? r.avg_ms * (r.count || 0) : 0),
+            0
+          ) / (latency_total || 1);
+
+        const modelRows = await fetchModelCost({ client, start: windowDef.start, end: windowDef.end });
+        const models = modelRows
+          .sort((a, b) => (b.cost_total || 0) - (a.cost_total || 0))
+          .map((r) => ({
+            model: r.model,
+            cost_total: r.cost_total,
+            share: total_cost ? r.cost_total / total_cost : null,
+          }));
+        const cache_savings = modelRows.reduce((acc, r) => {
+          const pricing = pricingService.getModelPricingSync(r.model || '');
+          return acc + (r.cached_tokens / 1_000_000) * pricing.input;
+        }, 0);
+
+        const agentRows = await fetchAgentCost({ client, start: windowDef.start, end: windowDef.end });
+        const agents = agentRows
+          .sort((a, b) => (b.cost_total || 0) - (a.cost_total || 0))
+          .map((r) => ({
+            agent: r.agent,
+            requests: r.requests,
+            cost_total: r.cost_total,
+            share: total_cost ? r.cost_total / total_cost : null,
+            avg_latency_ms: r.avg_latency_ms,
+          }));
+
+        return {
+          window: {
+            label: windowDef.label,
+            start: windowDef.start ? windowDef.start.toISOString() : null,
+            end: windowDef.end ? windowDef.end.toISOString() : null,
+          },
+          summary: {
+            total_cost,
+            total_requests,
+            total_tokens,
+            avg_latency_ms: Number.isFinite(avg_latency_ms) ? avg_latency_ms : null,
+            cache_savings,
+          },
+          timeline: {
+            resolution: 'day',
+            daily: {
+              cost: bucket_cost,
+              requests: bucket_requests,
+              tokens: bucket_tokens,
+              latency_percentiles: latencyBuckets,
+            },
+          },
+          cost_by_model: {
+            total_cost,
+            models,
+          },
+          cost_by_agent: {
+            total_cost,
+            agents,
+          },
+          latency_distribution: {
+            total: latency_total,
+            buckets: latency_distribution,
+          },
+        };
+      }
+    } catch (err) {
+      // Fall through to base-table path
+    }
+  }
+
+  // Fallback: scan base table directly
+  const params: Date[] = [];
+  const conditions: string[] = [];
+  if (windowDef.start) {
+    params.push(windowDef.start);
+    conditions.push(`"timestamp" >= $${params.length}`);
+  }
+  if (windowDef.end) {
+    params.push(windowDef.end);
+    conditions.push(`"timestamp" < $${params.length}`);
+  }
+
+  const sql = `
+    SELECT
+      "timestamp",
+      model,
+      agent,
+      latency_ms,
+      cost_total,
+      usage_input_tokens,
+      usage_output_tokens,
+      usage_total_tokens,
+      usage_cached_tokens
+    FROM llm_events
+    ${conditions.length ? `WHERE ${conditions.join(' AND ')}` : ''}
+    ORDER BY "timestamp" ASC
+  `;
+
+  const { rows } = await client.query(sql, params);
+
+  const bucketCost = new Map<string, number>();
+  const bucketRequests = new Map<string, number>();
+  const bucketTokens = new Map<string, { total: number; input: number; output: number; cached: number }>();
+  const bucketLatencies = new Map<string, number[]>();
+  const modelCost = new Map<string, number>();
+  const agentStats = new Map<string, { cost: number; requests: number; latencies: number[] }>();
+  const latencyBucketCounts = new Map<string, number>();
+
+  let totalCost = 0;
+  let totalRequests = 0;
+  let totalTokens = 0;
+  let totalLatency = 0;
+  let latencyCount = 0;
+  let cacheSavings = 0;
+
+  rows.forEach((r: any) => {
+    const ts = r.timestamp instanceof Date ? r.timestamp : new Date(r.timestamp);
+    if (!ts || Number.isNaN(ts.getTime())) return;
+    const bucket = bucketLabel(ts, resolution);
+
+    const cost = toNumber(r.cost_total, 0);
+    const inTok = toNumber(r.usage_input_tokens, 0);
+    const outTok = toNumber(r.usage_output_tokens, 0);
+    const totalTokRaw = toNumber(r.usage_total_tokens, inTok + outTok);
+    const cachedTok = toNumber(r.usage_cached_tokens, 0);
+    const lat = r.latency_ms === null || r.latency_ms === undefined ? null : Number(r.latency_ms);
+
+    totalRequests += 1;
+    totalCost += cost;
+    totalTokens += totalTokRaw;
+    if (lat !== null && !Number.isNaN(lat)) {
+      totalLatency += lat;
+      latencyCount += 1;
+    }
+
+    bucketCost.set(bucket, (bucketCost.get(bucket) || 0) + cost);
+    bucketRequests.set(bucket, (bucketRequests.get(bucket) || 0) + 1);
+    const tok = bucketTokens.get(bucket) || { total: 0, input: 0, output: 0, cached: 0 };
+    tok.total += totalTokRaw;
+    tok.input += inTok;
+    tok.output += outTok;
+    tok.cached += cachedTok;
+    bucketTokens.set(bucket, tok);
+
+    if (lat !== null && !Number.isNaN(lat)) {
+      const arr = bucketLatencies.get(bucket) || [];
+      arr.push(lat);
+      bucketLatencies.set(bucket, arr);
+
+      const latBucket = bucketLatency(lat, BUCKETS);
+      if (latBucket) latencyBucketCounts.set(latBucket, (latencyBucketCounts.get(latBucket) || 0) + 1);
+    }
+
+    if (r.model) {
+      modelCost.set(r.model, (modelCost.get(r.model) || 0) + cost);
+    }
+
+    if (r.agent) {
+      const stats = agentStats.get(r.agent) || { cost: 0, requests: 0, latencies: [] };
+      stats.cost += cost;
+      stats.requests += 1;
+      if (lat !== null && !Number.isNaN(lat)) {
+        stats.latencies.push(lat);
+      }
+      agentStats.set(r.agent, stats);
+    }
+
+    if (cachedTok > 0) {
+      const pricing = pricingService.getModelPricingSync(r.model || '');
+      cacheSavings += (cachedTok / 1_000_000) * pricing.input;
+    }
+  });
+
+  const sortedBuckets = Array.from(
+    new Set([
+      ...bucketCost.keys(),
+      ...bucketRequests.keys(),
+      ...bucketTokens.keys(),
+      ...bucketLatencies.keys(),
+    ])
+  ).sort();
+
+  const bucket_cost = sortedBuckets.map((key) => ({ bucket: key, cost_total: bucketCost.get(key) || 0 }));
+  const bucket_requests = sortedBuckets.map((key) => ({
+    bucket: key,
+    requests: bucketRequests.get(key) || 0,
+  }));
+  const bucket_tokens = sortedBuckets.map((key) => {
+    const tok = bucketTokens.get(key) || { total: 0, input: 0, output: 0, cached: 0 };
+    return {
+      bucket: key,
+      total_tokens: tok.total,
+      input_tokens: tok.input,
+      output_tokens: tok.output,
+      cached_tokens: tok.cached,
+    };
+  });
+  const bucket_latency_percentiles = sortedBuckets.map((key) => {
+    const lats = bucketLatencies.get(key) || [];
+    return {
+      bucket: key,
+      count: lats.length,
+      avg_ms: lats.length ? lats.reduce((a, b) => a + b, 0) / lats.length : null,
+      p50_ms: percentile(lats, 0.5),
+      p95_ms: percentile(lats, 0.95),
+      p99_ms: percentile(lats, 0.99),
+    };
+  });
+
+  const latency_total = Array.from(latencyBucketCounts.values()).reduce((a, b) => a + b, 0);
+  const latency_distribution = BUCKETS.map((b) => {
+    const count = latencyBucketCounts.get(b.label) || 0;
+    return {
+      bucket: b.label,
+      count,
+      share: latency_total ? count / latency_total : null,
+    };
+  });
+
+  const models = Array.from(modelCost.entries())
+    .sort((a, b) => (b[1] || 0) - (a[1] || 0))
+    .map(([model, cost]) => ({
+      model,
+      cost_total: cost,
+      share: totalCost ? cost / totalCost : null,
+    }));
+
+  const agents = Array.from(agentStats.entries())
+    .sort((a, b) => (b[1].cost || 0) - (a[1].cost || 0))
+    .map(([agent, stats]) => ({
+      agent,
+      requests: stats.requests,
+      cost_total: stats.cost,
+      share: totalCost ? stats.cost / totalCost : null,
+      avg_latency_ms: stats.latencies.length
+        ? stats.latencies.reduce((a, b) => a + b, 0) / stats.latencies.length
+        : null,
+    }));
+
+  return {
+    window: {
+      label: windowDef.label,
+      start: windowDef.start ? windowDef.start.toISOString() : null,
+      end: windowDef.end ? windowDef.end.toISOString() : null,
+    },
+    summary: {
+      total_cost: totalCost,
+      total_requests: totalRequests,
+      total_tokens: totalTokens,
+      avg_latency_ms: latencyCount ? totalLatency / latencyCount : null,
+      cache_savings: cacheSavings,
+    },
+    timeline:
+      resolution === 'hour'
+        ? {
+            resolution: 'hour',
+            hourly: {
+              cost: bucket_cost,
+              requests: bucket_requests,
+              tokens: bucket_tokens,
+              latency_percentiles: bucket_latency_percentiles,
+            },
+          }
+        : {
+            resolution: 'day',
+            daily: {
+              cost: bucket_cost,
+              requests: bucket_requests,
+              tokens: bucket_tokens,
+              latency_percentiles: bucket_latency_percentiles,
+            },
+          },
+    cost_by_model: {
+      total_cost: totalCost,
+      models,
+    },
+    cost_by_agent: {
+      total_cost: totalCost,
+      agents,
+    },
+    latency_distribution: {
+      total: latency_total,
+      buckets: latency_distribution,
+    },
+  };
+};
+
+export default {
+  buildAnalytics,
+  parseAnalyticsWindow,
+};
diff --git a/hive/src/services/tsdb/pricing_service.ts b/hive/src/services/tsdb/pricing_service.ts
new file mode 100644
index 00000000..971834da
--- /dev/null
+++ b/hive/src/services/tsdb/pricing_service.ts
@@ -0,0 +1,743 @@
+/**
+ * LLM Pricing Service
+ *
+ * Centralized pricing table for calculating costs by provider and model.
+ * Prices are stored in MongoDB and cached in memory for performance.
+ * Prices are in USD per 1M tokens (industry standard).
+ *
+ * Sources:
+ * - OpenAI: https://openai.com/pricing
+ * - Anthropic: https://www.anthropic.com/pricing
+ * - Google: https://ai.google.dev/pricing
+ * - AWS Bedrock: https://aws.amazon.com/bedrock/pricing/
+ */
+
+// In-memory cache for pricing data
+let pricingCache = new Map<string, PricingEntry>();
+let aliasCacheMap = new Map<string, string>(); // model alias -> canonical model
+let cacheLoadedAt: number | null = null;
+const CACHE_TTL_MS = 5 * 60 * 1000; // 5 minutes
+
+interface PricingEntry {
+  model: string;
+  provider: string;
+  input: number;
+  output: number;
+  cached_input: number;
+  aliases: string[];
+  effective_date?: Date;
+  updated_at?: Date;
+  source?: string;
+}
+
+interface PricingTableEntry {
+  provider: string;
+  input: number;
+  output: number;
+  cached_input: number;
+  aliases: string[];
+}
+
+// Fallback pricing for unknown models (conservative estimate)
+const DEFAULT_PRICING = { input: 1.00, output: 3.00, cached_input: 0.25 };
+
+// Default pricing table for seeding - USD per 1M tokens
+// Updated: 2025-01-01
+const DEFAULT_PRICING_TABLE: Record<string, PricingTableEntry> = {
+  // OpenAI Models
+  "gpt-4o": { provider: "openai", input: 2.50, output: 10.00, cached_input: 1.25, aliases: ["gpt-4o-2024-11-20", "gpt-4o-2024-08-06"] },
+  "gpt-4o-2024-05-13": { provider: "openai", input: 5.00, output: 15.00, cached_input: 2.50, aliases: [] },
+  "gpt-4o-mini": { provider: "openai", input: 0.15, output: 0.60, cached_input: 0.075, aliases: ["gpt-4o-mini-2024-07-18"] },
+  "gpt-4-turbo": { provider: "openai", input: 10.00, output: 30.00, cached_input: 5.00, aliases: ["gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview"] },
+  "gpt-4": { provider: "openai", input: 30.00, output: 60.00, cached_input: 15.00, aliases: ["gpt-4-0613"] },
+  "gpt-3.5-turbo": { provider: "openai", input: 0.50, output: 1.50, cached_input: 0.25, aliases: ["gpt-3.5-turbo-0125"] },
+  "o1": { provider: "openai", input: 15.00, output: 60.00, cached_input: 7.50, aliases: ["o1-2024-12-17", "o1-preview"] },
+  "o1-mini": { provider: "openai", input: 3.00, output: 12.00, cached_input: 1.50, aliases: ["o1-mini-2024-09-12"] },
+  "o3-mini": { provider: "openai", input: 1.10, output: 4.40, cached_input: 0.55, aliases: [] },
+
+  // Anthropic Models
+  "claude-3-5-sonnet-20241022": { provider: "anthropic", input: 3.00, output: 15.00, cached_input: 0.30, aliases: ["claude-3-5-sonnet-20240620", "claude-3-5-sonnet-latest"] },
+  "claude-sonnet-4-20250514": { provider: "anthropic", input: 3.00, output: 15.00, cached_input: 0.30, aliases: ["claude-sonnet-4-5-20250929"] },
+  "claude-3-5-haiku-20241022": { provider: "anthropic", input: 0.80, output: 4.00, cached_input: 0.08, aliases: ["claude-3-5-haiku-latest"] },
+  "claude-3-opus-20240229": { provider: "anthropic", input: 15.00, output: 75.00, cached_input: 1.50, aliases: ["claude-3-opus-latest"] },
+  "claude-3-sonnet-20240229": { provider: "anthropic", input: 3.00, output: 15.00, cached_input: 0.30, aliases: [] },
+  "claude-3-haiku-20240307": { provider: "anthropic", input: 0.25, output: 1.25, cached_input: 0.025, aliases: [] },
+  "claude-opus-4-5-20251101": { provider: "anthropic", input: 15.00, output: 75.00, cached_input: 1.50, aliases: ["claude-opus-4-20250514"] },
+
+  // Google Models
+  "gemini-2.0-flash": { provider: "google", input: 0.10, output: 0.40, cached_input: 0.025, aliases: ["gemini-2.0-flash-exp"] },
+  "gemini-1.5-flash": { provider: "google", input: 0.075, output: 0.30, cached_input: 0.01875, aliases: ["gemini-1.5-flash-latest"] },
+  "gemini-1.5-flash-8b": { provider: "google", input: 0.0375, output: 0.15, cached_input: 0.01, aliases: [] },
+  "gemini-1.5-pro": { provider: "google", input: 1.25, output: 5.00, cached_input: 0.3125, aliases: ["gemini-1.5-pro-latest"] },
+  "gemini-1.0-pro": { provider: "google", input: 0.50, output: 1.50, cached_input: 0.125, aliases: ["gemini-pro"] },
+  "gemini-exp-1206": { provider: "google", input: 0.00, output: 0.00, cached_input: 0.00, aliases: [] },
+
+  // AWS Bedrock - Claude (cross-region inference)
+  "anthropic.claude-3-5-sonnet-20241022-v2:0": { provider: "bedrock", input: 3.00, output: 15.00, cached_input: 0.30, aliases: [] },
+  "anthropic.claude-3-5-haiku-20241022-v1:0": { provider: "bedrock", input: 0.80, output: 4.00, cached_input: 0.08, aliases: [] },
+  "anthropic.claude-3-opus-20240229-v1:0": { provider: "bedrock", input: 15.00, output: 75.00, cached_input: 1.50, aliases: [] },
+  "anthropic.claude-3-sonnet-20240229-v1:0": { provider: "bedrock", input: 3.00, output: 15.00, cached_input: 0.30, aliases: [] },
+  "anthropic.claude-3-haiku-20240307-v1:0": { provider: "bedrock", input: 0.25, output: 1.25, cached_input: 0.025, aliases: [] },
+
+  // AWS Bedrock - Amazon Models
+  "amazon.nova-pro-v1:0": { provider: "bedrock", input: 0.80, output: 3.20, cached_input: 0.20, aliases: [] },
+  "amazon.nova-lite-v1:0": { provider: "bedrock", input: 0.06, output: 0.24, cached_input: 0.015, aliases: [] },
+  "amazon.nova-micro-v1:0": { provider: "bedrock", input: 0.035, output: 0.14, cached_input: 0.00875, aliases: [] },
+  "amazon.titan-text-express-v1": { provider: "bedrock", input: 0.20, output: 0.60, cached_input: 0.05, aliases: [] },
+  "amazon.titan-text-lite-v1": { provider: "bedrock", input: 0.15, output: 0.20, cached_input: 0.0375, aliases: [] },
+
+  // Mistral Models
+  "mistral-large-latest": { provider: "mistral", input: 2.00, output: 6.00, cached_input: 0.50, aliases: ["mistral-large-2411"] },
+  "mistral-medium-latest": { provider: "mistral", input: 2.70, output: 8.10, cached_input: 0.675, aliases: [] },
+  "mistral-small-latest": { provider: "mistral", input: 0.20, output: 0.60, cached_input: 0.05, aliases: ["mistral-small-2409"] },
+  "codestral-latest": { provider: "mistral", input: 0.30, output: 0.90, cached_input: 0.075, aliases: [] },
+  "pixtral-large-latest": { provider: "mistral", input: 2.00, output: 6.00, cached_input: 0.50, aliases: [] },
+  "ministral-8b-latest": { provider: "mistral", input: 0.10, output: 0.10, cached_input: 0.025, aliases: [] },
+  "ministral-3b-latest": { provider: "mistral", input: 0.04, output: 0.04, cached_input: 0.01, aliases: [] },
+
+  // Cohere Models
+  "command-r-plus": { provider: "cohere", input: 2.50, output: 10.00, cached_input: 0.625, aliases: [] },
+  "command-r": { provider: "cohere", input: 0.15, output: 0.60, cached_input: 0.0375, aliases: [] },
+  "command": { provider: "cohere", input: 1.00, output: 2.00, cached_input: 0.25, aliases: [] },
+  "command-light": { provider: "cohere", input: 0.30, output: 0.60, cached_input: 0.075, aliases: [] },
+
+  // DeepSeek Models
+  "deepseek-chat": { provider: "deepseek", input: 0.14, output: 0.28, cached_input: 0.014, aliases: [] },
+  "deepseek-reasoner": { provider: "deepseek", input: 0.55, output: 2.19, cached_input: 0.055, aliases: [] },
+
+  // Groq Models (inference pricing, not training)
+  "llama-3.3-70b-versatile": { provider: "groq", input: 0.59, output: 0.79, cached_input: 0.15, aliases: [] },
+  "llama-3.1-70b-versatile": { provider: "groq", input: 0.59, output: 0.79, cached_input: 0.15, aliases: [] },
+  "llama-3.1-8b-instant": { provider: "groq", input: 0.05, output: 0.08, cached_input: 0.0125, aliases: [] },
+  "llama-3.2-90b-vision-preview": { provider: "groq", input: 0.90, output: 0.90, cached_input: 0.225, aliases: [] },
+  "mixtral-8x7b-32768": { provider: "groq", input: 0.24, output: 0.24, cached_input: 0.06, aliases: [] },
+};
+
+declare const _ACHO_MG_DB: { db: (name: string) => { collection: (name: string) => unknown } };
+declare const _ACHO_MDB_CONFIG: { ERP_DBNAME: string };
+declare const _ACHO_MDB_COLLECTIONS: { ADEN_LLM_PRICING: string };
+
+// Minimal shape of the Mongo driver collection used here.
+interface MongoCollection {
+  find: (query: Record<string, unknown>) => { toArray: () => Promise<unknown[]>; sort: (sort: Record<string, unknown>) => { toArray: () => Promise<unknown[]> } };
+  findOne: (query: Record<string, unknown>) => Promise<unknown>;
+  findOneAndUpdate: (query: Record<string, unknown>, update: Record<string, unknown>, options: Record<string, unknown>) => Promise<unknown>;
+  deleteOne: (query: Record<string, unknown>) => Promise<{ deletedCount: number }>;
+  insertOne: (doc: Record<string, unknown>) => Promise<unknown>;
+  updateOne: (query: Record<string, unknown>, update: Record<string, unknown>) => Promise<unknown>;
+}
+
+/**
+ * Get the MongoDB collection for pricing
+ * @returns {MongoCollection} MongoDB pricing collection
+ */
+function getPricingCollection(): MongoCollection {
+  const db = _ACHO_MG_DB.db(_ACHO_MDB_CONFIG.ERP_DBNAME);
+  return db.collection(_ACHO_MDB_COLLECTIONS.ADEN_LLM_PRICING) as MongoCollection;
+}
+
+/**
+ * Check if cache is still valid
+ * @returns {boolean} True when the cache is populated and younger than CACHE_TTL_MS
+ */
+function isCacheValid(): boolean {
+  if (!cacheLoadedAt || pricingCache.size === 0) return false;
+  return Date.now() - cacheLoadedAt < CACHE_TTL_MS;
+}
+
+interface DbPricingDoc {
+  model: string;
+  provider: string;
+  input_per_1m: number;
+  output_per_1m: number;
+  cached_input_per_1m: number;
+  aliases?: string[];
+  effective_date?: Date;
+  updated_at?: Date;
+}
+
+/**
+ * Load pricing from MongoDB into memory cache
+ * @param {boolean} force - Force reload even if cache is valid
+ * @returns {Promise<Map<string, PricingEntry>>} Pricing cache
+ */
+async function loadPricingFromDb(force = false): Promise<Map<string, PricingEntry>> {
+  if (!force && isCacheValid()) {
+    return pricingCache;
+  }
+
+  try {
+    const collection = getPricingCollection();
+    const docs = await collection.find({}).toArray() as DbPricingDoc[];
+
+    if (docs.length === 0) {
+      console.log("[pricing_service] No pricing in DB, using defaults");
+      loadFromDefaults();
+      return pricingCache;
+    }
+
+    // Clear and rebuild cache
+    pricingCache.clear();
+    aliasCacheMap.clear();
+
+    for (const doc of docs) {
+      const pricing: PricingEntry = {
+        model: doc.model,
+        provider: doc.provider,
+        input: doc.input_per_1m,
+        output: doc.output_per_1m,
+        cached_input: doc.cached_input_per_1m,
+        aliases: doc.aliases || [],
+        effective_date: doc.effective_date,
+        updated_at: doc.updated_at,
+      };
+      pricingCache.set(doc.model.toLowerCase(), pricing);
+
+      // Build alias map
+      for (const alias of pricing.aliases) {
+        aliasCacheMap.set(alias.toLowerCase(), doc.model.toLowerCase());
+      }
+    }
+
+    cacheLoadedAt = Date.now();
+    console.log(`[pricing_service] Loaded ${pricingCache.size} pricing entries from DB`);
+    return pricingCache;
+  } catch (err) {
+    console.error("[pricing_service] Error loading from DB, using defaults:", (err as Error).message);
+    loadFromDefaults();
+    return pricingCache;
+  }
+}
+
+/**
+ * Load pricing from hardcoded defaults into cache
+ */
+function loadFromDefaults(): void {
+  pricingCache.clear();
+  aliasCacheMap.clear();
+
+  for (const [model, data] of Object.entries(DEFAULT_PRICING_TABLE)) {
+    const pricing: PricingEntry = {
+      model,
+      provider: data.provider,
+      input: data.input,
+      output: data.output,
+      cached_input: data.cached_input,
+      aliases: data.aliases || [],
+      source: "default",
+    };
+    pricingCache.set(model.toLowerCase(), pricing);
+
+    // Build alias map
+    for (const alias of data.aliases || []) {
+      aliasCacheMap.set(alias.toLowerCase(), model.toLowerCase());
+    }
+  }
+
+  cacheLoadedAt = Date.now();
+  console.log(`[pricing_service] Loaded ${pricingCache.size} pricing entries from defaults`);
+}
+
+/**
+ * Invalidate cache to force reload on next access
+ */
+function invalidateCache(): void {
+  cacheLoadedAt = null;
+}
+
+/**
+ * Resolve model name to canonical form using aliases
+ * @param {string} model - Model name (possibly an alias)
+ * @returns {string} Canonical model name (or the lowercased input when unmatched)
+ */
+function resolveAlias(model: string): string | null {
+  if (!model) return null;
+  const lower = model.toLowerCase().trim();
+
+  // Check if it's a direct match
+  if (pricingCache.has(lower)) {
+    return lower;
+  }
+
+  // Check alias map
+  if (aliasCacheMap.has(lower)) {
+    return aliasCacheMap.get(lower)!;
+  }
+
+  // Try partial matching for model families
+  // NOTE(review): bidirectional startsWith can mis-map related families (e.g. 'gpt-4o...'
+  // vs 'gpt-4') depending on cache iteration order — confirm intended.
+  for (const [key, pricing] of pricingCache.entries()) {
+    // Check if input starts with a known model prefix
+    if (lower.startsWith(key) || key.startsWith(lower)) {
+      return key;
+    }
+    // Check aliases
+    for (const alias of pricing.aliases || []) {
+      if (lower.startsWith(alias.toLowerCase()) || alias.toLowerCase().startsWith(lower)) {
+        return key;
+      }
+    }
+  }
+
+  return lower;
+}
+
+interface ModelPricingResult {
+  input: number;
+  output: number;
+  cached_input: number;
+  model: string;
+  provider: string;
+  source: string;
+}
+
+/**
+ * Get pricing for a model
+ * @param {string} model - Model name
+ * @param {string} provider - Provider name (optional, for disambiguation)
+ * @returns {Promise<ModelPricingResult>} Pricing { input, output, cached_input } in USD per 1M tokens
+ */
+async function getModelPricing(model: string, provider: string | null = null): Promise<ModelPricingResult> {
+  await loadPricingFromDb();
+
+  const resolved = resolveAlias(model);
+
+  // Try exact match
+  if (resolved && pricingCache.has(resolved)) {
+    const pricing = pricingCache.get(resolved)!;
+    return {
+      input: pricing.input,
+      output: pricing.output,
+      cached_input: pricing.cached_input,
+      model: pricing.model,
+      provider: pricing.provider,
+      source: "db",
+    };
+  }
+
+  // Try provider-prefixed lookup for Bedrock
+  if (provider === "bedrock" || provider === "aws") {
+    for (const [key, pricing] of pricingCache.entries()) {
+      if (key.includes(resolved || "") || (resolved || "").includes(key.split(".").pop()?.split("-")[0] || "")) {
+        return {
+          input: pricing.input,
+          output: pricing.output,
+          cached_input: pricing.cached_input,
+          model: pricing.model,
+          provider: pricing.provider,
+          source: "bedrock_match",
+        };
+      }
+    }
+  }
+
+  // Return default pricing
+  console.log(`[pricing_service] Unknown model: ${model}, using default pricing`);
+  return {
+    ...DEFAULT_PRICING,
+    model: model,
+    provider: provider || "unknown",
+    source: "default",
+  };
+}
+
+/**
+ * Get model pricing synchronously (uses cached data)
+ * @param {string} model - Model name
+ * @returns {ModelPricingResult} Pricing { input, output, cached_input } in USD per 1M tokens
+ */
+function getModelPricingSync(model: string): ModelPricingResult {
+  const resolved = resolveAlias(model);
+
+  if (resolved && pricingCache.has(resolved)) {
+    const cached = pricingCache.get(resolved)!;
+    return {
+      input: cached.input,
+      output: cached.output,
+      cached_input: cached.cached_input,
+      model: cached.model,
+      provider: cached.provider,
+      source: "db",
+    };
+  }
+
+  return { ...DEFAULT_PRICING, model, provider: "unknown", source: "default" };
+}
+
+interface CostCalculationParams {
+  model: string;
+  provider?: string;
+  input_tokens?: number;
+  output_tokens?: number;
+  cached_tokens?: number;
+}
+
+interface CostResult {
+  total: number;
+  input_cost: number;
+  output_cost: number;
+  cached_cost: number;
+  pricing: {
+    model: string;
+    source: string;
+    input_per_1m: number;
+    output_per_1m: number;
+    cached_per_1m: number;
+  };
+}
+
+/**
+ * Calculate cost for a request (synchronous version using cached data)
+ * @param {Object} params - Request parameters
+ * @returns {CostResult} Cost breakdown { total, input_cost, output_cost, cached_cost, pricing }
+ */
+function calculateCostSync({ model, provider, input_tokens = 0, output_tokens = 0, cached_tokens = 0 }: CostCalculationParams): CostResult {
+  const resolved = resolveAlias(model);
+  let pricing: { input: number; output: number; cached_input: number; model: string; source: string };
+
+  if (resolved && pricingCache.has(resolved)) {
+    const cached = pricingCache.get(resolved)!;
+    pricing = {
+      input: cached.input,
+      output: cached.output,
+      cached_input: cached.cached_input,
+      model: cached.model,
+      source: "db",
+    };
+  } else {
+    pricing = { ...DEFAULT_PRICING, model, source: "default" };
+  }
+
+  // Non-cached input tokens
+  const nonCachedInput = Math.max(0, input_tokens - cached_tokens);
+
+  // Calculate costs (pricing is per 1M tokens)
+  const inputCost = (nonCachedInput / 1_000_000) * pricing.input;
+  const outputCost = (output_tokens / 1_000_000) * pricing.output;
+  const cachedCost = (cached_tokens / 1_000_000) * pricing.cached_input;
+
+  const total = inputCost + outputCost + cachedCost;
+
+  return {
+    total,
+    input_cost: inputCost,
+    output_cost: outputCost,
+    cached_cost: cachedCost,
+    pricing: {
+      model: pricing.model,
+      source: pricing.source,
+      input_per_1m: pricing.input,
+      output_per_1m: pricing.output,
+      cached_per_1m: pricing.cached_input,
+    },
+  };
+}
+
+/**
+ * Calculate cost for a request (async version)
+ * @param {Object} params - Request parameters
+ * @returns {Promise<CostResult>} Cost breakdown { total, input_cost, output_cost, cached_cost, pricing }
+ */
+async function calculateCost(params: CostCalculationParams): Promise<CostResult> {
+  await loadPricingFromDb();
+  return calculateCostSync(params);
+}
+
+interface UpsertPricingInput {
+  provider?: string;
+  input_per_1m?: number;
+  input?: number;
+  output_per_1m?: number;
+  output?: number;
+  cached_input_per_1m?: number;
+  cached_input?: number;
+  aliases?: string[];
+  effective_date?: Date;
+}
+
+/**
+ * Upsert pricing for a model
+ * @param {string} model - Model identifier
+ * @param {Object} pricing - Pricing data
+ * @param {string} userId - User making the change
+ * @returns {Promise} Updated document
+ */
+async function upsertPricing(model: string, pricing: UpsertPricingInput, userId: string | null = null): Promise<unknown> {
+  const collection = getPricingCollection();
+
+  const doc = {
+    model: model,
+    provider: pricing.provider,
+    input_per_1m: pricing.input_per_1m ?? pricing.input,
+    output_per_1m: pricing.output_per_1m ?? pricing.output,
+    cached_input_per_1m: pricing.cached_input_per_1m ??
pricing.cached_input, + aliases: pricing.aliases || [], + effective_date: pricing.effective_date || new Date(), + updated_at: new Date(), + updated_by: userId, + }; + + const result = await collection.findOneAndUpdate( + { model: model }, + { $set: doc }, + { upsert: true, returnDocument: "after" } + ); + + // Invalidate cache to force reload + invalidateCache(); + + return result; +} + +/** + * Delete pricing for a model + * @param {string} model - Model identifier + * @returns {Promise} True if deleted + */ +async function deletePricing(model: string): Promise { + const collection = getPricingCollection(); + const result = await collection.deleteOne({ model: model }); + + // Invalidate cache to force reload + invalidateCache(); + + return result.deletedCount > 0; +} + +interface SeedResult { + inserted: number; + updated: number; + skipped: number; + errors: { model: string; error: string }[]; +} + +/** + * Seed default pricing to MongoDB + * @param {string} userId - User making the change + * @param {boolean} overwrite - If true, overwrite existing entries + * @returns {Promise} Seed results + */ +async function seedDefaultPricing(userId: string | null = null, overwrite = false): Promise { + const collection = getPricingCollection(); + const results: SeedResult = { inserted: 0, updated: 0, skipped: 0, errors: [] }; + + for (const [model, data] of Object.entries(DEFAULT_PRICING_TABLE)) { + try { + const existing = await collection.findOne({ model }); + + if (existing && !overwrite) { + results.skipped++; + continue; + } + + const doc = { + model, + provider: data.provider, + input_per_1m: data.input, + output_per_1m: data.output, + cached_input_per_1m: data.cached_input, + aliases: data.aliases || [], + effective_date: new Date(), + updated_at: new Date(), + updated_by: userId, + }; + + if (existing) { + await collection.updateOne({ model }, { $set: doc }); + results.updated++; + } else { + await collection.insertOne(doc); + results.inserted++; + } + } catch 
(err) { + results.errors.push({ model, error: (err as Error).message }); + } + } + + // Invalidate cache to force reload + invalidateCache(); + + console.log(`[pricing_service] Seeded pricing: ${results.inserted} inserted, ${results.updated} updated, ${results.skipped} skipped`); + return results; +} + +interface AllPricingResult { + [key: string]: { + provider: string; + input: number; + output: number; + cached_input: number; + aliases: string[]; + }; +} + +/** + * Get all available pricing data + * @returns {Promise} Full pricing table + */ +async function getAllPricing(): Promise { + await loadPricingFromDb(); + + const result: AllPricingResult = {}; + for (const [key, pricing] of pricingCache.entries()) { + result[pricing.model] = { + provider: pricing.provider, + input: pricing.input, + output: pricing.output, + cached_input: pricing.cached_input, + aliases: pricing.aliases, + }; + } + return result; +} + +interface PricingByProviderResult { + [provider: string]: { + [model: string]: { + input: number; + output: number; + cached_input: number; + aliases: string[]; + }; + }; +} + +/** + * Get pricing summary grouped by provider + * @returns {Promise} Pricing by provider + */ +async function getPricingByProvider(): Promise { + await loadPricingFromDb(); + + const byProvider: PricingByProviderResult = {}; + + for (const [, pricing] of pricingCache.entries()) { + const provider = pricing.provider || "other"; + if (!byProvider[provider]) { + byProvider[provider] = {}; + } + byProvider[provider][pricing.model] = { + input: pricing.input, + output: pricing.output, + cached_input: pricing.cached_input, + aliases: pricing.aliases, + }; + } + + return byProvider; +} + +interface DegradationModel { + model: string; + label: string; + input_cost: number; + output_cost: number; + avg_cost: number; +} + +interface DegradationTargetsResult { + providers: string[]; + models: { [provider: string]: DegradationModel[] }; +} + +/** + * Get degradation target models grouped by 
provider + * Returns models sorted by cost (cheapest first) for budget control "degrade" mode + * @returns {Promise} { providers: [...], models: { provider: [...] } } + */ +async function getDegradationTargets(): Promise { + await loadPricingFromDb(); + + const byProvider: { [provider: string]: DegradationModel[] } = {}; + + for (const [, pricing] of pricingCache.entries()) { + const provider = pricing.provider || "other"; + if (!byProvider[provider]) { + byProvider[provider] = []; + } + + // Calculate average cost per 1M tokens (input + output) / 2 + const avgCost = (pricing.input + pricing.output) / 2; + + byProvider[provider].push({ + model: pricing.model, + label: pricing.model, + input_cost: pricing.input, + output_cost: pricing.output, + avg_cost: avgCost, + }); + } + + // Sort models within each provider by avg_cost (cheapest first) + for (const provider of Object.keys(byProvider)) { + byProvider[provider].sort((a, b) => a.avg_cost - b.avg_cost); + } + + // Get sorted list of providers + const providers = Object.keys(byProvider).sort(); + + return { + providers, + models: byProvider, + }; +} + +/** + * Get pricing directly from DB (bypasses cache) + * @param {string} model - Model identifier + * @returns {Promise} Pricing document or null + */ +async function getPricingFromDb(model: string): Promise { + const collection = getPricingCollection(); + return collection.findOne({ model }); +} + +/** + * List all pricing from DB (bypasses cache) + * @returns {Promise} All pricing documents + */ +async function listAllPricingFromDb(): Promise { + const collection = getPricingCollection(); + return collection.find({}).sort({ provider: 1, model: 1 }).toArray(); +} + +/** + * Initialize pricing service - call on server startup + * @returns {Promise} + */ +async function initialize(): Promise { + try { + await loadPricingFromDb(true); + console.log("[pricing_service] Initialized successfully"); + } catch (err) { + console.error("[pricing_service] Failed to initialize, 
using defaults:", (err as Error).message); + loadFromDefaults(); + } +} + +export default { + // Core functions + getModelPricing, + getModelPricingSync, + calculateCost, + calculateCostSync, + + // CRUD operations + upsertPricing, + deletePricing, + seedDefaultPricing, + + // Query functions + getAllPricing, + getPricingByProvider, + getDegradationTargets, + getPricingFromDb, + listAllPricingFromDb, + + // Cache management + loadPricingFromDb, + invalidateCache, + initialize, + + // Constants (for reference/testing) + DEFAULT_PRICING, + DEFAULT_PRICING_TABLE, +}; diff --git a/hive/src/services/tsdb/schema.sql b/hive/src/services/tsdb/schema.sql new file mode 100644 index 00000000..f3811afa --- /dev/null +++ b/hive/src/services/tsdb/schema.sql @@ -0,0 +1,358 @@ +-- TSDB schema for team-scoped hypertable (Timescale) +-- Architecture: Hot (metrics) / Warm (content refs) / Cold (content store) + +-- ============================================================================= +-- Enable TimescaleDB extension (required for hypertables and continuous aggregates) +-- This is safe to run multiple times - CREATE EXTENSION IF NOT EXISTS is idempotent +-- ============================================================================= +CREATE EXTENSION IF NOT EXISTS timescaledb; + +-- ============================================================================= +-- HOT TABLE: llm_events (metrics only - fast time-series queries) +-- ============================================================================= +CREATE TABLE IF NOT EXISTS llm_events ( + "timestamp" timestamptz NOT NULL, + ingest_date date, + team_id text NOT NULL, + user_id text, + trace_id text NOT NULL, + span_id text, + parent_span_id text, + request_id text, + provider text, + call_sequence integer NOT NULL, + model text, + stream boolean DEFAULT false, + agent text, + agent_name text, + agent_stack jsonb, + call_site jsonb, + metadata jsonb, + latency_ms double precision, + usage_input_tokens double 
 precision,
    usage_output_tokens double precision,
    usage_total_tokens double precision,
    usage_cached_tokens double precision,
    usage_reasoning_tokens double precision,
    usage_accepted_prediction_tokens double precision,
    usage_rejected_prediction_tokens double precision,
    cost_total numeric,
    -- Content flags (lightweight references instead of full content)
    has_content boolean DEFAULT false,
    finish_reason text,
    tool_call_count integer DEFAULT 0,
    -- Deprecated: content_capture jsonb (migrated to warm storage)
    content_capture jsonb,
    created_at timestamptz DEFAULT now(),
    CONSTRAINT llm_events_pk PRIMARY KEY ("timestamp", trace_id, call_sequence)
);

-- =============================================================================
-- WARM TABLE: llm_event_content (content references per event)
-- Links events to deduplicated content in the cold store
-- =============================================================================
CREATE TABLE IF NOT EXISTS llm_event_content (
    id bigserial,
    "timestamp" timestamptz NOT NULL,
    trace_id text NOT NULL,
    call_sequence integer NOT NULL,
    team_id text NOT NULL,
    -- Content type: 'system_prompt', 'messages', 'response', 'tools', 'params'
    content_type text NOT NULL,
    -- Reference to cold storage (content-addressable)
    content_hash text NOT NULL,
    -- Quick access metadata (no need to fetch from cold store)
    byte_size integer NOT NULL DEFAULT 0,
    message_count integer, -- For messages type
    truncated_preview text, -- First 200 chars for quick preview
    created_at timestamptz DEFAULT now(),
    CONSTRAINT llm_event_content_pk PRIMARY KEY (id)
);

-- Index for joining back to events
CREATE INDEX IF NOT EXISTS idx_llm_event_content_event
    ON llm_event_content (trace_id, call_sequence, "timestamp");

-- Index for content type queries
CREATE INDEX IF NOT EXISTS idx_llm_event_content_type
    ON llm_event_content (team_id, content_type, "timestamp" DESC);

-- Index for content hash lookups (finding which events use a content)
CREATE INDEX IF NOT EXISTS idx_llm_event_content_hash
    ON llm_event_content (content_hash);

-- =============================================================================
-- COLD TABLE: llm_content_store (deduplicated content storage)
-- Content-addressable storage with SHA-256 hashes
-- =============================================================================
CREATE TABLE IF NOT EXISTS llm_content_store (
    content_hash text NOT NULL,
    team_id text NOT NULL,
    content text NOT NULL,
    byte_size integer NOT NULL,
    ref_count integer DEFAULT 1, -- Number of events referencing this content
    first_seen_at timestamptz DEFAULT now(),
    last_seen_at timestamptz DEFAULT now(),
    CONSTRAINT llm_content_store_pk PRIMARY KEY (content_hash, team_id)
);

-- Index for cleanup queries (find orphaned content)
CREATE INDEX IF NOT EXISTS idx_llm_content_store_refs
    ON llm_content_store (team_id, ref_count, last_seen_at);

-- =============================================================================
-- MIGRATION: Add new columns to existing llm_events tables
-- =============================================================================
ALTER TABLE llm_events ADD COLUMN IF NOT EXISTS has_content boolean DEFAULT false;
ALTER TABLE llm_events ADD COLUMN IF NOT EXISTS finish_reason text;
ALTER TABLE llm_events ADD COLUMN IF NOT EXISTS tool_call_count integer DEFAULT 0;

-- Ensure primary key includes timestamp if table already existed without it
-- (inspects pg_constraint/pg_attribute: rebuilds the PK only when the current
-- primary key does not already contain the "timestamp" column)
DO $$
BEGIN
    IF EXISTS (
        SELECT 1
        FROM pg_constraint c
        JOIN pg_class t ON c.conrelid = t.oid
        WHERE t.relname = 'llm_events'
          AND c.contype = 'p'
          AND NOT EXISTS (
              SELECT 1
              FROM unnest(c.conkey) WITH ORDINALITY AS ck(attnum, ord)
              JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = ck.attnum
              WHERE a.attname = 'timestamp'
          )
    ) THEN
        ALTER TABLE llm_events DROP CONSTRAINT IF EXISTS llm_events_pk;
        ALTER TABLE llm_events ADD CONSTRAINT llm_events_pk PRIMARY KEY ("timestamp", trace_id, call_sequence);
    END IF;
END$$;

-- Promote to hypertable when Timescale is available
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') THEN
        PERFORM public.create_hypertable('llm_events', 'timestamp', if_not_exists => TRUE);
    END IF;
END$$;

-- Ensure metadata column exists for flexible fields
ALTER TABLE llm_events
    ADD COLUMN IF NOT EXISTS metadata jsonb;

-- Ensure content_capture column exists (for Layer 0 content capture)
ALTER TABLE llm_events
    ADD COLUMN IF NOT EXISTS content_capture jsonb;

-- Helpful indexes
CREATE INDEX IF NOT EXISTS idx_llm_events_ts ON llm_events ("timestamp" DESC);
CREATE INDEX IF NOT EXISTS idx_llm_events_team_ts ON llm_events (team_id, "timestamp" DESC);
CREATE INDEX IF NOT EXISTS idx_llm_events_model ON llm_events (model);
CREATE INDEX IF NOT EXISTS idx_llm_events_agent ON llm_events (agent);
CREATE INDEX IF NOT EXISTS idx_llm_events_trace ON llm_events (trace_id);

-- Continuous aggregate: daily rollup for analytics-wide
-- NOTE(review): TimescaleDB documents that continuous aggregates (and
-- refresh_continuous_aggregate) cannot run inside a transaction, and a DO
-- block executes in one — so this branch may always fall into the EXCEPTION
-- handler and silently create nothing. Verify the CA actually exists after
-- running this schema.
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') THEN
        CREATE MATERIALIZED VIEW IF NOT EXISTS llm_events_daily_ca
        WITH (timescaledb.continuous) AS
        SELECT
            time_bucket('1 day', "timestamp") AS bucket,
            COUNT(*) AS requests,
            SUM(cost_total) AS cost_total,
            SUM(usage_input_tokens) AS input_tokens,
            SUM(usage_output_tokens) AS output_tokens,
            SUM(COALESCE(usage_total_tokens, COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0))) AS total_tokens,
            SUM(usage_cached_tokens) AS cached_tokens
        FROM llm_events
        GROUP BY 1
        WITH NO DATA;

        -- Initial refresh to populate the CA immediately
        CALL refresh_continuous_aggregate('llm_events_daily_ca', NULL, NOW());
    END IF;
EXCEPTION
    WHEN others THEN NULL; -- Ignore errors if CA already exists or refresh fails
END$$;

-- Index on CA for fast range scans
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_class WHERE relname = 'llm_events_daily_ca') THEN
        CREATE INDEX IF NOT EXISTS idx_llm_events_daily_ca_bucket ON llm_events_daily_ca (bucket DESC);
    END IF;
EXCEPTION WHEN undefined_table THEN
    NULL;
END$$;

-- Continuous aggregate: daily rollup by model for fast model-grouped queries
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') THEN
        CREATE MATERIALIZED VIEW IF NOT EXISTS llm_events_daily_by_model_ca
        WITH (timescaledb.continuous) AS
        SELECT
            time_bucket('1 day', "timestamp") AS bucket,
            model,
            provider,
            COUNT(*) AS requests,
            SUM(cost_total) AS cost_total,
            SUM(usage_input_tokens) AS input_tokens,
            SUM(usage_output_tokens) AS output_tokens,
            SUM(COALESCE(usage_total_tokens, COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0))) AS total_tokens,
            SUM(usage_cached_tokens) AS cached_tokens,
            AVG(latency_ms) AS avg_latency_ms
        FROM llm_events
        GROUP BY 1, 2, 3
        WITH NO DATA;

        -- Initial refresh to populate the CA immediately
        CALL refresh_continuous_aggregate('llm_events_daily_by_model_ca', NULL, NOW());
    END IF;
EXCEPTION
    WHEN others THEN NULL; -- Ignore errors if CA already exists or refresh fails
END$$;

-- Index on model CA for fast range scans
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_class WHERE relname = 'llm_events_daily_by_model_ca') THEN
        CREATE INDEX IF NOT EXISTS idx_llm_events_daily_by_model_ca_bucket ON llm_events_daily_by_model_ca (bucket DESC);
        CREATE INDEX IF NOT EXISTS idx_llm_events_daily_by_model_ca_model ON llm_events_daily_by_model_ca (model);
    END IF;
EXCEPTION WHEN undefined_table THEN
    NULL;
END$$;

-- Continuous aggregate: daily rollup by agent for fast agent-grouped queries
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb') THEN
        CREATE MATERIALIZED VIEW IF NOT EXISTS llm_events_daily_by_agent_ca
        WITH (timescaledb.continuous) AS
        SELECT
            time_bucket('1 day', "timestamp") AS bucket,
            agent,
            COUNT(*) AS requests,
            SUM(cost_total) AS cost_total,
            SUM(usage_input_tokens) AS input_tokens,
            SUM(usage_output_tokens) AS output_tokens,
            SUM(COALESCE(usage_total_tokens, COALESCE(usage_input_tokens, 0) + COALESCE(usage_output_tokens, 0))) AS total_tokens,
            SUM(usage_cached_tokens) AS cached_tokens,
            AVG(latency_ms) AS avg_latency_ms
        FROM llm_events
        GROUP BY 1, 2
        WITH NO DATA;

        -- Initial refresh to populate the CA immediately
        CALL refresh_continuous_aggregate('llm_events_daily_by_agent_ca', NULL, NOW());
    END IF;
EXCEPTION
    WHEN others THEN NULL; -- Ignore errors if CA already exists or refresh fails
END$$;

-- Index on agent CA for fast range scans
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_class WHERE relname = 'llm_events_daily_by_agent_ca') THEN
        CREATE INDEX IF NOT EXISTS idx_llm_events_daily_by_agent_ca_bucket ON llm_events_daily_by_agent_ca (bucket DESC);
        CREATE INDEX IF NOT EXISTS idx_llm_events_daily_by_agent_ca_agent ON llm_events_daily_by_agent_ca (agent);
    END IF;
EXCEPTION WHEN undefined_table THEN
    NULL;
END$$;

-- Refresh policies: keep recent buckets fresh
-- Note: Using timescaledb_information.jobs (not the deprecated policy_refresh_continuous_aggregate view)
-- NOTE(review): the jobs check filters on hypertable_name = '<ca name>'; for
-- continuous aggregates the jobs view typically reports the CA's
-- materialization hypertable name — confirm this guard actually finds the
-- existing policy, otherwise duplicates are only avoided by the EXCEPTION
-- handler below.
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb')
       AND EXISTS (
           SELECT 1
           FROM timescaledb_information.continuous_aggregates
           WHERE view_name = 'llm_events_daily_ca'
             AND view_schema = current_schema()
       )
    THEN
        -- Add refresh policy if none exists for this CA
        IF NOT EXISTS (
            SELECT 1 FROM timescaledb_information.jobs
            WHERE proc_name = 'policy_refresh_continuous_aggregate'
              AND hypertable_schema = current_schema()
              AND hypertable_name = 'llm_events_daily_ca'
        ) THEN
            PERFORM add_continuous_aggregate_policy(
                'llm_events_daily_ca',
                start_offset => interval '30 days',
                end_offset => interval '1 hour',
                schedule_interval => interval '15 minutes'
            );
        END IF;
    END IF;
EXCEPTION
    WHEN undefined_table THEN NULL;
    WHEN undefined_function THEN NULL;
END$$;

-- Refresh policies for llm_events_daily_by_model_ca
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb')
       AND EXISTS (
           SELECT 1
           FROM timescaledb_information.continuous_aggregates
           WHERE view_name = 'llm_events_daily_by_model_ca'
             AND view_schema = current_schema()
       )
    THEN
        -- Add refresh policy if none exists for this CA
        IF NOT EXISTS (
            SELECT 1 FROM timescaledb_information.jobs
            WHERE proc_name = 'policy_refresh_continuous_aggregate'
              AND hypertable_schema = current_schema()
              AND hypertable_name = 'llm_events_daily_by_model_ca'
        ) THEN
            PERFORM add_continuous_aggregate_policy(
                'llm_events_daily_by_model_ca',
                start_offset => interval '30 days',
                end_offset => interval '1 hour',
                schedule_interval => interval '15 minutes'
            );
        END IF;
    END IF;
EXCEPTION
    WHEN undefined_table THEN NULL;
    WHEN undefined_function THEN NULL;
END$$;

-- Refresh policies for llm_events_daily_by_agent_ca
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'timescaledb')
       AND EXISTS (
           SELECT 1
           FROM timescaledb_information.continuous_aggregates
           WHERE view_name = 'llm_events_daily_by_agent_ca'
             AND view_schema = current_schema()
       )
    THEN
        -- Add refresh policy if none exists for this CA
        IF NOT EXISTS (
            SELECT 1 FROM timescaledb_information.jobs
            WHERE proc_name = 'policy_refresh_continuous_aggregate'
              AND hypertable_schema = current_schema()
              AND hypertable_name = 'llm_events_daily_by_agent_ca'
        ) THEN
            PERFORM add_continuous_aggregate_policy(
                'llm_events_daily_by_agent_ca',
                start_offset => interval '30 days',
                end_offset => interval '1 hour',
                schedule_interval => interval '15 minutes'
            );
        END IF;
    END IF;
EXCEPTION
    WHEN undefined_table THEN NULL;
    WHEN undefined_function THEN NULL;
END$$;
diff --git a/hive/src/services/tsdb/team_context.ts b/hive/src/services/tsdb/team_context.ts
new file mode 100644
index
00000000..578dfefb --- /dev/null +++ b/hive/src/services/tsdb/team_context.ts @@ -0,0 +1,114 @@ +import { Pool, PoolConfig, PoolClient } from "pg"; +import jwt from "jsonwebtoken"; + +// Cache pools per team schema +const poolCache = new Map(); + +interface TokenPayload { + team_id?: string; + team?: string; + teamId?: string; + current_team_id?: string; + user_id?: string; + sub?: string; + user?: string; + userId?: string; + [key: string]: unknown; +} + +interface ParsedToken { + team_id: string; + user_id: string | null; + token: string; + payload: TokenPayload; +} + +/** + * Parse JWT to extract team_id and user_id. + * - Supports Authorization header formats: "Bearer " or "jwt " or raw token. + * - team_id: payload.team_id || payload.team || payload.teamId + * - user_id: payload.user_id || payload.sub || payload.user || payload.userId + */ +const parseToken = (authHeader: string | undefined): ParsedToken | null => { + if (!authHeader) return null; + const parts = authHeader.trim().split(" "); + const token = parts.length === 2 ? parts[1] : parts[0]; + if (!token) return null; + + // Token is already verified by passport middleware; decode only to extract team/user fields. 
+ const payload = jwt.decode(token) as TokenPayload | null; + if (!payload || typeof payload !== "object") return null; + + const team_id = payload.team_id || payload.team || payload.teamId || payload.current_team_id; + const user_id = payload.user_id || payload.sub || payload.user || payload.userId || null; + if (!team_id) return null; + + return { team_id, user_id: user_id as string | null, token, payload }; +}; + +const buildSchemaName = (team_id: string | number): string => { + return `team_${team_id}`.replace(/[^a-zA-Z0-9_]/g, "_"); +}; + +declare const _GLOBAL_CONST: { ACHO_PG_CONFIG?: { USER: string; HOST: string; DATABASE: string; PASSWORD: string; PORT: number } }; + +const basePoolConfig = (): Partial => { + const connStr = (process.env.TSDB_PG_URL || "").replace(/\s+/g, ""); + if (connStr) { + // Only enable SSL for non-local connections or when explicitly requested + const isLocal = connStr.includes("localhost") || connStr.includes("127.0.0.1") || connStr.includes("timescaledb"); + const sslRequested = connStr.includes("sslmode=require") || process.env.TSDB_SSL === "true"; + const ssl = !isLocal || sslRequested ? 
{ rejectUnauthorized: false } : false; + return { connectionString: connStr, ssl }; + } + if (typeof _GLOBAL_CONST !== "undefined" && _GLOBAL_CONST.ACHO_PG_CONFIG) { + const cfg = _GLOBAL_CONST.ACHO_PG_CONFIG; + return { + user: cfg.USER, + host: cfg.HOST, + database: cfg.DATABASE, + password: cfg.PASSWORD, + port: cfg.PORT, + }; + } + return {}; +}; + +const getTeamPool = async (team_id: string | number, overrideConfig?: Partial): Promise => { + const schema = buildSchemaName(team_id); + if (poolCache.has(schema)) return poolCache.get(schema)!; + + const pool = new Pool({ + ...basePoolConfig(), + ...(overrideConfig || {}), + max: 10, + idleTimeoutMillis: 30000, + connectionTimeoutMillis: 10000, + }); + + // Handle pool-level errors to prevent unhandled rejections + pool.on("error", (err) => { + console.error(`[team_context] Pool error for schema ${schema}:`, err.message); + // Remove from cache to force fresh pool on next request + poolCache.delete(schema); + }); + + // Ensure schema exists and set search_path per connection + pool.on("connect", (client: PoolClient) => { + // Fire-and-forget with error handling - don't await in event handler + client.query(`CREATE SCHEMA IF NOT EXISTS ${schema}`) + .then(() => client.query(`SET search_path TO ${schema}, public`)) + .catch((err: Error) => { + console.error(`[team_context] Schema setup error for ${schema}:`, err.message); + }); + }); + + poolCache.set(schema, pool); + return pool; +}; + +export { + parseToken, + buildSchemaName, + getTeamPool, +}; diff --git a/hive/src/services/tsdb/tsdb_service.ts b/hive/src/services/tsdb/tsdb_service.ts new file mode 100644 index 00000000..02d5d2f2 --- /dev/null +++ b/hive/src/services/tsdb/tsdb_service.ts @@ -0,0 +1,955 @@ +import fs from "fs"; +import path from "path"; +import crypto from "crypto"; +import { Pool, PoolClient } from "pg"; +import pricingService from "./pricing_service"; + +let _tsdbPool: Pool | undefined; +let _schemaReadyPromise: Promise | null; +const 
_schemaReadyByName = new Map>(); // Per-schema initialization tracking
const SCHEMA_SQL = fs.readFileSync(path.join(__dirname, "schema.sql"), "utf8");

/**
 * Best-effort JSON parse: objects pass through, strings are parsed,
 * everything else (or a parse failure) yields null.
 */
const safeParseJson = (val: unknown): unknown => {
  if (val === null || val === undefined) return null;
  if (typeof val === "object") return val;
  if (typeof val !== "string") return null;
  try {
    return JSON.parse(val);
  } catch (_e) {
    return null;
  }
};

/**
 * Coerce a value to a plain (non-array) object, parsing JSON strings,
 * falling back to `fallback` when that is not possible.
 */
const asObject = (val: unknown, fallback: Record = {}): Record => {
  const parsed = safeParseJson(val);
  if (parsed && typeof parsed === "object" && !Array.isArray(parsed)) return parsed as Record;
  if (val && typeof val === "object" && !Array.isArray(val)) return val as Record;
  return fallback;
};

/**
 * Coerce a value to an array. Handles real arrays, JSON-encoded arrays,
 * Postgres-style "{a,b,c}" literals, bare strings, and scalars; anything
 * else returns `fallback`.
 */
const asArray = (val: unknown, fallback: unknown[] = []): unknown[] => {
  if (Array.isArray(val)) return val;

  const parsed = safeParseJson(val);
  if (Array.isArray(parsed)) return parsed;

  if (typeof val === "string") {
    const trimmed = val.trim();
    // Postgres array literal, e.g. {a,"b c",d}
    if (trimmed.startsWith("{") && trimmed.endsWith("}")) {
      const inner = trimmed.slice(1, -1).trim();
      if (!inner) return [];
      return inner
        .split(",")
        .map((piece) => piece.trim().replace(/^"+|"+$/g, ""))
        .filter(Boolean);
    }
    return [val];
  }

  // Wrap remaining scalars (numbers, booleans) in an array
  if (val !== null && val !== undefined && typeof val !== "object" && typeof val !== "function") {
    return [val];
  }
  return fallback;
};

/**
 * Assemble the metadata blob for an event from its various aliases
 * (metadata/meta/properties/extra) plus tags, session id, and environment.
 * Returns null when nothing was collected.
 */
const buildMetadata = (raw: Record): Record | null => {
  const base = asObject(raw.metadata ?? raw.meta ?? raw.properties ?? raw.extra, {});

  const tags = asArray(raw.tags ?? raw.labels, []) as string[];
  if (tags && tags.length) {
    base.tags = tags;
  }

  // Explicit fields never overwrite values already present in the blob
  const sessionId = raw.session_id ?? raw.sessionId;
  if (sessionId !== undefined && sessionId !== null && base.session_id === undefined) {
    base.session_id = sessionId;
  }
  const environment = raw.environment ?? raw.env;
  if (environment && base.environment === undefined) {
    base.environment = environment;
  }

  return Object.keys(base).length ? base : null;
};

interface UsageData {
  input_tokens?: number;
  output_tokens?: number;
  total_tokens?: number;
  cached_tokens?: number;
  reasoning_tokens?: number;
  accepted_prediction_tokens?: number;
  rejected_prediction_tokens?: number;
}

/**
 * Compute the total cost for an event from its model and token usage,
 * treating non-finite counts as zero. Delegates to the pricing service.
 */
const calcCost = (model: string, usage: UsageData = {}): number => {
  const toCount = (v: unknown): number => (Number.isFinite(Number(v)) ? Number(v) : 0);

  const result = pricingService.calculateCostSync({
    model: model || "",
    input_tokens: toCount(usage.input_tokens),
    output_tokens: toCount(usage.output_tokens),
    cached_tokens: toCount(usage.cached_tokens),
  });

  return result.total;
};

// =============================================================================
// Content Storage Types and Utilities
// =============================================================================

interface ContentCapture {
  system_prompt?: string;
  messages?: unknown[];
  tools?: unknown[];
  params?: Record;
  response_content?: string;
  finish_reason?: string;
  choice_count?: number;
  has_images?: boolean;
  image_urls?: string[];
}

interface ContentReference {
  content_type: string;
  content_hash: string;
  byte_size: number;
  message_count?: number;
  truncated_preview?: string;
}

interface ContentToStore {
  content_hash: string;
  content: string;
  byte_size: number;
}

/**
 * Generate SHA-256 hash of content for content-addressable storage
 */
const hashContent = (content: string): string =>
  crypto.createHash("sha256").update(content, "utf8").digest("hex");

/**
 * Create a truncated preview of content (first 200 chars by default).
 */
const createPreview = (content: string, maxLength: number = 200): string => {
  if (!content || content.length <= maxLength) return content || "";
  return `${content.slice(0, maxLength)}...`;
};

/**
 * Extract content from ContentCapture and prepare for storage.
 * Returns content references for the warm table and deduplicated content
 * items (one per distinct hash within this event) for the cold table.
 */
const extractContent = (
  contentCapture: ContentCapture | null | undefined
): { refs: ContentReference[]; items: ContentToStore[] } => {
  const refs: ContentReference[] = [];
  const items: ContentToStore[] = [];
  if (!contentCapture) {
    return { refs, items };
  }

  const storedHashes = new Set();

  // Serialize one field, record its reference, and stage its content
  // for storage unless the same hash was already staged in this batch.
  const addEntry = (type: string, value: unknown, messageCount?: number): void => {
    if (value === null || value === undefined) return;

    const serialized = typeof value === "string" ? value : JSON.stringify(value);
    if (!serialized || serialized === "null" || serialized === "{}") return;

    const hash = hashContent(serialized);
    const byteSize = Buffer.byteLength(serialized, "utf8");

    refs.push({
      content_type: type,
      content_hash: hash,
      byte_size: byteSize,
      message_count: messageCount,
      truncated_preview: createPreview(serialized),
    });

    if (!storedHashes.has(hash)) {
      storedHashes.add(hash);
      items.push({ content_hash: hash, content: serialized, byte_size: byteSize });
    }
  };

  if (contentCapture.system_prompt) {
    addEntry("system_prompt", contentCapture.system_prompt);
  }

  const messages = contentCapture.messages;
  if (Array.isArray(messages) && messages.length > 0) {
    addEntry("messages", messages, messages.length);
  }

  if (contentCapture.response_content) {
    addEntry("response", contentCapture.response_content);
  }

  const tools = contentCapture.tools;
  if (Array.isArray(tools) && tools.length > 0) {
    addEntry("tools", tools);
  }

  // Only store params if they have meaningful values (not all nulls)
  if (contentCapture.params) {
    const meaningful = Object.values(contentCapture.params).some(
      (v) => v !== null && v !== undefined
    );
    if (meaningful) {
      addEntry("params", contentCapture.params);
    }
  }

  return { refs, items };
};

/**
 * Parse a value into a Date, returning null for missing or invalid input.
 */
const parseDate = (val: unknown): Date | null => {
  if (!val) return null;
  const parsed = new Date(val as string | number | Date);
  return Number.isNaN(parsed.getTime()) ? null : parsed;
};

/**
 * Coerce a value to a finite number, or null when it is not numeric.
 */
const numberOrNull = (val: unknown): number | null => {
  const n = Number(val);
  return Number.isFinite(n) ? n : null;
};

// Loosely-typed inbound event as received from SDKs (both snake_case and
// camelCase field variants are accepted).
interface RawEvent {
  timestamp?: unknown;
  team_id?: unknown;
  traceId?: string;
  trace_id?: string;
  spanId?: string;
  span_id?: string;
  parent_span_id?: string;
  callSequence?: number;
  call_sequence?: number;
  requestId?: string;
  request_id?: string;
  provider?: string;
  model?: string;
  stream?: boolean;
  agent?: string;
  agent_name?: string;
  user_id?: string;
  latency_ms?: number;
  usage?: UsageData;
  input_tokens?: number;
  output_tokens?: number;
  total_tokens?: number;
  cached_tokens?: number;
  reasoning_tokens?: number;
  accepted_prediction_tokens?: number;
  rejected_prediction_tokens?: number;
  metadata?: Record;
  meta?: Record;
  properties?: Record;
  extra?: Record;
  tags?: string[];
  labels?: string[];
  session_id?: string;
  sessionId?: string;
  environment?: string;
  env?: string;
  agentStack?: string[];
  agent_stack?: string[];
  callSite?: Record;
  call_site?: Record;
  call_site_file?: string;
  call_site_line?: number;
  call_site_column?: number;
  call_site_function?: string;
  call_stack?: string[];
  content_capture?: Record;
}

interface NormalizedEvent {
  timestamp: Date;
  ingest_date: string;
  team_id: string;
  trace_id: string;
  span_id: string
| null; + parent_span_id: string | null; + request_id: string | null; + provider: string | null; + call_sequence: number; + model: string; + stream: boolean; + agent: string | null; + agent_name: string | null; + user_id: string | null; + latency_ms: number | null; + usage_input_tokens: number | null; + usage_output_tokens: number | null; + usage_total_tokens: number | null; + usage_cached_tokens: number | null; + usage_reasoning_tokens: number | null; + usage_accepted_prediction_tokens: number | null; + usage_rejected_prediction_tokens: number | null; + metadata: Record | null; + call_site: Record; + agent_stack: string[]; + cost_total: number; + // Hot table fields (lightweight content indicators) + has_content: boolean; + finish_reason: string | null; + tool_call_count: number; + // Content data for warm/cold storage (extracted separately) + content_refs: ContentReference[]; + content_items: ContentToStore[]; + // Deprecated: kept for backward compatibility during migration + content_capture: Record | null; +} + +const normalizeEvent = (raw: RawEvent): NormalizedEvent | null => { + const ts = raw.timestamp; + const teamId = raw.team_id; + const parsedTs = parseDate(ts); + if (!parsedTs) return null; + + const traceId = raw.traceId || raw.trace_id; + const spanId = raw.spanId || raw.span_id; + const parentSpanId = raw.parent_span_id || null; + const callSeqRaw = raw.callSequence ?? 
raw.call_sequence; + if ( + traceId === undefined || + callSeqRaw === undefined || + callSeqRaw === null || + teamId === undefined || + teamId === null + ) { + return null; + } + const callSeq = Number(callSeqRaw); + if (!Number.isInteger(callSeq)) return null; + + const usage: UsageData = raw.usage || { + input_tokens: raw.input_tokens, + output_tokens: raw.output_tokens, + total_tokens: raw.total_tokens, + cached_tokens: raw.cached_tokens, + reasoning_tokens: raw.reasoning_tokens, + accepted_prediction_tokens: raw.accepted_prediction_tokens, + rejected_prediction_tokens: raw.rejected_prediction_tokens, + }; + // Extract agent - metadata.agent takes precedence over top-level agent + const metadata = asObject(raw.metadata, {}); + const effectiveAgent = (metadata.agent as string) || raw.agent || null; + + let agentStack = asArray(raw.agentStack ?? raw.agent_stack, []) as string[]; + if (effectiveAgent) { + const agentVal = String(effectiveAgent); + if (!agentStack.includes(agentVal)) { + agentStack = [agentVal, ...agentStack]; + } + } + const callSite = + raw.callSite || + asObject(raw.call_site, { + file: raw.call_site_file, + line: raw.call_site_line, + column: raw.call_site_column, + function: raw.call_site_function, + stack: asArray(raw.call_stack, []), + }); + + // Extract content for warm/cold storage + const contentCapture = raw.content_capture as ContentCapture | undefined; + const { refs: contentRefs, items: contentItems } = extractContent(contentCapture); + + // Extract lightweight content indicators for hot table + const hasContent = contentRefs.length > 0; + const finishReason = contentCapture?.finish_reason || null; + + // Count tool calls from messages or tool_calls field + let toolCallCount = 0; + if (contentCapture?.messages && Array.isArray(contentCapture.messages)) { + for (const msg of contentCapture.messages) { + const msgObj = msg as Record; + if (msgObj.tool_calls && Array.isArray(msgObj.tool_calls)) { + toolCallCount += 
msgObj.tool_calls.length; + } + } + } + + return { + timestamp: parsedTs, + ingest_date: parsedTs.toISOString().slice(0, 10), + team_id: String(teamId), + trace_id: String(traceId), + span_id: spanId || null, + parent_span_id: parentSpanId, + request_id: raw.requestId || raw.request_id || null, + provider: raw.provider || null, + call_sequence: callSeq, + model: raw.model || "", + stream: Boolean(raw.stream), + agent: agentStack[0] || null, + agent_name: raw.agent_name || null, + user_id: raw.user_id || null, + latency_ms: numberOrNull(raw.latency_ms), + usage_input_tokens: numberOrNull(usage.input_tokens), + usage_output_tokens: numberOrNull(usage.output_tokens), + usage_total_tokens: numberOrNull(usage.total_tokens), + usage_cached_tokens: numberOrNull(usage.cached_tokens), + usage_reasoning_tokens: numberOrNull(usage.reasoning_tokens), + usage_accepted_prediction_tokens: numberOrNull(usage.accepted_prediction_tokens), + usage_rejected_prediction_tokens: numberOrNull(usage.rejected_prediction_tokens), + metadata: buildMetadata(raw as Record), + call_site: callSite as Record, + agent_stack: agentStack, + cost_total: calcCost(raw.model || "", usage), + // Hot table content indicators + has_content: hasContent, + finish_reason: finishReason, + tool_call_count: toolCallCount, + // Content for warm/cold storage + content_refs: contentRefs, + content_items: contentItems, + // Deprecated: kept for backward compatibility + content_capture: raw.content_capture || null, + }; +}; + +const dedupeEvents = (events: NormalizedEvent[]): NormalizedEvent[] => { + const deduped = new Map(); + events.forEach((ev) => { + const key = `${ev.trace_id}||${ev.call_sequence}`; + const existing = deduped.get(key); + if (!existing) { + deduped.set(key, ev); + return; + } + if (existing.timestamp && ev.timestamp && ev.timestamp > existing.timestamp) { + deduped.set(key, ev); + } + }); + return Array.from(deduped.values()); +}; + +const normalizeEvents = (rawEvents: RawEvent[] = []): 
NormalizedEvent[] => { + const normalized: NormalizedEvent[] = []; + rawEvents.forEach((ev) => { + const n = normalizeEvent(ev); + if (n) normalized.push(n); + }); + return dedupeEvents(normalized); +}; + +const getTsdbPool = (): Pool => { + if (_tsdbPool) return _tsdbPool; + const connStr = (process.env.TSDB_PG_URL || "").replace(/\s+/g, ""); + if (connStr) { + _tsdbPool = new Pool({ + connectionString: connStr, + ssl: { rejectUnauthorized: false }, + }); + return _tsdbPool; + } + if ((global as unknown as Record)._ACHO_PG_POOL) { + _tsdbPool = (global as unknown as Record)._ACHO_PG_POOL as Pool; + return _tsdbPool; + } + throw new Error("TSDB pool not available. Set TSDB_PG_URL or initialize _ACHO_PG_POOL."); +}; + +const ensureSchema = async (client?: PoolClient): Promise => { + if (client) { + // Get current schema name for per-schema caching + const schemaResult = await client.query("SELECT current_schema()"); + const schemaName = schemaResult.rows[0]?.current_schema || "public"; + + // Check if this schema is already initialized + if (_schemaReadyByName.has(schemaName)) { + return _schemaReadyByName.get(schemaName); + } + + // Create and cache the initialization promise + const initPromise = (async () => { + try { + await client.query(SCHEMA_SQL); + } catch (err: unknown) { + // Handle race condition - if object already exists, it's fine + const pgError = err as { code?: string }; + if (pgError.code === "23505" || pgError.code === "42P07") { + // 23505 = unique_violation, 42P07 = duplicate_table + console.log(`[tsdb] Schema ${schemaName} already initialized (concurrent request)`); + return; + } + throw err; + } + })(); + + _schemaReadyByName.set(schemaName, initPromise); + + try { + await initPromise; + } catch (err) { + _schemaReadyByName.delete(schemaName); + throw err; + } + return; + } + + if (_schemaReadyPromise) return _schemaReadyPromise; + + const pool = getTsdbPool(); + _schemaReadyPromise = (async () => { + const executor = await pool.connect(); + 
/**
 * Store content items in cold storage (llm_content_store) with deduplication.
 *
 * Rows are keyed by (content_hash, team_id): new hashes are inserted with
 * ref_count = 1; hashes already present get ref_count bumped by one and
 * last_seen_at refreshed. Callers are expected to pass a batch already
 * deduplicated by hash (one item per distinct payload).
 *
 * @param executor - pg client, typically bound to the caller's connection/txn
 * @param teamId   - team the content belongs to (part of the dedupe key)
 * @param items    - content payloads to upsert
 * @returns counts of newly inserted vs. already-present (deduplicated) items
 */
const storeContentCold = async (
  executor: PoolClient,
  teamId: string,
  items: ContentToStore[]
): Promise<{ stored: number; deduplicated: number }> => {
  if (!items.length) return { stored: 0, deduplicated: 0 };

  // Batch upsert: one multi-row INSERT with positional placeholders.
  const cols = ["content_hash", "team_id", "content", "byte_size", "ref_count", "first_seen_at", "last_seen_at"];
  const values: unknown[] = [];
  const placeholders: string[] = [];
  const now = new Date();

  items.forEach((item, idx) => {
    const base = idx * cols.length;
    placeholders.push(`(${cols.map((__, i) => `$${base + i + 1}`).join(", ")})`);
    values.push(item.content_hash, teamId, item.content, item.byte_size, 1, now, now);
  });

  // `(xmax = 0)` is a Postgres trick: xmax is 0 for freshly inserted rows and
  // non-zero for rows touched by ON CONFLICT DO UPDATE, letting us count how
  // many items were true inserts vs. dedup hits in a single round trip.
  const sql = `
    INSERT INTO llm_content_store (${cols.join(", ")})
    VALUES ${placeholders.join(", ")}
    ON CONFLICT (content_hash, team_id)
    DO UPDATE SET
      ref_count = llm_content_store.ref_count + 1,
      last_seen_at = EXCLUDED.last_seen_at
    RETURNING (xmax = 0) AS inserted
  `;

  const result = await executor.query(sql, values);
  const inserted = result.rows.filter((r: { inserted: boolean }) => r.inserted).length;
  const deduplicated = items.length - inserted;

  return { stored: inserted, deduplicated };
};
timestamp: Date; + trace_id: string; + call_sequence: number; + team_id: string; + ref: ContentReference; + }> = []; + + for (const ev of events) { + for (const ref of ev.content_refs) { + allRefs.push({ + timestamp: ev.timestamp, + trace_id: ev.trace_id, + call_sequence: ev.call_sequence, + team_id: ev.team_id, + ref, + }); + } + } + + if (!allRefs.length) return 0; + + const cols = [ + '"timestamp"', + "trace_id", + "call_sequence", + "team_id", + "content_type", + "content_hash", + "byte_size", + "message_count", + "truncated_preview", + ]; + const values: unknown[] = []; + const placeholders: string[] = []; + + allRefs.forEach((item, idx) => { + const base = idx * cols.length; + placeholders.push(`(${cols.map((__, i) => `$${base + i + 1}`).join(", ")})`); + values.push( + item.timestamp, + item.trace_id, + item.call_sequence, + item.team_id, + item.ref.content_type, + item.ref.content_hash, + item.ref.byte_size, + item.ref.message_count || null, + item.ref.truncated_preview || null + ); + }); + + const sql = ` + INSERT INTO llm_event_content (${cols.join(", ")}) + VALUES ${placeholders.join(", ")} + `; + + await executor.query(sql, values); + return allRefs.length; +}; + +const upsertEvents = async (rawEvents: RawEvent[] = [], client?: PoolClient): Promise => { + const events = normalizeEvents(rawEvents); + if (!events.length) { + return { rowsWritten: 0, normalized: 0 }; + } + + // Hot table columns (metrics only, no full content_capture) + const cols = [ + '"timestamp"', + "ingest_date", + "team_id", + "user_id", + "trace_id", + "span_id", + "parent_span_id", + "request_id", + "provider", + "call_sequence", + "model", + "stream", + "agent", + "agent_name", + "latency_ms", + "usage_input_tokens", + "usage_output_tokens", + "usage_total_tokens", + "usage_cached_tokens", + "usage_reasoning_tokens", + "usage_accepted_prediction_tokens", + "usage_rejected_prediction_tokens", + "call_site", + "metadata", + "agent_stack", + "cost_total", + // New lightweight content 
fields + "has_content", + "finish_reason", + "tool_call_count", + // Deprecated: kept for backward compatibility during migration + "content_capture", + ]; + + const values: unknown[] = []; + const placeholders: string[] = []; + events.forEach((ev, idx) => { + const base = idx * cols.length; + placeholders.push(`(${cols.map((__, i) => `$${base + i + 1}`).join(", ")})`); + values.push( + ev.timestamp, + ev.ingest_date, + ev.team_id, + ev.user_id, + ev.trace_id, + ev.span_id, + ev.parent_span_id, + ev.request_id, + ev.provider, + ev.call_sequence, + ev.model, + ev.stream, + ev.agent, + ev.agent_name, + ev.latency_ms, + ev.usage_input_tokens, + ev.usage_output_tokens, + ev.usage_total_tokens, + ev.usage_cached_tokens, + ev.usage_reasoning_tokens, + ev.usage_accepted_prediction_tokens, + ev.usage_rejected_prediction_tokens, + JSON.stringify(ev.call_site || {}), + ev.metadata ? JSON.stringify(ev.metadata) : null, + JSON.stringify(ev.agent_stack || []), + ev.cost_total, + // New fields + ev.has_content, + ev.finish_reason, + ev.tool_call_count, + // Deprecated: store null for new events, keep for backward compat + null + ); + }); + + const sql = ` + INSERT INTO llm_events (${cols.join(", ")}) + VALUES ${placeholders.join(", ")} + ON CONFLICT ("timestamp", trace_id, call_sequence) + DO UPDATE SET + "timestamp" = EXCLUDED."timestamp", + ingest_date = EXCLUDED.ingest_date, + team_id = EXCLUDED.team_id, + user_id = EXCLUDED.user_id, + trace_id = EXCLUDED.trace_id, + span_id = EXCLUDED.span_id, + parent_span_id = EXCLUDED.parent_span_id, + request_id = EXCLUDED.request_id, + provider = EXCLUDED.provider, + model = EXCLUDED.model, + stream = EXCLUDED.stream, + agent = EXCLUDED.agent, + agent_name = EXCLUDED.agent_name, + latency_ms = EXCLUDED.latency_ms, + usage_input_tokens = EXCLUDED.usage_input_tokens, + usage_output_tokens = EXCLUDED.usage_output_tokens, + usage_total_tokens = EXCLUDED.usage_total_tokens, + usage_cached_tokens = EXCLUDED.usage_cached_tokens, + 
usage_reasoning_tokens = EXCLUDED.usage_reasoning_tokens, + usage_accepted_prediction_tokens = EXCLUDED.usage_accepted_prediction_tokens, + usage_rejected_prediction_tokens = EXCLUDED.usage_rejected_prediction_tokens, + call_site = EXCLUDED.call_site, + metadata = EXCLUDED.metadata, + agent_stack = EXCLUDED.agent_stack, + cost_total = EXCLUDED.cost_total, + has_content = EXCLUDED.has_content, + finish_reason = EXCLUDED.finish_reason, + tool_call_count = EXCLUDED.tool_call_count + WHERE EXCLUDED."timestamp" >= llm_events."timestamp" + `; + + const pool = client ? null : getTsdbPool(); + const executor = client || (await pool!.connect()); + + let contentStored = 0; + let contentDeduplicated = 0; + + try { + // 1. Insert into hot table (llm_events) + await executor.query(sql, values); + + // 2. Collect all content items for cold storage (deduplicated across events) + const allContentItems: ContentToStore[] = []; + const seenHashes = new Set(); + const teamId = events[0]?.team_id; + + for (const ev of events) { + for (const item of ev.content_items) { + if (!seenHashes.has(item.content_hash)) { + seenHashes.add(item.content_hash); + allContentItems.push(item); + } + } + } + + // 3. Store content in cold storage (llm_content_store) + if (allContentItems.length > 0 && teamId) { + const coldResult = await storeContentCold(executor, teamId, allContentItems); + contentStored = coldResult.stored; + contentDeduplicated = coldResult.deduplicated; + } + + // 4. 
Store content references in warm storage (llm_event_content) + await storeContentWarm(executor, events); + + } finally { + if (!client && executor && 'release' in executor) { + (executor as PoolClient).release(); + } + } + + return { + rowsWritten: events.length, + normalized: events.length, + received: rawEvents.length, + contentStored, + contentDeduplicated, + }; +}; + +/** + * Retrieve content from cold storage by hash + */ +const getContentByHash = async ( + teamId: string, + contentHash: string, + client?: PoolClient +): Promise => { + const pool = client ? null : getTsdbPool(); + const executor = client || (await pool!.connect()); + + try { + const result = await executor.query( + `SELECT content FROM llm_content_store WHERE content_hash = $1 AND team_id = $2`, + [contentHash, teamId] + ); + return result.rows[0]?.content || null; + } finally { + if (!client && executor && "release" in executor) { + (executor as PoolClient).release(); + } + } +}; + +/** + * Get all content references for an event + */ +const getEventContent = async ( + teamId: string, + traceId: string, + callSequence: number, + client?: PoolClient +): Promise> => { + const pool = client ? 
null : getTsdbPool(); + const executor = client || (await pool!.connect()); + + try { + // Get content references from warm storage + const refsResult = await executor.query( + `SELECT content_type, content_hash, byte_size, message_count, truncated_preview + FROM llm_event_content + WHERE team_id = $1 AND trace_id = $2 AND call_sequence = $3`, + [teamId, traceId, callSequence] + ); + + const refs = refsResult.rows as ContentReference[]; + + // Optionally fetch full content from cold storage + const results: Array = []; + for (const ref of refs) { + const content = await getContentByHash(teamId, ref.content_hash, executor); + results.push({ ...ref, content: content || undefined }); + } + + return results; + } finally { + if (!client && executor && "release" in executor) { + (executor as PoolClient).release(); + } + } +}; + +interface DistinctAgentRecord { + agent: string; + agent_name: string | null; + first_seen: Date; + last_seen: Date; + total_requests: number; + total_cost: number; +} + +/** + * Get all distinct agents from events for a team + * Returns agent identifiers with their first/last seen timestamps and usage stats + */ +const getDistinctAgents = async ( + teamId: string, + options: { + since?: Date; + limit?: number; + } = {}, + client?: PoolClient +): Promise => { + const pool = client ? 
null : getTsdbPool(); + const executor = client || (await pool!.connect()); + + try { + const { since, limit = 100 } = options; + + let sql = ` + SELECT + agent, + MAX(agent_name) as agent_name, + MIN("timestamp") as first_seen, + MAX("timestamp") as last_seen, + COUNT(*) as total_requests, + COALESCE(SUM(cost_total), 0) as total_cost + FROM llm_events + WHERE team_id = $1 + AND agent IS NOT NULL + AND agent != '' + `; + + const params: unknown[] = [teamId]; + + if (since) { + sql += ` AND "timestamp" >= $${params.length + 1}`; + params.push(since); + } + + sql += ` + GROUP BY agent + ORDER BY last_seen DESC + LIMIT $${params.length + 1} + `; + params.push(limit); + + const result = await executor.query(sql, params); + + return result.rows.map((row: Record) => ({ + agent: row.agent as string, + agent_name: row.agent_name as string | null, + first_seen: new Date(row.first_seen as string), + last_seen: new Date(row.last_seen as string), + total_requests: Number(row.total_requests), + total_cost: Number(row.total_cost), + })); + } finally { + if (!client && executor && "release" in executor) { + (executor as PoolClient).release(); + } + } +}; + +export { + normalizeEvent, + normalizeEvents, + ensureSchema, + upsertEvents, + getTsdbPool, + getContentByHash, + getEventContent, + getDistinctAgents, +}; diff --git a/hive/src/services/tsdb/users_schema.sql b/hive/src/services/tsdb/users_schema.sql new file mode 100644 index 00000000..7a9c7257 --- /dev/null +++ b/hive/src/services/tsdb/users_schema.sql @@ -0,0 +1,149 @@ +-- User Authentication Schema for PostgreSQL (Local Development) +-- This schema mirrors the MySQL user tables for local development +-- Run this on your local PostgreSQL/TimescaleDB instance + +-- ============================================================================= +-- USERS TABLE: Core user accounts +-- ============================================================================= +CREATE TABLE IF NOT EXISTS users ( + id SERIAL PRIMARY KEY, + 
email VARCHAR(255) UNIQUE NOT NULL, + password VARCHAR(255), + name VARCHAR(255), + firstname VARCHAR(255), + lastname VARCHAR(255), + -- JWT authentication (TEXT for long JWT tokens) + token TEXT UNIQUE, + salt TEXT, + -- Team association + current_team_id INTEGER, + -- Account status + status VARCHAR(50) DEFAULT 'active', + email_verified BOOLEAN DEFAULT false, + -- Metadata + avatar_url TEXT, + preferences JSONB DEFAULT '{}', + -- Timestamps + created_at TIMESTAMPTZ DEFAULT NOW(), + updated_at TIMESTAMPTZ DEFAULT NOW(), + last_login_at TIMESTAMPTZ +); + +-- Indexes for common lookups +CREATE INDEX IF NOT EXISTS idx_users_email ON users (email); +CREATE INDEX IF NOT EXISTS idx_users_token ON users (token); +CREATE INDEX IF NOT EXISTS idx_users_team ON users (current_team_id); + +-- ============================================================================= +-- DEVELOPERS TABLE: API tokens for programmatic access +-- ============================================================================= +CREATE TABLE IF NOT EXISTS developers ( + id SERIAL PRIMARY KEY, + user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, + team_id INTEGER NOT NULL, + token TEXT UNIQUE NOT NULL, + label VARCHAR(255), + -- System tokens are managed by the platform, not users + "system" BOOLEAN DEFAULT false, + -- Permissions and scope + scopes JSONB DEFAULT '[]', + -- Rate limiting + rate_limit INTEGER DEFAULT 1000, + -- Timestamps + create_time BIGINT DEFAULT EXTRACT(EPOCH FROM NOW())::BIGINT, + last_used_at TIMESTAMPTZ, + expires_at TIMESTAMPTZ, + -- Status + revoked BOOLEAN DEFAULT false, + revoked_at TIMESTAMPTZ +); + +-- Indexes for token lookups +CREATE INDEX IF NOT EXISTS idx_developers_token ON developers (token); +CREATE INDEX IF NOT EXISTS idx_developers_user ON developers (user_id); +CREATE INDEX IF NOT EXISTS idx_developers_team ON developers (team_id); +CREATE INDEX IF NOT EXISTS idx_developers_user_team ON developers (user_id, team_id); + +-- 
-- =============================================================================
-- TEAMS TABLE: Team/Organization accounts
-- =============================================================================
CREATE TABLE IF NOT EXISTS teams (
    id SERIAL PRIMARY KEY,
    name VARCHAR(255) NOT NULL,
    slug VARCHAR(255) UNIQUE,
    -- Billing and subscription
    plan VARCHAR(50) DEFAULT 'free',
    billing_email VARCHAR(255),
    -- Settings
    settings JSONB DEFAULT '{}',
    -- Timestamps
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW()
);

-- =============================================================================
-- TEAM_MEMBERS TABLE: User-Team associations
-- =============================================================================
CREATE TABLE IF NOT EXISTS team_members (
    id SERIAL PRIMARY KEY,
    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    team_id INTEGER NOT NULL REFERENCES teams(id) ON DELETE CASCADE,
    role VARCHAR(50) DEFAULT 'member',
    -- Timestamps
    joined_at TIMESTAMPTZ DEFAULT NOW(),
    UNIQUE(user_id, team_id)
);

CREATE INDEX IF NOT EXISTS idx_team_members_user ON team_members (user_id);
CREATE INDEX IF NOT EXISTS idx_team_members_team ON team_members (team_id);

-- =============================================================================
-- SEED DATA: Default development user and team
-- =============================================================================
-- SECURITY NOTE: the credentials, tokens, and salt below are well-known
-- development defaults published in this repository. They are ONLY for local
-- development databases and must never be loaded into a shared, staging, or
-- production environment.

-- Create a default team
INSERT INTO teams (id, name, slug, plan)
VALUES (1, 'Development Team', 'dev-team', 'enterprise')
ON CONFLICT (id) DO NOTHING;

-- Create a default development user
-- Email: dev@honeycomb.local
-- Password: honeycomb123 (the value below is its bcrypt hash)
INSERT INTO users (id, email, password, name, firstname, lastname, token, salt, current_team_id, status, email_verified)
VALUES (
    1,
    'dev@honeycomb.local',
    '$2b$10$BgXnS6Cg7HwimTzBtsnh0.j8s8.ypWFooW9A.7YbNIC4e94HIFxYu',
    'Development User',
    'Dev',
    'User',
    'dev-token-12345',
    'dev-salt-secret-key',
    1,
    'active',
    true
)
ON CONFLICT (id) DO NOTHING;

-- Create a default API token for the development user
INSERT INTO developers (id, user_id, team_id, token, label, "system")
VALUES (
    1,
    1,
    1,
    'hive_dev_token_abc123xyz',
    'Development API Token',
    false
)
ON CONFLICT (id) DO NOTHING;

-- Add user to team
INSERT INTO team_members (user_id, team_id, role)
VALUES (1, 1, 'admin')
ON CONFLICT (user_id, team_id) DO NOTHING;

-- Reset sequences so future inserts do not collide with the seeded ids
SELECT setval('users_id_seq', COALESCE((SELECT MAX(id) FROM users), 1));
SELECT setval('teams_id_seq', COALESCE((SELECT MAX(id) FROM teams), 1));
SELECT setval('developers_id_seq', COALESCE((SELECT MAX(id) FROM developers), 1));
SELECT setval('team_members_id_seq', COALESCE((SELECT MAX(id) FROM team_members), 1));
+ */ + +import { Server } from 'socket.io'; +import { createAdapter } from '@socket.io/redis-adapter'; +import { Emitter } from '@socket.io/redis-emitter'; +import Redis from 'ioredis'; +import type { Server as HttpServer } from 'http'; + +import initAdenControlSockets, { setUserDbService } from '../services/control/control_sockets'; + +interface ControlEmitter { + emitPolicyUpdate: (teamId: string | number, policyId: string | null, policy: unknown) => void; + emitCommand: (teamId: string | number, command: { action: string; [key: string]: unknown }) => void; + emitAlert: (teamId: string | number, policyId: string | null, alert: unknown) => void; + emitToInstance: (teamId: string | number, instanceId: string, message: unknown) => boolean; + getConnectedCount: (teamId: string | number) => number; + getConnectedInstances: (teamId: string | number) => Array<{ + instance_id: string; + policy_id: string | null; + connected_at: string; + last_heartbeat: string; + }>; + getTotalConnectedCount: () => number; +} + +interface MockEmitter { + of: () => { + to: () => { emit: () => void }; + emit: () => void; + }; +} + +/** + * Initialize WebSockets for the control plane + * @param server - HTTP server instance + * @returns Promise<{io: Server, controlEmitter: Object}> + */ +async function initializeSockets(server: HttpServer): Promise<{ io: Server; controlEmitter: ControlEmitter }> { + // Create Socket.IO server + const io = new Server(server, { + cors: { + origin: '*', + methods: ['GET', 'POST'], + }, + transports: ['websocket', 'polling'], + }); + + let controlEmitter: ControlEmitter; + + // Try to setup Redis adapter for scaling + if (process.env.REDIS_URL) { + try { + const pubClient = new Redis(process.env.REDIS_URL); + const subClient = pubClient.duplicate(); + + await Promise.all([ + new Promise((resolve) => pubClient.on('connect', resolve)), + new Promise((resolve) => subClient.on('connect', resolve)), + ]); + + io.adapter(createAdapter(pubClient, subClient)); + + // 
Create Redis emitter for cross-instance communication + const redisEmitter = new Emitter(pubClient); + controlEmitter = initAdenControlSockets(io, redisEmitter as unknown as { of: (namespace: string) => { to: (room: string) => { emit: (event: string, payload: unknown) => void }; emit: (event: string, payload: unknown) => void } }); + + console.log('[Sockets] Redis adapter connected'); + } catch (err) { + console.warn('[Sockets] Redis connection failed, using local adapter:', (err as Error).message); + // Create a mock emitter for local development + const mockEmitter: MockEmitter = { + of: () => ({ + to: () => ({ emit: () => {} }), + emit: () => {}, + }), + }; + controlEmitter = initAdenControlSockets(io, mockEmitter as unknown as { of: (namespace: string) => { to: (room: string) => { emit: (event: string, payload: unknown) => void }; emit: (event: string, payload: unknown) => void } }); + } + } else { + console.warn('[Sockets] No REDIS_URL configured, using local adapter'); + // Create a mock emitter for local development + const mockEmitter: MockEmitter = { + of: () => ({ + to: () => ({ emit: () => {} }), + emit: () => {}, + }), + }; + controlEmitter = initAdenControlSockets(io, mockEmitter as unknown as { of: (namespace: string) => { to: (room: string) => { emit: (event: string, payload: unknown) => void }; emit: (event: string, payload: unknown) => void } }); + } + + return { io, controlEmitter }; +} + +export { initializeSockets, setUserDbService }; diff --git a/hive/src/types/acho-inc-administration.d.ts b/hive/src/types/acho-inc-administration.d.ts new file mode 100644 index 00000000..6e40ae19 --- /dev/null +++ b/hive/src/types/acho-inc-administration.d.ts @@ -0,0 +1,123 @@ +declare module '@acho-inc/administration' { + import { Pool } from 'pg'; + import { Strategy } from 'passport-jwt'; + + export interface MySQLPoolConfig { + host?: string; + port?: number; + user?: string; + password?: string; + database?: string; + ssl?: { + ca?: string | Buffer; + 
key?: string | Buffer; + cert?: string | Buffer; + } | null; + } + + export interface UserDbServiceConfig { + /** MySQL connection pool (for production) */ + mysqlPool?: any; + /** PostgreSQL connection pool (for local development) */ + pgPool?: Pool; + /** Database type: 'mysql' or 'postgres' */ + dbType?: 'mysql' | 'postgres'; + /** Redis client for caching (optional) */ + redisClient?: any; + /** Table name mapping */ + tables: { + USER: string; + DEVELOPERS?: string; + }; + /** Service account salt lookup function (optional) */ + findServiceAccountSalt?: (token: string) => Promise; + } + + export interface DevTokenObject { + id: number; + user_id: number; + team_id: number; + token: string; + label: string; + system?: boolean; + create_time: number; + } + + export interface LoginResult { + token: string; + email: string; + firstname?: string; + lastname?: string; + name?: string; + current_team_id?: number; + created_at?: Date | number; + } + + export interface TokenResult { + token: string; + salt: string; + } + + export interface LoginOptions { + jwtSecret: string; + expiresIn?: string; + } + + export interface RegisterOptions extends LoginOptions { + defaultTeamId?: number; + } + + export interface UserData { + email: string; + password: string; + name?: string; + firstname?: string; + lastname?: string; + } + + export interface RegisterResult { + id: number; + token: string; + email: string; + name?: string; + firstname?: string; + lastname?: string; + current_team_id?: number; + created_at?: Date; + } + + export interface UserDbService { + findSaltByToken: (token: string) => Promise; + findById: (id: number) => Promise; + findByToken: (token: string) => Promise; + findByEmail: (email: string) => Promise; + getLatestUserDevToken: (user: { id: number; current_team_id: number }) => Promise; + // Auth methods + verifyPassword: (password: string, hash: string) => Promise; + hashPassword: (password: string) => Promise; + generateToken: (user: any, options: 
LoginOptions) => Promise; + updateUserToken: (userId: number, token: string, salt: string) => Promise; + login: (email: string, password: string, options: LoginOptions) => Promise; + register: (userData: UserData, options: RegisterOptions) => Promise; + dbType?: 'mysql' | 'postgres'; + } + + export interface PassportStrategyConfig { + findSaltByToken: (token: string) => Promise; + jwtSecret?: string; + } + + export const auth: { + createPassportStrategy: (config: PassportStrategyConfig) => Strategy; + verifyToken: (token: string, secret: string) => Promise; + }; + + export const database: { + createMySQLPool: (config: MySQLPoolConfig) => any; + createPGPool: (connectionString: string) => Pool; + }; + + export const models: { + createUserDbService: (config: UserDbServiceConfig) => UserDbService; + }; +} diff --git a/hive/tsconfig.json b/hive/tsconfig.json new file mode 100644 index 00000000..29a905f7 --- /dev/null +++ b/hive/tsconfig.json @@ -0,0 +1,26 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "commonjs", + "lib": ["ES2022"], + "outDir": "./dist", + "rootDir": "./src", + "strict": false, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "declaration": false, + "sourceMap": true, + "moduleResolution": "node", + "allowSyntheticDefaultImports": true, + "noImplicitAny": false, + "strictNullChecks": false, + "noUnusedLocals": false, + "noUnusedParameters": false, + "useUnknownInCatchVariables": false, + "typeRoots": ["./node_modules/@types", "./src/types"] + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist", "tests"] +} diff --git a/honeycomb/.env.example b/honeycomb/.env.example new file mode 100644 index 00000000..a73b321a --- /dev/null +++ b/honeycomb/.env.example @@ -0,0 +1,13 @@ +# Frontend Environment Variables +# Copy this file to .env and update values as needed +# Or run `npm run generate:env` from the root to generate from config.yaml + +# Hive API 
URL (handles all backend endpoints: auth, user, IAM, agent control) +VITE_API_URL=http://localhost:4000 + +# Application settings +VITE_APP_NAME=Beeline +VITE_APP_ENV=development + +# Google OAuth (optional) +VITE_GOOGLE_OAUTH_ID=your-google-oauth-client-id diff --git a/honeycomb/Dockerfile b/honeycomb/Dockerfile new file mode 100644 index 00000000..aec1a827 --- /dev/null +++ b/honeycomb/Dockerfile @@ -0,0 +1,35 @@ +# Build stage +FROM node:20-alpine AS builder + +WORKDIR /app + +# Build argument for API URL (Vite needs this at build time) +ARG VITE_API_URL=http://localhost:4000 +ENV VITE_API_URL=$VITE_API_URL + +# Copy package files +COPY package*.json ./ + +# Install dependencies +RUN npm install + +# Copy source code +COPY . . + +# Build the application +RUN npm run build + +# Production stage +FROM nginx:alpine AS production + +# Copy custom nginx config +COPY nginx.conf /etc/nginx/conf.d/default.conf + +# Copy built assets from builder +COPY --from=builder /app/dist /usr/share/nginx/html + +# Expose port +EXPOSE 3000 + +# Start nginx +CMD ["nginx", "-g", "daemon off;"] diff --git a/honeycomb/Dockerfile.dev b/honeycomb/Dockerfile.dev new file mode 100644 index 00000000..7bec8d10 --- /dev/null +++ b/honeycomb/Dockerfile.dev @@ -0,0 +1,19 @@ +# Development Dockerfile with hot reload +FROM node:20-alpine + +WORKDIR /app + +# Copy package files +COPY package*.json ./ + +# Install dependencies +RUN npm install + +# Copy source code +COPY . . 
+ +# Expose port +EXPOSE 3000 + +# Start development server with hot reload +CMD ["npm", "run", "dev"] diff --git a/honeycomb/components.json b/honeycomb/components.json new file mode 100644 index 00000000..d29aef0b --- /dev/null +++ b/honeycomb/components.json @@ -0,0 +1,20 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "default", + "rsc": false, + "tsx": true, + "tailwind": { + "config": "tailwind.config.js", + "css": "src/styles/index.css", + "baseColor": "slate", + "cssVariables": true, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" + } +} diff --git a/honeycomb/index.html b/honeycomb/index.html new file mode 100644 index 00000000..9bcf24e7 --- /dev/null +++ b/honeycomb/index.html @@ -0,0 +1,17 @@ + + + + + + + + + + + Beeline + + +
+ + + diff --git a/honeycomb/nginx.conf b/honeycomb/nginx.conf new file mode 100644 index 00000000..4ed4e595 --- /dev/null +++ b/honeycomb/nginx.conf @@ -0,0 +1,42 @@ +server { + listen 3000; + server_name localhost; + root /usr/share/nginx/html; + index index.html; + + # Gzip compression + gzip on; + gzip_vary on; + gzip_min_length 1024; + gzip_proxied expired no-cache no-store private auth; + gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml application/javascript; + + # Security headers + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-XSS-Protection "1; mode=block" always; + + # Handle SPA routing - serve index.html for all routes + location / { + try_files $uri $uri/ /index.html; + } + + # Proxy API requests to backend + location /api { + proxy_pass http://hive:4000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + } + + # Cache static assets + location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2)$ { + expires 1y; + add_header Cache-Control "public, immutable"; + } +} diff --git a/honeycomb/package.json b/honeycomb/package.json new file mode 100644 index 00000000..fd5e87e5 --- /dev/null +++ b/honeycomb/package.json @@ -0,0 +1,67 @@ +{ + "name": "honeycomb", + "version": "0.1.0", + "private": true, + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc --noEmit && vite build", + "lint": "eslint . 
--ext ts,tsx --report-unused-disable-directives --max-warnings 0", + "preview": "vite preview", + "test": "vitest", + "test:coverage": "vitest run --coverage", + "clean": "rm -rf dist node_modules" + }, + "dependencies": { + "@hookform/resolvers": "^5.2.2", + "@radix-ui/react-avatar": "^1.1.11", + "@radix-ui/react-dialog": "^1.1.15", + "@radix-ui/react-dropdown-menu": "^2.1.16", + "@radix-ui/react-label": "^2.1.8", + "@radix-ui/react-popover": "^1.1.15", + "@radix-ui/react-progress": "^1.1.8", + "@radix-ui/react-scroll-area": "^1.2.10", + "@radix-ui/react-select": "^2.2.6", + "@radix-ui/react-separator": "^1.1.8", + "@radix-ui/react-slot": "^1.2.4", + "@radix-ui/react-switch": "^1.2.6", + "@radix-ui/react-tabs": "^1.1.13", + "@radix-ui/react-tooltip": "^1.2.8", + "@tanstack/react-query": "^5.90.16", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "date-fns": "^4.1.0", + "lucide-react": "^0.562.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-hook-form": "^7.71.0", + "react-markdown": "^10.1.0", + "react-router-dom": "^6.21.0", + "react-vega": "^8.0.0", + "recharts": "^3.6.0", + "socket.io-client": "^4.8.3", + "tailwind-merge": "^3.4.0", + "tailwindcss-animate": "^1.0.7", + "vega": "^6.2.0", + "vega-embed": "^7.1.0", + "vega-lite": "^6.4.1", + "zod": "^4.3.5", + "zustand": "^5.0.10" + }, + "devDependencies": { + "@types/react": "^18.2.43", + "@types/react-dom": "^18.2.17", + "@typescript-eslint/eslint-plugin": "^6.14.0", + "@typescript-eslint/parser": "^6.14.0", + "@vitejs/plugin-react": "^4.2.1", + "autoprefixer": "^10.4.23", + "eslint": "^8.55.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.5", + "postcss": "^8.5.6", + "tailwindcss": "^3.4.19", + "typescript": "^5.3.0", + "vite": "^5.0.8", + "vitest": "^1.1.0" + } +} diff --git a/honeycomb/postcss.config.js b/honeycomb/postcss.config.js new file mode 100644 index 00000000..2e7af2b7 --- /dev/null +++ b/honeycomb/postcss.config.js @@ -0,0 +1,6 @@ +export 
default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +} diff --git a/honeycomb/public/favicon.svg b/honeycomb/public/favicon.svg new file mode 100644 index 00000000..41fb817d --- /dev/null +++ b/honeycomb/public/favicon.svg @@ -0,0 +1,4 @@ + + + + diff --git a/honeycomb/src/App.tsx b/honeycomb/src/App.tsx new file mode 100644 index 00000000..3f1a36cc --- /dev/null +++ b/honeycomb/src/App.tsx @@ -0,0 +1,40 @@ +import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom'; +import { AgentControlLayout } from './components/agent-control/AgentControlLayout'; +import { DataPanel } from './components/agent-control/DataPanel'; +import { AnalyticsPanel } from './components/agent-control/AnalyticsPanel'; +import { CostControls } from './components/agent-control/CostControls'; +import { WorkersPanel } from './components/agent-control/WorkersPanel'; +import { NotFoundPage } from './pages/NotFoundPage'; +import { LoginPage } from './pages/LoginPage'; +import { RegisterPage } from './pages/RegisterPage'; +import { ProtectedRoute } from './components/auth/ProtectedRoute'; + +export function App() { + return ( + + + {/* Public routes */} + } /> + } /> + } /> + } /> + + {/* Protected routes */} + } /> + + + + } + > + } /> + } /> + } /> + } /> + + } /> + + + ); +} diff --git a/honeycomb/src/assets/aden-icon.png b/honeycomb/src/assets/aden-icon.png new file mode 100644 index 0000000000000000000000000000000000000000..08e94456a876e7d2d3136f5d10af3ceb1d22d712 GIT binary patch literal 2027 zcmVaE=UlwF&@JuVUUp{goG&SRx!v|* zzs}5^bI!~#2uP}?IVncRY6WL6F(GOMKq=h&A_~J0Fy_e5-T?vatk^x(n{IG|`~o)J zH>RqaHVGE17g973dcjz?5YX7)x$Wq*-ageEQm=knBV4yxw&)}fy5k67`=^pWsq|z` zb7|4tvAs;dM?e?_WAMdjQGCCX-aM4^`?+qWRy8HvSgaLGNVP z1ty|}^bPAOplbPiFp;jyH>LbL(00y&u1hz;1hkNDGF8jwdtdb%m%zYa23mf&0&N}F z<@E^Y3XSO@vS3~U_I&sXe1B*??A*EnbVUp4!y{u5B~bp3Ei2&ciFf7oEYJ-tq+_=j zMNt0P6YqHssyd;C^tN+1Aj%y4q(&?8T1dwyU{jua^99B0GDFekLGI9zOAvJq?pX~b 
zv*MtdSlOcW`#=LQ=z$-|(k0J8R4GXm!_#F?dUJY}Xd@jtck%DYi?1v%gRbsDr4O%K zNk>pTveM89-E>$er*W;P`#=?124}uH@;j`3=NO!n9}IT;Q3E4*D#vzKQGHl|=HOVE zdcF))f|4-Mk}97gd!czVP7obvyZ|l7{{$5n5qlrbjC#{LoxpiFhFbHL|N3jsDw9sJ z_dLNddF|WBA7|rOo60_gh@34BwZm36;;)cbPFNr7DCc3 zgrr-Daf=;rZdqMdXsRPf4$N%mtyE9sTxv*GK{-cS~kwgqjZqwwzzt*0@&0BA8~ z!!8|LI0-KdT1elyC<4PjG+ew%Z;)6G^)E-6ar<^ zF-elDm=ow{uyud%@h?Ek3}p{;xWXd{I8Gn<@&XVuN161hfL*({f|a`TP*5g4XfUSh z@*oi5piKJ0xxp$yaau-%gE8e&QQKfpCB}5`M0ikPq=hJxPEX?MP$oU-7)`@Lc#!mv z&|xRY>0(UxUX@3jzK|sHsQOVgBQZvL{l*uyZcX{@GBupBOge6r83Yu*L*vsDHW*a> zqqL<5-JAzoyN<_XU}2~3!%sF!=jpwD!ME)2kIBHgI$ZLk8eEitXSSF$yHHw0aZx5w z@+p_X57@6#@sG$F7##KqpQpR5s&=hUb|tI8P+&DPfc=#@K189c^= zj_Ar_bZi{X$Y~t5_{xehnP`dDMg=ENUxj_2p9hs_e_9hp+c3_K2i=(b2hh`h+iT&i z8uVgD9MR>2e*k{G8|&tSu7xWk*nOjDKR2M$X;Vf?&js#NgGrEt>;*9Ao=}i^8yLKo zjr+}~qR_vbQy}va$jIlHjHqhOhZg9LQ z8x!L#Ktvt^tPx+Cf8&Mf!`B5s5{Rhc^mT4uj2HKVGmN{Nfru=!mGAejWB$dhzkYUR zFM8`K25c1&kwSoI?C;!u(EtAfVc5Iz;(fpvP2r-2<9W^E0qIV9^AH~&uag7414I;X zc-&osUbAmbNVs@YIM-c6;tfak*eD#QXb*oYXO<1ckH+K6a4Z1AIMCZzQBh9e2%G8` zq^g@X3E+EaI%fn9m$R_m^9|`oy=hA6J}BR1v0Wg + + diff --git a/honeycomb/src/assets/aden-logo.svg b/honeycomb/src/assets/aden-logo.svg new file mode 100644 index 00000000..38f30b6d --- /dev/null +++ b/honeycomb/src/assets/aden-logo.svg @@ -0,0 +1,15 @@ + + + + + + + + + + + diff --git a/honeycomb/src/components/.gitkeep b/honeycomb/src/components/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/honeycomb/src/components/ErrorBoundary.tsx b/honeycomb/src/components/ErrorBoundary.tsx new file mode 100644 index 00000000..1011e485 --- /dev/null +++ b/honeycomb/src/components/ErrorBoundary.tsx @@ -0,0 +1,50 @@ +import { Component, ErrorInfo, ReactNode } from 'react' + +interface Props { + children: ReactNode +} + +interface State { + hasError: boolean + error?: Error +} + +export class ErrorBoundary extends Component { + constructor(props: Props) { + super(props) + this.state = { hasError: false } + } + + static 
getDerivedStateFromError(error: Error): State { + return { hasError: true, error } + } + + componentDidCatch(error: Error, errorInfo: ErrorInfo) { + console.error('Uncaught error:', error, errorInfo) + } + + render() { + if (this.state.hasError) { + return ( +
+
+

+ Something went wrong +

+

+ {this.state.error?.message || 'An unexpected error occurred'} +

+ +
+
+ ) + } + + return this.props.children + } +} diff --git a/honeycomb/src/components/agent-control/AgentControlLayout.tsx b/honeycomb/src/components/agent-control/AgentControlLayout.tsx new file mode 100644 index 00000000..5c538926 --- /dev/null +++ b/honeycomb/src/components/agent-control/AgentControlLayout.tsx @@ -0,0 +1,254 @@ +import { useEffect, useState } from 'react' +import { Outlet, NavLink, useNavigate, useLocation } from 'react-router-dom' +import { useControlSocket } from '@/hooks/useControlSocket' +import { useAgentControlStore } from '@/stores/agentControlStore' +import { useUserStore } from '@/stores/userStore' +import { NotificationBell } from './shared/NotificationBell' +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from '@/components/ui/dropdown-menu' +import { LiveIndicator } from './shared/LiveIndicator' +import { UserAvatar } from '@/components/user/UserAvatar' +import { Button } from '@/components/ui/button' +import adenLogo from '@/assets/aden-logo.svg' +import adenIcon from '@/assets/aden-icon.png' +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from '@/components/ui/tooltip' +import { cn } from '@/lib/utils' +import { + Database, + BarChart3, + DollarSign, + Users, + PanelLeftClose, + PanelLeft, + Settings, + Sparkles, +} from 'lucide-react' +import { SettingsModal } from '@/components/settings/SettingsModal' + +const navItems = [ + { value: 'agents', label: 'Agents', path: '/agents', icon: Users }, + { value: 'data', label: 'Logs', path: '/data', icon: Database }, + { value: 'analytics', label: 'Performance Dashboard', path: '/analytics', icon: BarChart3 }, + { value: 'cost-control', label: 'Cost Control', path: '/cost-control', icon: DollarSign }, +] + +/** + * Main layout for Agent Control with sidebar navigation and socket lifecycle. 
+ */ +export function AgentControlLayout() { + const { connect, disconnect, isConnected } = useControlSocket() + const hasActiveAgents = useAgentControlStore((state) => state.eventsBuffer.length > 0) + const user = useUserStore((state) => state.user) + const fullName = useUserStore((state) => state.fullName()) + const navigate = useNavigate() + const location = useLocation() + const [sidebarCollapsed, setSidebarCollapsed] = useState(false) + + // Settings modal controlled by URL hash + const settingsOpen = location.hash === '#settings' + const handleSettingsClose = (open: boolean) => { + if (!open) { + navigate(location.pathname, { replace: true }) + } + } + + // Connect socket on mount + useEffect(() => { + connect() + return () => disconnect() + }, [connect, disconnect]) + + const toggleSidebar = () => setSidebarCollapsed((prev) => !prev) + + return ( +
+ {/* Sidebar - full height */} + + + {/* Right side - header bar + content */} +
+ {/* Top bar with connection status + notifications */} +
+ + + {/* Connection status */} +
+ + {isConnected ? 'Connected' : 'Disconnected'} +
+ + +
+ + {/* Content area */} +
+
+ +
+
+
+ + +
+ ) +} diff --git a/honeycomb/src/components/agent-control/AnalyticsPanel.tsx b/honeycomb/src/components/agent-control/AnalyticsPanel.tsx new file mode 100644 index 00000000..9ff60d4d --- /dev/null +++ b/honeycomb/src/components/agent-control/AnalyticsPanel.tsx @@ -0,0 +1,335 @@ +import { useMemo } from 'react' +import { Card } from '@/components/ui/card' +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select' +import { Skeleton } from '@/components/ui/skeleton' +import { KpiCard } from './shared/KpiCard' +import { LiveIndicator } from './shared/LiveIndicator' +import { VegaLiteChart } from './charts/VegaLiteChart' +import { useAnalytics } from '@/hooks/queries/useAnalytics' +import { useAgentControlStore, type TimeRange } from '@/stores/agentControlStore' +import { transformAnalyticsData, type CostByModelData } from './charts/transformers' +import { + createCostTrendSpec, + createTokenUsageSpec, + createCostByModelSpec, + createLatencyDistributionSpec, +} from './charts/specs' +import type { RawJsonData, KPIValues } from '@/types/agentControl' + +const timeRangeOptions: { value: TimeRange; label: string }[] = [ + { value: 'today', label: 'Today' }, + { value: 'week', label: 'Last Week' }, + { value: 'twoWeeks', label: 'Last 2 Weeks' }, + { value: 'month', label: 'Last Month' }, + { value: 'all', label: 'All Time' }, +] + +// Helper to safely extract KPI values from raw API response +function extractKpis(data: RawJsonData | undefined): KPIValues { + const defaults: KPIValues = { + totalCost: 0, + projectedMonthlyCost: 0, + totalRequests: 0, + totalTokens: 0, + successRate: 0.99, + avgLatency: 0, + cacheSavings: 0, + } + + if (!data) return defaults + + // Handle new analytics response shape + const analyticsData = data as any + if (analyticsData?.analytics?.summary) { + const summary = analyticsData.analytics.summary + return { + totalCost: Number(summary.total_cost || 0), + projectedMonthlyCost: 
Number(summary.total_cost || 0) * 30, + totalRequests: Number(summary.total_requests || 0), + totalTokens: Number(summary.total_tokens || 0), + successRate: 0.99, // Not provided in new API + avgLatency: Number(summary.avg_latency_ms || 0), + cacheSavings: Number(summary.cache_savings || 0), + } + } + + // Fallback to old response shapes + const kpis = (data.kpis || data.summary || data) as Record + + return { + totalCost: Number(kpis.totalCost || kpis.total_cost || 0), + projectedMonthlyCost: Number(kpis.projectedMonthlyCost || kpis.projected_cost || 0), + totalRequests: Number(kpis.totalRequests || kpis.total_requests || 0), + totalTokens: Number(kpis.totalTokens || kpis.total_tokens || 0), + successRate: Number(kpis.successRate || kpis.success_rate || 0.99), + avgLatency: Number(kpis.avgLatency || kpis.avg_latency || 0), + cacheSavings: Number(kpis.cacheSavings || kpis.cache_savings || 0), + } +} + +/** + * Main analytics dashboard with KPIs and VegaLite charts. + */ +export function AnalyticsPanel() { + const timeRange = useAgentControlStore((state) => state.timeRange) + const setTimeRange = useAgentControlStore((state) => state.setTimeRange) + const hasActiveAgents = useAgentControlStore((state) => state.eventsBuffer.length > 0) + + const { data: analytics, isLoading } = useAnalytics() + + const kpis = extractKpis(analytics as RawJsonData | undefined) + + // Transform API data to chart-ready format + const chartData = useMemo( + () => transformAnalyticsData(analytics), + [analytics] + ) + + // Create chart specs with memoization + const costTrendSpec = useMemo( + () => (chartData.costTrends.length > 0 ? createCostTrendSpec(chartData.costTrends) : null), + [chartData.costTrends] + ) + + const tokenUsageSpec = useMemo( + () => (chartData.tokenUsage.length > 0 ? createTokenUsageSpec(chartData.tokenUsage) : null), + [chartData.tokenUsage] + ) + + const costByModelSpec = useMemo( + () => (chartData.costByModel.length > 0 ? 
createCostByModelSpec(chartData.costByModel) : null), + [chartData.costByModel] + ) + + const latencyDistributionSpec = useMemo( + () => + chartData.latencyDistribution.length > 0 + ? createLatencyDistributionSpec(chartData.latencyDistribution) + : null, + [chartData.latencyDistribution] + ) + + const formatCurrency = (value: number) => + new Intl.NumberFormat('en-US', { + style: 'currency', + currency: 'USD', + minimumFractionDigits: 0, + maximumFractionDigits: 2, + }).format(value) + + const formatNumber = (value: number) => { + if (value >= 1000000) return `${(value / 1000000).toFixed(1)}M` + if (value >= 1000) return `${(value / 1000).toFixed(1)}K` + return value.toLocaleString() + } + + const formatPercent = (value: number) => `${(value * 100).toFixed(1)}%` + + return ( +
+ {/* Header */} +
+
+

Analytics

+ +
+ +
+ + {/* KPI Grid */} +
+ + + + + } + /> + + + + } + /> + + + + } + /> + + + + } + /> + + + + } + /> +
+ + {/* Charts Grid */} + {isLoading ? ( +
+ {[...Array(4)].map((_, i) => ( + + + + + ))} +
+ ) : ( +
+ {/* Cost Trend Chart */} + +

Cost Trend

+ {costTrendSpec ? ( + + ) : ( +
+ No cost data available +
+ )} +
+ + {/* Token Usage Chart */} + +

Token Usage

+ {tokenUsageSpec ? ( + + ) : ( +
+ No token data available +
+ )} +
+ + {/* Cost by Model Chart */} + +

Cost by Model

+ {costByModelSpec ? ( +
+ +
+ {chartData.costByModel.map((model: CostByModelData) => ( +
+
+ {model.name} + {model.value}% + ${model.cost.toFixed(4)} +
+ ))} +
+
+ ) : ( +
+ No model data available +
+ )} + + + {/* Latency Distribution Chart */} + +

Latency Distribution

+ {latencyDistributionSpec ? ( + + ) : ( +
+ No latency data available +
+ )} +
+
+ )} +
+ ) +} diff --git a/honeycomb/src/components/agent-control/CostControls.tsx b/honeycomb/src/components/agent-control/CostControls.tsx new file mode 100644 index 00000000..5b5b2574 --- /dev/null +++ b/honeycomb/src/components/agent-control/CostControls.tsx @@ -0,0 +1,266 @@ +import { useState, useMemo } from 'react' +import { Button } from '@/components/ui/button' +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select' +import { Skeleton } from '@/components/ui/skeleton' +import { BudgetCard } from './shared/BudgetCard' +import { KpiCard } from './shared/KpiCard' +import { AddBudgetDialog } from './budget/AddBudgetDialog' +import { BudgetDetailPanel } from './budget/BudgetDetailPanel' +import { useBudgets } from '@/hooks/queries/useBudgets' +import type { BudgetType, BudgetConfig, RawJsonData } from '@/types/agentControl' + +const budgetTypeOptions: { value: BudgetType | 'all'; label: string }[] = [ + { value: 'all', label: 'All Types' }, + { value: 'global', label: 'Global' }, + { value: 'agent', label: 'Agent' }, + { value: 'customer', label: 'Customer' }, + { value: 'feature', label: 'Feature' }, + { value: 'tag', label: 'Tag' }, +] + +// Extract budgets from API response (handles policy-based structure) +function extractBudgets(data: RawJsonData | undefined): BudgetConfig[] { + if (!data) return [] + if (Array.isArray(data)) return data as BudgetConfig[] + if (data.policies && Array.isArray(data.policies)) { + const allBudgets: BudgetConfig[] = [] + for (const policy of data.policies as Array<{ budgets?: BudgetConfig[] }>) { + if (policy.budgets) allBudgets.push(...policy.budgets) + } + return allBudgets + } + if (data.budgets && Array.isArray(data.budgets)) { + return data.budgets as BudgetConfig[] + } + return [] +} + +/** + * Budget management panel with summary cards and budget list. 
+ */ +export function CostControls() { + const [typeFilter, setTypeFilter] = useState('all') + const [addDialogOpen, setAddDialogOpen] = useState(false) + const [selectedBudget, setSelectedBudget] = useState(null) + const [detailPanelOpen, setDetailPanelOpen] = useState(false) + + const handleBudgetClick = (budget: BudgetConfig) => { + setSelectedBudget(budget) + setDetailPanelOpen(true) + } + + const { data: rawData, isLoading, error } = useBudgets() + + // Parse budgets from API response + const budgets = useMemo( + () => extractBudgets(rawData as RawJsonData | undefined), + [rawData] + ) + + // Compute summary stats + const summary = useMemo(() => { + if (!budgets.length) return null + return { + totalBudget: budgets.reduce((sum: number, b: BudgetConfig) => sum + b.limit, 0), + totalSpent: budgets.reduce((sum: number, b: BudgetConfig) => sum + b.spent, 0), + activeAlerts: budgets.filter((b: BudgetConfig) => + b.alerts.some((a) => a.enabled && b.spent / b.limit >= a.threshold / 100) + ).length, + budgetsAtRisk: budgets.filter((b: BudgetConfig) => b.spent / b.limit >= 0.9).length, + } + }, [budgets]) + + // Filter budgets by type + const filteredBudgets = useMemo( + () => budgets.filter((b: BudgetConfig) => typeFilter === 'all' || b.type === typeFilter), + [budgets, typeFilter] + ) + + const formatCurrency = (value: number) => + new Intl.NumberFormat('en-US', { + style: 'currency', + currency: 'USD', + minimumFractionDigits: 0, + maximumFractionDigits: 0, + }).format(value) + + if (error) { + return ( +
+

Failed to load budgets

+ +
+ ) + } + + return ( +
+ {/* Summary Cards */} +
+ + + + } + /> + 0 + ? { + value: Math.round((summary.totalSpent / summary.totalBudget) * 100), + direction: summary.totalSpent / summary.totalBudget > 0.8 ? 'up' : 'down', + } + : undefined + } + icon={ + + + + } + /> + 0} + icon={ + + + + } + /> + 0} + icon={ + + + + } + /> +
+ + {/* Controls */} +
+

Budgets

+
+ + +
+
+ + {/* Budget List */} + {isLoading ? ( +
+ {[...Array(4)].map((_, i) => ( + + ))} +
+ ) : filteredBudgets.length === 0 ? ( +
+

No budgets found

+ +
+ ) : ( +
+ {filteredBudgets.map((budget: BudgetConfig) => ( + handleBudgetClick(budget)} + /> + ))} +
+ )} + + {/* Add Budget Dialog */} + + + {/* Budget Detail Panel */} + +
+ ) +} diff --git a/honeycomb/src/components/agent-control/DataPanel.tsx b/honeycomb/src/components/agent-control/DataPanel.tsx new file mode 100644 index 00000000..804018f5 --- /dev/null +++ b/honeycomb/src/components/agent-control/DataPanel.tsx @@ -0,0 +1,345 @@ +import { useState, useMemo } from 'react' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import { Button } from '@/components/ui/button' +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select' +import { Input } from '@/components/ui/input' +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from '@/components/ui/table' +import { Skeleton } from '@/components/ui/skeleton' +import { Badge } from '@/components/ui/badge' +import { LiveIndicator } from './shared/LiveIndicator' +import { useLogs } from '@/hooks/queries/useLogs' +import { useAgentControlStore } from '@/stores/agentControlStore' +import { cn } from '@/lib/utils' + +type ViewMode = 'metrics' | 'requests' + +const dataTypeOptions = [ + { value: 'all', label: 'All Types' }, + { value: 'llm_request', label: 'LLM Requests' }, + { value: 'tool_call', label: 'Tool Calls' }, + { value: 'error', label: 'Errors' }, +] + +// Define log entry type +interface LogEntry { + id?: string + timestamp: string + type?: string + agent?: string + model?: string + success?: boolean + cost?: number + latency?: number + [key: string]: unknown +} + +/** + * Logs viewer with filtering and export capabilities. 
+ */ +export function DataPanel() { + const [viewMode, setViewMode] = useState('requests') + const [dataType, setDataType] = useState('all') + const [searchQuery, setSearchQuery] = useState('') + const [expandedRow, setExpandedRow] = useState(null) + + const hasActiveAgents = useAgentControlStore((state) => state.eventsBuffer.length > 0) + + // Get date range (last 24 hours) + const endDate = useMemo(() => new Date().toISOString(), []) + const startDate = useMemo(() => { + const d = new Date() + d.setHours(d.getHours() - 24) + return d.toISOString() + }, []) + + const { data: logsData, isLoading, error, refetch } = useLogs(startDate, endDate, 500) + + // Parse logs from API response + const logs = useMemo((): LogEntry[] => { + if (!logsData) return [] + // Handle different response shapes + const rawLogs = (logsData as { rows?: unknown[] }).rows || + (logsData as { logs?: unknown[] }).logs || + (Array.isArray(logsData) ? logsData : []) + return (rawLogs as LogEntry[]).map((log, idx) => ({ + ...log, + id: log.id || `log-${idx}`, + })) + }, [logsData]) + + // Filter logs + const filteredLogs = useMemo(() => { + return logs.filter((log) => { + if (dataType !== 'all' && log.type !== dataType) return false + if (searchQuery) { + const query = searchQuery.toLowerCase() + const searchable = JSON.stringify(log).toLowerCase() + return searchable.includes(query) + } + return true + }) + }, [logs, dataType, searchQuery]) + + const handleExport = () => { + if (!filteredLogs.length) return + + const csv = [ + ['Timestamp', 'Type', 'Agent', 'Model', 'Status', 'Cost', 'Latency'].join(','), + ...filteredLogs.map((log) => + [ + log.timestamp, + log.type || '-', + log.agent || '-', + log.model || '-', + log.success !== undefined ? (log.success ? 'Success' : 'Failed') : '-', + log.cost?.toFixed(4) || '-', + log.latency ? 
`${log.latency}ms` : '-', + ].join(',') + ), + ].join('\n') + + const blob = new Blob([csv], { type: 'text/csv' }) + const url = URL.createObjectURL(blob) + const a = document.createElement('a') + a.href = url + a.download = `agent-logs-${new Date().toISOString().split('T')[0]}.csv` + a.click() + URL.revokeObjectURL(url) + } + + const formatTimestamp = (ts: string) => { + return new Date(ts).toLocaleString(undefined, { + month: 'short', + day: 'numeric', + hour: '2-digit', + minute: '2-digit', + second: '2-digit', + }) + } + + if (error) { + return ( +
+

Failed to load logs

+ +
+ ) + } + + return ( +
+ {/* Controls */} +
+
+ {/* View Mode Toggle */} +
+ + +
+ + + + setSearchQuery(e.target.value)} + className="w-[200px]" + /> +
+ +
+ + +
+
+ + {/* Data Table */} + + + + {viewMode === 'requests' ? 'Request Logs' : 'Metrics Summary'} + + + + {isLoading ? ( +
+ {[...Array(10)].map((_, i) => ( + + ))} +
+ ) : filteredLogs.length === 0 ? ( +
+ No logs found +
+ ) : viewMode === 'requests' ? ( + + + + Timestamp + Type + Agent + Model + Status + Cost + Latency + + + + {filteredLogs.map((log) => ( + <> + + setExpandedRow(expandedRow === log.id ? null : log.id || null) + } + > + + {formatTimestamp(log.timestamp)} + + + + {log.type || 'request'} + + + + {log.agent || '-'} + + + {log.model || '-'} + + + {log.success !== undefined ? ( + + {log.success ? 'Success' : 'Failed'} + + ) : ( + '-' + )} + + + {log.cost ? `$${log.cost.toFixed(4)}` : '-'} + + + {log.latency ? `${log.latency}ms` : '-'} + + + {expandedRow === log.id && ( + + +
+                            {JSON.stringify(log, null, 2)}
+                          
+
+
+ )} + + ))} +
+
+ ) : ( + // Metrics view - simplified + + + + Metric + Value + Period + + + + + Total Requests + {filteredLogs.length} + Last 24h + + + Success Rate + + {( + (filteredLogs.filter((l) => l.success !== false).length / + Math.max(filteredLogs.length, 1)) * + 100 + ).toFixed(1)} + % + + Last 24h + + + Total Cost + + $ + {filteredLogs + .reduce((sum, l) => sum + (l.cost || 0), 0) + .toFixed(2)} + + Last 24h + + +
+ )} +
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/WorkersPanel.tsx b/honeycomb/src/components/agent-control/WorkersPanel.tsx new file mode 100644 index 00000000..524c2609 --- /dev/null +++ b/honeycomb/src/components/agent-control/WorkersPanel.tsx @@ -0,0 +1,276 @@ +import { useState, useMemo } from 'react' +import { useQuery } from '@tanstack/react-query' +import { Card, CardContent } from '@/components/ui/card' +import { Avatar, AvatarFallback } from '@/components/ui/avatar' +import { Badge } from '@/components/ui/badge' +import { Skeleton } from '@/components/ui/skeleton' +import { KpiCard } from './shared/KpiCard' +import { WorkerProfilePanel } from './workers/WorkerProfilePanel' +import { useAgentControlStore } from '@/stores/agentControlStore' +import { getAgents } from '@/services/controlApi' +import { cn } from '@/lib/utils' +import type { AgentInfo, LLMEvent } from '@/types/agentControl' + +// Derive workers from events buffer +function deriveWorkersFromEvents(events: LLMEvent[]): AgentInfo[] { + const workerMap = new Map() + + for (const event of events) { + const existing = workerMap.get(event.agent) + if (existing) { + existing.total_requests++ + existing.total_cost += event.cost + if (new Date(event.timestamp) > new Date(existing.last_seen)) { + existing.last_seen = event.timestamp + } + } else { + workerMap.set(event.agent, { + agent: event.agent, + agent_name: null, + status: 'connected', + connection_type: 'websocket', + instance_id: null, + first_seen: event.timestamp, + last_seen: event.timestamp, + total_requests: 1, + total_cost: event.cost, + }) + } + } + + return Array.from(workerMap.values()) +} + +/** + * Worker/Agent management grid with status indicators. 
+ */ +export function WorkersPanel() { + const [selectedWorker, setSelectedWorker] = useState(null) + const [profileOpen, setProfileOpen] = useState(false) + + // Fetch agents from API (past week) + const { data: agentsData, isLoading } = useQuery({ + queryKey: ['agents'], + queryFn: async () => { + const oneWeekAgo = new Date() + oneWeekAgo.setDate(oneWeekAgo.getDate() - 7) + return getAgents(oneWeekAgo.toISOString()) + }, + }) + + // Get real-time events from store + const eventsBuffer = useAgentControlStore((state) => state.eventsBuffer) + const realtimeAgents = useMemo(() => deriveWorkersFromEvents(eventsBuffer), [eventsBuffer]) + + // Merge API agents with real-time updates (real-time overrides API data) + const apiAgents = agentsData?.agents || [] + const workers = useMemo(() => { + const agentMap = new Map() + // Add API agents first + for (const agent of apiAgents) { + agentMap.set(agent.agent, agent) + } + // Override with real-time data + for (const agent of realtimeAgents) { + agentMap.set(agent.agent, agent) + } + return Array.from(agentMap.values()) + }, [apiAgents, realtimeAgents]) + + // Compute summary stats + const onlineCount = workers.filter((w: AgentInfo) => w.status === 'connected').length + const offlineCount = workers.filter((w: AgentInfo) => w.status === 'disconnected').length + const totalRequests = workers.reduce((sum: number, w: AgentInfo) => sum + w.total_requests, 0) + + const handleWorkerClick = (worker: AgentInfo) => { + setSelectedWorker(worker) + setProfileOpen(true) + } + + const formatCurrency = (value: number) => + new Intl.NumberFormat('en-US', { + style: 'currency', + currency: 'USD', + minimumFractionDigits: 2, + }).format(value) + + return ( +
+ {/* Summary Cards */} +
+ + + + } + /> + 0} + icon={ + + + + } + /> + + + + } + /> + + + + } + /> +
+ + {/* Workers Grid */} + {isLoading ? ( +
+ {[...Array(6)].map((_, i) => ( + + ))} +
+ ) : workers.length === 0 ? ( +
+

No agents found

+

+ Agents will appear here when they connect and send events +

+
+ ) : ( +
+ {workers.map((worker: AgentInfo) => ( + handleWorkerClick(worker)} + formatCurrency={formatCurrency} + /> + ))} +
+ )} + + {/* Worker Profile Panel */} + +
+ ) +} + +interface WorkerCardProps { + worker: AgentInfo + onClick: () => void + formatCurrency: (value: number) => string +} + +function WorkerCard({ worker, onClick, formatCurrency }: WorkerCardProps) { + const isOnline = worker.status === 'connected' + + return ( + + +
+ + + {(worker.agent_name || worker.agent).slice(0, 2).toUpperCase()} + + + +
+
+ + {worker.agent_name || worker.agent} + + + {isOnline ? 'Online' : 'Offline'} + +
+ +
+
+ Requests + + {worker.total_requests.toLocaleString()} + +
+
+ Cost + + {formatCurrency(worker.total_cost)} + +
+
+
+
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/budget/AddBudgetDialog.tsx b/honeycomb/src/components/agent-control/budget/AddBudgetDialog.tsx new file mode 100644 index 00000000..96ff59a8 --- /dev/null +++ b/honeycomb/src/components/agent-control/budget/AddBudgetDialog.tsx @@ -0,0 +1,209 @@ +import { useState } from 'react' +import { + Dialog, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle, +} from '@/components/ui/dialog' +import { Button } from '@/components/ui/button' +import { Input } from '@/components/ui/input' +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select' +import type { BudgetType, LimitAction } from '@/types/agentControl' + +interface AddBudgetDialogProps { + open: boolean + onOpenChange: (open: boolean) => void +} + +const budgetTypes: { value: BudgetType; label: string }[] = [ + { value: 'global', label: 'Global' }, + { value: 'agent', label: 'Agent' }, + { value: 'customer', label: 'Customer' }, + { value: 'feature', label: 'Feature' }, + { value: 'tag', label: 'Tag' }, +] + +const limitActions: { value: LimitAction; label: string; description: string }[] = [ + { value: 'notify', label: 'Notify Only', description: 'Send alerts but continue' }, + { value: 'throttle', label: 'Throttle', description: 'Reduce request rate' }, + { value: 'degrade', label: 'Degrade', description: 'Switch to cheaper model' }, + { value: 'kill', label: 'Kill', description: 'Stop all requests' }, +] + +/** + * Dialog for creating a new budget configuration. 
+ */ +export function AddBudgetDialog({ open, onOpenChange }: AddBudgetDialogProps) { + const [name, setName] = useState('') + const [type, setType] = useState('agent') + const [limit, setLimit] = useState('100') + const [limitAction, setLimitAction] = useState('notify') + const [alertThreshold, setAlertThreshold] = useState('80') + const [isSubmitting, setIsSubmitting] = useState(false) + const [error, setError] = useState(null) + + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault() + setError(null) + + if (!name.trim()) { + setError('Name is required') + return + } + + const limitValue = parseFloat(limit) + if (isNaN(limitValue) || limitValue <= 0) { + setError('Limit must be greater than 0') + return + } + + setIsSubmitting(true) + + try { + // TODO: Integrate with actual API when policyId is available + // For now, just close the dialog + console.log('Creating budget:', { + name, + type, + limit: limitValue, + limitAction, + alertThreshold: parseFloat(alertThreshold) || undefined, + }) + + handleClose() + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to create budget') + } finally { + setIsSubmitting(false) + } + } + + const handleClose = () => { + setName('') + setType('agent') + setLimit('100') + setLimitAction('notify') + setAlertThreshold('80') + setError(null) + onOpenChange(false) + } + + return ( + + + + Create Budget + + Set up a new budget to control costs for agents, models, or features. + + + +
+ {error && ( +
+ {error} +
+ )} + + {/* Name */} +
+ + setName(e.target.value)} + placeholder="e.g., Production Agent Budget" + /> +
+ + {/* Type */} +
+ + +
+ + {/* Limit */} +
+ + setLimit(e.target.value)} + placeholder="100.00" + /> +
+ + {/* Limit Action */} +
+ + +
+ + {/* Alert Threshold */} +
+ + setAlertThreshold(e.target.value)} + placeholder="80" + /> +

+ Receive alerts when spending reaches this percentage +

+
+ + + + + +
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/budget/BudgetDetailPanel.tsx b/honeycomb/src/components/agent-control/budget/BudgetDetailPanel.tsx new file mode 100644 index 00000000..fcb71dd9 --- /dev/null +++ b/honeycomb/src/components/agent-control/budget/BudgetDetailPanel.tsx @@ -0,0 +1,480 @@ +import { useState, useEffect } from 'react' +import { + Sheet, + SheetContent, + SheetHeader, + SheetTitle, + SheetFooter, +} from '@/components/ui/sheet' +import { Button } from '@/components/ui/button' +import { Input } from '@/components/ui/input' +import { Label } from '@/components/ui/label' +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select' +import { Progress } from '@/components/ui/progress' +import { Separator } from '@/components/ui/separator' +import { Badge } from '@/components/ui/badge' +import { Switch } from '@/components/ui/switch' +import { cn } from '@/lib/utils' +import { + DollarSign, + Bot, + User, + LayoutGrid, + Tag, + Trash2, + Plus, + X, + Bell, + Mail, +} from 'lucide-react' +import type { BudgetConfig, BudgetType, LimitAction } from '@/types/agentControl' + +interface BudgetDetailPanelProps { + budget: BudgetConfig | null + open: boolean + onOpenChange: (open: boolean) => void +} + +const typeIcons: Record = { + global: DollarSign, + agent: Bot, + customer: User, + feature: LayoutGrid, + tag: Tag, +} + +const typeColors: Record = { + global: 'bg-blue-100 text-blue-700', + agent: 'bg-red-100 text-red-700', + customer: 'bg-purple-100 text-purple-700', + feature: 'bg-orange-100 text-orange-700', + tag: 'bg-green-100 text-green-700', +} + +const limitActions: { value: LimitAction; label: string; description: string }[] = [ + { value: 'notify', label: 'Notify Only', description: 'Send alerts but allow requests to continue' }, + { value: 'throttle', label: 'Throttle', description: 'Rate limit requests when budget is exceeded' }, + { value: 'degrade', label: 'Degrade', 
description: 'Switch to a cheaper model' }, + { value: 'kill', label: 'Block', description: 'Stop all requests when budget is exceeded' }, +] + +/** + * Right-side slide-over panel for viewing and editing budget details. + */ +export function BudgetDetailPanel({ + budget, + open, + onOpenChange, +}: BudgetDetailPanelProps) { + // Local state for editing + const [limit, setLimit] = useState('') + const [limitAction, setLimitAction] = useState('notify') + const [alerts, setAlerts] = useState<{ threshold: number; enabled: boolean }[]>([]) + const [newThreshold, setNewThreshold] = useState('') + const [emailEnabled, setEmailEnabled] = useState(false) + const [emailRecipients, setEmailRecipients] = useState([]) + const [newEmail, setNewEmail] = useState('') + const [inAppEnabled, setInAppEnabled] = useState(false) + const [isSubmitting, setIsSubmitting] = useState(false) + const [isDirty, setIsDirty] = useState(false) + + // Reset form when budget changes + useEffect(() => { + if (budget) { + setLimit(budget.limit.toString()) + setLimitAction(budget.limitAction) + setAlerts([...budget.alerts]) + setEmailEnabled(budget.notifications.email) + setEmailRecipients([...budget.notifications.emailRecipients]) + setInAppEnabled(budget.notifications.inApp) + setIsDirty(false) + setNewThreshold('') + setNewEmail('') + } + }, [budget]) + + if (!budget) return null + + const percentage = budget.limit > 0 ? (budget.spent / budget.limit) * 100 : 0 + const status = percentage >= 100 ? 'critical' : percentage >= 80 ? 
'warning' : 'healthy' + const remaining = Math.max(0, budget.limit - budget.spent) + + const TypeIcon = typeIcons[budget.type] || DollarSign + + const formatCurrency = (value: number) => + new Intl.NumberFormat('en-US', { + style: 'currency', + currency: 'USD', + minimumFractionDigits: 0, + maximumFractionDigits: 0, + }).format(value) + + const handleChange = () => { + setIsDirty(true) + } + + const handleAddThreshold = () => { + const threshold = parseInt(newThreshold) + if (threshold > 0 && threshold <= 100 && !alerts.some(a => a.threshold === threshold)) { + setAlerts([...alerts, { threshold, enabled: true }].sort((a, b) => a.threshold - b.threshold)) + setNewThreshold('') + handleChange() + } + } + + const handleRemoveThreshold = (threshold: number) => { + setAlerts(alerts.filter(a => a.threshold !== threshold)) + handleChange() + } + + const handleToggleThreshold = (threshold: number) => { + setAlerts(alerts.map(a => + a.threshold === threshold ? { ...a, enabled: !a.enabled } : a + )) + handleChange() + } + + const handleSubmit = async () => { + setIsSubmitting(true) + try { + // TODO: Integrate with actual API + console.log('Updating budget:', { + id: budget.id, + limit: parseFloat(limit), + limitAction, + alerts, + notifications: { + inApp: inAppEnabled, + email: emailEnabled, + emailRecipients, + webhook: budget.notifications.webhook, + }, + }) + onOpenChange(false) + } catch (error) { + console.error('Failed to update budget:', error) + } finally { + setIsSubmitting(false) + } + } + + const handleDelete = async () => { + if (confirm(`Are you sure you want to delete "${budget.name}"? This action cannot be undone.`)) { + try { + // TODO: Integrate with actual API + console.log('Deleting budget:', budget.id) + onOpenChange(false) + } catch (error) { + console.error('Failed to delete budget:', error) + } + } + } + + return ( + + + {/* Header */} + +
+
+ +
+
+ {budget.name} +
+ + {budget.type} + + {budget.tagCategory && ( + + {budget.tagCategory} + + )} +
+
+
+

+ {formatCurrency(budget.spent)} of {formatCurrency(budget.limit)} used +

+
+ + {/* Scrollable Content */} +
+ {/* Budget Usage Section */} +
+

Budget Usage

+
+
+ Progress + + {Math.round(percentage)}% + +
+ div]:bg-green-500', + status === 'warning' && '[&>div]:bg-orange-500', + status === 'critical' && '[&>div]:bg-red-500' + )} + /> +
+ Spent: {formatCurrency(budget.spent)} + Remaining: {formatCurrency(remaining)} +
+
+
+ + + + {/* Monthly Limit Section */} +
+ +
+ $ + { + setLimit(e.target.value) + handleChange() + }} + className="pl-7" + /> +
+
+ + + + {/* At Limit Action Section */} +
+ + +

+ {limitActions.find(a => a.value === limitAction)?.description} +

+
+ + + + {/* Alert Thresholds Section */} +
+ + + {alerts.length === 0 ? ( +

No alert thresholds configured

+ ) : ( +
+ {alerts.map((alert) => ( +
+
+ handleToggleThreshold(alert.threshold)} + /> + + {alert.threshold}% + + + ({formatCurrency(budget.limit * alert.threshold / 100)}) + +
+ +
+ ))} +
+ )} + + {/* Add new threshold */} +
+
+ setNewThreshold(e.target.value)} + className="pr-8" + /> + % +
+ +
+
+ + + + {/* Notification Channels Section */} +
+ +
+ + +
+ {emailEnabled && ( +
+ {emailRecipients.length > 0 && ( +
+ {emailRecipients.map((email) => ( +
+ {email} + +
+ ))} +
+ )} +
+ setNewEmail(e.target.value)} + onKeyDown={(e) => { + if (e.key === 'Enter') { + e.preventDefault() + const email = newEmail.trim().toLowerCase() + if (email && email.includes('@') && !emailRecipients.includes(email)) { + setEmailRecipients([...emailRecipients, email]) + setNewEmail('') + handleChange() + } + } + }} + className="flex-1" + /> + +
+
+ )} +
+
+ + {/* Footer */} + + +
+ + + + + + ) +} diff --git a/honeycomb/src/components/agent-control/charts/CostByModelChart.tsx b/honeycomb/src/components/agent-control/charts/CostByModelChart.tsx new file mode 100644 index 00000000..2e30e53b --- /dev/null +++ b/honeycomb/src/components/agent-control/charts/CostByModelChart.tsx @@ -0,0 +1,110 @@ +import { PieChart, Pie, Cell, ResponsiveContainer, Legend, Tooltip } from 'recharts' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import type { CostByModelData } from '@/types/agentControl' + +// Extended type with index signature for recharts compatibility +interface ChartData extends CostByModelData { + [key: string]: string | number | undefined +} + +interface CostByModelChartProps { + data: CostByModelData[] + title?: string + className?: string +} + +const COLORS = [ + 'hsl(var(--primary))', + 'hsl(var(--primary) / 0.8)', + 'hsl(var(--primary) / 0.6)', + 'hsl(var(--primary) / 0.4)', + 'hsl(220 70% 50%)', + 'hsl(280 70% 50%)', + 'hsl(340 70% 50%)', + 'hsl(160 70% 50%)', +] + +/** + * Donut chart showing cost distribution by model. + */ +export function CostByModelChart({ + data, + title = 'Cost by Model', + className, +}: CostByModelChartProps) { + const formatCurrency = (value: number) => + new Intl.NumberFormat('en-US', { + style: 'currency', + currency: 'USD', + minimumFractionDigits: 0, + maximumFractionDigits: 2, + }).format(value) + + const formatPercent = (value: number) => `${(value * 100).toFixed(1)}%` + + const totalCost = data.reduce((sum, item) => sum + item.cost, 0) + + // Cast data to chart-compatible type + const chartData: ChartData[] = data.map((d) => ({ ...d })) + + return ( + + + {title} + + +
+ + + + {data.map((entry, index) => ( + + ))} + + [formatCurrency(Number(value) || 0), 'Cost']} + contentStyle={{ + backgroundColor: 'hsl(var(--card))', + border: '1px solid hsl(var(--border))', + borderRadius: '6px', + }} + /> + { + const item = data.find((d) => d.name === value) + return ( + + {value}{' '} + + ({item ? formatPercent(item.cost / totalCost) : ''}) + + + ) + }} + /> + + +
+
+ {formatCurrency(totalCost)} + Total Cost +
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/charts/CostTrendChart.tsx b/honeycomb/src/components/agent-control/charts/CostTrendChart.tsx new file mode 100644 index 00000000..3911b9d6 --- /dev/null +++ b/honeycomb/src/components/agent-control/charts/CostTrendChart.tsx @@ -0,0 +1,109 @@ +import { + AreaChart, + Area, + XAxis, + YAxis, + CartesianGrid, + Tooltip, + ResponsiveContainer, + ReferenceLine, +} from 'recharts' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import type { CostTrendData } from '@/types/agentControl' + +interface CostTrendChartProps { + data: CostTrendData[] + budgetLine?: number + title?: string + className?: string +} + +/** + * Area chart showing cost trends over time with optional budget line. + */ +export function CostTrendChart({ + data, + budgetLine, + title = 'Cost Trend', + className, +}: CostTrendChartProps) { + const formatCurrency = (value: number) => + new Intl.NumberFormat('en-US', { + style: 'currency', + currency: 'USD', + minimumFractionDigits: 0, + maximumFractionDigits: 0, + }).format(value) + + const formatDate = (dateStr: string) => { + const date = new Date(dateStr) + return date.toLocaleDateString(undefined, { month: 'short', day: 'numeric' }) + } + + return ( + + + {title} + + +
+ + + + + + + + + + + + [formatCurrency(Number(value) || 0), 'Cost']} + labelFormatter={formatDate} + contentStyle={{ + backgroundColor: 'hsl(var(--card))', + border: '1px solid hsl(var(--border))', + borderRadius: '6px', + }} + /> + {budgetLine && ( + + )} + + + +
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/charts/LatencyChart.tsx b/honeycomb/src/components/agent-control/charts/LatencyChart.tsx new file mode 100644 index 00000000..e71f904b --- /dev/null +++ b/honeycomb/src/components/agent-control/charts/LatencyChart.tsx @@ -0,0 +1,70 @@ +import { + BarChart, + Bar, + XAxis, + YAxis, + CartesianGrid, + Tooltip, + ResponsiveContainer, +} from 'recharts' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import type { LatencyData } from '@/types/agentControl' + +interface LatencyChartProps { + data: LatencyData[] + title?: string + className?: string +} + +/** + * Bar chart showing latency distribution. + */ +export function LatencyChart({ + data, + title = 'Latency Distribution', + className, +}: LatencyChartProps) { + const formatCount = (value: number) => { + if (value >= 1000) return `${(value / 1000).toFixed(1)}K` + return value.toString() + } + + return ( + + + {title} + + +
+ + + + + + [formatCount(Number(value) || 0), 'Requests']} + contentStyle={{ + backgroundColor: 'hsl(var(--card))', + border: '1px solid hsl(var(--border))', + borderRadius: '6px', + }} + /> + + + +
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/charts/ModelUsageChart.tsx b/honeycomb/src/components/agent-control/charts/ModelUsageChart.tsx new file mode 100644 index 00000000..38c8aa2f --- /dev/null +++ b/honeycomb/src/components/agent-control/charts/ModelUsageChart.tsx @@ -0,0 +1,81 @@ +import { + BarChart, + Bar, + XAxis, + YAxis, + CartesianGrid, + Tooltip, + ResponsiveContainer, +} from 'recharts' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import type { ModelUsageData } from '@/types/agentControl' + +interface ModelUsageChartProps { + data: ModelUsageData[] + title?: string + className?: string +} + +/** + * Bar chart showing model usage by requests. + */ +export function ModelUsageChart({ + data, + title = 'Model Usage', + className, +}: ModelUsageChartProps) { + const formatNumber = (value: number) => { + if (value >= 1000000) return `${(value / 1000000).toFixed(1)}M` + if (value >= 1000) return `${(value / 1000).toFixed(1)}K` + return value.toString() + } + + // Sort by requests descending + const sortedData = [...data].sort((a, b) => b.requests - a.requests) + + return ( + + + {title} + + +
+ + + + + + { + const numValue = Number(value) || 0 + if (name === 'requests') return [formatNumber(numValue), 'Requests'] + return [numValue, String(name)] + }} + contentStyle={{ + backgroundColor: 'hsl(var(--card))', + border: '1px solid hsl(var(--border))', + borderRadius: '6px', + }} + /> + + + +
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/charts/TokenUsageChart.tsx b/honeycomb/src/components/agent-control/charts/TokenUsageChart.tsx new file mode 100644 index 00000000..13c58c12 --- /dev/null +++ b/honeycomb/src/components/agent-control/charts/TokenUsageChart.tsx @@ -0,0 +1,96 @@ +import { + BarChart, + Bar, + XAxis, + YAxis, + CartesianGrid, + Tooltip, + ResponsiveContainer, + Legend, +} from 'recharts' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import type { TokenUsageData } from '@/types/agentControl' + +interface TokenUsageChartProps { + data: TokenUsageData[] + title?: string + className?: string +} + +/** + * Stacked bar chart showing input/output token usage over time. + */ +export function TokenUsageChart({ + data, + title = 'Token Usage', + className, +}: TokenUsageChartProps) { + const formatNumber = (value: number) => { + if (value >= 1000000) return `${(value / 1000000).toFixed(1)}M` + if (value >= 1000) return `${(value / 1000).toFixed(1)}K` + return value.toString() + } + + const formatDate = (dateStr: string) => { + const date = new Date(dateStr) + return date.toLocaleDateString(undefined, { month: 'short', day: 'numeric' }) + } + + return ( + + + {title} + + +
+ + + + + + [ + formatNumber(Number(value) || 0), + name === 'input' ? 'Input Tokens' : 'Output Tokens', + ]} + labelFormatter={formatDate} + contentStyle={{ + backgroundColor: 'hsl(var(--card))', + border: '1px solid hsl(var(--border))', + borderRadius: '6px', + }} + /> + + + + + +
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/charts/TopAgentsChart.tsx b/honeycomb/src/components/agent-control/charts/TopAgentsChart.tsx new file mode 100644 index 00000000..56764f7e --- /dev/null +++ b/honeycomb/src/components/agent-control/charts/TopAgentsChart.tsx @@ -0,0 +1,98 @@ +import { + BarChart, + Bar, + XAxis, + YAxis, + CartesianGrid, + Tooltip, + ResponsiveContainer, + Cell, +} from 'recharts' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import type { TopAgentData } from '@/types/agentControl' + +interface TopAgentsChartProps { + data: TopAgentData[] + title?: string + className?: string +} + +/** + * Horizontal bar chart showing top agents by spend. + */ +export function TopAgentsChart({ + data, + title = 'Top Agents by Spend', + className, +}: TopAgentsChartProps) { + const formatCurrency = (value: number) => + new Intl.NumberFormat('en-US', { + style: 'currency', + currency: 'USD', + minimumFractionDigits: 0, + maximumFractionDigits: 0, + }).format(value) + + // Sort by spend descending and take top 10 + const sortedData = [...data].sort((a, b) => b.spend - a.spend).slice(0, 10) + const maxSpend = Math.max(...sortedData.map((d) => d.spend)) + + return ( + + + {title} + + +
+ + + + + + { + const numValue = Number(value) || 0 + if (name === 'spend') return [formatCurrency(numValue), 'Spend'] + return [numValue, String(name)] + }} + contentStyle={{ + backgroundColor: 'hsl(var(--card))', + border: '1px solid hsl(var(--border))', + borderRadius: '6px', + }} + /> + + {sortedData.map((entry, index) => { + // Color based on limit usage + const limitRatio = entry.limit ? entry.spend / entry.limit : 0 + let fill = 'hsl(var(--primary))' + if (limitRatio >= 0.9) fill = 'hsl(var(--destructive))' + else if (limitRatio >= 0.75) fill = 'hsl(38 92% 50%)' + + return + })} + + + +
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/charts/VegaLiteChart.tsx b/honeycomb/src/components/agent-control/charts/VegaLiteChart.tsx new file mode 100644 index 00000000..a941507d --- /dev/null +++ b/honeycomb/src/components/agent-control/charts/VegaLiteChart.tsx @@ -0,0 +1,52 @@ +import { useRef, useEffect, useCallback } from 'react' +import vegaEmbed, { type Result, type VisualizationSpec, type EmbedOptions } from 'vega-embed' + +interface VegaLiteChartProps { + spec: VisualizationSpec + className?: string + options?: EmbedOptions +} + +/** + * React wrapper component for VegaLite charts using vega-embed. + * Handles mounting, updating, and cleanup of Vega views. + */ +export function VegaLiteChart({ spec, className, options }: VegaLiteChartProps) { + const containerRef = useRef(null) + const vegaResultRef = useRef(null) + + const renderChart = useCallback(async () => { + if (!containerRef.current || !spec) return + + // Cleanup previous render to prevent memory leaks + if (vegaResultRef.current) { + vegaResultRef.current.finalize() + vegaResultRef.current = null + } + + try { + const result = await vegaEmbed(containerRef.current, spec, { + actions: false, + tooltip: { theme: 'dark' }, + ...options, + }) + vegaResultRef.current = result + } catch (error) { + console.error('Failed to render VegaLite chart:', error) + } + }, [spec, options]) + + useEffect(() => { + renderChart() + + return () => { + // Cleanup on unmount + if (vegaResultRef.current) { + vegaResultRef.current.finalize() + vegaResultRef.current = null + } + } + }, [renderChart]) + + return
+} diff --git a/honeycomb/src/components/agent-control/charts/index.ts b/honeycomb/src/components/agent-control/charts/index.ts new file mode 100644 index 00000000..799db1d2 --- /dev/null +++ b/honeycomb/src/components/agent-control/charts/index.ts @@ -0,0 +1,6 @@ +export { CostTrendChart } from './CostTrendChart' +export { TokenUsageChart } from './TokenUsageChart' +export { LatencyChart } from './LatencyChart' +export { CostByModelChart } from './CostByModelChart' +export { TopAgentsChart } from './TopAgentsChart' +export { ModelUsageChart } from './ModelUsageChart' diff --git a/honeycomb/src/components/agent-control/charts/specs.ts b/honeycomb/src/components/agent-control/charts/specs.ts new file mode 100644 index 00000000..8292cea0 --- /dev/null +++ b/honeycomb/src/components/agent-control/charts/specs.ts @@ -0,0 +1,257 @@ +/** + * VegaLite spec builders for analytics charts. + * Based on patterns from acho-launchpad's AnalyticsPanel.vue + */ + +import type { VisualizationSpec } from 'vega-embed' +import type { + CostTrendData, + TokenUsageData, + CostByModelData, + LatencyDistributionData, + LatencyPercentilesData, +} from './transformers' + +// ============================================================================= +// Cost Trend Chart (Area Chart with optional budget line) +// ============================================================================= + +export function createCostTrendSpec(data: CostTrendData[]): VisualizationSpec { + return { + $schema: 'https://vega.github.io/schema/vega-lite/v5.json', + width: 'container', + height: 220, + padding: { left: 10, right: 10, top: 10, bottom: 10 }, + data: { values: data }, + layer: [ + { + mark: { type: 'area', line: true, color: '#263A99', opacity: 0.3 }, + encoding: { + x: { + field: 'date', + type: 'ordinal', + sort: null, + axis: { title: null, labelAngle: -45 }, + }, + y: { + field: 'cost', + type: 'quantitative', + axis: { title: 'Cost ($)', format: '$.2f' }, + }, + tooltip: [ + { field: 'date', 
title: 'Date' }, + { field: 'cost', title: 'Cost', format: '$.4f' }, + ], + }, + }, + // Budget reference line (optional) + { + mark: { type: 'rule', color: '#c1392b', strokeDash: [5, 5], strokeWidth: 2 }, + encoding: { + y: { datum: 66.67 }, + }, + }, + ], + config: { view: { stroke: null } }, + } as VisualizationSpec +} + +// ============================================================================= +// Request Volume Chart (Bar Chart) +// ============================================================================= + +export function createRequestVolumeSpec(data: CostTrendData[]): VisualizationSpec { + return { + $schema: 'https://vega.github.io/schema/vega-lite/v5.json', + width: 'container', + height: 220, + padding: { left: 10, right: 10, top: 10, bottom: 10 }, + data: { values: data }, + mark: { + type: 'bar', + color: '#22c55e', + cornerRadiusTopLeft: 4, + cornerRadiusTopRight: 4, + }, + encoding: { + x: { + field: 'date', + type: 'ordinal', + sort: null, + axis: { title: null, labelAngle: -45 }, + }, + y: { + field: 'requests', + type: 'quantitative', + axis: { title: 'Requests' }, + }, + tooltip: [ + { field: 'date', title: 'Date' }, + { field: 'requests', title: 'Requests' }, + ], + }, + config: { view: { stroke: null } }, + } as VisualizationSpec +} + +// ============================================================================= +// Token Usage Chart (Stacked Bar Chart) +// ============================================================================= + +export function createTokenUsageSpec(data: TokenUsageData[]): VisualizationSpec { + return { + $schema: 'https://vega.github.io/schema/vega-lite/v5.json', + width: 'container', + height: 220, + padding: { left: 10, right: 10, top: 10, bottom: 10 }, + data: { values: data }, + mark: { + type: 'bar', + cornerRadiusTopLeft: 4, + cornerRadiusTopRight: 4, + }, + encoding: { + x: { + field: 'date', + type: 'ordinal', + sort: null, + axis: { title: null, labelAngle: -45 }, + }, + y: { + field: 'tokens', + 
type: 'quantitative', + axis: { title: 'Tokens', format: '.2s' }, + stack: true, + }, + color: { + field: 'type', + type: 'nominal', + scale: { domain: ['Input', 'Output'], range: ['#263A99', '#22c55e'] }, + legend: { orient: 'bottom', title: null }, + }, + tooltip: [ + { field: 'date', title: 'Date' }, + { field: 'type', title: 'Type' }, + { field: 'tokens', title: 'Tokens', format: ',.0f' }, + ], + }, + config: { view: { stroke: null } }, + } as VisualizationSpec +} + +// ============================================================================= +// Cost by Model Chart (Donut Chart) +// ============================================================================= + +export function createCostByModelSpec(data: CostByModelData[]): VisualizationSpec { + return { + $schema: 'https://vega.github.io/schema/vega-lite/v5.json', + width: 180, + height: 180, + data: { values: data }, + mark: { type: 'arc', innerRadius: 50 }, + encoding: { + theta: { field: 'value', type: 'quantitative' }, + color: { + field: 'name', + type: 'nominal', + scale: { + domain: data.map((m) => m.name), + range: data.map((m) => m.color), + }, + legend: null, + }, + tooltip: [ + { field: 'name', title: 'Model' }, + { field: 'value', title: 'Share (%)', format: '.0f' }, + { field: 'cost', title: 'Cost', format: '$.4f' }, + ], + }, + config: { view: { stroke: null } }, + } as VisualizationSpec +} + +// ============================================================================= +// Latency Distribution Chart (Bar Chart) +// ============================================================================= + +export function createLatencyDistributionSpec( + data: LatencyDistributionData[] +): VisualizationSpec { + return { + $schema: 'https://vega.github.io/schema/vega-lite/v5.json', + width: 'container', + height: 220, + padding: { left: 10, right: 10, top: 10, bottom: 10 }, + data: { values: data }, + mark: { + type: 'bar', + color: '#263A99', + cornerRadiusTopLeft: 4, + cornerRadiusTopRight: 4, + 
}, + encoding: { + x: { + field: 'range', + type: 'ordinal', + axis: { title: 'Latency Range' }, + sort: null, + }, + y: { + field: 'count', + type: 'quantitative', + axis: { title: 'Count' }, + }, + tooltip: [ + { field: 'range', title: 'Range' }, + { field: 'count', title: 'Count' }, + ], + }, + config: { view: { stroke: null } }, + } as VisualizationSpec +} + +// ============================================================================= +// Latency Percentiles Chart (Multi-line Chart) +// ============================================================================= + +export function createLatencyPercentilesSpec( + data: LatencyPercentilesData[] +): VisualizationSpec { + return { + $schema: 'https://vega.github.io/schema/vega-lite/v5.json', + width: 'container', + height: 220, + padding: { left: 10, right: 10, top: 10, bottom: 10 }, + data: { values: data }, + mark: { type: 'line', point: true }, + encoding: { + x: { + field: 'date', + type: 'ordinal', + sort: null, + axis: { title: null, labelAngle: -45 }, + }, + y: { + field: 'latency', + type: 'quantitative', + axis: { title: 'Latency (ms)' }, + }, + color: { + field: 'percentile', + type: 'nominal', + scale: { + domain: ['P50', 'P95', 'P99'], + range: ['#263A99', '#f59e0b', '#c1392b'], + }, + legend: null, + }, + tooltip: [ + { field: 'date', title: 'Date' }, + { field: 'percentile', title: 'Percentile' }, + { field: 'latency', title: 'Latency (ms)', format: '.0f' }, + ], + }, + config: { view: { stroke: null } }, + } as VisualizationSpec +} diff --git a/honeycomb/src/components/agent-control/charts/transformers.ts b/honeycomb/src/components/agent-control/charts/transformers.ts new file mode 100644 index 00000000..5891f678 --- /dev/null +++ b/honeycomb/src/components/agent-control/charts/transformers.ts @@ -0,0 +1,254 @@ +/** + * Data transformation utilities for converting API responses to chart-ready data. 
+ * Based on patterns from acho-launchpad's useAgentControlData.ts + */ + +// Color palette for models (matches launchpad) +export const MODEL_COLORS = ['#263A99', '#22c55e', '#6b21a8', '#f59e0b', '#c1392b', '#06b6d4'] + +// ============================================================================= +// Types for transformed chart data +// ============================================================================= + +export interface CostTrendData { + date: string + cost: number + requests: number + budget?: number +} + +export interface TokenUsageData { + date: string + type: 'Input' | 'Output' + tokens: number +} + +export interface CostByModelData { + name: string + cost: number + value: number // percentage + color: string +} + +export interface LatencyDistributionData { + range: string + count: number +} + +export interface LatencyPercentilesData { + date: string + percentile: 'P50' | 'P95' | 'P99' + latency: number +} + +export interface TopAgentData { + name: string + spend: number + requests: number + avgCost: number +} + +// ============================================================================= +// Format helpers +// ============================================================================= + +/** + * Format bucket label based on resolution + * For hourly: "2 PM", "3 PM", etc. + * For daily: "Dec 14", "Dec 15", etc. 
+ */ +export function formatBucketLabel(bucket: string, resolution: 'day' | 'hour'): string { + const date = new Date(bucket) + if (resolution === 'hour') { + return date.toLocaleTimeString('en-US', { hour: 'numeric', hour12: true }) + } + return date.toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) +} + +// ============================================================================= +// Data transformers +// ============================================================================= + +/** + * Transform analytics API response to chart-ready data + */ +export function transformAnalyticsData(data: any) { + if (!data?.analytics) { + return { + costTrends: [], + tokenUsage: [], + costByModel: [], + latencyDistribution: [], + latencyPercentiles: [], + topAgents: [], + } + } + + const analytics = data.analytics + const resolution = analytics.timeline?.resolution || 'day' + const timeline = resolution === 'hour' ? analytics.timeline?.hourly : analytics.timeline?.daily + + return { + costTrends: transformCostTrends(timeline, resolution), + tokenUsage: transformTokenUsage(timeline, resolution), + costByModel: transformCostByModel(analytics.cost_by_model), + latencyDistribution: transformLatencyDistribution(analytics.latency_distribution), + latencyPercentiles: transformLatencyPercentiles(timeline, resolution), + topAgents: transformTopAgents(analytics.cost_by_agent), + } +} + +/** + * Transform cost timeline to cost trend data + */ +function transformCostTrends( + timeline: any, + resolution: 'day' | 'hour' +): CostTrendData[] { + if (!timeline?.cost || !Array.isArray(timeline.cost)) { + return [] + } + + // Create requests lookup map + const requestsMap = new Map( + (timeline.requests || []).map((r: any) => [r.bucket, r.requests]) + ) + + return timeline.cost.map((d: any) => ({ + date: formatBucketLabel(d.bucket, resolution), + cost: d.cost_total || 0, + requests: requestsMap.get(d.bucket) || 0, + budget: 66.67, // Default daily budget (~$2000/month / 30 
days) + })) +} + +/** + * Transform token timeline to stacked bar chart data (flattened) + */ +function transformTokenUsage( + timeline: any, + resolution: 'day' | 'hour' +): TokenUsageData[] { + if (!timeline?.tokens || !Array.isArray(timeline.tokens)) { + return [] + } + + return timeline.tokens.flatMap((d: any) => [ + { + date: formatBucketLabel(d.bucket, resolution), + type: 'Input' as const, + tokens: d.input_tokens || 0, + }, + { + date: formatBucketLabel(d.bucket, resolution), + type: 'Output' as const, + tokens: d.output_tokens || 0, + }, + ]) +} + +/** + * Transform cost by model to pie/donut chart data + */ +function transformCostByModel(costByModel: any): CostByModelData[] { + if (!costByModel?.models || !Array.isArray(costByModel.models)) { + return [] + } + + return costByModel.models.map((m: any, i: number) => ({ + name: m.model?.split('/').pop() || m.model || 'Unknown', + cost: m.cost_total || 0, + value: Math.round((m.share || 0) * 100), + color: MODEL_COLORS[i % MODEL_COLORS.length], + })) +} + +/** + * Aggregate API latency buckets to UI buckets + * API: 0-1s, 1-2s, 2-5s, 5-10s, 10-20s, 20s+ + * UI: 0-2s, 2-5s, 5-10s, 10-20s, 20s+ + */ +function transformLatencyDistribution( + latencyDistribution: any +): LatencyDistributionData[] { + if (!latencyDistribution?.buckets || !Array.isArray(latencyDistribution.buckets)) { + return [] + } + + const aggregated: Record = { + '0-2s': 0, + '2-5s': 0, + '5-10s': 0, + '10-20s': 0, + '20s+': 0, + } + + latencyDistribution.buckets.forEach((b: any) => { + switch (b.bucket) { + case '0-1s': + case '1-2s': + aggregated['0-2s'] += b.count || 0 + break + case '2-5s': + aggregated['2-5s'] += b.count || 0 + break + case '5-10s': + aggregated['5-10s'] += b.count || 0 + break + case '10-20s': + aggregated['10-20s'] += b.count || 0 + break + case '20s+': + aggregated['20s+'] += b.count || 0 + break + } + }) + + return Object.entries(aggregated).map(([range, count]) => ({ range, count })) +} + +/** + * Transform latency 
percentiles to multi-line chart data (flattened) + */ +function transformLatencyPercentiles( + timeline: any, + resolution: 'day' | 'hour' +): LatencyPercentilesData[] { + if (!timeline?.latency_percentiles || !Array.isArray(timeline.latency_percentiles)) { + return [] + } + + return timeline.latency_percentiles.flatMap((d: any) => [ + { + date: formatBucketLabel(d.bucket, resolution), + percentile: 'P50' as const, + latency: d.p50_ms || 0, + }, + { + date: formatBucketLabel(d.bucket, resolution), + percentile: 'P95' as const, + latency: d.p95_ms || 0, + }, + { + date: formatBucketLabel(d.bucket, resolution), + percentile: 'P99' as const, + latency: d.p99_ms || d.p95_ms || 0, + }, + ]) +} + +/** + * Transform cost by agent to top agents list + */ +function transformTopAgents(costByAgent: any): TopAgentData[] { + if (!costByAgent?.agents || !Array.isArray(costByAgent.agents)) { + return [] + } + + return costByAgent.agents.map((a: any) => ({ + name: a.agent || 'Unknown', + spend: a.cost_total || 0, + requests: a.requests || 0, + avgCost: a.requests > 0 ? 
(a.cost_total || 0) / a.requests : 0, + })) +} diff --git a/honeycomb/src/components/agent-control/index.ts b/honeycomb/src/components/agent-control/index.ts new file mode 100644 index 00000000..a1671983 --- /dev/null +++ b/honeycomb/src/components/agent-control/index.ts @@ -0,0 +1,24 @@ +// Layout +export { AgentControlLayout } from './AgentControlLayout' + +// Main Panels +export { DataPanel } from './DataPanel' +export { AnalyticsPanel } from './AnalyticsPanel' +export { CostControls } from './CostControls' +export { WorkersPanel } from './WorkersPanel' + +// Shared components +export { LiveIndicator } from './shared/LiveIndicator' +export { KpiCard } from './shared/KpiCard' +export { BudgetCard } from './shared/BudgetCard' +export { NotificationBell } from './shared/NotificationBell' + +// Budget +export { AddBudgetDialog } from './budget/AddBudgetDialog' +export { BudgetDetailPanel } from './budget/BudgetDetailPanel' + +// Workers +export { WorkerProfilePanel } from './workers/WorkerProfilePanel' + +// Charts +export * from './charts' diff --git a/honeycomb/src/components/agent-control/shared/BudgetCard.tsx b/honeycomb/src/components/agent-control/shared/BudgetCard.tsx new file mode 100644 index 00000000..13dca7b4 --- /dev/null +++ b/honeycomb/src/components/agent-control/shared/BudgetCard.tsx @@ -0,0 +1,172 @@ +import { Progress } from '@/components/ui/progress' +import { Badge } from '@/components/ui/badge' +import { cn } from '@/lib/utils' +import { + DollarSign, + Bot, + User, + LayoutGrid, + Tag, + Ban, + Gauge, + ArrowDown, + Bell, + Mail, + Webhook, + ChevronRight, +} from 'lucide-react' +import type { BudgetConfig, BudgetType, LimitAction } from '@/types/agentControl' + +interface BudgetCardProps { + budget: BudgetConfig + onClick?: () => void + className?: string +} + +const typeIcons: Record = { + global: DollarSign, + agent: Bot, + customer: User, + feature: LayoutGrid, + tag: Tag, +} + +const typeColors: Record = { + global: 'bg-blue-100 
text-blue-700', + agent: 'bg-red-100 text-red-700', + customer: 'bg-purple-100 text-purple-700', + feature: 'bg-orange-100 text-orange-700', + tag: 'bg-green-100 text-green-700', +} + +const actionIcons: Record = { + kill: Ban, + throttle: Gauge, + degrade: ArrowDown, + notify: Bell, +} + +const actionColors: Record = { + kill: 'bg-red-100 text-red-700', + throttle: 'bg-orange-100 text-orange-700', + degrade: 'bg-blue-100 text-blue-700', + notify: 'bg-green-100 text-green-700', +} + +const actionLabels: Record = { + kill: 'Block', + throttle: 'Throttle', + degrade: 'Degrade', + notify: 'Notify', +} + +/** + * Budget row with horizontal layout matching launchpad style. + */ +export function BudgetCard({ budget, onClick, className }: BudgetCardProps) { + const percentage = budget.limit > 0 ? (budget.spent / budget.limit) * 100 : 0 + const status = percentage >= 100 ? 'critical' : percentage >= 80 ? 'warning' : 'healthy' + + const TypeIcon = typeIcons[budget.type] || DollarSign + const ActionIcon = actionIcons[budget.limitAction] || Gauge + + const formatCurrency = (value: number) => + new Intl.NumberFormat('en-US', { + style: 'currency', + currency: 'USD', + minimumFractionDigits: 0, + maximumFractionDigits: 0, + }).format(value) + + return ( +
+
+ {/* Left: Icon + Name + Badges */} +
+
+ +
+
+

{budget.name}

+
+ + {budget.type} + + {budget.tagCategory && ( + + {budget.tagCategory} + + )} +
+
+
+ + {/* Middle: Progress */} +
+
+ + {formatCurrency(budget.spent)} / {formatCurrency(budget.limit)} + + + {Math.round(percentage)}% + +
+ div]:bg-green-500', + status === 'warning' && '[&>div]:bg-orange-500', + status === 'critical' && '[&>div]:bg-red-500' + )} + /> +
+ + {/* Right: Actions + Notifications + Chevron */} +
+ + + {actionLabels[budget.limitAction]} + + +
+ {budget.notifications.inApp && ( + + )} + {budget.notifications.email && ( + + )} + {budget.notifications.webhook && ( + + )} +
+ + +
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/shared/KpiCard.tsx b/honeycomb/src/components/agent-control/shared/KpiCard.tsx new file mode 100644 index 00000000..b01c1c2c --- /dev/null +++ b/honeycomb/src/components/agent-control/shared/KpiCard.tsx @@ -0,0 +1,67 @@ +import { Card, CardContent } from '@/components/ui/card' +import { Skeleton } from '@/components/ui/skeleton' +import { cn } from '@/lib/utils' + +interface KpiCardProps { + label: string + value: string | number + icon?: React.ReactNode + trend?: { value: number; direction: 'up' | 'down' } + highlight?: boolean + loading?: boolean + className?: string +} + +/** + * Real-time KPI display card with optional trend indicator. + */ +export function KpiCard({ + label, + value, + icon, + trend, + highlight, + loading, + className, +}: KpiCardProps) { + if (loading) { + return ( + + + + + + + ) + } + + return ( + + +
+ {label} + {icon && {icon}} +
+
+ {value} + {trend && ( + + {trend.direction === 'up' ? '↑' : '↓'} {Math.abs(trend.value)}% + + )} +
+
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/shared/LiveIndicator.tsx b/honeycomb/src/components/agent-control/shared/LiveIndicator.tsx new file mode 100644 index 00000000..9cf2314d --- /dev/null +++ b/honeycomb/src/components/agent-control/shared/LiveIndicator.tsx @@ -0,0 +1,23 @@ +import { cn } from '@/lib/utils' + +interface LiveIndicatorProps { + isLive?: boolean + className?: string +} + +/** + * Pulsing dot indicator for live/active status. + */ +export function LiveIndicator({ isLive = true, className }: LiveIndicatorProps) { + if (!isLive) return null + + return ( +
+ + + + + Live +
+ ) +} diff --git a/honeycomb/src/components/agent-control/shared/NotificationBell.tsx b/honeycomb/src/components/agent-control/shared/NotificationBell.tsx new file mode 100644 index 00000000..022ec1da --- /dev/null +++ b/honeycomb/src/components/agent-control/shared/NotificationBell.tsx @@ -0,0 +1,137 @@ +import { useNavigate } from 'react-router-dom' +import { Button } from '@/components/ui/button' +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuSeparator, + DropdownMenuTrigger, +} from '@/components/ui/dropdown-menu' +import { ScrollArea } from '@/components/ui/scroll-area' +import { useNotificationStore } from '@/stores/notificationStore' +import { cn } from '@/lib/utils' + +/** + * Header notification bell with dropdown list. + */ +export function NotificationBell() { + const navigate = useNavigate() + const { notifications, unreadCount, markAsRead, markAllAsRead } = useNotificationStore() + + const handleNotificationClick = (notification: (typeof notifications)[0]) => { + markAsRead(notification.id) + + // Navigate to relevant panel based on notification type + if (notification.type === 'budget') { + navigate('/cost-control') + } + } + + const formatTime = (timestamp: string) => { + const date = new Date(timestamp) + const now = new Date() + const diffMs = now.getTime() - date.getTime() + const diffMins = Math.floor(diffMs / 60000) + const diffHours = Math.floor(diffMs / 3600000) + const diffDays = Math.floor(diffMs / 86400000) + + if (diffMins < 1) return 'Just now' + if (diffMins < 60) return `${diffMins}m ago` + if (diffHours < 24) return `${diffHours}h ago` + return `${diffDays}d ago` + } + + const typeIcons: Record = { + info: 'text-blue-500', + success: 'text-green-500', + warning: 'text-yellow-500', + error: 'text-red-500', + budget: 'text-purple-500', + } + + return ( + + + + + + +
+ Notifications + {unreadCount > 0 && ( + + )} +
+ + + + {notifications.length === 0 ? ( +
+ No notifications +
+ ) : ( + + {notifications.map((notification) => ( + handleNotificationClick(notification)} + > +
+ +
+
+ + {notification.title} + + + {formatTime(notification.timestamp)} + +
+

+ {notification.message} +

+
+
+
+ ))} +
+ )} +
+
+ ) +} diff --git a/honeycomb/src/components/agent-control/workers/WorkerProfilePanel.tsx b/honeycomb/src/components/agent-control/workers/WorkerProfilePanel.tsx new file mode 100644 index 00000000..cb4d4106 --- /dev/null +++ b/honeycomb/src/components/agent-control/workers/WorkerProfilePanel.tsx @@ -0,0 +1,171 @@ +import { + Sheet, + SheetContent, + SheetDescription, + SheetHeader, + SheetTitle, +} from '@/components/ui/sheet' +import { Avatar, AvatarFallback } from '@/components/ui/avatar' +import { Badge } from '@/components/ui/badge' +import { Separator } from '@/components/ui/separator' +import { cn } from '@/lib/utils' +import type { AgentInfo } from '@/types/agentControl' + +interface WorkerProfilePanelProps { + worker: AgentInfo | null + open: boolean + onOpenChange: (open: boolean) => void +} + +/** + * Sidebar sheet showing detailed worker/agent information. + */ +export function WorkerProfilePanel({ + worker, + open, + onOpenChange, +}: WorkerProfilePanelProps) { + if (!worker) return null + + const isOnline = worker.status === 'connected' + + const formatDate = (dateString: string) => { + return new Date(dateString).toLocaleString(undefined, { + dateStyle: 'medium', + timeStyle: 'short', + }) + } + + const formatCurrency = (value: number) => + new Intl.NumberFormat('en-US', { + style: 'currency', + currency: 'USD', + minimumFractionDigits: 2, + }).format(value) + + const stats = [ + { + label: 'Total Requests', + value: worker.total_requests.toLocaleString(), + }, + { + label: 'Total Cost', + value: formatCurrency(worker.total_cost), + }, + { + label: 'First Seen', + value: formatDate(worker.first_seen), + }, + { + label: 'Last Seen', + value: formatDate(worker.last_seen), + }, + ] + + return ( + + + +
+ + + {(worker.agent_name || worker.agent).slice(0, 2).toUpperCase()} + + +
+ + {worker.agent_name || worker.agent} + + + {worker.agent} + +
+
+
+ +
+ {/* Status */} +
+ Status + + + {isOnline ? 'Online' : 'Offline'} + +
+ + {/* Connection Type */} + {worker.connection_type && ( +
+ Connection + {worker.connection_type} +
+ )} + + {/* Instance ID */} + {worker.instance_id && ( +
+ Instance ID + + {worker.instance_id.slice(0, 8)}... + +
+ )} + + + + {/* Stats */} +
+

Statistics

+
+ {stats.map((stat) => ( +
+
{stat.label}
+
{stat.value}
+
+ ))} +
+
+ + + + {/* Activity Timeline (placeholder) */} +
+

Recent Activity

+
+
+
+
+
Connected
+
+ {formatDate(worker.last_seen)} +
+
+
+
+
+
+
First request
+
+ {formatDate(worker.first_seen)} +
+
+
+
+
+
+ + + ) +} diff --git a/honeycomb/src/components/auth/LoginForm.tsx b/honeycomb/src/components/auth/LoginForm.tsx new file mode 100644 index 00000000..4fdad098 --- /dev/null +++ b/honeycomb/src/components/auth/LoginForm.tsx @@ -0,0 +1,127 @@ +import { useForm } from 'react-hook-form' +import { zodResolver } from '@hookform/resolvers/zod' +import * as z from 'zod/v3' +import { Button } from '@/components/ui/button' +import { Input } from '@/components/ui/input' +import { useState } from 'react' +import { submitLogin } from '@/services/authApi' +import { useUserStore } from '@/stores/userStore' +import { useNavigate, useSearchParams, Link } from 'react-router-dom' + +const loginSchema = z.object({ + email: z.string().email('Please enter a valid email'), + password: z.string().min(1, 'Please enter your password'), +}) + +type LoginFormData = z.infer + +interface LoginFormProps { + orgPath?: string + orgName?: string + showSignup?: boolean +} + +export function LoginForm({ orgPath, orgName, showSignup = true }: LoginFormProps) { + const [error, setError] = useState('') + const [isSubmitting, setIsSubmitting] = useState(false) + const navigate = useNavigate() + const [searchParams] = useSearchParams() + const initUserProfile = useUserStore((s) => s.initUserProfile) + + const { + register, + handleSubmit, + formState: { errors }, + } = useForm({ + resolver: zodResolver(loginSchema), + }) + + const handleRedirect = () => { + const redirect = searchParams.get('redirect') + navigate(redirect ? 
decodeURIComponent(redirect) : '/') + } + + const handleLogin = async (data: LoginFormData) => { + setError('') + setIsSubmitting(true) + + try { + const res = await submitLogin(data) + + localStorage.removeItem('context_session_id') + localStorage.setItem('token', `jwt ${res.token}`) + + if (res.mustResetPassword) { + navigate(`/reset-password?token=${res.token}`) + return + } + + await initUserProfile() + handleRedirect() + } catch (err) { + setError((err as Error)?.message || 'Failed to login. Please check your credentials.') + } finally { + setIsSubmitting(false) + } + } + + return ( +
+ {orgName && ( +

+ Welcome to {orgName}'s ARP Platform +

+ )} + + {error &&

{error}

} + +
+
+ + {errors.email && ( +

{errors.email.message}

+ )} +
+ +
+ + {errors.password && ( +

{errors.password.message}

+ )} +
+ + +
+ +
+ + Forgot password? + + + {showSignup && ( + + Don't have an account?{' '} + + Sign up + + + )} +
+
+ ) +} diff --git a/honeycomb/src/components/auth/ProtectedRoute.tsx b/honeycomb/src/components/auth/ProtectedRoute.tsx new file mode 100644 index 00000000..1bcb45c2 --- /dev/null +++ b/honeycomb/src/components/auth/ProtectedRoute.tsx @@ -0,0 +1,52 @@ +import { Navigate, useLocation } from 'react-router-dom' +import { useUserStore } from '@/stores/userStore' +import { useEffect, useState } from 'react' + +interface ProtectedRouteProps { + children: React.ReactNode +} + +export function ProtectedRoute({ children }: ProtectedRouteProps) { + const location = useLocation() + const [isChecking, setIsChecking] = useState(true) + const [isAuthenticated, setIsAuthenticated] = useState(false) + const initUserProfile = useUserStore((s) => s.initUserProfile) + const user = useUserStore((s) => s.user) + + useEffect(() => { + const checkAuth = async () => { + const token = localStorage.getItem('token') + if (!token) { + setIsChecking(false) + return + } + + if (user) { + setIsAuthenticated(true) + setIsChecking(false) + return + } + + const result = await initUserProfile() + setIsAuthenticated(!!result) + setIsChecking(false) + } + + checkAuth() + }, [initUserProfile, user]) + + if (isChecking) { + return ( +
+
+
+ ) + } + + if (!isAuthenticated) { + const returnUrl = encodeURIComponent(location.pathname + location.search) + return + } + + return <>{children} +} diff --git a/honeycomb/src/components/auth/RegisterForm.tsx b/honeycomb/src/components/auth/RegisterForm.tsx new file mode 100644 index 00000000..2942f950 --- /dev/null +++ b/honeycomb/src/components/auth/RegisterForm.tsx @@ -0,0 +1,205 @@ +import { useForm } from 'react-hook-form' +import { zodResolver } from '@hookform/resolvers/zod' +import * as z from 'zod/v3' +import { Button } from '@/components/ui/button' +import { Input } from '@/components/ui/input' +import { useState } from 'react' +import { submitRegister } from '@/services/authApi' +import { useUserStore } from '@/stores/userStore' +import { useNavigate, useSearchParams, Link } from 'react-router-dom' +import { Eye, EyeOff } from 'lucide-react' + +const registerSchema = z + .object({ + firstname: z.string().min(1, 'First name is required'), + lastname: z.string().min(1, 'Last name is required'), + email: z.string().email('Please enter a valid email'), + password: z.string().min(8, 'Password must be at least 8 characters'), + confirmPassword: z.string().min(1, 'Please confirm your password'), + }) + .refine((data) => data.password === data.confirmPassword, { + message: 'Passwords do not match', + path: ['confirmPassword'], + }) + +type RegisterFormData = z.infer + +interface RegisterFormProps { + orgPath?: string + orgName?: string +} + +export function RegisterForm({ orgPath, orgName }: RegisterFormProps) { + const [error, setError] = useState('') + const [isSubmitting, setIsSubmitting] = useState(false) + const [showPassword, setShowPassword] = useState(false) + const [showConfirmPassword, setShowConfirmPassword] = useState(false) + const navigate = useNavigate() + const [searchParams] = useSearchParams() + const initUserProfile = useUserStore((s) => s.initUserProfile) + + const { + register, + handleSubmit, + watch, + formState: { errors, isValid }, + 
} = useForm({ + resolver: zodResolver(registerSchema), + mode: 'onChange', + }) + + const password = watch('password') + const confirmPassword = watch('confirmPassword') + const passwordsMatch = !confirmPassword || password === confirmPassword + + const handleRedirect = () => { + const redirect = searchParams.get('redirect') + navigate(redirect ? decodeURIComponent(redirect) : '/') + } + + const handleRegister = async (data: RegisterFormData) => { + setError('') + setIsSubmitting(true) + + try { + const res = await submitRegister({ + email: data.email, + password: data.password, + firstname: data.firstname, + lastname: data.lastname, + }) + + localStorage.removeItem('context_session_id') + localStorage.setItem('token', `jwt ${res.token}`) + + await initUserProfile() + handleRedirect() + } catch (err) { + setError((err as Error)?.message || 'Failed to register. Please try again.') + } finally { + setIsSubmitting(false) + } + } + + return ( +
+ {orgName && ( +

+ Join {orgName}'s ARP Platform +

+ )} + + {!orgName && ( +

Create your account

+ )} + + {error &&

{error}

} + +
+
+
+ + {errors.firstname && ( +

{errors.firstname.message}

+ )} +
+
+ + {errors.lastname && ( +

{errors.lastname.message}

+ )} +
+
+ +
+ + {errors.email && ( +

{errors.email.message}

+ )} +
+ +
+
+ + +
+ {errors.password && ( +

{errors.password.message}

+ )} +
+ +
+
+ + +
+ {errors.confirmPassword && ( +

{errors.confirmPassword.message}

+ )} + {!errors.confirmPassword && confirmPassword && !passwordsMatch && ( +

Passwords do not match

+ )} +
+ + +
+ +
+ Already have an account? + + Sign in + +
+
+ ) +} diff --git a/honeycomb/src/components/quickstart/AgentStatusIndicator.tsx b/honeycomb/src/components/quickstart/AgentStatusIndicator.tsx new file mode 100644 index 00000000..474b0728 --- /dev/null +++ b/honeycomb/src/components/quickstart/AgentStatusIndicator.tsx @@ -0,0 +1,91 @@ +import { cn } from '@/lib/utils' +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from '@/components/ui/tooltip' +import { useAgentStatus } from '@/hooks/useAgentStatus' + +interface AgentStatusIndicatorProps { + className?: string + showDetails?: boolean +} + +export function AgentStatusIndicator({ + className, + showDetails = true, +}: AgentStatusIndicatorProps) { + const { status, isConnected, error, hasActiveAgents, agentCount } = + useAgentStatus({ autoConnect: true, autoReconnect: true }) + + const formatTime = (timestamp: string) => { + return new Date(timestamp).toLocaleTimeString() + } + + const tooltipContent = () => { + if (error) { + return {error} + } + + if (!isConnected) { + return Connecting... + } + + if (!hasActiveAgents) { + return No agents connected + } + + if (showDetails && status?.instances?.length) { + return ( +
+
+ {agentCount} agent{agentCount !== 1 ? 's' : ''} connected +
+
+ {status.instances.slice(0, 5).map((instance) => ( +
+ {instance.instance_id.slice(0, 8)}... -{' '} + {formatTime(instance.connected_at)} +
+ ))} + {status.instances.length > 5 && ( +
+{status.instances.length - 5} more
+ )} +
+
+ ) + } + + return `${agentCount} agent${agentCount !== 1 ? 's' : ''} connected` + } + + const indicator = ( +
+ + {hasActiveAgents ? ( + <> + + + + ) : ( + + )} + + + {hasActiveAgents ? `${agentCount} connected` : 'No agents'} + +
+ ) + + return ( + + + {indicator} + + {tooltipContent()} + + + + ) +} diff --git a/honeycomb/src/components/quickstart/CodeBlock.tsx b/honeycomb/src/components/quickstart/CodeBlock.tsx new file mode 100644 index 00000000..6ed8c3fd --- /dev/null +++ b/honeycomb/src/components/quickstart/CodeBlock.tsx @@ -0,0 +1,63 @@ +import { useState, useCallback } from 'react' +import { Copy, Check } from 'lucide-react' +import { Button } from '@/components/ui/button' +import { cn } from '@/lib/utils' +import { copyToClipboard } from '@/lib/quickstart' +import { useNotificationStore } from '@/stores/notificationStore' + +interface CodeBlockProps { + code: string + language?: string + className?: string +} + +export function CodeBlock({ code, language, className }: CodeBlockProps) { + const [copied, setCopied] = useState(false) + const addNotification = useNotificationStore((s) => s.addNotification) + + const handleCopy = useCallback(async () => { + const success = await copyToClipboard(code) + if (success) { + setCopied(true) + setTimeout(() => setCopied(false), 2000) + } else { + addNotification({ + type: 'error', + title: 'Copy failed', + message: 'Failed to copy code to clipboard', + }) + } + }, [code, addNotification]) + + return ( +
+
+ {language && ( + + {language} + + )} + +
+
+        {code.trimEnd()}
+      
+
+ ) +} diff --git a/honeycomb/src/components/quickstart/MarkdownRenderer.tsx b/honeycomb/src/components/quickstart/MarkdownRenderer.tsx new file mode 100644 index 00000000..20378971 --- /dev/null +++ b/honeycomb/src/components/quickstart/MarkdownRenderer.tsx @@ -0,0 +1,72 @@ +import ReactMarkdown from 'react-markdown' +import type { Components } from 'react-markdown' +import { CodeBlock } from './CodeBlock' + +interface MarkdownRendererProps { + content: string +} + +export function MarkdownRenderer({ content }: MarkdownRendererProps) { + const components: Components = { + // Handle fenced code blocks (wrapped in pre) + pre({ children }) { + return <>{children} + }, + // Handle all code elements + code({ className, children, node }) { + const match = /language-(\w+)/.exec(className || '') + const language = match ? match[1] : undefined + const codeContent = String(children).replace(/\n$/, '') + + // Check if this is inside a pre tag (block code) by looking at parent + const isBlock = node?.position && codeContent.includes('\n') || language + + if (isBlock) { + return + } + + // Inline code + return ( + + {children} + + ) + }, + h1: ({ children }) => ( +

{children}

+ ), + h2: ({ children }) => ( +

{children}

+ ), + h3: ({ children }) => ( +

{children}

+ ), + p: ({ children }) =>

{children}

, + ul: ({ children }) =>
    {children}
, + ol: ({ children }) => ( +
    {children}
+ ), + li: ({ children }) =>
  • {children}
  • , + a: ({ href, children }) => ( + + {children} + + ), + blockquote: ({ children }) => ( +
    + {children} +
    + ), + } + + return ( +
    + {content} +
    + ) +} diff --git a/honeycomb/src/components/quickstart/QuickstartToolbar.tsx b/honeycomb/src/components/quickstart/QuickstartToolbar.tsx new file mode 100644 index 00000000..a813b076 --- /dev/null +++ b/honeycomb/src/components/quickstart/QuickstartToolbar.tsx @@ -0,0 +1,89 @@ +import { Copy, Download } from 'lucide-react' +import { Button } from '@/components/ui/button' +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select' +import type { SdkLanguage, AgentFramework } from '@/types/quickstart' + +interface QuickstartToolbarProps { + languages: SdkLanguage[] + frameworks: AgentFramework[] + selectedLanguage: string + selectedFramework: string + onLanguageChange: (value: string) => void + onFrameworkChange: (value: string) => void + onCopyAll: () => void + onDownload: () => void + isCopyDisabled: boolean + isDownloadDisabled: boolean +} + +export function QuickstartToolbar({ + languages, + frameworks, + selectedLanguage, + selectedFramework, + onLanguageChange, + onFrameworkChange, + onCopyAll, + onDownload, + isCopyDisabled, + isDownloadDisabled, +}: QuickstartToolbarProps) { + return ( +
    +
    + + + +
    + +
    + + +
    +
    + ) +} diff --git a/honeycomb/src/components/quickstart/SDKQuickstart.tsx b/honeycomb/src/components/quickstart/SDKQuickstart.tsx new file mode 100644 index 00000000..5a45a6c0 --- /dev/null +++ b/honeycomb/src/components/quickstart/SDKQuickstart.tsx @@ -0,0 +1,229 @@ +import { useState, useEffect, useMemo, useCallback } from 'react' +import { Loader2, RefreshCw } from 'lucide-react' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import { Button } from '@/components/ui/button' +import { Skeleton } from '@/components/ui/skeleton' +import { + useQuickstartOptions, + useGenerateQuickstart, +} from '@/hooks/queries/useQuickstart' +import { useNotificationStore } from '@/stores/notificationStore' +import { + extractCodeBlocks, + copyToClipboard, + downloadAsFile, +} from '@/lib/quickstart' +import { QuickstartToolbar } from './QuickstartToolbar' +import { MarkdownRenderer } from './MarkdownRenderer' +import { AgentStatusIndicator } from './AgentStatusIndicator' +import type { AgentFramework } from '@/types/quickstart' + +export function SDKQuickstart() { + const [selectedLanguage, setSelectedLanguage] = useState('python') + const [selectedFramework, setSelectedFramework] = useState('') + + const addNotification = useNotificationStore((s) => s.addNotification) + + const { + data: options, + isLoading: optionsLoading, + error: optionsError, + } = useQuickstartOptions() + + const generateMutation = useGenerateQuickstart() + + // Filter frameworks by language support + const availableFrameworks = useMemo(() => { + if (!options?.agentFrameworks) return [] + return options.agentFrameworks.filter((fw) => + selectedLanguage === 'python' ? 
fw.pythonSupport : fw.typescriptSupport + ) + }, [options, selectedLanguage]) + + // Auto-select first framework when options load or language changes + useEffect(() => { + if (availableFrameworks.length > 0 && !selectedFramework) { + setSelectedFramework(availableFrameworks[0].id) + } + }, [availableFrameworks, selectedFramework]) + + // Generate docs + const generateDocs = useCallback(() => { + if (!selectedFramework) return + generateMutation.mutate({ + agentFramework: selectedFramework, + sdkLanguage: selectedLanguage, + }) + }, [selectedFramework, selectedLanguage, generateMutation]) + + // Auto-generate on initial load and when selections change + useEffect(() => { + if (selectedFramework && options) { + generateDocs() + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [selectedFramework, selectedLanguage]) + + // Handle language change + const handleLanguageChange = useCallback( + (newLanguage: string) => { + setSelectedLanguage(newLanguage) + + // Check if current framework supports new language + const newFrameworks = options?.agentFrameworks?.filter((fw) => + newLanguage === 'python' ? 
fw.pythonSupport : fw.typescriptSupport + ) + const frameworkStillValid = newFrameworks?.some( + (fw) => fw.id === selectedFramework + ) + + if (!frameworkStillValid) { + // Will auto-select via useEffect + setSelectedFramework('') + } + }, + [options, selectedFramework] + ) + + // Handle framework change + const handleFrameworkChange = useCallback((frameworkId: string) => { + setSelectedFramework(frameworkId) + }, []) + + // Copy all code blocks + const handleCopyAll = useCallback(async () => { + if (!generateMutation.data?.markdown) return + + const codeBlocks = extractCodeBlocks(generateMutation.data.markdown) + if (codeBlocks.length === 0) { + addNotification({ + type: 'warning', + title: 'No code to copy', + message: 'No code blocks found in the documentation', + }) + return + } + + const success = await copyToClipboard(codeBlocks.join('\n\n')) + if (success) { + addNotification({ + type: 'success', + title: 'Copied', + message: 'All code blocks copied to clipboard', + }) + } + }, [generateMutation.data, addNotification]) + + // Download markdown + const handleDownload = useCallback(() => { + if (!generateMutation.data?.markdown) return + + const filename = `aden-sdk-quickstart-${selectedFramework}-${selectedLanguage}.md` + downloadAsFile(generateMutation.data.markdown, filename) + }, [generateMutation.data, selectedFramework, selectedLanguage]) + + // Error state + if (optionsError) { + return ( + + + SDK Quickstart + + +
    +

    + Failed to load quickstart options +

    + +
    +
    +
    + ) + } + + // Loading state + if (optionsLoading) { + return ( + + + SDK Quickstart + + +
    +
    + + +
    + +
    +
    +
    + ) + } + + const markdown = generateMutation.data?.markdown + const tokenName = generateMutation.data?.metadata?.tokenName + const codeBlocks = markdown ? extractCodeBlocks(markdown) : [] + + return ( + + + SDK Quickstart + + + + {options && ( + + )} + + {/* Token info */} + {tokenName && ( +

    + Using API Key: {tokenName} +

    + )} + + {/* Generation error */} + {generateMutation.isError && ( +
    +

    Failed to generate documentation

    + +
    + )} + + {/* Loading overlay */} + {generateMutation.isPending && ( +
    + + + Generating documentation... + +
    + )} + + {/* Rendered markdown */} + {markdown && !generateMutation.isPending && ( + + )} +
    +
    + ) +} diff --git a/honeycomb/src/components/quickstart/index.ts b/honeycomb/src/components/quickstart/index.ts new file mode 100644 index 00000000..382a0631 --- /dev/null +++ b/honeycomb/src/components/quickstart/index.ts @@ -0,0 +1,5 @@ +export { SDKQuickstart } from './SDKQuickstart' +export { CodeBlock } from './CodeBlock' +export { MarkdownRenderer } from './MarkdownRenderer' +export { QuickstartToolbar } from './QuickstartToolbar' +export { AgentStatusIndicator } from './AgentStatusIndicator' diff --git a/honeycomb/src/components/settings/ChangePasswordDialog.tsx b/honeycomb/src/components/settings/ChangePasswordDialog.tsx new file mode 100644 index 00000000..3114b7df --- /dev/null +++ b/honeycomb/src/components/settings/ChangePasswordDialog.tsx @@ -0,0 +1,160 @@ +import { useState, useEffect } from 'react' +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, + DialogFooter, +} from '@/components/ui/dialog' +import { Button } from '@/components/ui/button' +import { Input } from '@/components/ui/input' +import { useUpdatePassword } from '@/hooks/queries/useUser' +import { useNotificationStore } from '@/stores/notificationStore' + +interface ChangePasswordDialogProps { + open: boolean + onOpenChange: (open: boolean) => void +} + +interface FormErrors { + oldPassword?: string + newPassword?: string + confirmPassword?: string +} + +export function ChangePasswordDialog({ + open, + onOpenChange, +}: ChangePasswordDialogProps) { + const [oldPassword, setOldPassword] = useState('') + const [newPassword, setNewPassword] = useState('') + const [confirmPassword, setConfirmPassword] = useState('') + const [errors, setErrors] = useState({}) + + const updatePassword = useUpdatePassword() + const addNotification = useNotificationStore((s) => s.addNotification) + + // Reset form when dialog opens/closes + useEffect(() => { + if (open) { + setOldPassword('') + setNewPassword('') + setConfirmPassword('') + setErrors({}) + } + }, [open]) + + const validate = 
(): boolean => { + const newErrors: FormErrors = {} + + if (!oldPassword) { + newErrors.oldPassword = 'Please enter your old password' + } else if (oldPassword.length < 10) { + newErrors.oldPassword = 'Password must be at least 10 characters' + } + + if (!newPassword) { + newErrors.newPassword = 'Please enter your new password' + } else if (newPassword.length < 10) { + newErrors.newPassword = 'Password must be at least 10 characters' + } + + if (!confirmPassword) { + newErrors.confirmPassword = 'Please confirm your new password' + } else if (newPassword !== confirmPassword) { + newErrors.confirmPassword = "Passwords don't match" + } + + setErrors(newErrors) + return Object.keys(newErrors).length === 0 + } + + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault() + + if (!validate()) return + + try { + await updatePassword.mutateAsync({ + oldPassword, + newPassword, + }) + addNotification({ + type: 'success', + title: 'Password updated', + message: 'Your password has been updated successfully.', + }) + onOpenChange(false) + } catch { + addNotification({ + type: 'error', + title: 'Update failed', + message: 'Failed to update password. Please check your old password.', + }) + } + } + + const handleClose = () => { + onOpenChange(false) + } + + return ( + + + + Change password + + +
    +
    + + setOldPassword(e.target.value)} + placeholder="Enter your current password" + /> + {errors.oldPassword && ( +

    {errors.oldPassword}

    + )} +
    + +
    + + setNewPassword(e.target.value)} + placeholder="Enter your new password" + /> + {errors.newPassword && ( +

    {errors.newPassword}

    + )} +
    + +
    + + setConfirmPassword(e.target.value)} + placeholder="Confirm your new password" + /> + {errors.confirmPassword && ( +

    {errors.confirmPassword}

    + )} +
    + + + + + +
    +
    +
    + ) +} diff --git a/honeycomb/src/components/settings/CreateAPIKeyDialog.tsx b/honeycomb/src/components/settings/CreateAPIKeyDialog.tsx new file mode 100644 index 00000000..dfa5aff4 --- /dev/null +++ b/honeycomb/src/components/settings/CreateAPIKeyDialog.tsx @@ -0,0 +1,165 @@ +import { useState, useEffect } from 'react' +import { Copy, Check } from 'lucide-react' +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, + DialogFooter, +} from '@/components/ui/dialog' +import { Button } from '@/components/ui/button' +import { Input } from '@/components/ui/input' +import { Textarea } from '@/components/ui/textarea' +import { useCreateAPIToken } from '@/hooks/queries/useUser' +import { useNotificationStore } from '@/stores/notificationStore' +import { isValidTokenLabel } from '@/lib/user' + +interface CreateAPIKeyDialogProps { + open: boolean + onOpenChange: (open: boolean) => void +} + +export function CreateAPIKeyDialog({ + open, + onOpenChange, +}: CreateAPIKeyDialogProps) { + const [tokenName, setTokenName] = useState('') + const [newToken, setNewToken] = useState('') + const [showToken, setShowToken] = useState(false) + const [error, setError] = useState('') + const [copied, setCopied] = useState(false) + + const createToken = useCreateAPIToken() + const addNotification = useNotificationStore((s) => s.addNotification) + + // Reset state when dialog opens/closes + useEffect(() => { + if (open) { + setTokenName('') + setNewToken('') + setShowToken(false) + setError('') + setCopied(false) + } + }, [open]) + + const handleCreate = async () => { + if (!tokenName.trim()) { + setError('Please enter a name for the API key') + return + } + + if (!isValidTokenLabel(tokenName)) { + setError('Please only use letters, numbers, and underscores') + return + } + + setError('') + + try { + const result = await createToken.mutateAsync(tokenName) + setNewToken(result.token) + setShowToken(true) + addNotification({ + type: 'success', + title: 'API key created', + 
message: 'Your new API key has been created successfully.', + }) + } catch { + addNotification({ + type: 'error', + title: 'Creation failed', + message: 'Failed to create API key. Please try again.', + }) + } + } + + const handleCopy = async () => { + try { + await navigator.clipboard.writeText(newToken) + setCopied(true) + setTimeout(() => setCopied(false), 2000) + } catch { + addNotification({ + type: 'error', + title: 'Copy failed', + message: 'Failed to copy to clipboard.', + }) + } + } + + const handleClose = () => { + onOpenChange(false) + } + + return ( + + + + Create an API Key + + +
    +

    + Please enter a name for the API key +

    + + setTokenName(e.target.value)} + placeholder="Enter letters, numbers, and underscores" + disabled={showToken} + onKeyDown={(e) => { + if (e.key === 'Enter' && !showToken) handleCreate() + }} + /> + + {error &&

    {error}

    } + + {!showToken && ( + + + + )} + + {showToken && ( +
    + +

    + Make sure to copy your API key now. You won't be able to see it again! +

    +
    +