Merge branch 'main' into feature/resumable-sessions
This commit is contained in:
@@ -507,7 +507,7 @@ EventLoopNodes are **auto-created** by `GraphExecutor` at runtime. Both direct `
|
||||
from framework.graph.executor import GraphExecutor
|
||||
from framework.runtime.core import Runtime
|
||||
|
||||
storage_path = Path.home() / ".hive" / "my_agent"
|
||||
storage_path = Path.home() / ".hive" / "agents" / "my_agent"
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
runtime = Runtime(storage_path)
|
||||
|
||||
|
||||
@@ -90,7 +90,7 @@ def tui(mock, verbose, debug):
|
||||
agent._event_bus = EventBus()
|
||||
agent._tool_registry = ToolRegistry()
|
||||
|
||||
storage_path = Path.home() / ".hive" / "deep_research_agent"
|
||||
storage_path = Path.home() / ".hive" / "agents" / "deep_research_agent"
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
mcp_config_path = Path(__file__).parent / "mcp_servers.json"
|
||||
|
||||
@@ -177,7 +177,7 @@ class DeepResearchAgent:
|
||||
"""Set up the executor with all components."""
|
||||
from pathlib import Path
|
||||
|
||||
storage_path = Path.home() / ".hive" / "deep_research_agent"
|
||||
storage_path = Path.home() / ".hive" / "agents" / "deep_research_agent"
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
self._event_bus = EventBus()
|
||||
|
||||
@@ -34,7 +34,7 @@ Before using this skill, ensure:
|
||||
1. You have an exported agent in `exports/{agent_name}/`
|
||||
2. The agent has been run at least once (logs exist)
|
||||
3. Runtime logging is enabled (default in Hive framework)
|
||||
4. You have access to the agent's working directory at `~/.hive/{agent_name}/`
|
||||
4. You have access to the agent's working directory at `~/.hive/agents/{agent_name}/`
|
||||
|
||||
---
|
||||
|
||||
@@ -51,7 +51,7 @@ Before using this skill, ensure:
|
||||
- Confirm the agent exists in `exports/{agent_name}/`
|
||||
|
||||
2. **Determine agent working directory:**
|
||||
- Calculate: `~/.hive/{agent_name}/`
|
||||
- Calculate: `~/.hive/agents/{agent_name}/`
|
||||
- Verify this directory exists and contains session logs
|
||||
|
||||
3. **Read agent configuration:**
|
||||
@@ -617,7 +617,7 @@ Available Checkpoints: (3)
|
||||
**Check if issue is resolved:**
|
||||
```
|
||||
query_runtime_logs(
|
||||
agent_work_dir="~/.hive/{agent_name}",
|
||||
agent_work_dir="~/.hive/agents/{agent_name}",
|
||||
status="needs_attention",
|
||||
limit=5
|
||||
)
|
||||
@@ -627,7 +627,7 @@ Available Checkpoints: (3)
|
||||
**Verify specific node behavior:**
|
||||
```
|
||||
query_runtime_log_details(
|
||||
agent_work_dir="~/.hive/{agent_name}",
|
||||
agent_work_dir="~/.hive/agents/{agent_name}",
|
||||
run_id="{new_run_id}",
|
||||
node_id="{fixed_node_id}"
|
||||
)
|
||||
@@ -756,7 +756,7 @@ You: "I'll help debug the twitter_outreach agent. Let me gather context..."
|
||||
Context:
|
||||
- Agent: twitter_outreach
|
||||
- Goal: twitter-outreach-multi-loop
|
||||
- Working Dir: ~/.hive/twitter_outreach
|
||||
- Working Dir: ~/.hive/agents/twitter_outreach
|
||||
- Success Criteria: ["Successfully send 5 personalized outreach messages"]
|
||||
- Constraints: ["Must verify handle exists", "Must personalize message"]
|
||||
- Nodes: intake-collector, profile-analyzer, message-composer, outreach-sender
|
||||
@@ -919,12 +919,12 @@ Your agent should now work correctly!"
|
||||
## Storage Locations Reference
|
||||
|
||||
**New unified storage (default):**
|
||||
- Logs: `~/.hive/{agent_name}/sessions/session_YYYYMMDD_HHMMSS_{uuid}/logs/`
|
||||
- State: `~/.hive/{agent_name}/sessions/{session_id}/state.json`
|
||||
- Conversations: `~/.hive/{agent_name}/sessions/{session_id}/conversations/`
|
||||
- Logs: `~/.hive/agents/{agent_name}/sessions/session_YYYYMMDD_HHMMSS_{uuid}/logs/`
|
||||
- State: `~/.hive/agents/{agent_name}/sessions/{session_id}/state.json`
|
||||
- Conversations: `~/.hive/agents/{agent_name}/sessions/{session_id}/conversations/`
|
||||
|
||||
**Old storage (deprecated, still supported):**
|
||||
- Logs: `~/.hive/{agent_name}/runtime_logs/runs/{run_id}/`
|
||||
- Logs: `~/.hive/agents/{agent_name}/runtime_logs/runs/{run_id}/`
|
||||
|
||||
The MCP tools automatically check both locations.
|
||||
|
||||
|
||||
@@ -197,16 +197,18 @@ exports/agent_name/
|
||||
|
||||
### What This Phase Does
|
||||
|
||||
Creates comprehensive test suite:
|
||||
- Constraint tests (verify hard requirements)
|
||||
- Success criteria tests (measure goal achievement)
|
||||
- Edge case tests (handle failures gracefully)
|
||||
- Integration tests (end-to-end workflows)
|
||||
### What This Phase Does
|
||||
|
||||
Guides the creation and execution of a comprehensive test suite:
|
||||
- Constraint tests
|
||||
- Success criteria tests
|
||||
- Edge case tests
|
||||
- Integration tests
|
||||
|
||||
### Process
|
||||
|
||||
1. **Analyze agent** - Read goal, constraints, success criteria
|
||||
2. **Generate tests** - Create pytest files in `exports/agent_name/tests/`
|
||||
2. **Generate tests** - The calling agent writes pytest files in `exports/agent_name/tests/` using hive-test guidelines and templates
|
||||
3. **User approval** - Review and approve each test
|
||||
4. **Run evaluation** - Execute tests and collect results
|
||||
5. **Debug failures** - Identify and fix issues
|
||||
|
||||
+3
-3
@@ -1,10 +1,10 @@
|
||||
# Contributing to Aden Agent Framework
|
||||
|
||||
Thank you for your interest in contributing to the Aden Agent Framework! This document provides guidelines and information for contributors. We’re especially looking for help building tools, integrations([check #2805](https://github.com/adenhq/hive/issues/2805)), and example agents for the framework. If you’re interested in extending its functionality, this is the perfect place to start.
|
||||
Thank you for your interest in contributing to the Aden Agent Framework! This document provides guidelines and information for contributors. We’re especially looking for help building tools, integrations ([check #2805](https://github.com/adenhq/hive/issues/2805)), and example agents for the framework. If you’re interested in extending its functionality, this is the perfect place to start.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md).
|
||||
By participating in this project, you agree to abide by our [Code of Conduct](docs/CODE_OF_CONDUCT.md).
|
||||
|
||||
## Issue Assignment Policy
|
||||
|
||||
@@ -159,4 +159,4 @@ By submitting a Pull Request, you agree that your contributions will be licensed
|
||||
|
||||
Feel free to open an issue for questions or join our [Discord community](https://discord.com/invite/MXE49hrKDk).
|
||||
|
||||
Thank you for contributing!
|
||||
Thank you for contributing!
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
<p align="center">
|
||||
<img width="100%" alt="Hive Banner" src="https://storage.googleapis.com/aden-prod-assets/website/aden-title-card.png" />
|
||||
<img width="100%" alt="Hive Banner" src="https://github.com/user-attachments/assets/a027429b-5d3c-4d34-88e4-0feaeaabbab3" />
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@@ -13,16 +13,20 @@
|
||||
<a href="docs/i18n/ko.md">한국어</a>
|
||||
</p>
|
||||
|
||||
[](https://github.com/adenhq/hive/blob/main/LICENSE)
|
||||
[](https://www.ycombinator.com/companies/aden)
|
||||
[](https://discord.com/invite/MXE49hrKDk)
|
||||
[](https://x.com/aden_hq)
|
||||
[](https://www.linkedin.com/company/teamaden/)
|
||||
<p align="center">
|
||||
<a href="https://github.com/adenhq/hive/blob/main/LICENSE"><img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg" alt="Apache 2.0 License" /></a>
|
||||
<a href="https://www.ycombinator.com/companies/aden"><img src="https://img.shields.io/badge/Y%20Combinator-Aden-orange" alt="Y Combinator" /></a>
|
||||
<a href="https://discord.com/invite/MXE49hrKDk"><img src="https://img.shields.io/discord/1172610340073242735?logo=discord&labelColor=%235462eb&logoColor=%23f5f5f5&color=%235462eb" alt="Discord" /></a>
|
||||
<a href="https://x.com/aden_hq"><img src="https://img.shields.io/twitter/follow/teamaden?logo=X&color=%23f5f5f5" alt="Twitter Follow" /></a>
|
||||
<a href="https://www.linkedin.com/company/teamaden/"><img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff" alt="LinkedIn" /></a>
|
||||
<img src="https://img.shields.io/badge/MCP-102_Tools-00ADD8?style=flat-square" alt="MCP" />
|
||||
</p>
|
||||
|
||||
|
||||
<p align="center">
|
||||
<img src="https://img.shields.io/badge/AI_Agents-Self--Improving-brightgreen?style=flat-square" alt="AI Agents" />
|
||||
<img src="https://img.shields.io/badge/Multi--Agent-Systems-blue?style=flat-square" alt="Multi-Agent" />
|
||||
<img src="https://img.shields.io/badge/Goal--Driven-Development-purple?style=flat-square" alt="Goal-Driven" />
|
||||
<img src="https://img.shields.io/badge/Headless-Development-purple?style=flat-square" alt="Headless" />
|
||||
<img src="https://img.shields.io/badge/Human--in--the--Loop-orange?style=flat-square" alt="HITL" />
|
||||
<img src="https://img.shields.io/badge/Production--Ready-red?style=flat-square" alt="Production" />
|
||||
</p>
|
||||
@@ -30,15 +34,16 @@
|
||||
<img src="https://img.shields.io/badge/OpenAI-supported-412991?style=flat-square&logo=openai" alt="OpenAI" />
|
||||
<img src="https://img.shields.io/badge/Anthropic-supported-d4a574?style=flat-square" alt="Anthropic" />
|
||||
<img src="https://img.shields.io/badge/Google_Gemini-supported-4285F4?style=flat-square&logo=google" alt="Gemini" />
|
||||
<img src="https://img.shields.io/badge/MCP-19_Tools-00ADD8?style=flat-square" alt="MCP" />
|
||||
</p>
|
||||
|
||||
## Overview
|
||||
|
||||
Build reliable, self-improving AI agents without hardcoding workflows. Define your goal through conversation with a coding agent, and the framework generates a node graph with dynamically created connection code. When things break, the framework captures failure data, evolves the agent through the coding agent, and redeploys. Built-in human-in-the-loop nodes, credential management, and real-time monitoring give you control without sacrificing adaptability.
|
||||
Build autonomous, reliable, self-improving AI agents without hardcoding workflows. Define your goal through conversation with a coding agent, and the framework generates a node graph with dynamically created connection code. When things break, the framework captures failure data, evolves the agent through the coding agent, and redeploys. Built-in human-in-the-loop nodes, credential management, and real-time monitoring give you control without sacrificing adaptability.
|
||||
|
||||
Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and guides.
|
||||
|
||||
https://github.com/user-attachments/assets/846c0cc7-ffd6-47fa-b4b7-495494857a55
|
||||
|
||||
## Who Is Hive For?
|
||||
|
||||
Hive is designed for developers and teams who want to build **production-grade AI agents** without manually wiring complex workflows.
|
||||
@@ -58,36 +63,23 @@ Hive may not be the best fit if you’re only experimenting with simple agent ch
|
||||
Use Hive when you need:
|
||||
|
||||
- Long-running, autonomous agents
|
||||
- Multi-agent coordination
|
||||
- Strong guardrails, process, and controls
|
||||
- Continuous improvement based on failures
|
||||
- Strong monitoring, safety, and budget controls
|
||||
- Multi-agent coordination
|
||||
- A framework that evolves with your goals
|
||||
|
||||
## What is Aden
|
||||
|
||||
<p align="center">
|
||||
<img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
|
||||
</p>
|
||||
|
||||
Aden is a platform for building, deploying, operating, and adapting AI agents:
|
||||
|
||||
- **Build** - A Coding Agent generates specialized Worker Agents (Sales, Marketing, Ops) from natural language goals
|
||||
- **Deploy** - Headless deployment with CI/CD integration and full API lifecycle management
|
||||
- **Operate** - Real-time monitoring, observability, and runtime guardrails keep agents reliable
|
||||
- **Adapt** - Continuous evaluation, supervision, and adaptation ensure agents improve over time
|
||||
- **Infra** - Shared memory, LLM integrations, tools, and skills power every agent
|
||||
|
||||
## Quick Links
|
||||
|
||||
- **[Documentation](https://docs.adenhq.com/)** - Complete guides and API reference
|
||||
- **[Self-Hosting Guide](https://docs.adenhq.com/getting-started/quickstart)** - Deploy Hive on your infrastructure
|
||||
- **[Changelog](https://github.com/adenhq/hive/releases)** - Latest updates and releases
|
||||
<!-- - **[Roadmap](https://adenhq.com/roadmap)** - Upcoming features and plans -->
|
||||
- **[Roadmap](docs/roadmap.md)** - Upcoming features and plans
|
||||
- **[Report Issues](https://github.com/adenhq/hive/issues)** - Bug reports and feature requests
|
||||
- **[Contributing](CONTRIBUTING.md)** - How to contribute and submit PRs
|
||||
|
||||
## Quick Start
|
||||
|
||||
## Prerequisites
|
||||
### Prerequisites
|
||||
|
||||
- Python 3.11+ for agent development
|
||||
- Claude Code or Cursor for utilizing agent skills
|
||||
@@ -111,7 +103,7 @@ This sets up:
|
||||
- **aden_tools** - MCP tools for agent capabilities (in `tools/.venv`)
|
||||
- **credential store** - Encrypted API key storage (`~/.hive/credentials`)
|
||||
- **LLM provider** - Interactive default model configuration
|
||||
- All required Python dependencies
|
||||
- All required Python dependencies with `uv`
|
||||
|
||||
### Build Your First Agent
|
||||
|
||||
@@ -131,15 +123,6 @@ hive run exports/your_agent_name --input '{"key": "value"}'
|
||||
|
||||
**[📖 Complete Setup Guide](docs/environment-setup.md)** - Detailed instructions for agent development
|
||||
|
||||
### Cursor IDE Support
|
||||
|
||||
Skills are also available in Cursor. To enable:
|
||||
|
||||
1. Open Command Palette (`Cmd+Shift+P` / `Ctrl+Shift+P`)
|
||||
2. Run `MCP: Enable` to enable MCP servers
|
||||
3. Restart Cursor to load the MCP servers from `.cursor/mcp.json`
|
||||
4. Type `/` in Agent chat and search for skills (e.g., `/hive-create`)
|
||||
|
||||
## Features
|
||||
|
||||
- **[Goal-Driven Development](docs/key_concepts/goals_outcome.md)** - Define objectives in natural language; the coding agent generates the agent graph and connection code to achieve them
|
||||
@@ -152,9 +135,19 @@ Skills are also available in Cursor. To enable:
|
||||
- **Cost & Budget Control** - Set spending limits, throttles, and automatic model degradation policies
|
||||
- **Production-Ready** - Self-hostable, built for scale and reliability
|
||||
|
||||
## Integration
|
||||
|
||||
<a href="https://github.com/adenhq/hive/tree/main/tools/src/aden_tools/tools"><img width="100%" alt="Integration" src="https://github.com/user-attachments/assets/a1573f93-cf02-4bb8-b3d5-b305b05b1e51" /></a>
|
||||
|
||||
Hive is built to be model-agnostic and system-agnostic.
|
||||
|
||||
- **LLM flexibility** - Hive Framework is designed to support various types of LLMs, including hosted and local models through LiteLLM-compatible providers.
|
||||
- **Business system connectivity** - Hive Framework is designed to connect to all kinds of business systems as tools, such as CRM, support, messaging, data, file, and internal APIs via MCP.
|
||||
|
||||
|
||||
## Why Aden
|
||||
|
||||
Hive focuses on generating agents that run real business processes rather than generic agents. Instead of requiring you to manually design workflows, define agent interactions, and handle failures reactively, Hive flips the paradigm: **you describe [outcomes](docs/key_concepts/goals_outcome.md), and the system builds itself**—delivering an outcome-driven, [adaptive](docs/key_concepts/evolution.md) experience with an easy-to-use set of tools and integrations.
|
||||
Hive focuses on generating agents that run real business processes rather than generic agents. Instead of requiring you to manually design workflows, define agent interactions, and handle failures reactively, Hive flips the paradigm: **you describe outcomes, and the system builds itself**—delivering an outcome-driven, adaptive experience with an easy-to-use set of tools and integrations.
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
@@ -188,9 +181,9 @@ flowchart LR
|
||||
style V6 fill:#fff,stroke:#ed8c00,stroke-width:1px,color:#cc5d00
|
||||
```
|
||||
|
||||
### The Aden Advantage
|
||||
### The Hive Advantage
|
||||
|
||||
| Traditional Frameworks | Aden |
|
||||
| Traditional Frameworks | Hive |
|
||||
| -------------------------- | -------------------------------------- |
|
||||
| Hardcode agent workflows | Describe goals in natural language |
|
||||
| Manual graph definition | Auto-generated agent graphs |
|
||||
@@ -239,13 +232,6 @@ See [environment-setup.md](docs/environment-setup.md) for complete setup instruc
|
||||
- [Configuration Guide](docs/configuration.md) - All configuration options
|
||||
- [Architecture Overview](docs/architecture/README.md) - System design and structure
|
||||
|
||||
### Key Concepts
|
||||
|
||||
- [Goals & Outcome-Driven Development](docs/key_concepts/goals_outcome.md) - Why Hive is outcome-driven and how goals define success
|
||||
- [The Agent Graph](docs/key_concepts/graph.md) - Nodes, edges, shared memory, and how agents execute
|
||||
- [The Worker Agent](docs/key_concepts/worker_agent.md) - Sessions, iterations, headless execution, and the runtime
|
||||
- [Evolution](docs/key_concepts/evolution.md) - How agents improve across generations through failure data
|
||||
|
||||
## Roadmap
|
||||
|
||||
Aden Hive Agent Framework aims to help developers build outcome-oriented, self-adaptive agents. See [roadmap.md](docs/roadmap.md) for details.
|
||||
@@ -376,10 +362,6 @@ This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENS
|
||||
|
||||
## Frequently Asked Questions (FAQ)
|
||||
|
||||
**Q: Does Hive depend on LangChain or other agent frameworks?**
|
||||
|
||||
No. Hive is built from the ground up with no dependencies on LangChain, CrewAI, or other agent frameworks. The framework is designed to be lean and flexible, generating agent graphs dynamically rather than relying on predefined components.
|
||||
|
||||
**Q: What LLM providers does Hive support?**
|
||||
|
||||
Hive supports 100+ LLM providers through LiteLLM integration, including OpenAI (GPT-4, GPT-4o), Anthropic (Claude models), Google Gemini, DeepSeek, Mistral, Groq, and many more. Simply set the appropriate API key environment variable and specify the model name.
|
||||
@@ -390,20 +372,12 @@ Yes! Hive supports local models through LiteLLM. Simply use the model name forma
|
||||
|
||||
**Q: What makes Hive different from other agent frameworks?**
|
||||
|
||||
Hive generates your entire agent system from natural language [goals](docs/key_concepts/goals_outcome.md) using a coding agent—you don't hardcode workflows or manually define graphs. When agents fail, the framework automatically captures failure data, [evolves the agent graph](docs/key_concepts/evolution.md), and redeploys. This self-improving loop is unique to Aden.
|
||||
Hive generates your entire agent system from natural language goals using a coding agent—you don't hardcode workflows or manually define graphs. When agents fail, the framework automatically captures failure data, [evolves the agent graph](docs/key_concepts/evolution.md), and redeploys. This self-improving loop is unique to Aden.
|
||||
|
||||
**Q: Is Hive open-source?**
|
||||
|
||||
Yes, Hive is fully open-source under the Apache License 2.0. We actively encourage community contributions and collaboration.
|
||||
|
||||
**Q: Does Hive collect data from users?**
|
||||
|
||||
Hive collects telemetry data for monitoring and observability purposes, including token usage, latency metrics, and cost tracking. Content capture (prompts and responses) is configurable and stored with team-scoped data isolation. All data stays within your infrastructure when self-hosted.
|
||||
|
||||
**Q: What deployment options does Hive support?**
|
||||
|
||||
Hive supports self-hosted deployments via Python packages. See the [Environment Setup Guide](docs/environment-setup.md) for installation instructions. Cloud deployment options and Kubernetes-ready configurations are on the roadmap.
|
||||
|
||||
**Q: Can Hive handle complex, production-scale use cases?**
|
||||
|
||||
Yes. Hive is explicitly designed for production environments with features like automatic failure recovery, real-time observability, cost controls, and horizontal scaling support. The framework handles both simple automations and complex multi-agent workflows.
|
||||
@@ -412,15 +386,11 @@ Yes. Hive is explicitly designed for production environments with features like
|
||||
|
||||
Yes, Hive fully supports [human-in-the-loop](docs/key_concepts/graph.md#human-in-the-loop) workflows through intervention nodes that pause execution for human input. These include configurable timeouts and escalation policies, allowing seamless collaboration between human experts and AI agents.
|
||||
|
||||
**Q: What monitoring and debugging tools does Hive provide?**
|
||||
|
||||
Hive includes comprehensive observability features: real-time WebSocket streaming for live agent execution monitoring, TimescaleDB-powered analytics for cost and performance metrics, health check endpoints for Kubernetes integration, and MCP tools for agent execution, including file operations, web search, data processing, and more.
|
||||
|
||||
**Q: What programming languages does Hive support?**
|
||||
|
||||
The Hive framework is built in Python. A JavaScript/TypeScript SDK is on the roadmap.
|
||||
|
||||
**Q: Can Aden agents interact with external tools and APIs?**
|
||||
**Q: Can Hive agents interact with external tools and APIs?**
|
||||
|
||||
Yes. Aden's SDK-wrapped nodes provide built-in tool access, and the framework supports flexible tool ecosystems. Agents can integrate with external APIs, databases, and services through the node architecture.
|
||||
|
||||
@@ -438,16 +408,12 @@ Contributions are welcome! Fork the repository, create your feature branch, impl
|
||||
|
||||
**Q: When will my team start seeing results from Aden's adaptive agents?**
|
||||
|
||||
Aden's [adaptation loop](docs/key_concepts/evolution.md) begins working from the first execution. When an agent fails, the framework captures the failure data, helping developers evolve the agent graph through the coding agent. How quickly this translates to measurable results depends on the complexity of your use case, the quality of your [goal definitions](docs/key_concepts/goals_outcome.md), and the volume of executions generating feedback.
|
||||
Aden's adaptation loop begins working from the first execution. When an agent fails, the framework captures the failure data, helping developers evolve the agent graph through the coding agent. How quickly this translates to measurable results depends on the complexity of your use case, the quality of your goal definitions, and the volume of executions generating feedback.
|
||||
|
||||
**Q: How does Hive compare to other agent frameworks?**
|
||||
|
||||
Hive focuses on generating agents that run real business processes, rather than generic agents. This vision emphasizes outcome-driven design, adaptability, and an easy-to-use set of tools and integrations.
|
||||
|
||||
**Q: Does Aden offer enterprise support?**
|
||||
|
||||
For enterprise inquiries, contact the Aden team through [adenhq.com](https://adenhq.com) or join our [Discord community](https://discord.com/invite/MXE49hrKDk) for support and discussions.
|
||||
|
||||
---
|
||||
|
||||
<p align="center">
|
||||
|
||||
@@ -639,6 +639,13 @@ class GraphExecutor:
|
||||
if len(value_str) > 200:
|
||||
value_str = value_str[:200] + "..."
|
||||
self.logger.info(f" {key}: {value_str}")
|
||||
|
||||
# Write node outputs to memory BEFORE edge evaluation
|
||||
# This enables direct key access in conditional expressions (e.g., "score > 80")
|
||||
# Without this, conditional edges can only use output['key'] syntax
|
||||
if result.output:
|
||||
for key, value in result.output.items():
|
||||
memory.write(key, value, validate=False)
|
||||
else:
|
||||
self.logger.error(f" ✗ Failed: {result.error}")
|
||||
|
||||
|
||||
@@ -308,9 +308,9 @@ class AgentRunner:
|
||||
self._storage_path = storage_path
|
||||
self._temp_dir = None
|
||||
else:
|
||||
# Use persistent storage in ~/.hive by default
|
||||
# Use persistent storage in ~/.hive/agents/{agent_name}/ per RUNTIME_LOGGING.md spec
|
||||
home = Path.home()
|
||||
default_storage = home / ".hive" / "storage" / agent_path.name
|
||||
default_storage = home / ".hive" / "agents" / agent_path.name
|
||||
default_storage.mkdir(parents=True, exist_ok=True)
|
||||
self._storage_path = default_storage
|
||||
self._temp_dir = None
|
||||
@@ -395,7 +395,7 @@ class AgentRunner:
|
||||
Args:
|
||||
agent_path: Path to agent folder
|
||||
mock_mode: If True, use mock LLM responses
|
||||
storage_path: Path for runtime storage (defaults to ~/.hive/storage/{name})
|
||||
storage_path: Path for runtime storage (defaults to ~/.hive/agents/{name})
|
||||
model: LLM model to use (reads from agent's default_config if None)
|
||||
enable_tui: If True, forces use of AgentRuntime with EventBus
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ This layered approach enables efficient debugging: start with L1 to identify pro
|
||||
**Default since 2026-02-06**
|
||||
|
||||
```
|
||||
~/.hive/{agent_name}/
|
||||
~/.hive/agents/{agent_name}/
|
||||
└── sessions/
|
||||
└── session_YYYYMMDD_HHMMSS_{uuid}/
|
||||
├── state.json # Session state and metadata
|
||||
@@ -42,7 +42,7 @@ This layered approach enables efficient debugging: start with L1 to identify pro
|
||||
**Read-only for backward compatibility**
|
||||
|
||||
```
|
||||
~/.hive/{agent_name}/
|
||||
~/.hive/agents/{agent_name}/
|
||||
├── runtime_logs/
|
||||
│ └── runs/
|
||||
│ └── {run_id}/
|
||||
@@ -215,7 +215,7 @@ Three MCP tools provide access to the logging system:
|
||||
|
||||
```python
|
||||
query_runtime_logs(
|
||||
agent_work_dir: str, # e.g., "~/.hive/twitter_outreach"
|
||||
agent_work_dir: str, # e.g., "~/.hive/agents/twitter_outreach"
|
||||
status: str = "", # "needs_attention", "success", "failure", "degraded"
|
||||
limit: int = 20
|
||||
) -> dict # {"runs": [...], "total": int}
|
||||
@@ -362,14 +362,14 @@ query_runtime_log_raw(agent_work_dir, run_id)
|
||||
```python
|
||||
# 1. Find problematic runs (L1)
|
||||
result = query_runtime_logs(
|
||||
agent_work_dir="~/.hive/twitter_outreach",
|
||||
agent_work_dir="~/.hive/agents/twitter_outreach",
|
||||
status="needs_attention"
|
||||
)
|
||||
run_id = result["runs"][0]["run_id"]
|
||||
|
||||
# 2. Identify failing nodes (L2)
|
||||
details = query_runtime_log_details(
|
||||
agent_work_dir="~/.hive/twitter_outreach",
|
||||
agent_work_dir="~/.hive/agents/twitter_outreach",
|
||||
run_id=run_id,
|
||||
needs_attention_only=True
|
||||
)
|
||||
@@ -377,7 +377,7 @@ problem_node = details["nodes"][0]["node_id"]
|
||||
|
||||
# 3. Analyze root cause (L3)
|
||||
raw = query_runtime_log_raw(
|
||||
agent_work_dir="~/.hive/twitter_outreach",
|
||||
agent_work_dir="~/.hive/agents/twitter_outreach",
|
||||
run_id=run_id,
|
||||
node_id=problem_node
|
||||
)
|
||||
@@ -390,12 +390,12 @@ raw = query_runtime_log_raw(
|
||||
|
||||
```python
|
||||
# Get recent runs
|
||||
runs = query_runtime_logs("~/.hive/my_agent", limit=10)
|
||||
runs = query_runtime_logs("~/.hive/agents/my_agent", limit=10)
|
||||
|
||||
# For each run, check specific node
|
||||
for run in runs["runs"]:
|
||||
node_details = query_runtime_log_details(
|
||||
"~/.hive/my_agent",
|
||||
"~/.hive/agents/my_agent",
|
||||
run["run_id"],
|
||||
node_id="problematic-node"
|
||||
)
|
||||
@@ -411,7 +411,7 @@ import time
|
||||
|
||||
while True:
|
||||
result = query_runtime_logs(
|
||||
agent_work_dir="~/.hive/my_agent",
|
||||
agent_work_dir="~/.hive/agents/my_agent",
|
||||
status="needs_attention",
|
||||
limit=1
|
||||
)
|
||||
@@ -574,10 +574,10 @@ The system automatically handles both old and new formats:
|
||||
|
||||
```python
|
||||
# MCP tools check both locations automatically
|
||||
result = query_runtime_logs("~/.hive/old_agent")
|
||||
result = query_runtime_logs("~/.hive/agents/old_agent")
|
||||
# Returns logs from both:
|
||||
# - ~/.hive/old_agent/runtime_logs/runs/*/
|
||||
# - ~/.hive/old_agent/sessions/session_*/logs/
|
||||
# - ~/.hive/agents/old_agent/runtime_logs/runs/*/
|
||||
# - ~/.hive/agents/old_agent/sessions/session_*/logs/
|
||||
```
|
||||
|
||||
### Deprecation Warnings
|
||||
@@ -636,9 +636,9 @@ Typical session with 5 nodes, 20 steps:
|
||||
**Symptom:** MCP tools return empty results
|
||||
|
||||
**Check:**
|
||||
1. Verify storage path exists: `~/.hive/{agent_name}/`
|
||||
2. Check session directories: `ls ~/.hive/{agent_name}/sessions/`
|
||||
3. Verify logs directory exists: `ls ~/.hive/{agent_name}/sessions/session_*/logs/`
|
||||
1. Verify storage path exists: `~/.hive/agents/{agent_name}/`
|
||||
2. Check session directories: `ls ~/.hive/agents/{agent_name}/sessions/`
|
||||
3. Verify logs directory exists: `ls ~/.hive/agents/{agent_name}/sessions/session_*/logs/`
|
||||
4. Check file permissions
|
||||
|
||||
### Issue: Corrupt JSONL files
|
||||
@@ -661,7 +661,7 @@ query_runtime_log_details(agent_work_dir, run_id)
|
||||
**Solution:**
|
||||
```bash
|
||||
# Archive old sessions
|
||||
cd ~/.hive/{agent_name}/sessions/
|
||||
cd ~/.hive/agents/{agent_name}/sessions/
|
||||
find . -name "session_2025*" -type d -exec tar -czf archive.tar.gz {} +
|
||||
rm -rf session_2025*
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ class SessionStore:
|
||||
Initialize session store.
|
||||
|
||||
Args:
|
||||
base_path: Base path for storage (e.g., ~/.hive/twitter_outreach)
|
||||
base_path: Base path for storage (e.g., ~/.hive/agents/twitter_outreach)
|
||||
"""
|
||||
self.base_path = Path(base_path)
|
||||
self.sessions_dir = self.base_path / "sessions"
|
||||
|
||||
@@ -0,0 +1,344 @@
|
||||
"""
|
||||
Regression tests for conditional edge direct key access (Issue #3599).
|
||||
|
||||
Verifies that node outputs are written to memory before edge evaluation,
|
||||
enabling direct key access in conditional expressions (e.g., 'score > 80')
|
||||
instead of requiring output['score'] > 80 syntax.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from framework.graph.edge import EdgeCondition, EdgeSpec, GraphSpec
|
||||
from framework.graph.executor import GraphExecutor
|
||||
from framework.graph.goal import Goal
|
||||
from framework.graph.node import NodeContext, NodeProtocol, NodeResult, NodeSpec
|
||||
from framework.runtime.core import Runtime
|
||||
|
||||
|
||||
class SimpleRuntime(Runtime):
|
||||
"""Minimal runtime for testing."""
|
||||
|
||||
def start_run(self, **kwargs):
|
||||
return "test-run"
|
||||
|
||||
def end_run(self, **kwargs):
|
||||
pass
|
||||
|
||||
def report_problem(self, **kwargs):
|
||||
pass
|
||||
|
||||
def decide(self, **kwargs):
|
||||
return "test-decision"
|
||||
|
||||
def record_outcome(self, **kwargs):
|
||||
pass
|
||||
|
||||
def set_node(self, **kwargs):
|
||||
pass
|
||||
|
||||
|
||||
class ScoreNode(NodeProtocol):
|
||||
"""Node that outputs a score value."""
|
||||
|
||||
async def execute(self, ctx: NodeContext) -> NodeResult:
|
||||
return NodeResult(success=True, output={"score": 85})
|
||||
|
||||
|
||||
class HighScoreNode(NodeProtocol):
|
||||
"""Consumer node for high scores."""
|
||||
|
||||
async def execute(self, ctx: NodeContext) -> NodeResult:
|
||||
return NodeResult(success=True, output={"result": "high_score_path"})
|
||||
|
||||
|
||||
class MultiKeyNode(NodeProtocol):
|
||||
"""Node that outputs multiple keys."""
|
||||
|
||||
async def execute(self, ctx: NodeContext) -> NodeResult:
|
||||
return NodeResult(success=True, output={"x": 100, "y": 50})
|
||||
|
||||
|
||||
class ConsumerNode(NodeProtocol):
|
||||
"""Generic consumer node."""
|
||||
|
||||
async def execute(self, ctx: NodeContext) -> NodeResult:
|
||||
return NodeResult(success=True, output={"processed": True})
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_direct_key_access_in_conditional_edge():
|
||||
"""
|
||||
Verify direct key access works in conditional edges (e.g., 'score > 80').
|
||||
|
||||
This is the core regression test for issue #3599. Before the fix,
|
||||
node outputs were only written to memory during input mapping (after
|
||||
edge evaluation), causing NameError when edges tried to access keys directly.
|
||||
"""
|
||||
goal = Goal(
|
||||
id="test-direct-key",
|
||||
name="Test Direct Key Access",
|
||||
description="Test that direct key access works in conditional edges",
|
||||
)
|
||||
|
||||
nodes = [
|
||||
NodeSpec(
|
||||
id="score_node",
|
||||
name="ScoreNode",
|
||||
description="Outputs a score",
|
||||
node_type="function",
|
||||
output_keys=["score"],
|
||||
),
|
||||
NodeSpec(
|
||||
id="high_score_node",
|
||||
name="HighScoreNode",
|
||||
description="Handles high scores",
|
||||
node_type="function",
|
||||
input_keys=["score"],
|
||||
output_keys=["result"],
|
||||
),
|
||||
]
|
||||
|
||||
# Edge with DIRECT key access: 'score > 80' (not 'output["score"] > 80')
|
||||
edges = [
|
||||
EdgeSpec(
|
||||
id="score_to_high",
|
||||
source="score_node",
|
||||
target="high_score_node",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="score > 80", # Direct key access
|
||||
)
|
||||
]
|
||||
|
||||
graph = GraphSpec(
|
||||
id="test-graph",
|
||||
goal_id="test-direct-key",
|
||||
entry_node="score_node",
|
||||
nodes=nodes,
|
||||
edges=edges,
|
||||
terminal_nodes=["high_score_node"],
|
||||
)
|
||||
|
||||
runtime = SimpleRuntime(storage_path="/tmp/test")
|
||||
executor = GraphExecutor(runtime=runtime)
|
||||
executor.register_node("score_node", ScoreNode())
|
||||
executor.register_node("high_score_node", HighScoreNode())
|
||||
|
||||
result = await executor.execute(graph, goal, {})
|
||||
|
||||
# Verify the edge was followed (high_score_node executed)
|
||||
assert result.success, "Execution should succeed"
|
||||
assert "high_score_node" in result.path, (
|
||||
f"Expected high_score_node in path. "
|
||||
f"Condition 'score > 80' should evaluate to True (score=85). "
|
||||
f"Path: {result.path}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_backward_compatibility_output_syntax():
|
||||
"""
|
||||
Verify backward compatibility: output['key'] syntax still works.
|
||||
|
||||
The fix should not break existing code that uses the explicit
|
||||
output dictionary syntax in conditional expressions.
|
||||
"""
|
||||
goal = Goal(
|
||||
id="test-backward-compat",
|
||||
name="Test Backward Compatibility",
|
||||
description="Test that output['key'] syntax still works",
|
||||
)
|
||||
|
||||
nodes = [
|
||||
NodeSpec(
|
||||
id="score_node",
|
||||
name="ScoreNode",
|
||||
description="Outputs a score",
|
||||
node_type="function",
|
||||
output_keys=["score"],
|
||||
),
|
||||
NodeSpec(
|
||||
id="consumer_node",
|
||||
name="ConsumerNode",
|
||||
description="Consumer",
|
||||
node_type="function",
|
||||
input_keys=["score"],
|
||||
output_keys=["processed"],
|
||||
),
|
||||
]
|
||||
|
||||
# Edge with OLD syntax: output['score'] > 80
|
||||
edges = [
|
||||
EdgeSpec(
|
||||
id="score_to_consumer",
|
||||
source="score_node",
|
||||
target="consumer_node",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="output['score'] > 80", # Old explicit syntax
|
||||
)
|
||||
]
|
||||
|
||||
graph = GraphSpec(
|
||||
id="test-graph-compat",
|
||||
goal_id="test-backward-compat",
|
||||
entry_node="score_node",
|
||||
nodes=nodes,
|
||||
edges=edges,
|
||||
terminal_nodes=["consumer_node"],
|
||||
)
|
||||
|
||||
runtime = SimpleRuntime(storage_path="/tmp/test")
|
||||
executor = GraphExecutor(runtime=runtime)
|
||||
executor.register_node("score_node", ScoreNode())
|
||||
executor.register_node("consumer_node", ConsumerNode())
|
||||
|
||||
result = await executor.execute(graph, goal, {})
|
||||
|
||||
# Verify backward compatibility maintained
|
||||
assert result.success, "Execution should succeed"
|
||||
assert "consumer_node" in result.path, (
|
||||
f"Expected consumer_node in path. "
|
||||
f"Old syntax output['score'] > 80 should still work. "
|
||||
f"Path: {result.path}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_multiple_keys_in_expression():
|
||||
"""
|
||||
Verify multiple direct keys work in complex expressions.
|
||||
|
||||
Tests that expressions like 'x > y and y < 100' work correctly
|
||||
when both x and y are written to memory before edge evaluation.
|
||||
"""
|
||||
goal = Goal(
|
||||
id="test-multi-key",
|
||||
name="Test Multiple Keys",
|
||||
description="Test multiple keys in conditional expression",
|
||||
)
|
||||
|
||||
nodes = [
|
||||
NodeSpec(
|
||||
id="multi_key_node",
|
||||
name="MultiKeyNode",
|
||||
description="Outputs multiple keys",
|
||||
node_type="function",
|
||||
output_keys=["x", "y"],
|
||||
),
|
||||
NodeSpec(
|
||||
id="consumer_node",
|
||||
name="ConsumerNode",
|
||||
description="Consumer",
|
||||
node_type="function",
|
||||
input_keys=["x", "y"],
|
||||
output_keys=["processed"],
|
||||
),
|
||||
]
|
||||
|
||||
# Complex expression with multiple direct keys
|
||||
edges = [
|
||||
EdgeSpec(
|
||||
id="multi_to_consumer",
|
||||
source="multi_key_node",
|
||||
target="consumer_node",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="x > y and y < 100", # Multiple keys
|
||||
)
|
||||
]
|
||||
|
||||
graph = GraphSpec(
|
||||
id="test-graph-multi",
|
||||
goal_id="test-multi-key",
|
||||
entry_node="multi_key_node",
|
||||
nodes=nodes,
|
||||
edges=edges,
|
||||
terminal_nodes=["consumer_node"],
|
||||
)
|
||||
|
||||
runtime = SimpleRuntime(storage_path="/tmp/test")
|
||||
executor = GraphExecutor(runtime=runtime)
|
||||
executor.register_node("multi_key_node", MultiKeyNode())
|
||||
executor.register_node("consumer_node", ConsumerNode())
|
||||
|
||||
result = await executor.execute(graph, goal, {})
|
||||
|
||||
# Verify multiple keys work correctly
|
||||
assert result.success, "Execution should succeed"
|
||||
assert "consumer_node" in result.path, (
|
||||
f"Expected consumer_node in path. "
|
||||
f"Condition 'x > y and y < 100' should be True (x=100, y=50). "
|
||||
f"Path: {result.path}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_negative_case_condition_false():
|
||||
"""
|
||||
Verify conditions correctly evaluate to False when not met.
|
||||
|
||||
Tests that when a condition fails, the edge is NOT followed
|
||||
and execution doesn't proceed to the target node.
|
||||
"""
|
||||
goal = Goal(
|
||||
id="test-negative",
|
||||
name="Test Negative Case",
|
||||
description="Test condition evaluates to False correctly",
|
||||
)
|
||||
|
||||
class LowScoreNode(NodeProtocol):
|
||||
"""Node that outputs a LOW score."""
|
||||
|
||||
async def execute(self, ctx: NodeContext) -> NodeResult:
|
||||
return NodeResult(success=True, output={"score": 30})
|
||||
|
||||
nodes = [
|
||||
NodeSpec(
|
||||
id="low_score_node",
|
||||
name="LowScoreNode",
|
||||
description="Outputs low score",
|
||||
node_type="function",
|
||||
output_keys=["score"],
|
||||
),
|
||||
NodeSpec(
|
||||
id="high_score_handler",
|
||||
name="HighScoreHandler",
|
||||
description="Should NOT execute",
|
||||
node_type="function",
|
||||
input_keys=["score"],
|
||||
output_keys=["result"],
|
||||
),
|
||||
]
|
||||
|
||||
# Condition should be FALSE (30 is not > 80)
|
||||
edges = [
|
||||
EdgeSpec(
|
||||
id="low_to_high",
|
||||
source="low_score_node",
|
||||
target="high_score_handler",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="score > 80", # Should be False
|
||||
)
|
||||
]
|
||||
|
||||
graph = GraphSpec(
|
||||
id="test-graph-negative",
|
||||
goal_id="test-negative",
|
||||
entry_node="low_score_node",
|
||||
nodes=nodes,
|
||||
edges=edges,
|
||||
terminal_nodes=["high_score_handler"],
|
||||
)
|
||||
|
||||
runtime = SimpleRuntime(storage_path="/tmp/test")
|
||||
executor = GraphExecutor(runtime=runtime)
|
||||
executor.register_node("low_score_node", LowScoreNode())
|
||||
executor.register_node("high_score_handler", HighScoreNode())
|
||||
|
||||
result = await executor.execute(graph, goal, {})
|
||||
|
||||
# Verify condition correctly evaluated to False
|
||||
assert result.success, "Execution should succeed"
|
||||
assert "high_score_handler" not in result.path, (
|
||||
f"high_score_handler should NOT be in path. "
|
||||
f"Condition 'score > 80' should be False (score=30). "
|
||||
f"Path: {result.path}"
|
||||
)
|
||||
@@ -96,14 +96,14 @@ MCP (Model Context Protocol) servers are configured in `.mcp.json` at the projec
|
||||
{
|
||||
"mcpServers": {
|
||||
"agent-builder": {
|
||||
"command": "core/.venv/bin/python",
|
||||
"args": ["-m", "framework.mcp.agent_builder_server"],
|
||||
"cwd": "."
|
||||
"command": "uv",
|
||||
"args": ["run", "-m", "framework.mcp.agent_builder_server"],
|
||||
"cwd": "core"
|
||||
},
|
||||
"tools": {
|
||||
"command": "tools/.venv/bin/python",
|
||||
"args": ["-m", "aden_tools.mcp_server", "--stdio"],
|
||||
"cwd": "."
|
||||
"command": "uv",
|
||||
"args": ["run", "mcp_server.py", "--stdio"],
|
||||
"cwd": "tools"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
+10
-1
@@ -107,6 +107,15 @@ This installs agent-related Claude Code skills:
|
||||
- `/hive-patterns` - Best practices and design patterns
|
||||
- `/hive-test` - Test and validate agents
|
||||
|
||||
### Cursor IDE Support
|
||||
|
||||
Skills are also available in Cursor. To enable:
|
||||
|
||||
1. Open Command Palette (`Cmd+Shift+P` / `Ctrl+Shift+P`)
|
||||
2. Run `MCP: Enable` to enable MCP servers
|
||||
3. Restart Cursor to load the MCP servers from `.cursor/mcp.json`
|
||||
4. Type `/` in Agent chat and search for skills (e.g., `/hive-create`)
|
||||
|
||||
### Verify Setup
|
||||
|
||||
```bash
|
||||
@@ -200,7 +209,7 @@ hive/ # Repository root
|
||||
├── CONTRIBUTING.md # Contribution guidelines
|
||||
├── CHANGELOG.md # Version history
|
||||
├── LICENSE # Apache 2.0 License
|
||||
├── CODE_OF_CONDUCT.md # Community guidelines
|
||||
├── docs/CODE_OF_CONDUCT.md # Community guidelines
|
||||
└── SECURITY.md # Security policy
|
||||
```
|
||||
|
||||
|
||||
+31
-27
@@ -112,19 +112,9 @@ uv run python -c "import litellm; print('✓ litellm OK')"
|
||||
- Internet connection (for LLM API calls)
|
||||
- For Windows users: WSL 2 is recommended for full compatibility.
|
||||
|
||||
### API Keys (Optional)
|
||||
### API Keys
|
||||
|
||||
For running agents with real LLMs:
|
||||
|
||||
```bash
|
||||
export ANTHROPIC_API_KEY="your-key-here"
|
||||
```
|
||||
|
||||
Windows (PowerShell):
|
||||
|
||||
```powershell
|
||||
$env:ANTHROPIC_API_KEY="your-key-here"
|
||||
```
|
||||
We recommend using quickstart.sh for LLM API credential setup and /hive-credentials for the tools credentials
|
||||
|
||||
## Running Agents
|
||||
|
||||
@@ -185,6 +175,15 @@ This verifies agent-related Claude Code skills are available:
|
||||
- `/hive-patterns` - Best practices
|
||||
- `/hive-test` - Test and validate agents
|
||||
|
||||
### Cursor IDE Support
|
||||
|
||||
Skills are also available in Cursor. To enable:
|
||||
|
||||
1. Open Command Palette (`Cmd+Shift+P` / `Ctrl+Shift+P`)
|
||||
2. Run `MCP: Enable` to enable MCP servers
|
||||
3. Restart Cursor to load the MCP servers from `.cursor/mcp.json`
|
||||
4. Type `/` in Agent chat and search for skills (e.g., `/hive-create`)
|
||||
|
||||
### 2. Build an Agent
|
||||
|
||||
```
|
||||
@@ -371,7 +370,9 @@ hive/
|
||||
|
||||
## Separate Virtual Environments
|
||||
|
||||
The project uses **separate virtual environments** for `core` and `tools` packages to:
|
||||
Hive primarily uses **uv** to create and manage separate virtual environments for `core` and `tools`.
|
||||
|
||||
The project uses separate virtual environments to:
|
||||
|
||||
- Isolate dependencies and avoid conflicts
|
||||
- Allow independent development and testing of each package
|
||||
@@ -379,11 +380,18 @@ The project uses **separate virtual environments** for `core` and `tools` packag
|
||||
|
||||
### How It Works
|
||||
|
||||
When you run `./quickstart.sh` or `uv sync` in each directory:
|
||||
When you run `./quickstart.sh`, `uv` sets up:
|
||||
|
||||
1. **core/.venv/** - Contains the `framework` package and its dependencies (anthropic, litellm, mcp, etc.)
|
||||
2. **tools/.venv/** - Contains the `aden_tools` package and its dependencies (beautifulsoup4, pandas, etc.)
|
||||
|
||||
If you need to refresh environments manually, use `uv`:
|
||||
|
||||
```bash
|
||||
cd core && uv sync
|
||||
cd ../tools && uv sync
|
||||
```
|
||||
|
||||
### Cross-Package Imports
|
||||
|
||||
The `core` and `tools` packages are **intentionally independent**:
|
||||
@@ -392,38 +400,34 @@ The `core` and `tools` packages are **intentionally independent**:
|
||||
- **Communication via MCP**: Tools are exposed to agents through MCP servers, not direct Python imports
|
||||
- **Runtime integration**: The agent runner loads tools via the MCP protocol at runtime
|
||||
|
||||
If you need to use both packages in a single script (e.g., for testing), you have two options:
|
||||
If you need to use both packages in a single script (e.g., for testing), prefer `uv run` with `PYTHONPATH`:
|
||||
|
||||
```bash
|
||||
# Option 1: Install both in a shared environment
|
||||
uv venv
|
||||
source .venv/bin/activate
|
||||
uv pip install -e core/ -e tools/
|
||||
|
||||
# Option 2: Use PYTHONPATH (for quick testing)
|
||||
PYTHONPATH=tools/src uv run python your_script.py
|
||||
```
|
||||
|
||||
### MCP Server Configuration
|
||||
|
||||
The `.mcp.json` at project root configures MCP servers to use their respective virtual environments:
|
||||
The `.mcp.json` at project root configures MCP servers to run through `uv run` in each package directory:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"agent-builder": {
|
||||
"command": "core/.venv/bin/python",
|
||||
"args": ["-m", "framework.mcp.agent_builder_server"]
|
||||
"command": "uv",
|
||||
"args": ["run", "-m", "framework.mcp.agent_builder_server"],
|
||||
"cwd": "core"
|
||||
},
|
||||
"tools": {
|
||||
"command": "tools/.venv/bin/python",
|
||||
"args": ["-m", "aden_tools.mcp_server", "--stdio"]
|
||||
"command": "uv",
|
||||
"args": ["run", "mcp_server.py", "--stdio"],
|
||||
"cwd": "tools"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This ensures each MCP server runs with its correct dependencies.
|
||||
This ensures each MCP server runs with the correct project environment managed by `uv`.
|
||||
|
||||
### Why PYTHONPATH is Required
|
||||
|
||||
|
||||
@@ -93,12 +93,12 @@ hive/
|
||||
│ └── pyproject.toml # Package metadata
|
||||
│
|
||||
├── tools/ # MCP Tools Package
|
||||
│ ├── mcp_server.py # MCP server entry point
|
||||
│ └── src/aden_tools/ # Tools for agent capabilities
|
||||
│ ├── tools/ # Individual tool implementations
|
||||
│ │ ├── web_search_tool/
|
||||
│ │ ├── web_scrape_tool/
|
||||
│ │ └── file_system_toolkits/
|
||||
│ └── mcp_server.py # HTTP MCP server
|
||||
│ └── tools/ # Individual tool implementations
|
||||
│ ├── web_search_tool/
|
||||
│ ├── web_scrape_tool/
|
||||
│ └── file_system_toolkits/
|
||||
│
|
||||
├── exports/ # Agent Packages (user-generated, not in repo)
|
||||
│ └── your_agent/ # Your agents created via /hive
|
||||
|
||||
@@ -0,0 +1,22 @@
|
||||
# Deep Research Agent
|
||||
|
||||
A template agent designed to perform comprehensive research on a specific topic and generate a structured report.
|
||||
|
||||
## Usage
|
||||
|
||||
Run the agent using the following command:
|
||||
|
||||
### Linux / Mac
|
||||
```bash
|
||||
PYTHONPATH=core:examples/templates python -m deep_research_agent run --mock --topic "Artificial Intelligence"
|
||||
|
||||
### Windows
|
||||
```powershell
|
||||
$env:PYTHONPATH="core;examples\templates"
|
||||
python -m deep_research_agent run --mock --topic "Artificial Intelligence"
|
||||
|
||||
## Options
|
||||
|
||||
- `-t, --topic`: The research topic (required).
|
||||
- `--mock`: Run without calling real LLM APIs (simulated execution).
|
||||
- `--help`: Show all available options.
|
||||
@@ -88,7 +88,7 @@ def tui(verbose, debug):
|
||||
agent._event_bus = EventBus()
|
||||
agent._tool_registry = ToolRegistry()
|
||||
|
||||
storage_path = Path.home() / ".hive" / "deep_research_agent"
|
||||
storage_path = Path.home() / ".hive" / "agents" / "deep_research_agent"
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
mcp_config_path = Path(__file__).parent / "mcp_servers.json"
|
||||
|
||||
@@ -177,7 +177,7 @@ class DeepResearchAgent:
|
||||
"""Set up the executor with all components."""
|
||||
from pathlib import Path
|
||||
|
||||
storage_path = Path.home() / ".hive" / "deep_research_agent"
|
||||
storage_path = Path.home() / ".hive" / "agents" / "deep_research_agent"
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
self._event_bus = EventBus()
|
||||
|
||||
@@ -86,7 +86,7 @@ def tui(verbose, debug):
|
||||
agent._event_bus = EventBus()
|
||||
agent._tool_registry = ToolRegistry()
|
||||
|
||||
storage_path = Path.home() / ".hive" / "tech_news_reporter"
|
||||
storage_path = Path.home() / ".hive" / "agents" / "tech_news_reporter"
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
mcp_config_path = Path(__file__).parent / "mcp_servers.json"
|
||||
|
||||
@@ -1,92 +0,0 @@
|
||||
# Issue: Remove LLM Dependency from Agent Builder MCP Server
|
||||
|
||||
## Summary
|
||||
|
||||
The `agent_builder_server.py` MCP server has a hardcoded dependency on `AnthropicProvider` for test generation, which:
|
||||
1. Breaks when users don't have an Anthropic API key
|
||||
2. Is redundant since the calling agent (Claude) can write tests directly
|
||||
3. Violates the principle that MCP servers should be provider-agnostic utilities
|
||||
|
||||
## Affected Code
|
||||
|
||||
**File:** `core/framework/mcp/agent_builder_server.py`
|
||||
|
||||
**Lines:** 2350-2351, 2413-2414
|
||||
|
||||
```python
|
||||
# Line 2350-2351 (generate_constraint_tests)
|
||||
from framework.llm import AnthropicProvider
|
||||
llm = AnthropicProvider()
|
||||
|
||||
# Line 2413-2414 (generate_success_tests)
|
||||
from framework.llm import AnthropicProvider
|
||||
llm = AnthropicProvider()
|
||||
```
|
||||
|
||||
**Introduced by:** bryan (commit e2945b6c, 2026-01-20)
|
||||
|
||||
## Problem
|
||||
|
||||
When a user configures their agent to use a non-Anthropic LLM provider (e.g., `LiteLLMProvider` with Cerebras, OpenAI, or other backends), the MCP test generation tools fail with:
|
||||
|
||||
```
|
||||
{"error": "Failed to initialize LLM: Anthropic API key required. Set ANTHROPIC_API_KEY env var or pass api_key."}
|
||||
```
|
||||
|
||||
This happens even though:
|
||||
- The user has valid credentials for their chosen provider
|
||||
- The calling Claude agent is fully capable of writing tests
|
||||
- MCP is an open standard that shouldn't mandate specific LLM providers
|
||||
|
||||
## Root Cause
|
||||
|
||||
The test generation functions (`generate_constraint_tests`, `generate_success_tests`) embed an LLM call to generate Python test code from goal definitions. This design:
|
||||
|
||||
1. **Duplicates capability** - The outer Claude agent already writes code; delegating to an inner LLM is redundant
|
||||
2. **Creates provider lock-in** - Hardcoding `AnthropicProvider` breaks multi-provider workflows
|
||||
3. **Adds complexity** - Requires managing credentials in two places (outer agent + MCP server)
|
||||
|
||||
## Proposed Solution
|
||||
|
||||
**Option A: Remove LLM dependency entirely (Recommended)**
|
||||
|
||||
Refactor the MCP server to only provide test execution utilities:
|
||||
- `run_tests` - Execute pytest and return structured results
|
||||
- `list_tests` - Scan test files in agent directory
|
||||
- `debug_test` - Re-run single test with verbose output
|
||||
|
||||
Test *generation* becomes the responsibility of the calling agent, which:
|
||||
- Already has LLM capability
|
||||
- Already knows the goal/constraints
|
||||
- Can write tests directly using `Write` tool
|
||||
|
||||
**Option B: Make LLM provider configurable**
|
||||
|
||||
If LLM-based generation must stay in the MCP server:
|
||||
```python
|
||||
# Accept model parameter, use LiteLLM for provider-agnostic support
|
||||
from framework.llm.litellm import LiteLLMProvider
|
||||
|
||||
def generate_constraint_tests(goal_id, goal_json, agent_path, model="gpt-4o-mini"):
|
||||
llm = LiteLLMProvider(model=model)
|
||||
# ...
|
||||
```
|
||||
|
||||
## Impact
|
||||
|
||||
- Users with non-Anthropic setups cannot use `generate_constraint_tests` or `generate_success_tests`
|
||||
- Workaround: Write tests manually (as done in this session)
|
||||
- Skills documentation (`testing-agent`) mandates MCP tools but they don't work universally
|
||||
|
||||
## Recommendation
|
||||
|
||||
Implement **Option A**. The MCP server should be a thin utility layer for test execution, not a code generator. This:
|
||||
- Eliminates provider dependency
|
||||
- Simplifies the codebase
|
||||
- Aligns with MCP's role as a protocol, not an LLM wrapper
|
||||
|
||||
## Related Files
|
||||
|
||||
- `core/framework/mcp/agent_builder_server.py` - Main file to modify
|
||||
- `.claude/skills/hive-test/SKILL.md` - Update documentation if tools change
|
||||
- `core/framework/testing/` - Test generation utilities that could be removed
|
||||
@@ -1,7 +0,0 @@
|
||||
{
|
||||
"extraPaths": ["core", "tools/src"],
|
||||
"pythonVersion": "3.11",
|
||||
"typeCheckingMode": "basic",
|
||||
"include": ["core", "tools/src", "exports"],
|
||||
"exclude": ["**/node_modules", "**/__pycache__", "**/.*"]
|
||||
}
|
||||
+62
-41
@@ -379,6 +379,38 @@ fi
|
||||
HIVE_CONFIG_DIR="$HOME/.hive"
|
||||
HIVE_CONFIG_FILE="$HIVE_CONFIG_DIR/configuration.json"
|
||||
|
||||
# Detect user's shell rc file
|
||||
detect_shell_rc() {
|
||||
local shell_name
|
||||
shell_name=$(basename "$SHELL")
|
||||
|
||||
case "$shell_name" in
|
||||
zsh)
|
||||
if [ -f "$HOME/.zshrc" ]; then
|
||||
echo "$HOME/.zshrc"
|
||||
else
|
||||
echo "$HOME/.zshenv"
|
||||
fi
|
||||
;;
|
||||
bash)
|
||||
if [ -f "$HOME/.bashrc" ]; then
|
||||
echo "$HOME/.bashrc"
|
||||
elif [ -f "$HOME/.bash_profile" ]; then
|
||||
echo "$HOME/.bash_profile"
|
||||
else
|
||||
echo "$HOME/.profile"
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
# Fallback to .profile for other shells
|
||||
echo "$HOME/.profile"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
SHELL_RC_FILE=$(detect_shell_rc)
|
||||
SHELL_NAME=$(basename "$SHELL")
|
||||
|
||||
# Function to save configuration
|
||||
save_configuration() {
|
||||
local provider_id="$1"
|
||||
@@ -404,18 +436,11 @@ print(json.dumps(config, indent=2))
|
||||
" 2>/dev/null
|
||||
}
|
||||
|
||||
# Check for .env files (temporarily disable set -e for robustness on Bash 3.2)
|
||||
# Source shell rc file to pick up existing env vars (temporarily disable set -e)
|
||||
set +e
|
||||
if [ -f "$SCRIPT_DIR/.env" ]; then
|
||||
set -a
|
||||
source "$SCRIPT_DIR/.env" 2>/dev/null
|
||||
set +a
|
||||
fi
|
||||
|
||||
if [ -f "$HOME/.env" ]; then
|
||||
set -a
|
||||
source "$HOME/.env" 2>/dev/null
|
||||
set +a
|
||||
if [ -f "$SHELL_RC_FILE" ]; then
|
||||
# Extract only export statements to avoid running shell config commands
|
||||
eval "$(grep -E '^export [A-Z_]+=' "$SHELL_RC_FILE" 2>/dev/null)"
|
||||
fi
|
||||
set -e
|
||||
|
||||
@@ -540,7 +565,7 @@ if [ -z "$SELECTED_PROVIDER_ID" ]; then
|
||||
echo -e "${YELLOW}Skipped.${NC} An LLM API key is required to test and use worker agents."
|
||||
echo -e "Add your API key later by running:"
|
||||
echo ""
|
||||
echo -e " ${CYAN}echo 'ANTHROPIC_API_KEY=your-key' >> .env${NC}"
|
||||
echo -e " ${CYAN}echo 'export ANTHROPIC_API_KEY=\"your-key\"' >> $SHELL_RC_FILE${NC}"
|
||||
echo ""
|
||||
SELECTED_ENV_VAR=""
|
||||
SELECTED_PROVIDER_ID=""
|
||||
@@ -554,15 +579,16 @@ if [ -z "$SELECTED_PROVIDER_ID" ]; then
|
||||
read -r -p "Paste your $PROVIDER_NAME API key (or press Enter to skip): " API_KEY
|
||||
|
||||
if [ -n "$API_KEY" ]; then
|
||||
# Save to .env
|
||||
echo "" >> "$SCRIPT_DIR/.env"
|
||||
echo "$SELECTED_ENV_VAR=$API_KEY" >> "$SCRIPT_DIR/.env"
|
||||
# Save to shell rc file
|
||||
echo "" >> "$SHELL_RC_FILE"
|
||||
echo "# Hive Agent Framework - $PROVIDER_NAME API key" >> "$SHELL_RC_FILE"
|
||||
echo "export $SELECTED_ENV_VAR=\"$API_KEY\"" >> "$SHELL_RC_FILE"
|
||||
export "$SELECTED_ENV_VAR=$API_KEY"
|
||||
echo ""
|
||||
echo -e "${GREEN}⬢${NC} API key saved to .env"
|
||||
echo -e "${GREEN}⬢${NC} API key saved to $SHELL_RC_FILE"
|
||||
else
|
||||
echo ""
|
||||
echo -e "${YELLOW}Skipped.${NC} Add your API key to .env when ready."
|
||||
echo -e "${YELLOW}Skipped.${NC} Add your API key to $SHELL_RC_FILE when ready."
|
||||
SELECTED_ENV_VAR=""
|
||||
SELECTED_PROVIDER_ID=""
|
||||
fi
|
||||
@@ -591,7 +617,7 @@ echo ""
|
||||
|
||||
HIVE_CRED_DIR="$HOME/.hive/credentials"
|
||||
|
||||
# Check if HIVE_CREDENTIAL_KEY already exists (from env or .env)
|
||||
# Check if HIVE_CREDENTIAL_KEY already exists (from env or shell rc)
|
||||
if [ -n "$HIVE_CREDENTIAL_KEY" ]; then
|
||||
echo -e "${GREEN} ✓ HIVE_CREDENTIAL_KEY already set${NC}"
|
||||
else
|
||||
@@ -606,16 +632,13 @@ else
|
||||
else
|
||||
echo -e "${GREEN}ok${NC}"
|
||||
|
||||
# Save to .env file
|
||||
if [ ! -f "$SCRIPT_DIR/.env" ]; then
|
||||
touch "$SCRIPT_DIR/.env"
|
||||
fi
|
||||
echo "" >> "$SCRIPT_DIR/.env"
|
||||
echo "# Encryption key for Hive credential store (~/.hive/credentials)" >> "$SCRIPT_DIR/.env"
|
||||
echo "HIVE_CREDENTIAL_KEY=$GENERATED_KEY" >> "$SCRIPT_DIR/.env"
|
||||
# Save to shell rc file
|
||||
echo "" >> "$SHELL_RC_FILE"
|
||||
echo "# Encryption key for Hive credential store (~/.hive/credentials)" >> "$SHELL_RC_FILE"
|
||||
echo "export HIVE_CREDENTIAL_KEY=\"$GENERATED_KEY\"" >> "$SHELL_RC_FILE"
|
||||
export HIVE_CREDENTIAL_KEY="$GENERATED_KEY"
|
||||
|
||||
echo -e "${GREEN} ✓ Encryption key saved to .env${NC}"
|
||||
echo -e "${GREEN} ✓ Encryption key saved to $SHELL_RC_FILE${NC}"
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -772,11 +795,6 @@ if [ -n "$HIVE_CREDENTIAL_KEY" ]; then
|
||||
echo ""
|
||||
fi
|
||||
|
||||
echo -e "${BOLD}Run an Agent:${NC}"
|
||||
echo ""
|
||||
echo -e " Launch the interactive dashboard to browse and run agents:"
|
||||
echo -e " ${CYAN}hive tui${NC}"
|
||||
echo ""
|
||||
echo -e "${BOLD}Build a New Agent:${NC}"
|
||||
echo ""
|
||||
echo -e " 1. Open Claude Code in this directory:"
|
||||
@@ -788,15 +806,18 @@ echo ""
|
||||
echo -e " 3. Test an existing agent:"
|
||||
echo -e " ${CYAN}/hive-test${NC}"
|
||||
echo ""
|
||||
echo -e "${BOLD}Skills:${NC}"
|
||||
if [ -d "$SCRIPT_DIR/.claude/skills" ]; then
|
||||
for skill_dir in "$SCRIPT_DIR/.claude/skills"/*/; do
|
||||
skill_name=$(basename "$skill_dir")
|
||||
echo -e " ⬡ ${CYAN}/$skill_name${NC}"
|
||||
done
|
||||
echo -e "${BOLD}Run an Agent:${NC}"
|
||||
echo ""
|
||||
echo -e " Launch the interactive dashboard to browse and run agents:"
|
||||
echo -e " You can start a example agent or an agent built by yourself:"
|
||||
echo -e " ${CYAN}hive tui${NC}"
|
||||
echo ""
|
||||
# Show shell sourcing reminder if we added environment variables
|
||||
if [ -n "$SELECTED_PROVIDER_ID" ] || [ -n "$HIVE_CREDENTIAL_KEY" ]; then
|
||||
echo -e "${BOLD}Note:${NC} To use the new environment variables in this shell, run:"
|
||||
echo -e " ${CYAN}source $SHELL_RC_FILE${NC}"
|
||||
echo ""
|
||||
fi
|
||||
echo ""
|
||||
echo -e "${BOLD}Examples:${NC} ${CYAN}exports/${NC}"
|
||||
echo ""
|
||||
|
||||
echo -e "${DIM}Run ./quickstart.sh again to reconfigure.${NC}"
|
||||
echo ""
|
||||
echo ""
|
||||
@@ -36,6 +36,7 @@ Credential categories:
|
||||
- llm.py: LLM provider credentials (anthropic, openai, etc.)
|
||||
- search.py: Search tool credentials (brave_search, google_search, etc.)
|
||||
- email.py: Email provider credentials (resend, google/gmail)
|
||||
- apollo.py: Apollo.io API credentials
|
||||
- github.py: GitHub API credentials
|
||||
- hubspot.py: HubSpot CRM credentials
|
||||
- slack.py: Slack workspace credentials
|
||||
@@ -49,6 +50,7 @@ To add a new credential:
|
||||
3. If new category, import and merge it in this __init__.py
|
||||
"""
|
||||
|
||||
from .apollo import APOLLO_CREDENTIALS
|
||||
from .base import CredentialError, CredentialSpec
|
||||
from .browser import get_aden_auth_url, get_aden_setup_url, open_browser
|
||||
from .email import EMAIL_CREDENTIALS
|
||||
@@ -71,6 +73,7 @@ CREDENTIAL_SPECS = {
|
||||
**LLM_CREDENTIALS,
|
||||
**SEARCH_CREDENTIALS,
|
||||
**EMAIL_CREDENTIALS,
|
||||
**APOLLO_CREDENTIALS,
|
||||
**GITHUB_CREDENTIALS,
|
||||
**HUBSPOT_CREDENTIALS,
|
||||
**SLACK_CREDENTIALS,
|
||||
@@ -104,4 +107,5 @@ __all__ = [
|
||||
"GITHUB_CREDENTIALS",
|
||||
"HUBSPOT_CREDENTIALS",
|
||||
"SLACK_CREDENTIALS",
|
||||
"APOLLO_CREDENTIALS",
|
||||
]
|
||||
|
||||
@@ -0,0 +1,43 @@
|
||||
"""
|
||||
Apollo.io tool credentials.
|
||||
|
||||
Contains credentials for Apollo.io API integration.
|
||||
"""
|
||||
|
||||
from .base import CredentialSpec
|
||||
|
||||
APOLLO_CREDENTIALS = {
|
||||
"apollo": CredentialSpec(
|
||||
env_var="APOLLO_API_KEY",
|
||||
tools=[
|
||||
"apollo_enrich_person",
|
||||
"apollo_enrich_company",
|
||||
"apollo_search_people",
|
||||
"apollo_search_companies",
|
||||
],
|
||||
required=True,
|
||||
startup_required=False,
|
||||
help_url="https://apolloio.github.io/apollo-api-docs/",
|
||||
description="Apollo.io API key for contact and company data enrichment",
|
||||
# Auth method support
|
||||
aden_supported=False,
|
||||
direct_api_key_supported=True,
|
||||
api_key_instructions="""To get an Apollo.io API key:
|
||||
1. Sign up or log in at https://app.apollo.io/
|
||||
2. Go to Settings > Integrations > API
|
||||
3. Click "Connect" to generate your API key
|
||||
4. Copy the API key
|
||||
|
||||
Note: Apollo uses export credits for enrichment:
|
||||
- Free plan: 10 credits/month
|
||||
- Basic ($49/user/mo): 1,000 credits/month
|
||||
- Professional ($79/user/mo): 2,000 credits/month
|
||||
- Overage: $0.20/credit""",
|
||||
# Health check configuration
|
||||
health_check_endpoint="https://api.apollo.io/v1/auth/health",
|
||||
health_check_method="GET",
|
||||
# Credential store mapping
|
||||
credential_id="apollo",
|
||||
credential_key="api_key",
|
||||
),
|
||||
}
|
||||
@@ -21,6 +21,7 @@ if TYPE_CHECKING:
|
||||
from aden_tools.credentials import CredentialStoreAdapter
|
||||
|
||||
# Import register_tools from each tool module
|
||||
from .apollo_tool import register_tools as register_apollo
|
||||
from .csv_tool import register_tools as register_csv
|
||||
from .email_tool import register_tools as register_email
|
||||
from .example_tool import register_tools as register_example
|
||||
@@ -76,6 +77,7 @@ def register_all_tools(
|
||||
# email supports multiple providers (Resend) with auto-detection
|
||||
register_email(mcp, credentials=credentials)
|
||||
register_hubspot(mcp, credentials=credentials)
|
||||
register_apollo(mcp, credentials=credentials)
|
||||
register_slack(mcp, credentials=credentials)
|
||||
|
||||
# Register file system toolkits
|
||||
@@ -112,6 +114,10 @@ def register_all_tools(
|
||||
"csv_append",
|
||||
"csv_info",
|
||||
"csv_sql",
|
||||
"apollo_enrich_person",
|
||||
"apollo_enrich_company",
|
||||
"apollo_search_people",
|
||||
"apollo_search_companies",
|
||||
"github_list_repos",
|
||||
"github_get_repo",
|
||||
"github_search_repos",
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
# Apollo.io Tool
|
||||
|
||||
B2B contact and company data enrichment via the Apollo.io API.
|
||||
|
||||
## Tools
|
||||
|
||||
| Tool | Description |
|
||||
|------|-------------|
|
||||
| `apollo_enrich_person` | Enrich a contact by email, LinkedIn URL, or name+domain |
|
||||
| `apollo_enrich_company` | Enrich a company by domain |
|
||||
| `apollo_search_people` | Search contacts with filters (titles, seniorities, locations, etc.) |
|
||||
| `apollo_search_companies` | Search companies with filters (industries, employee counts, etc.) |
|
||||
|
||||
## Authentication
|
||||
|
||||
Requires an Apollo.io API key passed via `APOLLO_API_KEY` environment variable or the credential store.
|
||||
|
||||
**How to get an API key:**
|
||||
|
||||
1. Sign up or log in at https://app.apollo.io/
|
||||
2. Go to Settings > Integrations > API
|
||||
3. Click "Connect" to generate your API key
|
||||
4. Copy the API key
|
||||
|
||||
## Pricing
|
||||
|
||||
| Plan | Price | Export Credits/month |
|
||||
|------|-------|---------------------|
|
||||
| Free | $0 | 10 |
|
||||
| Basic | $49/user/mo | 1,000 |
|
||||
| Professional | $79/user/mo | 2,000 |
|
||||
| Overage | $0.20/credit | - |
|
||||
|
||||
## Error Handling
|
||||
|
||||
Returns error dicts for common failure modes:
|
||||
|
||||
- `401` - Invalid API key
|
||||
- `403` - Insufficient credits or permissions
|
||||
- `404` - Resource not found
|
||||
- `422` - Invalid parameters
|
||||
- `429` - Rate limit exceeded
|
||||
@@ -0,0 +1,13 @@
|
||||
"""
|
||||
Apollo.io Tool - Contact and company data enrichment via Apollo API.
|
||||
|
||||
Supports API key authentication for:
|
||||
- Person enrichment by email or LinkedIn
|
||||
- Company enrichment by domain
|
||||
- People search with filters
|
||||
- Company search with filters
|
||||
"""
|
||||
|
||||
from .apollo_tool import register_tools
|
||||
|
||||
__all__ = ["register_tools"]
|
||||
@@ -0,0 +1,581 @@
|
||||
"""
|
||||
Apollo.io Tool - Contact and company data enrichment via Apollo API.
|
||||
|
||||
Supports:
|
||||
- API key authentication (APOLLO_API_KEY)
|
||||
|
||||
Use Cases:
|
||||
- Enrich contacts by email or LinkedIn URL
|
||||
- Enrich companies by domain
|
||||
- Search for people by titles, seniorities, locations
|
||||
- Search for companies by industries, employee counts, technologies
|
||||
|
||||
API Reference: https://apolloio.github.io/apollo-api-docs/
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import httpx
|
||||
from fastmcp import FastMCP
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from aden_tools.credentials import CredentialStoreAdapter
|
||||
|
||||
APOLLO_API_BASE = "https://api.apollo.io/api/v1"
|
||||
|
||||
|
||||
class _ApolloClient:
|
||||
"""Internal client wrapping Apollo.io API calls."""
|
||||
|
||||
def __init__(self, api_key: str):
|
||||
self._api_key = api_key
|
||||
|
||||
@property
|
||||
def _headers(self) -> dict[str, str]:
|
||||
return {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json",
|
||||
"Cache-Control": "no-cache",
|
||||
"X-Api-Key": self._api_key,
|
||||
}
|
||||
|
||||
def _handle_response(self, response: httpx.Response) -> dict[str, Any]:
|
||||
"""Handle common HTTP error codes."""
|
||||
if response.status_code == 401:
|
||||
return {"error": "Invalid Apollo API key"}
|
||||
if response.status_code == 403:
|
||||
return {
|
||||
"error": "Insufficient credits or permissions. Check your Apollo plan.",
|
||||
"help": "Apollo uses export credits for enrichment. Visit https://app.apollo.io/#/settings/plans",
|
||||
}
|
||||
if response.status_code == 404:
|
||||
return {"error": "Resource not found"}
|
||||
if response.status_code == 422:
|
||||
try:
|
||||
detail = response.json().get("error", response.text)
|
||||
except Exception:
|
||||
detail = response.text
|
||||
return {"error": f"Invalid parameters: {detail}"}
|
||||
if response.status_code == 429:
|
||||
return {"error": "Apollo rate limit exceeded. Try again later."}
|
||||
if response.status_code >= 400:
|
||||
try:
|
||||
detail = response.json().get("error", response.text)
|
||||
except Exception:
|
||||
detail = response.text
|
||||
return {"error": f"Apollo API error (HTTP {response.status_code}): {detail}"}
|
||||
return response.json()
|
||||
|
||||
def enrich_person(
|
||||
self,
|
||||
email: str | None = None,
|
||||
linkedin_url: str | None = None,
|
||||
first_name: str | None = None,
|
||||
last_name: str | None = None,
|
||||
name: str | None = None,
|
||||
domain: str | None = None,
|
||||
reveal_personal_emails: bool = False,
|
||||
reveal_phone_number: bool = False,
|
||||
) -> dict[str, Any]:
|
||||
"""Enrich a person by email, LinkedIn URL, or name and domain."""
|
||||
body: dict[str, Any] = {
|
||||
"reveal_personal_emails": reveal_personal_emails,
|
||||
"reveal_phone_number": reveal_phone_number,
|
||||
}
|
||||
|
||||
if email:
|
||||
body["email"] = email
|
||||
if linkedin_url:
|
||||
body["linkedin_url"] = linkedin_url
|
||||
if first_name:
|
||||
body["first_name"] = first_name
|
||||
if last_name:
|
||||
body["last_name"] = last_name
|
||||
if name:
|
||||
body["name"] = name
|
||||
if domain:
|
||||
body["domain"] = domain
|
||||
|
||||
response = httpx.post(
|
||||
f"{APOLLO_API_BASE}/people/match",
|
||||
headers=self._headers,
|
||||
params=body if not email and not linkedin_url else None,
|
||||
json=body,
|
||||
timeout=30.0,
|
||||
)
|
||||
result = self._handle_response(response)
|
||||
|
||||
# Handle "not found" gracefully
|
||||
if "error" not in result and result.get("person") is None:
|
||||
return {"match_found": False, "message": "No matching person found"}
|
||||
|
||||
if "error" not in result:
|
||||
person = result.get("person", {})
|
||||
return {
|
||||
"match_found": True,
|
||||
"person": {
|
||||
"id": person.get("id"),
|
||||
"first_name": person.get("first_name"),
|
||||
"last_name": person.get("last_name"),
|
||||
"name": person.get("name"),
|
||||
"title": person.get("title"),
|
||||
"email": person.get("email"),
|
||||
"email_status": person.get("email_status"),
|
||||
"phone_numbers": person.get("phone_numbers", []),
|
||||
"linkedin_url": person.get("linkedin_url"),
|
||||
"twitter_url": person.get("twitter_url"),
|
||||
"city": person.get("city"),
|
||||
"state": person.get("state"),
|
||||
"country": person.get("country"),
|
||||
"organization": {
|
||||
"id": person.get("organization", {}).get("id"),
|
||||
"name": person.get("organization", {}).get("name"),
|
||||
"domain": person.get("organization", {}).get("primary_domain"),
|
||||
"industry": person.get("organization", {}).get("industry"),
|
||||
"employee_count": person.get("organization", {}).get(
|
||||
"estimated_num_employees"
|
||||
),
|
||||
},
|
||||
},
|
||||
}
|
||||
return result
|
||||
|
||||
def enrich_company(self, domain: str) -> dict[str, Any]:
|
||||
"""Enrich a company by domain."""
|
||||
body: dict[str, Any] = {
|
||||
"domain": domain,
|
||||
}
|
||||
|
||||
response = httpx.post(
|
||||
f"{APOLLO_API_BASE}/organizations/enrich",
|
||||
headers=self._headers,
|
||||
json=body,
|
||||
timeout=30.0,
|
||||
)
|
||||
result = self._handle_response(response)
|
||||
|
||||
# Handle "not found" gracefully
|
||||
if "error" not in result and result.get("organization") is None:
|
||||
return {"match_found": False, "message": "No matching company found"}
|
||||
|
||||
if "error" not in result:
|
||||
org = result.get("organization", {})
|
||||
return {
|
||||
"match_found": True,
|
||||
"organization": {
|
||||
"id": org.get("id"),
|
||||
"name": org.get("name"),
|
||||
"domain": org.get("primary_domain"),
|
||||
"website_url": org.get("website_url"),
|
||||
"linkedin_url": org.get("linkedin_url"),
|
||||
"twitter_url": org.get("twitter_url"),
|
||||
"facebook_url": org.get("facebook_url"),
|
||||
"industry": org.get("industry"),
|
||||
"keywords": org.get("keywords", []),
|
||||
"employee_count": org.get("estimated_num_employees"),
|
||||
"employee_count_range": org.get("employee_count_range"),
|
||||
"annual_revenue": org.get("annual_revenue"),
|
||||
"annual_revenue_printed": org.get("annual_revenue_printed"),
|
||||
"total_funding": org.get("total_funding"),
|
||||
"total_funding_printed": org.get("total_funding_printed"),
|
||||
"latest_funding_round_date": org.get("latest_funding_round_date"),
|
||||
"latest_funding_stage": org.get("latest_funding_stage"),
|
||||
"founded_year": org.get("founded_year"),
|
||||
"phone": org.get("phone"),
|
||||
"city": org.get("city"),
|
||||
"state": org.get("state"),
|
||||
"country": org.get("country"),
|
||||
"street_address": org.get("street_address"),
|
||||
"technologies": org.get("technologies", []),
|
||||
"short_description": org.get("short_description"),
|
||||
},
|
||||
}
|
||||
return result
|
||||
|
||||
def search_people(
|
||||
self,
|
||||
titles: list[str] | None = None,
|
||||
seniorities: list[str] | None = None,
|
||||
locations: list[str] | None = None,
|
||||
company_sizes: list[str] | None = None,
|
||||
industries: list[str] | None = None,
|
||||
technologies: list[str] | None = None,
|
||||
limit: int = 10,
|
||||
) -> dict[str, Any]:
|
||||
"""Search for people with filters."""
|
||||
body: dict[str, Any] = {
|
||||
"per_page": min(limit, 100),
|
||||
"page": 1,
|
||||
}
|
||||
|
||||
if titles:
|
||||
body["person_titles"] = titles
|
||||
if seniorities:
|
||||
body["person_seniorities"] = seniorities
|
||||
if locations:
|
||||
body["person_locations"] = locations
|
||||
if company_sizes:
|
||||
body["organization_num_employees_ranges"] = company_sizes
|
||||
if industries:
|
||||
body["organization_industry_tag_ids"] = industries
|
||||
if technologies:
|
||||
body["currently_using_any_of_technology_uids"] = technologies
|
||||
|
||||
response = httpx.post(
|
||||
f"{APOLLO_API_BASE}/mixed_people/search",
|
||||
headers=self._headers,
|
||||
json=body,
|
||||
timeout=30.0,
|
||||
)
|
||||
result = self._handle_response(response)
|
||||
|
||||
if "error" not in result:
|
||||
people = result.get("people", [])
|
||||
return {
|
||||
"total": result.get("pagination", {}).get("total_entries", len(people)),
|
||||
"page": result.get("pagination", {}).get("page", 1),
|
||||
"per_page": result.get("pagination", {}).get("per_page", limit),
|
||||
"results": [
|
||||
{
|
||||
"id": p.get("id"),
|
||||
"first_name": p.get("first_name"),
|
||||
"last_name": p.get("last_name"),
|
||||
"name": p.get("name"),
|
||||
"title": p.get("title"),
|
||||
"email": p.get("email"),
|
||||
"email_status": p.get("email_status"),
|
||||
"linkedin_url": p.get("linkedin_url"),
|
||||
"city": p.get("city"),
|
||||
"state": p.get("state"),
|
||||
"country": p.get("country"),
|
||||
"seniority": p.get("seniority"),
|
||||
"organization": {
|
||||
"id": p.get("organization", {}).get("id")
|
||||
if p.get("organization")
|
||||
else None,
|
||||
"name": p.get("organization", {}).get("name")
|
||||
if p.get("organization")
|
||||
else None,
|
||||
"domain": p.get("organization", {}).get("primary_domain")
|
||||
if p.get("organization")
|
||||
else None,
|
||||
},
|
||||
}
|
||||
for p in people
|
||||
],
|
||||
}
|
||||
return result
|
||||
|
||||
def search_companies(
|
||||
self,
|
||||
industries: list[str] | None = None,
|
||||
employee_counts: list[str] | None = None,
|
||||
locations: list[str] | None = None,
|
||||
technologies: list[str] | None = None,
|
||||
limit: int = 10,
|
||||
) -> dict[str, Any]:
|
||||
"""Search for companies with filters."""
|
||||
body: dict[str, Any] = {
|
||||
"per_page": min(limit, 100),
|
||||
"page": 1,
|
||||
}
|
||||
|
||||
if industries:
|
||||
body["organization_industry_tag_ids"] = industries
|
||||
if employee_counts:
|
||||
body["organization_num_employees_ranges"] = employee_counts
|
||||
if locations:
|
||||
body["organization_locations"] = locations
|
||||
if technologies:
|
||||
body["currently_using_any_of_technology_uids"] = technologies
|
||||
|
||||
response = httpx.post(
|
||||
f"{APOLLO_API_BASE}/mixed_companies/search",
|
||||
headers=self._headers,
|
||||
json=body,
|
||||
timeout=30.0,
|
||||
)
|
||||
result = self._handle_response(response)
|
||||
|
||||
if "error" not in result:
|
||||
orgs = result.get("organizations", [])
|
||||
return {
|
||||
"total": result.get("pagination", {}).get("total_entries", len(orgs)),
|
||||
"page": result.get("pagination", {}).get("page", 1),
|
||||
"per_page": result.get("pagination", {}).get("per_page", limit),
|
||||
"results": [
|
||||
{
|
||||
"id": o.get("id"),
|
||||
"name": o.get("name"),
|
||||
"domain": o.get("primary_domain"),
|
||||
"website_url": o.get("website_url"),
|
||||
"linkedin_url": o.get("linkedin_url"),
|
||||
"industry": o.get("industry"),
|
||||
"employee_count": o.get("estimated_num_employees"),
|
||||
"employee_count_range": o.get("employee_count_range"),
|
||||
"annual_revenue_printed": o.get("annual_revenue_printed"),
|
||||
"city": o.get("city"),
|
||||
"state": o.get("state"),
|
||||
"country": o.get("country"),
|
||||
"short_description": o.get("short_description"),
|
||||
}
|
||||
for o in orgs
|
||||
],
|
||||
}
|
||||
return result
|
||||
|
||||
|
||||
def register_tools(
    mcp: FastMCP,
    credentials: CredentialStoreAdapter | None = None,
) -> None:
    """Register Apollo.io data enrichment tools with the MCP server."""
    # NOTE: the docstrings of the @mcp.tool() functions below are the tool
    # descriptions served to MCP clients — treat them as part of the API.

    def _get_api_key() -> str | None:
        """Get Apollo API key from credential manager or environment."""
        # Credential store (when supplied) takes precedence over the env var;
        # the env var is only consulted when no store is configured at all.
        if credentials is not None:
            api_key = credentials.get("apollo")
            # Defensive check: ensure we get a string, not a complex object
            if api_key is not None and not isinstance(api_key, str):
                raise TypeError(
                    f"Expected string from credentials.get('apollo'), got {type(api_key).__name__}"
                )
            return api_key
        return os.getenv("APOLLO_API_KEY")

    def _get_client() -> _ApolloClient | dict[str, str]:
        """Get an Apollo client, or return an error dict if no credentials."""
        # Missing credentials are reported as a tool-result error dict rather
        # than an exception, matching the error style of the tools below.
        api_key = _get_api_key()
        if not api_key:
            return {
                "error": "Apollo credentials not configured",
                "help": (
                    "Set APOLLO_API_KEY environment variable "
                    "or configure via credential store. "
                    "Get your API key at https://app.apollo.io/#/settings/integrations/api"
                ),
            }
        return _ApolloClient(api_key)

    # --- Person Enrichment ---

    @mcp.tool()
    def apollo_enrich_person(
        email: str | None = None,
        linkedin_url: str | None = None,
        first_name: str | None = None,
        last_name: str | None = None,
        name: str | None = None,
        domain: str | None = None,
        reveal_personal_emails: bool = False,
        reveal_phone_number: bool = False,
    ) -> dict:
        """
        Enrich a person's information by email, LinkedIn URL, or name and domain.

        Args:
            email: Person's email address
            linkedin_url: Person's LinkedIn profile URL
            first_name: Person's first name (use with last_name and domain)
            last_name: Person's last name (use with first_name and domain)
            name: Person's full name (use with domain)
            domain: Person's company domain (e.g., "acme.com")
            reveal_personal_emails: Whether to reveal personal email addresses (default: False)
            reveal_phone_number: Whether to reveal phone numbers (default: False)

        Returns:
            Dict with person details including:
            - Full name, title
            - Email and email status
            - Phone numbers (if revealed)
            - Location (city, state, country)
            - LinkedIn/Twitter URLs
            - Company info (name, industry, size)
            Or error dict if enrichment fails

        Example:
            apollo_enrich_person(email="john@acme.com")
            apollo_enrich_person(name="John Doe", domain="acme.com")
        """
        # _get_client returns an error dict (not a client) when credentials
        # are missing; pass it straight through as the tool result.
        client = _get_client()
        if isinstance(client, dict):
            return client

        # Validate that we have enough info to match
        has_email_or_linkedin = bool(email or linkedin_url)
        has_name_and_domain = bool((first_name and last_name and domain) or (name and domain))

        if not has_email_or_linkedin and not has_name_and_domain:
            return {
                "error": (
                    "Invalid search criteria. Provide either (email), (linkedin_url), "
                    "or (name/first_name+last_name AND domain)."
                )
            }
        # Network-level failures are converted to error dicts so the MCP
        # client always receives a structured result.
        try:
            return client.enrich_person(
                email=email,
                linkedin_url=linkedin_url,
                first_name=first_name,
                last_name=last_name,
                name=name,
                domain=domain,
                reveal_personal_emails=reveal_personal_emails,
                reveal_phone_number=reveal_phone_number,
            )
        except httpx.TimeoutException:
            return {"error": "Request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}

    # --- Company Enrichment ---

    @mcp.tool()
    def apollo_enrich_company(domain: str) -> dict:
        """
        Enrich a company by domain.

        Args:
            domain: Company domain (e.g., "acme.com")

        Returns:
            Dict with company firmographics including:
            - name, domain, website URL
            - Industry, keywords
            - Employee count and range
            - Annual revenue, funding info
            - Founded year, location
            - Technologies used
            Or error dict if enrichment fails

        Example:
            apollo_enrich_company(domain="openai.com")
        """
        # Same error-dict pattern as apollo_enrich_person.
        client = _get_client()
        if isinstance(client, dict):
            return client
        try:
            return client.enrich_company(domain)
        except httpx.TimeoutException:
            return {"error": "Request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}

    # --- People Search ---

    @mcp.tool()
    def apollo_search_people(
        titles: list[str] | None = None,
        seniorities: list[str] | None = None,
        locations: list[str] | None = None,
        company_sizes: list[str] | None = None,
        industries: list[str] | None = None,
        technologies: list[str] | None = None,
        limit: int = 10,
    ) -> dict:
        """
        Search for contacts with filters.

        Args:
            titles: Job titles to search for
                (e.g., ["VP Sales", "Director of Marketing"])
            seniorities: Seniority levels
                (e.g., ["vp", "director", "c_suite", "manager", "senior"])
            locations: Geographic locations
                (e.g., ["San Francisco, CA", "New York, NY"])
            company_sizes: Company employee count ranges
                (e.g., ["1-10", "11-50", "51-200", "201-500", "501-1000", "1001-5000"])
            industries: Industry tags
                (e.g., ["technology", "finance", "healthcare"])
            technologies: Technologies used by company
                (e.g., ["salesforce", "hubspot", "aws"])
            limit: Maximum results (1-100, default 10)

        Returns:
            Dict with:
            - total: Total matching results
            - results: List of matching contacts with email and company info
            Or error dict if search fails

        Example:
            apollo_search_people(
                titles=["VP Sales", "Head of Sales"],
                seniorities=["vp", "director"],
                company_sizes=["51-200", "201-500"],
                limit=25
            )
        """
        client = _get_client()
        if isinstance(client, dict):
            return client
        # All filter arguments are optional; the client only includes the
        # ones that are provided in the Apollo search request.
        try:
            return client.search_people(
                titles=titles,
                seniorities=seniorities,
                locations=locations,
                company_sizes=company_sizes,
                industries=industries,
                technologies=technologies,
                limit=limit,
            )
        except httpx.TimeoutException:
            return {"error": "Request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}

    # --- Company Search ---

    @mcp.tool()
    def apollo_search_companies(
        industries: list[str] | None = None,
        employee_counts: list[str] | None = None,
        locations: list[str] | None = None,
        technologies: list[str] | None = None,
        limit: int = 10,
    ) -> dict:
        """
        Search for companies with filters.

        Args:
            industries: Industry tags
                (e.g., ["technology", "finance", "healthcare"])
            employee_counts: Employee count ranges
                (e.g., ["1-10", "11-50", "51-200", "201-500", "501-1000"])
            locations: Geographic locations
                (e.g., ["San Francisco, CA", "United States"])
            technologies: Technologies used
                (e.g., ["salesforce", "hubspot", "aws", "kubernetes"])
            limit: Maximum results (1-100, default 10)

        Returns:
            Dict with:
            - total: Total matching results
            - results: List of matching companies with firmographic data
            Or error dict if search fails

        Example:
            apollo_search_companies(
                industries=["technology"],
                employee_counts=["51-200", "201-500"],
                technologies=["kubernetes"],
                limit=20
            )
        """
        client = _get_client()
        if isinstance(client, dict):
            return client
        try:
            return client.search_companies(
                industries=industries,
                employee_counts=employee_counts,
                locations=locations,
                technologies=technologies,
                limit=limit,
            )
        except httpx.TimeoutException:
            return {"error": "Request timed out"}
        except httpx.RequestError as e:
            return {"error": f"Network error: {e}"}
|
||||
@@ -0,0 +1,675 @@
|
||||
"""
|
||||
Tests for Apollo.io data enrichment tool.
|
||||
|
||||
Covers:
|
||||
- _ApolloClient methods (enrich_person, enrich_company, search_people, search_companies)
|
||||
- Error handling (401, 403, 404, 422, 429, 500, timeout)
|
||||
- Credential retrieval (CredentialStoreAdapter vs env var)
|
||||
- All 4 MCP tool functions
|
||||
- "Not found" graceful handling
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import httpx
|
||||
import pytest
|
||||
|
||||
from aden_tools.tools.apollo_tool.apollo_tool import (
|
||||
APOLLO_API_BASE,
|
||||
_ApolloClient,
|
||||
register_tools,
|
||||
)
|
||||
|
||||
# --- _ApolloClient tests ---
|
||||
|
||||
|
||||
class TestApolloClient:
|
||||
def setup_method(self):
|
||||
self.client = _ApolloClient("test-api-key")
|
||||
|
||||
def test_headers(self):
|
||||
headers = self.client._headers
|
||||
assert headers["Content-Type"] == "application/json"
|
||||
assert headers["Accept"] == "application/json"
|
||||
# API key is passed in X-Api-Key header
|
||||
assert headers["X-Api-Key"] == "test-api-key"
|
||||
|
||||
def test_handle_response_success(self):
|
||||
response = MagicMock()
|
||||
response.status_code = 200
|
||||
response.json.return_value = {"person": {"id": "123"}}
|
||||
assert self.client._handle_response(response) == {"person": {"id": "123"}}
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"status_code,expected_substring",
|
||||
[
|
||||
(401, "Invalid Apollo API key"),
|
||||
(403, "Insufficient credits"),
|
||||
(404, "not found"),
|
||||
(422, "Invalid parameters"),
|
||||
(429, "rate limit"),
|
||||
],
|
||||
)
|
||||
def test_handle_response_errors(self, status_code, expected_substring):
|
||||
response = MagicMock()
|
||||
response.status_code = status_code
|
||||
response.json.return_value = {"error": "Test error"}
|
||||
response.text = "Test error"
|
||||
result = self.client._handle_response(response)
|
||||
assert "error" in result
|
||||
assert expected_substring in result["error"]
|
||||
|
||||
def test_handle_response_generic_error(self):
|
||||
response = MagicMock()
|
||||
response.status_code = 500
|
||||
response.json.return_value = {"error": "Internal Server Error"}
|
||||
result = self.client._handle_response(response)
|
||||
assert "error" in result
|
||||
assert "500" in result["error"]
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_enrich_person_by_email(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"person": {
|
||||
"id": "p123",
|
||||
"first_name": "John",
|
||||
"last_name": "Doe",
|
||||
"name": "John Doe",
|
||||
"title": "VP Sales",
|
||||
"email": "john@acme.com",
|
||||
"email_status": "verified",
|
||||
"phone_numbers": [{"sanitized_number": "+1234567890"}],
|
||||
"linkedin_url": "https://linkedin.com/in/johndoe",
|
||||
"twitter_url": None,
|
||||
"city": "San Francisco",
|
||||
"state": "California",
|
||||
"country": "United States",
|
||||
"organization": {
|
||||
"id": "o456",
|
||||
"name": "Acme Inc",
|
||||
"primary_domain": "acme.com",
|
||||
"industry": "Technology",
|
||||
"estimated_num_employees": 250,
|
||||
},
|
||||
}
|
||||
}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
result = self.client.enrich_person(email="john@acme.com")
|
||||
|
||||
mock_post.assert_called_once_with(
|
||||
f"{APOLLO_API_BASE}/people/match",
|
||||
headers=self.client._headers,
|
||||
params=None,
|
||||
json={
|
||||
"email": "john@acme.com",
|
||||
"reveal_personal_emails": False,
|
||||
"reveal_phone_number": False,
|
||||
},
|
||||
timeout=30.0,
|
||||
)
|
||||
assert result["match_found"] is True
|
||||
assert result["person"]["first_name"] == "John"
|
||||
assert result["person"]["title"] == "VP Sales"
|
||||
assert result["person"]["organization"]["name"] == "Acme Inc"
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_enrich_person_by_linkedin(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"person": {
|
||||
"id": "p456",
|
||||
"first_name": "Jane",
|
||||
"last_name": "Smith",
|
||||
"name": "Jane Smith",
|
||||
"title": "CTO",
|
||||
"email": "jane@startup.io",
|
||||
"linkedin_url": "https://linkedin.com/in/janesmith",
|
||||
"organization": {},
|
||||
}
|
||||
}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
result = self.client.enrich_person(linkedin_url="https://linkedin.com/in/janesmith")
|
||||
|
||||
call_json = mock_post.call_args.kwargs["json"]
|
||||
assert call_json["linkedin_url"] == "https://linkedin.com/in/janesmith"
|
||||
assert result["match_found"] is True
|
||||
assert result["person"]["title"] == "CTO"
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_enrich_person_by_name_and_domain(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"person": {"id": "p123"}}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
self.client.enrich_person(name="John Doe", domain="acme.com")
|
||||
|
||||
call_json = mock_post.call_args.kwargs["json"]
|
||||
assert call_json["name"] == "John Doe"
|
||||
assert call_json["domain"] == "acme.com"
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_enrich_person_with_reveal_flags(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"person": {"id": "p123"}}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
self.client.enrich_person(
|
||||
email="john@acme.com",
|
||||
reveal_personal_emails=True,
|
||||
reveal_phone_number=True,
|
||||
)
|
||||
|
||||
call_json = mock_post.call_args.kwargs["json"]
|
||||
assert call_json["reveal_personal_emails"] is True
|
||||
assert call_json["reveal_phone_number"] is True
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_enrich_person_with_optional_params(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"person": {"id": "p789"}}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
self.client.enrich_person(
|
||||
email="john@acme.com",
|
||||
first_name="John",
|
||||
last_name="Doe",
|
||||
domain="acme.com",
|
||||
)
|
||||
|
||||
call_json = mock_post.call_args.kwargs["json"]
|
||||
assert call_json["email"] == "john@acme.com"
|
||||
assert call_json["first_name"] == "John"
|
||||
assert call_json["last_name"] == "Doe"
|
||||
assert call_json["domain"] == "acme.com"
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_enrich_person_not_found(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"person": None}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
result = self.client.enrich_person(email="nobody@nowhere.xyz")
|
||||
|
||||
assert result["match_found"] is False
|
||||
assert "No matching person found" in result["message"]
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_enrich_company(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"organization": {
|
||||
"id": "o123",
|
||||
"name": "OpenAI",
|
||||
"primary_domain": "openai.com",
|
||||
"website_url": "https://openai.com",
|
||||
"linkedin_url": "https://linkedin.com/company/openai",
|
||||
"industry": "Artificial Intelligence",
|
||||
"keywords": ["ai", "machine learning", "gpt"],
|
||||
"estimated_num_employees": 1500,
|
||||
"employee_count_range": "1001-5000",
|
||||
"annual_revenue": 1000000000,
|
||||
"annual_revenue_printed": "$1B",
|
||||
"total_funding": 11000000000,
|
||||
"total_funding_printed": "$11B",
|
||||
"latest_funding_round_date": "2023-01-23",
|
||||
"latest_funding_stage": "Series D",
|
||||
"founded_year": 2015,
|
||||
"phone": "+1-415-123-4567",
|
||||
"city": "San Francisco",
|
||||
"state": "California",
|
||||
"country": "United States",
|
||||
"street_address": "123 Mission St",
|
||||
"technologies": ["python", "kubernetes", "aws"],
|
||||
"short_description": "AI research and deployment company",
|
||||
}
|
||||
}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
result = self.client.enrich_company("openai.com")
|
||||
|
||||
mock_post.assert_called_once_with(
|
||||
f"{APOLLO_API_BASE}/organizations/enrich",
|
||||
headers=self.client._headers,
|
||||
json={"domain": "openai.com"},
|
||||
timeout=30.0,
|
||||
)
|
||||
assert result["match_found"] is True
|
||||
assert result["organization"]["name"] == "OpenAI"
|
||||
assert result["organization"]["industry"] == "Artificial Intelligence"
|
||||
assert result["organization"]["employee_count"] == 1500
|
||||
assert "python" in result["organization"]["technologies"]
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_enrich_company_not_found(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"organization": None}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
result = self.client.enrich_company("notarealcompany12345.xyz")
|
||||
|
||||
assert result["match_found"] is False
|
||||
assert "No matching company found" in result["message"]
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_search_people(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"pagination": {"total_entries": 150, "page": 1, "per_page": 10},
|
||||
"people": [
|
||||
{
|
||||
"id": "p1",
|
||||
"first_name": "Alice",
|
||||
"last_name": "Johnson",
|
||||
"name": "Alice Johnson",
|
||||
"title": "VP Sales",
|
||||
"email": "alice@company.com",
|
||||
"email_status": "verified",
|
||||
"linkedin_url": "https://linkedin.com/in/alicejohnson",
|
||||
"city": "New York",
|
||||
"state": "New York",
|
||||
"country": "United States",
|
||||
"seniority": "vp",
|
||||
"organization": {
|
||||
"id": "o1",
|
||||
"name": "Company Inc",
|
||||
"primary_domain": "company.com",
|
||||
},
|
||||
},
|
||||
{
|
||||
"id": "p2",
|
||||
"first_name": "Bob",
|
||||
"last_name": "Smith",
|
||||
"name": "Bob Smith",
|
||||
"title": "Director of Sales",
|
||||
"email": "bob@another.com",
|
||||
"email_status": "verified",
|
||||
"linkedin_url": "https://linkedin.com/in/bobsmith",
|
||||
"city": "Chicago",
|
||||
"state": "Illinois",
|
||||
"country": "United States",
|
||||
"seniority": "director",
|
||||
"organization": None,
|
||||
},
|
||||
],
|
||||
}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
result = self.client.search_people(
|
||||
titles=["VP Sales", "Director of Sales"],
|
||||
seniorities=["vp", "director"],
|
||||
company_sizes=["51-200", "201-500"],
|
||||
limit=10,
|
||||
)
|
||||
|
||||
mock_post.assert_called_once()
|
||||
call_json = mock_post.call_args.kwargs["json"]
|
||||
assert call_json["person_titles"] == ["VP Sales", "Director of Sales"]
|
||||
assert call_json["person_seniorities"] == ["vp", "director"]
|
||||
assert call_json["organization_num_employees_ranges"] == ["51-200", "201-500"]
|
||||
assert call_json["per_page"] == 10
|
||||
|
||||
assert result["total"] == 150
|
||||
assert len(result["results"]) == 2
|
||||
assert result["results"][0]["title"] == "VP Sales"
|
||||
assert result["results"][0]["organization"]["name"] == "Company Inc"
|
||||
# Bob has no organization
|
||||
assert result["results"][1]["organization"]["name"] is None
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_search_people_limit_capped(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {"pagination": {}, "people": []}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
self.client.search_people(limit=200)
|
||||
|
||||
call_json = mock_post.call_args.kwargs["json"]
|
||||
assert call_json["per_page"] == 100
|
||||
|
||||
@patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
|
||||
def test_search_companies(self, mock_post):
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.json.return_value = {
|
||||
"pagination": {"total_entries": 50, "page": 1, "per_page": 10},
|
||||
"organizations": [
|
||||
{
|
||||
"id": "o1",
|
||||
"name": "Tech Startup",
|
||||
"primary_domain": "techstartup.io",
|
||||
"website_url": "https://techstartup.io",
|
||||
"linkedin_url": "https://linkedin.com/company/techstartup",
|
||||
"industry": "Technology",
|
||||
"estimated_num_employees": 75,
|
||||
"employee_count_range": "51-200",
|
||||
"annual_revenue_printed": "$10M",
|
||||
"city": "Austin",
|
||||
"state": "Texas",
|
||||
"country": "United States",
|
||||
"short_description": "A tech startup",
|
||||
},
|
||||
],
|
||||
}
|
||||
mock_post.return_value = mock_response
|
||||
|
||||
result = self.client.search_companies(
|
||||
industries=["technology"],
|
||||
employee_counts=["51-200"],
|
||||
technologies=["kubernetes"],
|
||||
limit=10,
|
||||
)
|
||||
|
||||
mock_post.assert_called_once()
|
||||
call_json = mock_post.call_args.kwargs["json"]
|
||||
assert call_json["organization_industry_tag_ids"] == ["technology"]
|
||||
assert call_json["organization_num_employees_ranges"] == ["51-200"]
|
||||
assert call_json["currently_using_any_of_technology_uids"] == ["kubernetes"]
|
||||
|
||||
assert result["total"] == 50
|
||||
assert len(result["results"]) == 1
|
||||
assert result["results"][0]["name"] == "Tech Startup"
|
||||
assert result["results"][0]["industry"] == "Technology"
|
||||
|
||||
|
||||
# --- MCP tool registration and credential tests ---
|
||||
|
||||
|
||||
class TestToolRegistration:
    """Registration wiring and credential resolution for the Apollo tools."""

    @staticmethod
    def _capturing_mcp():
        """Build a fake MCP server whose tool() decorator records registered fns.

        Returns a (mcp, fns) pair: the mock server and the list that collects
        every function passed through the decorator.
        """
        mcp = MagicMock()
        fns = []
        mcp.tool.return_value = lambda fn: fns.append(fn) or fn
        return mcp, fns

    def test_register_tools_registers_all_tools(self):
        mcp, _ = self._capturing_mcp()
        register_tools(mcp)
        # One registration per tool: enrich person/company, search people/companies.
        assert mcp.tool.call_count == 4

    def test_no_credentials_returns_error(self):
        mcp, fns = self._capturing_mcp()

        # No credential manager and a scrubbed environment: no key anywhere.
        with patch.dict("os.environ", {}, clear=True):
            register_tools(mcp, credentials=None)

        enrich = next(fn for fn in fns if fn.__name__ == "apollo_enrich_person")
        result = enrich(email="test@test.com")
        assert "error" in result
        assert "not configured" in result["error"]

    def test_credentials_from_credential_manager(self):
        mcp, fns = self._capturing_mcp()

        cred_manager = MagicMock()
        cred_manager.get.return_value = "test-api-key"
        register_tools(mcp, credentials=cred_manager)

        enrich = next(fn for fn in fns if fn.__name__ == "apollo_enrich_company")

        with patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post") as mock_post:
            mock_post.return_value = MagicMock(
                status_code=200,
                json=MagicMock(return_value={"organization": {"id": "123", "name": "Test"}}),
            )
            result = enrich(domain="test.com")

        # The key must be fetched from the manager under the "apollo" name.
        cred_manager.get.assert_called_with("apollo")
        assert result["match_found"] is True

    def test_credentials_from_env_var(self):
        mcp, fns = self._capturing_mcp()
        register_tools(mcp, credentials=None)

        enrich = next(fn for fn in fns if fn.__name__ == "apollo_enrich_company")

        with (
            patch.dict("os.environ", {"APOLLO_API_KEY": "env-api-key"}),
            patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post") as mock_post,
        ):
            mock_post.return_value = MagicMock(
                status_code=200,
                json=MagicMock(return_value={"organization": {"id": "123", "name": "Test"}}),
            )
            result = enrich(domain="test.com")

        assert result["match_found"] is True
        # The env-sourced key must land in the X-Api-Key request header.
        assert mock_post.call_args.kwargs["headers"]["X-Api-Key"] == "env-api-key"
|
||||
|
||||
|
||||
# --- Individual tool function tests ---
|
||||
|
||||
|
||||
class TestEnrichPersonTool:
    """Behavior of the registered apollo_enrich_person tool function."""

    def setup_method(self):
        """Register the tools against a fake MCP server with a stub credential."""
        self.registered = []
        self.mcp = MagicMock()
        self.mcp.tool.return_value = lambda fn: self.registered.append(fn) or fn
        credentials = MagicMock()
        credentials.get.return_value = "test-key"
        register_tools(self.mcp, credentials=credentials)

    def _fn(self, name):
        """Return the registered tool function matching *name* by __name__."""
        return next(fn for fn in self.registered if fn.__name__ == name)

    def test_enrich_person_requires_email_or_linkedin(self):
        """Calling with no identifying fields is rejected up front."""
        result = self._fn("apollo_enrich_person")()
        assert "error" in result
        assert "Invalid search criteria" in result["error"]

    @patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
    def test_enrich_person_success(self, mock_post):
        response = MagicMock()
        response.status_code = 200
        response.json.return_value = {
            "person": {
                "id": "p1",
                "first_name": "John",
                "last_name": "Doe",
                "title": "CEO",
                "organization": {},
            }
        }
        mock_post.return_value = response

        result = self._fn("apollo_enrich_person")(email="john@acme.com")

        assert result["match_found"] is True
        assert result["person"]["title"] == "CEO"

    @patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
    def test_enrich_person_timeout(self, mock_post):
        """A request timeout surfaces as an error payload, not an exception."""
        mock_post.side_effect = httpx.TimeoutException("timed out")

        result = self._fn("apollo_enrich_person")(email="test@test.com")

        assert "error" in result
        assert "timed out" in result["error"]

    @patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
    def test_enrich_person_network_error(self, mock_post):
        """A transport failure surfaces as a network-error payload."""
        mock_post.side_effect = httpx.RequestError("connection failed")

        result = self._fn("apollo_enrich_person")(email="test@test.com")

        assert "error" in result
        assert "Network error" in result["error"]
|
||||
|
||||
|
||||
class TestEnrichCompanyTool:
    """Behavior of the registered apollo_enrich_company tool function."""

    def setup_method(self):
        """Register the tools against a fake MCP server with a stub credential."""
        self.registered = []
        self.mcp = MagicMock()
        self.mcp.tool.return_value = lambda fn: self.registered.append(fn) or fn
        credentials = MagicMock()
        credentials.get.return_value = "test-key"
        register_tools(self.mcp, credentials=credentials)

    def _fn(self, name):
        """Return the registered tool function matching *name* by __name__."""
        return next(fn for fn in self.registered if fn.__name__ == name)

    @patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
    def test_enrich_company_success(self, mock_post):
        response = MagicMock()
        response.status_code = 200
        response.json.return_value = {
            "organization": {
                "id": "o1",
                "name": "Acme Inc",
                "industry": "Technology",
                "estimated_num_employees": 500,
            }
        }
        mock_post.return_value = response

        result = self._fn("apollo_enrich_company")(domain="acme.com")

        assert result["match_found"] is True
        assert result["organization"]["name"] == "Acme Inc"

    @patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
    def test_enrich_company_not_found(self, mock_post):
        response = MagicMock()
        response.status_code = 200
        response.json.return_value = {"organization": None}
        mock_post.return_value = response

        result = self._fn("apollo_enrich_company")(domain="notreal.xyz")

        assert result["match_found"] is False
|
||||
|
||||
|
||||
class TestSearchPeopleTool:
    """Behavior of the registered apollo_search_people tool function."""

    def setup_method(self):
        """Register the tools against a fake MCP server with a stub credential."""
        self.registered = []
        self.mcp = MagicMock()
        self.mcp.tool.return_value = lambda fn: self.registered.append(fn) or fn
        credentials = MagicMock()
        credentials.get.return_value = "test-key"
        register_tools(self.mcp, credentials=credentials)

    def _fn(self, name):
        """Return the registered tool function matching *name* by __name__."""
        return next(fn for fn in self.registered if fn.__name__ == name)

    @patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
    def test_search_people_success(self, mock_post):
        response = MagicMock()
        response.status_code = 200
        response.json.return_value = {
            "pagination": {"total_entries": 100},
            "people": [{"id": "p1", "name": "Alice", "title": "VP Sales"}],
        }
        mock_post.return_value = response

        result = self._fn("apollo_search_people")(titles=["VP Sales"])

        assert result["total"] == 100
        assert len(result["results"]) == 1

    @patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
    def test_search_people_with_all_filters(self, mock_post):
        """Every supported filter maps to its Apollo request-body key."""
        response = MagicMock()
        response.status_code = 200
        response.json.return_value = {"pagination": {}, "people": []}
        mock_post.return_value = response

        search = self._fn("apollo_search_people")
        search(
            titles=["CEO"],
            seniorities=["c_suite"],
            locations=["San Francisco"],
            company_sizes=["51-200"],
            industries=["technology"],
            technologies=["salesforce"],
            limit=25,
        )

        sent = mock_post.call_args.kwargs["json"]
        assert sent["person_titles"] == ["CEO"]
        assert sent["person_seniorities"] == ["c_suite"]
        assert sent["person_locations"] == ["San Francisco"]
        assert sent["organization_num_employees_ranges"] == ["51-200"]
|
||||
|
||||
|
||||
class TestSearchCompaniesTool:
    """Behavior of the registered apollo_search_companies tool function."""

    def setup_method(self):
        """Register the tools against a fake MCP server with a stub credential."""
        self.registered = []
        self.mcp = MagicMock()
        self.mcp.tool.return_value = lambda fn: self.registered.append(fn) or fn
        credentials = MagicMock()
        credentials.get.return_value = "test-key"
        register_tools(self.mcp, credentials=credentials)

    def _fn(self, name):
        """Return the registered tool function matching *name* by __name__."""
        return next(fn for fn in self.registered if fn.__name__ == name)

    @patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
    def test_search_companies_success(self, mock_post):
        response = MagicMock()
        response.status_code = 200
        response.json.return_value = {
            "pagination": {"total_entries": 50},
            "organizations": [{"id": "o1", "name": "Tech Corp", "industry": "Technology"}],
        }
        mock_post.return_value = response

        result = self._fn("apollo_search_companies")(industries=["technology"])

        assert result["total"] == 50
        assert len(result["results"]) == 1
        assert result["results"][0]["industry"] == "Technology"

    @patch("aden_tools.tools.apollo_tool.apollo_tool.httpx.post")
    def test_search_companies_with_all_filters(self, mock_post):
        """Every supported filter maps to its Apollo request-body key."""
        response = MagicMock()
        response.status_code = 200
        response.json.return_value = {"pagination": {}, "organizations": []}
        mock_post.return_value = response

        search = self._fn("apollo_search_companies")
        search(
            industries=["finance"],
            employee_counts=["201-500"],
            locations=["New York"],
            technologies=["aws"],
            limit=15,
        )

        sent = mock_post.call_args.kwargs["json"]
        assert sent["organization_industry_tag_ids"] == ["finance"]
        assert sent["organization_num_employees_ranges"] == ["201-500"]
        assert sent["organization_locations"] == ["New York"]
        assert sent["currently_using_any_of_technology_uids"] == ["aws"]
        assert sent["per_page"] == 15
|
||||
|
||||
|
||||
# --- Credential spec tests ---
|
||||
|
||||
|
||||
class TestCredentialSpec:
    """The "apollo" credential spec is declared with the expected metadata."""

    def test_apollo_credential_spec_exists(self):
        from aden_tools.credentials import CREDENTIAL_SPECS

        assert "apollo" in CREDENTIAL_SPECS

    def test_apollo_spec_env_var(self):
        from aden_tools.credentials import CREDENTIAL_SPECS

        assert CREDENTIAL_SPECS["apollo"].env_var == "APOLLO_API_KEY"

    def test_apollo_spec_tools(self):
        from aden_tools.credentials import CREDENTIAL_SPECS

        tools = CREDENTIAL_SPECS["apollo"].tools
        # Exactly the four Apollo tool functions, no more and no fewer.
        expected = {
            "apollo_enrich_person",
            "apollo_enrich_company",
            "apollo_search_people",
            "apollo_search_companies",
        }
        assert expected <= set(tools)
        assert len(tools) == 4
|
||||
Reference in New Issue
Block a user