Merge branch 'adenhq:main' into main

This commit is contained in:
Timothy @aden
2026-01-26 12:31:25 -08:00
committed by GitHub
9 changed files with 49 additions and 17 deletions
+17 -1
View File
@@ -6,6 +6,10 @@ Thank you for your interest in contributing to the Aden Agent Framework! This do
By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md).
## Contributor License Agreement
By submitting a Pull Request, you agree that your contributions will be licensed under the Aden Agent Framework license.
## Issue Assignment Policy
To prevent duplicate work and respect contributors' time, we require issue assignment before submitting PRs.
@@ -55,6 +59,12 @@ python -c "import framework; import aden_tools; print('✓ Setup complete')"
./quickstart.sh
```
> **Windows Users:**
> If you are on native Windows, it is recommended to use **WSL (Windows Subsystem for Linux)**.
> Alternatively, run PowerShell or Git Bash with Python 3.11+ installed, and disable "App Execution Aliases" in Windows settings.
> **Tip:** Installing Claude Code skills is optional for running existing agents, but required if you plan to **build new agents**.
## Commit Convention
We follow [Conventional Commits](https://www.conventionalcommits.org/):
@@ -119,6 +129,12 @@ feat(component): add new feature description
## Testing
> **Note:** When testing agents in `exports/`, always set PYTHONPATH:
>
> ```bash
> PYTHONPATH=core:exports python -m agent_name test
> ```
```bash
# Run all tests for the framework
cd core && python -m pytest
@@ -134,4 +150,4 @@ PYTHONPATH=core:exports python -m agent_name test
Feel free to open an issue for questions or join our [Discord community](https://discord.com/invite/MXE49hrKDk).
Thank you for contributing!
Thank you for contributing!
+9 -1
View File
@@ -9,6 +9,10 @@ Complete setup guide for building and running goal-driven agents with the Aden A
./scripts/setup-python.sh
```
> **Note for Windows Users:**
> Running the setup script on native Windows shells (PowerShell / Git Bash) may fail due to Python App Execution Aliases.
> It is **strongly recommended to use WSL (Windows Subsystem for Linux)** for a smoother setup experience.
This will:
- Check Python version (requires 3.11+)
@@ -50,6 +54,9 @@ python -c "import aden_tools; print('✓ aden_tools OK')"
python -c "import litellm; print('✓ litellm OK')"
```
> **Windows Tip:**
> On Windows, if the verification commands fail, ensure you are running them in **WSL** or after **disabling Python App Execution Aliases** in Windows Settings → Apps → App Execution Aliases.
## Requirements
### Python Version
@@ -63,6 +70,7 @@ python -c "import litellm; print('✓ litellm OK')"
- pip (latest version)
- 2GB+ RAM
- Internet connection (for LLM API calls)
- For Windows users: WSL 2 is recommended for full compatibility.
### API Keys (Optional)
@@ -368,4 +376,4 @@ When contributing agent packages:
- **Issues:** https://github.com/adenhq/hive/issues
- **Discord:** https://discord.com/invite/MXE49hrKDk
- **Documentation:** https://docs.adenhq.com/
- **Documentation:** https://docs.adenhq.com/
+1 -1
View File
@@ -264,7 +264,7 @@ See [ENVIRONMENT_SETUP.md](ENVIRONMENT_SETUP.md) for complete setup instructions
- **[Developer Guide](DEVELOPER.md)** - Comprehensive guide for developers
- [Getting Started](docs/getting-started.md) - Quick setup instructions
- [Configuration Guide](docs/configuration.md) - All configuration options
- [Architecture Overview](docs/architecture.md) - System design and structure
- [Architecture Overview](docs/architecture/README.md) - System design and structure
## Roadmap
+8 -4
View File
@@ -167,10 +167,14 @@ class GraphExecutor:
# Restore session state if provided
if session_state and "memory" in session_state:
# Restore memory from previous session
for key, value in session_state["memory"].items():
memory.write(key, value)
self.logger.info(f"📥 Restored session state with {len(session_state['memory'])} memory keys")
memory_data = session_state["memory"]
if isinstance(memory_data, dict):
# Restore memory from previous session
for key, value in memory_data.items():
memory.write(key, value)
self.logger.info(f"📥 Restored session state with {len(memory_data)} memory keys")
else:
self.logger.warning(f"⚠️ Invalid memory data type in session state: {type(memory_data).__name__}, expected dict")
# Write new input data to memory (each key individually)
if input_data:
+3 -3
View File
@@ -1,9 +1,9 @@
"""Anthropic Claude LLM provider - backward compatible wrapper around LiteLLM."""
import os
from typing import Any
from typing import Any, Callable
from framework.llm.provider import LLMProvider, LLMResponse, Tool
from framework.llm.provider import LLMProvider, LLMResponse, Tool, ToolUse, ToolResult
from framework.llm.litellm import LiteLLMProvider
@@ -85,7 +85,7 @@ class AnthropicProvider(LLMProvider):
messages: list[dict[str, Any]],
system: str,
tools: list[Tool],
tool_executor: callable,
tool_executor: Callable[[ToolUse], ToolResult],
max_iterations: int = 10,
) -> LLMResponse:
"""Run a tool-use loop until Claude produces a final response (via LiteLLM)."""
+3 -3
View File
@@ -8,14 +8,14 @@ See: https://docs.litellm.ai/docs/providers
"""
import json
from typing import Any
from typing import Any, Callable
try:
import litellm
except ImportError:
litellm = None
from framework.llm.provider import LLMProvider, LLMResponse, Tool, ToolUse
from framework.llm.provider import LLMProvider, LLMResponse, Tool, ToolUse, ToolResult
class LiteLLMProvider(LLMProvider):
@@ -154,7 +154,7 @@ class LiteLLMProvider(LLMProvider):
messages: list[dict[str, Any]],
system: str,
tools: list[Tool],
tool_executor: callable,
tool_executor: Callable[[ToolUse], ToolResult],
max_iterations: int = 10,
) -> LLMResponse:
"""Run a tool-use loop until the LLM produces a final response."""
+2 -2
View File
@@ -2,7 +2,7 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any
from typing import Any, Callable
@dataclass
@@ -86,7 +86,7 @@ class LLMProvider(ABC):
messages: list[dict[str, Any]],
system: str,
tools: list[Tool],
tool_executor: callable,
tool_executor: Callable[["ToolUse"], "ToolResult"],
max_iterations: int = 10,
) -> LLMResponse:
"""
+3 -1
View File
@@ -289,7 +289,9 @@ class AgentRuntime:
ExecutionResult or None if timeout
"""
exec_id = await self.trigger(entry_point_id, input_data, session_state=session_state)
stream = self._streams[entry_point_id]
stream = self._streams.get(entry_point_id)
if stream is None:
raise ValueError(f"Entry point '{entry_point_id}' not found")
return await stream.wait_for_completion(exec_id, timeout)
async def get_goal_progress(self) -> dict[str, Any]:
@@ -7,7 +7,9 @@ from ..security import get_secure_path
def register_tools(mcp: FastMCP) -> None:
"""Register file view tools with the MCP server."""
if getattr(mcp, "_file_tools_registered", False):
return
mcp._file_tools_registered = True
@mcp.tool()
def view_file(
path: str,