Compare commits
45 Commits
| SHA1 |
|---|
| b3adbe745f |
| f3fefe0cbc |
| c8a25a0287 |
| 5823513fde |
| 97ce8dfc54 |
| 5e628c7606 |
| 5b931982e3 |
| 8174f330ae |
| 9774e53720 |
| cf3296984c |
| ebb6605a86 |
| e9c1731c0f |
| 0e2333daaf |
| 5167c29aed |
| 4da4d3b2c0 |
| 3e622af484 |
| 6600ce0ef9 |
| 74d5dd03dd |
| d18091bb2c |
| d1a1f36d6e |
| 051b0fcef2 |
| e270d3210d |
| d4a66d4b5f |
| ad39b6ea50 |
| 71baf6166d |
| 25afdae093 |
| 21700eb2ec |
| 617462df52 |
| b3c1f1436b |
| 310b922ce8 |
| 20b6553b07 |
| 1035cc9481 |
| 5d6dd1caa6 |
| 45ba771650 |
| a4b15c0320 |
| 211619120e |
| a78bb16e4b |
| c93bcee933 |
| 08160a004a |
| ccd5de7496 |
| c332ef8823 |
| 06db11eebf |
| 859db7f056 |
| 6e0b5c7250 |
| becbdb3706 |
@@ -1,9 +1,8 @@
 """Queen global cross-session memory.

-Three-tier memory architecture:
+Two-tier memory architecture:

   ~/.hive/queen/MEMORY.md — semantic (who, what, why)
   ~/.hive/queen/memories/MEMORY-YYYY-MM-DD.md — episodic (daily journals)
-  ~/.hive/queen/session/{id}/data/adapt.md — working (session-scoped)

 Semantic and episodic files are injected at queen session start.
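For orientation, a minimal sketch of what the "injected at queen session start" step could look like under the new two-tier layout. The paths come from the docstring above, but the loader function itself is hypothetical:

```python
from datetime import date
from pathlib import Path

def load_queen_memory() -> str:
    """Hypothetical loader: gather semantic memory plus today's episodic journal."""
    queen = Path.home() / ".hive" / "queen"
    tiers = [
        queen / "MEMORY.md",                                        # semantic
        queen / "memories" / f"MEMORY-{date.today():%Y-%m-%d}.md",  # episodic
    ]
    return "\n\n".join(p.read_text(encoding="utf-8") for p in tiers if p.exists())
```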
@@ -197,21 +196,14 @@ no preamble, no code fences.


 def read_session_context(session_dir: Path, max_messages: int = 80) -> str:
-    """Extract a readable transcript from conversation parts + adapt.md.
+    """Extract a readable transcript from conversation parts.

-    Reads the last ``max_messages`` conversation parts and the session's
-    adapt.md (working memory). Tool results are omitted — only user and
-    assistant turns (with tool-call names noted) are included.
+    Reads the last ``max_messages`` conversation parts. Tool results are
+    omitted — only user and assistant turns (with tool-call names noted)
+    are included.
     """
     parts: list[str] = []

-    # Working notes
-    adapt_path = session_dir / "data" / "adapt.md"
-    if adapt_path.exists():
-        text = adapt_path.read_text(encoding="utf-8").strip()
-        if text:
-            parts.append(f"## Session Working Notes (adapt.md)\n\n{text}")
-
     # Conversation transcript
     parts_dir = session_dir / "conversations" / "parts"
     if parts_dir.exists():
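A hypothetical call site for the slimmed-down function. The session path follows the layout in the module docstring; the session id is illustrative:

```python
from pathlib import Path

# Read the last 80 user/assistant turns of a (hypothetical) queen session.
session_dir = Path.home() / ".hive" / "queen" / "session" / "abc123"
transcript = read_session_context(session_dir, max_messages=80)
```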
@@ -306,12 +298,12 @@ async def consolidate_queen_memory(
 ) -> None:
     """Update MEMORY.md and append a diary entry based on the current session.

-    Reads conversation parts and adapt.md from session_dir. Called
-    periodically in the background and once at session end. Failures are
-    logged and silently swallowed so they never block teardown.
+    Reads conversation parts from session_dir. Called periodically in
+    the background and once at session end. Failures are logged and
+    silently swallowed so they never block teardown.

     Args:
-        session_id: The session ID (used for the adapt.md path reference).
+        session_id: The session ID.
         session_dir: Path to the session directory (~/.hive/queen/session/{id}).
         llm: LLMProvider instance (must support acomplete()).
     """
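A sketch of the consolidation call at session end, using only the arguments documented above; the surrounding teardown code, `session_id`, and `llm` are assumed to exist in the caller:

```python
# Assumed teardown hook: session_id and llm come from the enclosing session.
session_dir = Path.home() / ".hive" / "queen" / "session" / session_id
await consolidate_queen_memory(session_id=session_id, session_dir=session_dir, llm=llm)
```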
@@ -337,7 +329,6 @@ async def consolidate_queen_memory(
     today_journal = read_episodic_memory()
     today = date.today()
     today_str = format_memory_date(today)
-    adapt_path = session_dir / "data" / "adapt.md"

     user_msg = (
         f"## Existing Semantic Memory (MEMORY.md)\n\n"
@@ -347,7 +338,7 @@ async def consolidate_queen_memory(
         f"{session_context}\n\n"
         f"## Session Reference\n\n"
         f"Session ID: {session_id}\n"
-        f"Session path: {adapt_path}\n"
+        f"Session dir: {session_dir}\n"
     )

     logger.debug(
@@ -20,7 +20,6 @@
 │ │ ├── conversation_2.md
 │ │ └── ...
 │ └── data/
-│     ├── adapt.md ← Working memory (session-scoped)
 │     ├── web_search_1.txt ← Spillover: large tool results
 │     ├── web_search_2.txt
 │     └── ...
@@ -28,13 +27,12 @@

 ---

-## The three memory tiers
+## The two memory tiers

 | File | Tier | Written by | Read at |
 |---|---|---|---|
 | `MEMORY.md` | Semantic | Consolidation LLM (auto, post-session) | Session start (injected into system prompt) |
 | `memories/MEMORY-YYYY-MM-DD.md` | Episodic | Queen via `write_to_diary` tool + consolidation LLM | Session start (today's file injected) |
-| `data/adapt.md` | Working | Queen via `update_session_notes` tool | Every turn (inlined in system prompt) |

 ---
@@ -52,7 +50,6 @@ in the original directory rather than fragmenting across multiple folders.
 end. It reads:

 1. `conversations/parts/*.json` — full message history (user + assistant turns; tool results skipped)
-2. `data/adapt.md` — current working notes

 It then makes two LLM writes:
@@ -580,8 +580,6 @@ def build_emergency_summary(

     # 5. Spillover files — list actual files so the LLM can load
     # them immediately instead of having to call list_data_files first.
-    # Inline adapt.md (agent memory) directly — it contains user rules
-    # and identity preferences that must survive emergency compaction.
     spillover_dir = config.spillover_dir if config else None
     if spillover_dir:
         try:
@@ -589,16 +587,7 @@ def build_emergency_summary(

             data_dir = Path(spillover_dir)
             if data_dir.is_dir():
-                # Inline adapt.md content directly
-                adapt_path = data_dir / "adapt.md"
-                if adapt_path.is_file():
-                    adapt_text = adapt_path.read_text(encoding="utf-8").strip()
-                    if adapt_text:
-                        parts.append(f"AGENT MEMORY (adapt.md):\n{adapt_text}")
-
-                all_files = sorted(
-                    f.name for f in data_dir.iterdir() if f.is_file() and f.name != "adapt.md"
-                )
+                all_files = sorted(f.name for f in data_dir.iterdir() if f.is_file())
                 # Separate conversation history files from regular data files
                 conv_files = [f for f in all_files if re.match(r"conversation_\d+\.md$", f)]
                 data_files = [f for f in all_files if f not in conv_files]
@@ -8,6 +8,7 @@ the context-window-exceeded error detector.
 from __future__ import annotations

 import asyncio
+import contextvars
 import json
 import logging
 import re
@@ -446,8 +447,11 @@ async def execute_tool(
             # Offload the executor call to a thread. Sync MCP executors
             # block on future.result() — running in a thread keeps the
             # event loop free so asyncio.wait_for can fire the timeout.
+            # Copy the current context so contextvars (e.g. data_dir from
+            # execution context) propagate into the worker thread.
             loop = asyncio.get_running_loop()
-            result = await loop.run_in_executor(None, tool_executor, tool_use)
+            ctx = contextvars.copy_context()
+            result = await loop.run_in_executor(None, ctx.run, tool_executor, tool_use)
             # Async executors return a coroutine — await it on the loop
             if asyncio.iscoroutine(result) or asyncio.isfuture(result):
                 result = await result
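The bug this hunk fixes is easy to reproduce outside the framework: `run_in_executor` (unlike `asyncio.to_thread`) does not copy the caller's context into the worker thread. A self-contained sketch; the variable name is illustrative, not the framework's:

```python
import asyncio
import contextvars

data_dir: contextvars.ContextVar[str | None] = contextvars.ContextVar("data_dir", default=None)

def tool_executor() -> str | None:
    # Runs in a ThreadPoolExecutor worker thread.
    return data_dir.get()

async def main() -> None:
    data_dir.set("/tmp/session/data")
    loop = asyncio.get_running_loop()
    # Bare call: the worker thread starts with an empty context -> None.
    print(await loop.run_in_executor(None, tool_executor))
    # Wrapped call: the copied context travels with the callable.
    ctx = contextvars.copy_context()
    print(await loop.run_in_executor(None, ctx.run, tool_executor))  # /tmp/session/data

asyncio.run(main())
```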
@@ -472,49 +476,6 @@ async def execute_tool(
     return result


-def record_learning(key: str, value: Any, spillover_dir: str | None) -> None:
-    """Append a set_output value to adapt.md as a learning entry.
-
-    Called at set_output time — the moment knowledge is produced — so that
-    adapt.md accumulates the agent's outputs across the session. Since
-    adapt.md is injected into the system prompt, these persist through
-    any compaction.
-    """
-    if not spillover_dir:
-        return
-    try:
-        adapt_path = Path(spillover_dir) / "adapt.md"
-        adapt_path.parent.mkdir(parents=True, exist_ok=True)
-        content = adapt_path.read_text(encoding="utf-8") if adapt_path.exists() else ""
-
-        if "## Outputs" not in content:
-            content += "\n\n## Outputs\n"
-
-        # Truncate long values for memory (full value is in shared memory)
-        v_str = str(value)
-        if len(v_str) > 500:
-            v_str = v_str[:500] + "…"
-
-        entry = f"- {key}: {v_str}\n"
-
-        # Replace existing entry for same key (update, not duplicate)
-        lines = content.splitlines(keepends=True)
-        replaced = False
-        for i, line in enumerate(lines):
-            if line.startswith(f"- {key}:"):
-                lines[i] = entry
-                replaced = True
-                break
-        if replaced:
-            content = "".join(lines)
-        else:
-            content += entry
-
-        adapt_path.write_text(content, encoding="utf-8")
-    except Exception as e:
-        logger.warning("Failed to record learning for key=%s: %s", key, e)
-
-
 def next_spill_filename(tool_name: str, counter: int) -> str:
     """Return a short, monotonic filename for a tool result spill."""
     # Shorten common tool name prefixes to save tokens
@@ -19,7 +19,6 @@ import re
 import time
 from collections.abc import Awaitable, Callable
 from datetime import UTC, datetime
-from pathlib import Path
 from typing import Any

 from framework.graph.conversation import ConversationStore, NodeConversation
@@ -80,7 +79,6 @@ from framework.graph.event_loop.tool_result_handler import (
     execute_tool,
     extract_json_metadata,
     is_transient_error,
-    record_learning,
    restore_spill_counter,
    truncate_tool_result,
 )
@@ -477,38 +475,6 @@ class EventLoopNode(NodeProtocol):
             system_prompt = f"{system_prompt}\n\n{ctx.default_skill_batch_nudge}"
             logger.info("[%s] DS-12: batch scenario detected, nudge injected", node_id)

-        # Inject agent working memory (adapt.md).
-        # If it doesn't exist yet, seed it with available context.
-        if self._config.spillover_dir:
-            _adapt_path = Path(self._config.spillover_dir) / "adapt.md"
-            if not _adapt_path.exists():
-                _adapt_path.parent.mkdir(parents=True, exist_ok=True)
-                seed = (
-                    f"## Identity\n{ctx.accounts_prompt}\n"
-                    if ctx.accounts_prompt
-                    else "# Session Working Memory\n"
-                )
-                _adapt_path.write_text(seed, encoding="utf-8")
-            if _adapt_path.exists():
-                _adapt_text = _adapt_path.read_text(encoding="utf-8").strip()
-                if _adapt_text:
-                    system_prompt = (
-                        f"{system_prompt}\n\n"
-                        "--- Session Working Memory ---\n"
-                        f"{_adapt_text}\n"
-                        "--- End Session Working Memory ---\n\n"
-                        "Maintain your session working memory by calling "
-                        'save_data("adapt.md", ...) or edit_data("adapt.md", ...)'
-                        " as you work.\n"
-                        "This is session-scoped scratch space. "
-                        "IMMEDIATELY save: account/identity rules, "
-                        "behavioral constraints, and preferences specific to "
-                        "this session. Also record current task state, "
-                        "decisions, and working notes. "
-                        "For lasting knowledge about the user, use "
-                        "update_queen_memory() and append_queen_journal() instead."
-                    )
-
         conversation = NodeConversation(
             system_prompt=system_prompt,
             max_context_tokens=self._config.max_context_tokens,
@@ -2215,7 +2181,6 @@ class EventLoopNode(NodeProtocol):
                 ),
                 is_error=False,
             )
-            self._record_learning(key, stored)
             outputs_set_this_turn.append(key)
             await self._publish_output_key_set(stream_id, node_id, key, execution_id)
             logged_tool_calls.append(
@@ -2979,20 +2944,6 @@ class EventLoopNode(NodeProtocol):
             skill_dirs=getattr(self, "_skill_dirs", []),
         )

-    def _record_learning(self, key: str, value: Any) -> None:
-        """Append a set_output value to adapt.md as a learning entry.
-
-        Called at set_output time — the moment knowledge is produced — so that
-        adapt.md accumulates the agent's outputs across the session. Since
-        adapt.md is injected into the system prompt, these persist through
-        any compaction.
-        """
-        return record_learning(
-            key=key,
-            value=value,
-            spillover_dir=self._config.spillover_dir,
-        )
-
     def _next_spill_filename(self, tool_name: str) -> str:
         """Return a short, monotonic filename for a tool result spill."""
         self._spill_counter += 1
@@ -1439,23 +1439,6 @@ class GraphExecutor:
             # Build Layer 2 (narrative) from current state
             narrative = build_narrative(memory, path, graph)

-            # Read agent working memory (adapt.md) once for both
-            # system prompt and transition marker.
-            _adapt_text: str | None = None
-            if self._storage_path:
-                _adapt_path = self._storage_path / "data" / "adapt.md"
-                if _adapt_path.exists():
-                    _raw = _adapt_path.read_text(encoding="utf-8").strip()
-                    _adapt_text = _raw or None
-
-            # Merge adapt.md into narrative for system prompt
-            if _adapt_text:
-                narrative = (
-                    f"{narrative}\n\n--- Agent Memory ---\n{_adapt_text}"
-                    if narrative
-                    else _adapt_text
-                )
-
             # Build per-node accounts prompt for the next node
             _node_accounts = self.accounts_prompt or None
             if self.accounts_data and self.tool_provider_map:
@@ -1490,7 +1473,6 @@ class GraphExecutor:
                 memory=memory,
                 cumulative_tool_names=sorted(cumulative_tool_names),
                 data_dir=data_dir,
-                adapt_content=_adapt_text,
             )
             await continuous_conversation.add_user_message(
                 marker,
@@ -264,7 +264,6 @@ def build_transition_marker(
     memory: SharedMemory,
     cumulative_tool_names: list[str],
     data_dir: Path | str | None = None,
-    adapt_content: str | None = None,
 ) -> str:
     """Build a 'State of the World' transition marker.

@@ -278,7 +277,6 @@ def build_transition_marker(
         memory: Current shared memory state.
         cumulative_tool_names: All tools available (cumulative set).
         data_dir: Path to spillover data directory.
-        adapt_content: Agent working memory (adapt.md) content.

     Returns:
         Transition marker message text.
@@ -344,10 +342,6 @@ def build_transition_marker(
             "\nData files (use load_data to access):\n" + "\n".join(file_lines)
         )

-    # Agent working memory
-    if adapt_content:
-        sections.append(f"\n--- Agent Memory ---\n{adapt_content}")
-
     # Available tools
     if cumulative_tool_names:
         sections.append("\nAvailable tools: " + ", ".join(sorted(cumulative_tool_names)))
@@ -9,6 +9,7 @@ import asyncio
 import logging
 import time
 import uuid
+from collections import OrderedDict
 from collections.abc import Callable
 from dataclasses import dataclass, field
 from datetime import datetime
@@ -44,6 +45,9 @@ class AgentRuntimeConfig:
     max_history: int = 1000
     execution_result_max: int = 1000
     execution_result_ttl_seconds: float | None = None
+    # Idempotency cache for trigger() deduplication
+    idempotency_ttl_seconds: float = 300.0
+    idempotency_max_keys: int = 10000
     # Webhook server config (only starts if webhook_routes is non-empty)
     webhook_host: str = "127.0.0.1"
     webhook_port: int = 8080
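The two knobs can be tuned per deployment; this is just the dataclass from this hunk, with the same values exercised in the tests below:

```python
# Shorter dedup window and a smaller cache, e.g. for a low-traffic agent.
config = AgentRuntimeConfig(idempotency_ttl_seconds=60.0, idempotency_max_keys=100)
```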
@@ -250,6 +254,10 @@ class AgentRuntime:
         # Next fire time for each timer entry point (ep_id -> datetime)
         self._timer_next_fire: dict[str, float] = {}

+        # Idempotency cache for trigger() deduplication
+        self._idempotency_keys: OrderedDict[str, str] = OrderedDict()
+        self._idempotency_times: dict[str, float] = {}
+
         # State
         self._running = False
         self._timers_paused = False
@@ -853,12 +861,29 @@ class AgentRuntime:
             # Primary graph (also stored in self._streams)
             return self._streams.get(entry_point_id)

+    def _prune_idempotency_keys(self) -> None:
+        """Prune expired idempotency keys based on TTL and max size."""
+        ttl = self._config.idempotency_ttl_seconds
+        if ttl > 0:
+            cutoff = time.time() - ttl
+            for key, recorded_at in list(self._idempotency_times.items()):
+                if recorded_at < cutoff:
+                    self._idempotency_times.pop(key, None)
+                    self._idempotency_keys.pop(key, None)
+
+        max_keys = self._config.idempotency_max_keys
+        if max_keys > 0:
+            while len(self._idempotency_keys) > max_keys:
+                old_key, _ = self._idempotency_keys.popitem(last=False)
+                self._idempotency_times.pop(old_key, None)
+
     async def trigger(
         self,
         entry_point_id: str,
         input_data: dict[str, Any],
         correlation_id: str | None = None,
         session_state: dict[str, Any] | None = None,
+        idempotency_key: str | None = None,
         graph_id: str | None = None,
     ) -> str:
         """
@@ -871,6 +896,10 @@ class AgentRuntime:
            input_data: Input data for the execution
            correlation_id: Optional ID to correlate related executions
            session_state: Optional session state to resume from (with paused_at, memory)
+           idempotency_key: Optional key for deduplication. If a trigger with
+               the same key was already processed within the TTL window, the
+               cached execution_id is returned instead of starting a new
+               execution. Useful for webhook providers that retry on timeout.
            graph_id: Graph to trigger on. ``None`` uses the active graph
                first, then falls back to the primary graph.
@@ -884,12 +913,32 @@ class AgentRuntime:
         if not self._running:
             raise RuntimeError("AgentRuntime is not running")

+        # Idempotency check: return cached execution_id for duplicate keys.
+        if idempotency_key is not None:
+            self._prune_idempotency_keys()
+            cached = self._idempotency_keys.get(idempotency_key)
+            if cached is not None:
+                logger.debug(
+                    "Idempotent trigger: key '%s' already seen, returning %s",
+                    idempotency_key,
+                    cached,
+                )
+                return cached
+
         stream = self._resolve_stream(entry_point_id, graph_id)
         if stream is None:
             raise ValueError(f"Entry point '{entry_point_id}' not found")

         run_id = uuid.uuid4().hex[:12]
-        return await stream.execute(input_data, correlation_id, session_state, run_id=run_id)
+        exec_id = await stream.execute(input_data, correlation_id, session_state, run_id=run_id)
+
+        # Cache after execute() so the value is always a real execution_id
+        # that callers can use for tracking.
+        if idempotency_key is not None:
+            self._idempotency_keys[idempotency_key] = exec_id
+            self._idempotency_times[idempotency_key] = time.time()
+
+        return exec_id

     async def trigger_and_wait(
         self,
@@ -897,6 +946,7 @@ class AgentRuntime:
         input_data: dict[str, Any],
         timeout: float | None = None,
         session_state: dict[str, Any] | None = None,
+        idempotency_key: str | None = None,
     ) -> ExecutionResult | None:
         """
         Trigger execution and wait for completion.
@@ -906,11 +956,17 @@ class AgentRuntime:
            input_data: Input data for the execution
            timeout: Maximum time to wait (seconds)
            session_state: Optional session state to resume from (with paused_at, memory)
+           idempotency_key: Optional key for deduplication (see trigger() for details).

         Returns:
            ExecutionResult or None if timeout
         """
-        exec_id = await self.trigger(entry_point_id, input_data, session_state=session_state)
+        exec_id = await self.trigger(
+            entry_point_id,
+            input_data,
+            session_state=session_state,
+            idempotency_key=idempotency_key,
+        )
         stream = self._resolve_stream(entry_point_id)
         if stream is None:
             raise ValueError(f"Entry point '{entry_point_id}' not found")
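A hedged usage sketch of the new parameter: a webhook handler that reuses the provider's event id as the idempotency key, so provider-side retries don't start duplicate executions. The handler shape and entry-point id are assumptions; only `trigger()` and `idempotency_key` come from this diff:

```python
async def handle_webhook(runtime: AgentRuntime, event: dict) -> dict:
    exec_id = await runtime.trigger(
        "webhook",                    # hypothetical entry point id
        input_data=event,
        idempotency_key=event["id"],  # provider delivery id, e.g. "evt_..."
    )
    return {"execution_id": exec_id}
```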
@@ -0,0 +1,268 @@
+"""Tests for webhook idempotency key support in AgentRuntime.trigger()."""
+
+import asyncio
+import time
+from collections import OrderedDict
+from unittest.mock import AsyncMock, MagicMock
+
+import pytest
+
+from framework.runtime.agent_runtime import AgentRuntime, AgentRuntimeConfig
+
+
+def _make_runtime(ttl=300.0, max_keys=10000):
+    """Create a minimal AgentRuntime with idempotency cache attributes.
+
+    Uses ``object.__new__`` to skip ``__init__`` and its heavy dependencies
+    (storage, LLM, skills) — we only need the cache and config for these tests.
+    """
+    runtime = object.__new__(AgentRuntime)
+    runtime._config = AgentRuntimeConfig(idempotency_ttl_seconds=ttl, idempotency_max_keys=max_keys)
+    runtime._running = True
+    runtime._lock = asyncio.Lock()
+    runtime._idempotency_keys = OrderedDict()
+    runtime._idempotency_times = {}
+    runtime._graphs = {}
+    runtime._active_graph_id = "primary"
+    runtime._graph_id = "primary"
+    runtime._streams = {}
+    runtime._entry_points = {}
+    return runtime
+
+
+def _make_runtime_with_stream(ttl=300.0, max_keys=10000):
+    """Create a mock runtime whose stream.execute() returns unique IDs."""
+    runtime = _make_runtime(ttl=ttl, max_keys=max_keys)
+
+    call_count = 0
+
+    async def _fake_execute(*args, **kwargs):
+        nonlocal call_count
+        call_count += 1
+        return f"session-{call_count:04d}"
+
+    stream = MagicMock()
+    stream.execute = _fake_execute
+    runtime._streams = {"webhook": stream}
+    runtime._entry_points = {"webhook": MagicMock()}
+    return runtime
+
+
+class TestIdempotencyConfig:
+    """Verify idempotency configuration defaults."""
+
+    def test_default_ttl(self):
+        config = AgentRuntimeConfig()
+        assert config.idempotency_ttl_seconds == 300.0
+
+    def test_default_max_keys(self):
+        config = AgentRuntimeConfig()
+        assert config.idempotency_max_keys == 10000
+
+    def test_custom_config(self):
+        config = AgentRuntimeConfig(idempotency_ttl_seconds=60.0, idempotency_max_keys=100)
+        assert config.idempotency_ttl_seconds == 60.0
+        assert config.idempotency_max_keys == 100
+
+
+class TestIdempotencyCache:
+    """Test the idempotency cache and pruning logic directly."""
+
+    def test_cache_stores_and_retrieves_key(self):
+        runtime = _make_runtime()
+        runtime._idempotency_keys["stripe-evt-123"] = "exec-001"
+        runtime._idempotency_times["stripe-evt-123"] = time.time()
+
+        assert runtime._idempotency_keys.get("stripe-evt-123") == "exec-001"
+
+    def test_cache_returns_none_for_unknown_key(self):
+        runtime = _make_runtime()
+        assert runtime._idempotency_keys.get("unknown") is None
+
+    def test_prune_removes_expired_keys(self):
+        runtime = _make_runtime(ttl=0.1)
+
+        runtime._idempotency_keys["old-key"] = "exec-old"
+        runtime._idempotency_times["old-key"] = time.time() - 1.0  # expired
+
+        runtime._prune_idempotency_keys()
+
+        assert "old-key" not in runtime._idempotency_keys
+        assert "old-key" not in runtime._idempotency_times
+
+    def test_prune_keeps_fresh_keys(self):
+        runtime = _make_runtime(ttl=300.0)
+
+        runtime._idempotency_keys["fresh-key"] = "exec-fresh"
+        runtime._idempotency_times["fresh-key"] = time.time()
+
+        runtime._prune_idempotency_keys()
+
+        assert "fresh-key" in runtime._idempotency_keys
+
+    def test_prune_respects_max_keys(self):
+        runtime = _make_runtime(max_keys=2)
+
+        for i in range(3):
+            key = f"key-{i}"
+            runtime._idempotency_keys[key] = f"exec-{i}"
+            runtime._idempotency_times[key] = time.time()
+
+        runtime._prune_idempotency_keys()
+
+        assert len(runtime._idempotency_keys) == 2
+        # Oldest (key-0) should be evicted
+        assert "key-0" not in runtime._idempotency_keys
+        assert "key-1" in runtime._idempotency_keys
+        assert "key-2" in runtime._idempotency_keys
+
+    def test_prune_evicts_fifo(self):
+        runtime = _make_runtime(max_keys=1)
+
+        runtime._idempotency_keys["first"] = "exec-1"
+        runtime._idempotency_times["first"] = time.time()
+        runtime._idempotency_keys["second"] = "exec-2"
+        runtime._idempotency_times["second"] = time.time()
+
+        runtime._prune_idempotency_keys()
+
+        assert len(runtime._idempotency_keys) == 1
+        assert "second" in runtime._idempotency_keys
+        assert "first" not in runtime._idempotency_keys
+
+    def test_mixed_expired_and_max_size(self):
+        runtime = _make_runtime(ttl=0.1, max_keys=2)
+
+        # Add expired key
+        runtime._idempotency_keys["expired"] = "exec-e"
+        runtime._idempotency_times["expired"] = time.time() - 1.0
+
+        # Add fresh keys
+        runtime._idempotency_keys["fresh-1"] = "exec-f1"
+        runtime._idempotency_times["fresh-1"] = time.time()
+        runtime._idempotency_keys["fresh-2"] = "exec-f2"
+        runtime._idempotency_times["fresh-2"] = time.time()
+
+        runtime._prune_idempotency_keys()
+
+        assert "expired" not in runtime._idempotency_keys
+        assert "fresh-1" in runtime._idempotency_keys
+        assert "fresh-2" in runtime._idempotency_keys
+
+
+class TestTriggerIdempotency:
+    """Tests for trigger() idempotency deduplication."""
+
+    def test_trigger_accepts_idempotency_key(self):
+        """trigger() accepts idempotency_key as a keyword argument."""
+        import inspect
+
+        sig = inspect.signature(AgentRuntime.trigger)
+        assert "idempotency_key" in sig.parameters
+
+    def test_idempotency_key_defaults_to_none(self):
+        """idempotency_key defaults to None (backward compatible)."""
+        import inspect
+
+        sig = inspect.signature(AgentRuntime.trigger)
+        assert sig.parameters["idempotency_key"].default is None
+
+    def test_trigger_and_wait_accepts_idempotency_key(self):
+        """trigger_and_wait() also accepts idempotency_key."""
+        import inspect
+
+        sig = inspect.signature(AgentRuntime.trigger_and_wait)
+        assert "idempotency_key" in sig.parameters
+
+    def test_trigger_and_wait_idempotency_key_defaults_to_none(self):
+        """trigger_and_wait() idempotency_key defaults to None."""
+        import inspect
+
+        sig = inspect.signature(AgentRuntime.trigger_and_wait)
+        assert sig.parameters["idempotency_key"].default is None
+
+    @pytest.mark.asyncio
+    async def test_duplicate_key_returns_cached_id(self):
+        """Same idempotency key within TTL returns the cached execution ID."""
+        runtime = _make_runtime_with_stream()
+
+        first = await runtime.trigger("webhook", {}, idempotency_key="stripe-evt-001")
+        second = await runtime.trigger("webhook", {}, idempotency_key="stripe-evt-001")
+
+        assert first == second
+        assert first == "session-0001"
+
+    @pytest.mark.asyncio
+    async def test_different_keys_produce_different_ids(self):
+        """Different idempotency keys start separate executions."""
+        runtime = _make_runtime_with_stream()
+
+        id_a = await runtime.trigger("webhook", {}, idempotency_key="evt-aaa")
+        id_b = await runtime.trigger("webhook", {}, idempotency_key="evt-bbb")
+
+        assert id_a != id_b
+        assert id_a == "session-0001"
+        assert id_b == "session-0002"
+
+    @pytest.mark.asyncio
+    async def test_none_key_always_starts_new_execution(self):
+        """key=None (default) skips dedup — every call starts fresh."""
+        runtime = _make_runtime_with_stream()
+
+        id_1 = await runtime.trigger("webhook", {})
+        id_2 = await runtime.trigger("webhook", {})
+
+        assert id_1 != id_2
+        assert len(runtime._idempotency_keys) == 0  # nothing cached
+
+    @pytest.mark.asyncio
+    async def test_expired_key_allows_new_execution(self):
+        """After TTL expires, the same key starts a new execution."""
+        runtime = _make_runtime_with_stream(ttl=0.1)
+
+        first = await runtime.trigger("webhook", {}, idempotency_key="evt-expire")
+
+        # Backdate the cached timestamp so the key looks expired
+        runtime._idempotency_times["evt-expire"] = time.time() - 1.0
+
+        second = await runtime.trigger("webhook", {}, idempotency_key="evt-expire")
+
+        assert first != second
+        assert first == "session-0001"
+        assert second == "session-0002"
+
+    @pytest.mark.asyncio
+    async def test_stream_not_found_does_not_cache(self):
+        """If entry point doesn't exist, nothing is cached."""
+        runtime = _make_runtime_with_stream()
+
+        with pytest.raises(ValueError, match="not found"):
+            await runtime.trigger("nonexistent", {}, idempotency_key="evt-orphan")
+
+        assert "evt-orphan" not in runtime._idempotency_keys
+
+    @pytest.mark.asyncio
+    async def test_execute_error_does_not_cache(self):
+        """If stream.execute() raises, nothing is cached so retries can go through."""
+        runtime = _make_runtime()
+
+        failing_stream = MagicMock()
+        failing_stream.execute = AsyncMock(side_effect=RuntimeError("stream not running"))
+        runtime._streams = {"webhook": failing_stream}
+        runtime._entry_points = {"webhook": MagicMock()}
+
+        with pytest.raises(RuntimeError, match="stream not running"):
+            await runtime.trigger("webhook", {}, idempotency_key="evt-123")
+
+        assert "evt-123" not in runtime._idempotency_keys
+
+    @pytest.mark.asyncio
+    async def test_cache_holds_real_execution_id(self):
+        """Cached value matches the actual execution ID from execute()."""
+        runtime = _make_runtime_with_stream()
+
+        exec_id = await runtime.trigger("webhook", {}, idempotency_key="evt-real")
+
+        cached = runtime._idempotency_keys.get("evt-real")
+        assert cached == exec_id
+        assert cached == "session-0001"
@@ -7,8 +7,8 @@ tooling, CI gates, and hive skill doctor.

 from __future__ import annotations

-import os
 import stat
+import sys
 from dataclasses import dataclass, field
 from pathlib import Path
@@ -134,11 +134,11 @@ def validate_strict(path: Path) -> ValidationResult:
     if not frontmatter.get("license"):
         warnings.append("No 'license' field — consider adding a license (e.g. MIT, Apache-2.0).")

-    # 11. Scripts in scripts/ exist and are executable (POSIX only —
-    # Windows does not use POSIX permission bits)
+    # 11. Scripts in scripts/ exist and are executable
+    # Windows has no POSIX executable bits; skip this check there.
     base_dir = path.parent
     scripts_dir = base_dir / "scripts"
-    if scripts_dir.is_dir() and os.name != "nt":
+    if scripts_dir.is_dir() and sys.platform != "win32":
         for script_path in sorted(scripts_dir.iterdir()):
             if script_path.is_file():
                 if not (script_path.stat().st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)):
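A minimal sketch of the permission check itself, using the same bit mask as the validator above (the helper name is ours, not the validator's):

```python
import stat
from pathlib import Path

def is_executable(script: Path) -> bool:
    """True if any of the user/group/other execute bits are set (POSIX only)."""
    return bool(script.stat().st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))

# Fixing a flagged script: `chmod +x scripts/run.sh`, or in Python:
# script.chmod(script.stat().st_mode | stat.S_IXUSR)
```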
@@ -5,8 +5,11 @@ One test per strict check — happy path plus each individual failure mode.

 from __future__ import annotations

+import sys
+
 from pathlib import Path

 import pytest

 from framework.skills.validator import validate_strict
@@ -272,6 +275,7 @@ license: MIT


 class TestCheck11Scripts:
+    @pytest.mark.skipif(sys.platform == "win32", reason="Windows has no POSIX executable bits")
     def test_error_on_non_executable_script(self, tmp_path):
         path = _write_skill(tmp_path, _VALID_CONTENT)
         scripts_dir = path.parent / "scripts"
@@ -285,6 +289,7 @@ class TestCheck11Scripts:
         assert result.passed is False
         assert any("executable" in e.lower() for e in result.errors)

+    @pytest.mark.skipif(sys.platform == "win32", reason="Windows has no POSIX executable bits")
     def test_passes_with_executable_script(self, tmp_path):
         path = _write_skill(tmp_path, _VALID_CONTENT)
         scripts_dir = path.parent / "scripts"
@@ -0,0 +1,89 @@
+"""Regression test for contextvars propagation through tool executor threads.
+
+When execute_tool offloads a sync tool executor to a thread pool via
+run_in_executor, Python does not automatically propagate contextvars.
+This caused auto-injected params like data_dir to be lost, making MCP
+tools (save_data, serve_file_to_user, etc.) fail with
+"Missing required argument: data_dir".
+
+Fix: wrap the executor call in contextvars.copy_context().run().
+"""
+
+from __future__ import annotations
+
+import pytest
+
+from framework.graph.event_loop.tool_result_handler import execute_tool
+from framework.llm.provider import ToolResult, ToolUse
+from framework.runner.tool_registry import _execution_context
+
+
+class _ToolCallEvent:
+    """Minimal stand-in for ToolCallEvent used by execute_tool."""
+
+    def __init__(self, tool_name: str, tool_input: dict) -> None:
+        self.tool_use_id = "test_id"
+        self.tool_name = tool_name
+        self.tool_input = tool_input
+
+
+@pytest.mark.asyncio
+async def test_execution_context_propagates_to_tool_executor() -> None:
+    """data_dir set via set_execution_context must be visible inside
+    the tool executor even though it runs in a thread pool."""
+
+    captured: dict = {}
+
+    def capturing_executor(tool_use: ToolUse) -> ToolResult:
+        # This runs inside run_in_executor (a worker thread).
+        # Before the fix, _execution_context.get() returned None here.
+        ctx = _execution_context.get()
+        captured["exec_ctx"] = ctx
+        return ToolResult(
+            tool_use_id=tool_use.id,
+            content="ok",
+        )
+
+    token = _execution_context.set({"data_dir": "/tmp/test_data"})
+    try:
+        tc = _ToolCallEvent("test_tool", {"arg": "value"})
+        result = await execute_tool(
+            tool_executor=capturing_executor,
+            tc=tc,
+            timeout=10,
+        )
+    finally:
+        _execution_context.reset(token)
+
+    assert result.content == "ok"
+    assert captured["exec_ctx"] is not None, (
+        "execution context was None inside worker thread, "
+        "contextvars did not propagate through run_in_executor"
+    )
+    assert captured["exec_ctx"]["data_dir"] == "/tmp/test_data"
+
+
+@pytest.mark.asyncio
+async def test_execution_context_none_when_not_set() -> None:
+    """When no execution context is set, executor should still work
+    (context is None, not an error)."""
+
+    captured: dict = {}
+
+    def capturing_executor(tool_use: ToolUse) -> ToolResult:
+        captured["exec_ctx"] = _execution_context.get()
+        return ToolResult(
+            tool_use_id=tool_use.id,
+            content="ok",
+        )
+
+    # Don't set any execution context
+    tc = _ToolCallEvent("test_tool", {})
+    result = await execute_tool(
+        tool_executor=capturing_executor,
+        tc=tc,
+        timeout=10,
+    )
+
+    assert result.content == "ok"
+    assert captured["exec_ctx"] is None
@@ -393,3 +393,407 @@ def test_tool_execution_error_truncates_large_inputs(caplog):
     log_messages = [record.message for record in caplog.records]
     full_log = " ".join(log_messages)
     assert "...(truncated)" in full_log
+
+
+# ---------------------------------------------------------------------------
+# register_function — type inference and required/optional parameters
+# ---------------------------------------------------------------------------
+
+
+def test_register_function_infers_type_hints():
+    """register_function should map Python type annotations to JSON schema types."""
+    registry = ToolRegistry()
+
+    def my_func(a: int, b: float, c: bool, d: dict, e: list, f: str = "x") -> None:
+        pass
+
+    registry.register_function(my_func)
+
+    tool = registry.get_tools()["my_func"]
+    props = tool.parameters["properties"]
+    assert props["a"]["type"] == "integer"
+    assert props["b"]["type"] == "number"
+    assert props["c"]["type"] == "boolean"
+    assert props["d"]["type"] == "object"
+    assert props["e"]["type"] == "array"
+    assert props["f"]["type"] == "string"
+
+
+def test_register_function_required_vs_optional():
+    """Parameters without defaults should appear in 'required'."""
+    registry = ToolRegistry()
+
+    def my_func(required_param: str, optional_param: int = 5) -> None:
+        pass
+
+    registry.register_function(my_func)
+
+    tool = registry.get_tools()["my_func"]
+    required = tool.parameters["required"]
+    assert "required_param" in required
+    assert "optional_param" not in required
+
+
+def test_register_function_custom_name_and_description():
+    """register_function should accept explicit name and description overrides."""
+    registry = ToolRegistry()
+
+    def original_name() -> None:
+        """Original docstring."""
+        pass
+
+    registry.register_function(original_name, name="custom_name", description="Custom desc")
+    tools = registry.get_tools()
+    assert "custom_name" in tools
+    assert "original_name" not in tools
+    assert tools["custom_name"].description == "Custom desc"
+
+
+def test_register_function_falls_back_to_docstring():
+    """register_function should use the docstring if no description is given."""
+    registry = ToolRegistry()
+
+    def my_tool() -> None:
+        """My docstring."""
+        pass
+
+    registry.register_function(my_tool)
+    tool = registry.get_tools()["my_tool"]
+    assert tool.description == "My docstring."
+
+
+def test_register_function_executor_calls_function():
+    """The executor created by register_function should call the underlying function."""
+    registry = ToolRegistry()
+    calls = []
+
+    def multiply(x: int, y: int) -> int:
+        calls.append((x, y))
+        return x * y
+
+    registry.register_function(multiply)
+    tool_use = ToolUse(id="call_1", name="multiply", input={"x": 3, "y": 4})
+    executor = registry.get_executor()
+    result = executor(tool_use)
+
+    assert calls == [(3, 4)]
+    assert "12" in result.content
+
+
+# ---------------------------------------------------------------------------
+# @tool decorator discovery via discover_from_module
+# ---------------------------------------------------------------------------
+
+
+def test_discover_from_module_finds_tool_decorated_functions(tmp_path):
+    """discover_from_module should pick up functions decorated with @tool."""
+    module_src = """
+    from framework.runner.tool_registry import tool
+
+    @tool(description="Say hello")
+    def greet(name: str) -> str:
+        return f"Hello {name}"
+    """
+    module_path = tmp_path / "agent_tools.py"
+    module_path.write_text(textwrap.dedent(module_src))
+
+    registry = ToolRegistry()
+    count = registry.discover_from_module(module_path)
+    assert count == 1
+    assert "greet" in registry.get_tools()
+
+
+def test_discover_from_module_returns_zero_for_missing_file(tmp_path):
+    """discover_from_module should return 0 when the file does not exist."""
+    registry = ToolRegistry()
+    count = registry.discover_from_module(tmp_path / "nonexistent.py")
+    assert count == 0
+
+
+def test_discover_from_module_registers_mock_executor_without_tool_executor(tmp_path):
+    """When TOOLS dict exists but no tool_executor, a mock executor is used."""
+    module_src = """
+    from framework.llm.provider import Tool
+
+    TOOLS = {
+        "mock_tool": Tool(
+            name="mock_tool",
+            description="Has no executor",
+            parameters={"type": "object", "properties": {}},
+        ),
+    }
+    """
+    module_path = tmp_path / "agent_tools.py"
+    module_path.write_text(textwrap.dedent(module_src))
+
+    registry = ToolRegistry()
+    count = registry.discover_from_module(module_path)
+    assert count == 1
+
+    registered = registry._tools["mock_tool"]  # noqa: SLF001
+    result = registered.executor({"foo": "bar"})
+    assert result == {"mock": True, "inputs": {"foo": "bar"}}
+
+
+# ---------------------------------------------------------------------------
+# has_tool / get_registered_names
+# ---------------------------------------------------------------------------
+
+
+def test_has_tool_returns_true_for_registered_tool():
+    registry = ToolRegistry()
+    tool = Tool(name="t", description="d", parameters={"type": "object", "properties": {}})
+    registry.register("t", tool, lambda inputs: inputs)
+    assert registry.has_tool("t") is True
+
+
+def test_has_tool_returns_false_for_missing_tool():
+    registry = ToolRegistry()
+    assert registry.has_tool("not_there") is False
+
+
+def test_get_registered_names_lists_all_tools():
+    registry = ToolRegistry()
+    for name in ("alpha", "beta", "gamma"):
+        t = Tool(name=name, description="d", parameters={"type": "object", "properties": {}})
+        registry.register(name, t, lambda inputs: inputs)
+    assert set(registry.get_registered_names()) == {"alpha", "beta", "gamma"}
+
+
+# ---------------------------------------------------------------------------
+# Session context injection
+# ---------------------------------------------------------------------------
+
+
+def test_session_context_is_injected_into_mcp_tool_call(monkeypatch):
+    """Context params in session context should be forwarded to MCP tool calls."""
+    registry = ToolRegistry()
+    registry.set_session_context(workspace_id="ws-123", agent_id="agent-99")
+
+    received: list[dict] = []
+
+    class FakeClient:
+        def __init__(self, config):
+            self.config = config
+
+        def connect(self):
+            pass
+
+        def disconnect(self):
+            pass
+
+        def list_tools(self):
+            return [
+                SimpleNamespace(
+                    name="ctx_tool",
+                    description="context tool",
+                    input_schema={
+                        "type": "object",
+                        "properties": {
+                            "workspace_id": {"type": "string"},
+                            "agent_id": {"type": "string"},
+                        },
+                        "required": [],
+                    },
+                )
+            ]
+
+        def call_tool(self, tool_name, arguments):
+            received.append(dict(arguments))
+            return {"result": "ok"}
+
+    monkeypatch.setattr("framework.runner.mcp_client.MCPClient", FakeClient)
+
+    registry.register_mcp_server(
+        {"name": "ctx-server", "transport": "stdio", "command": "echo"},
+        use_connection_manager=False,
+    )
+
+    tool_use = ToolUse(id="c1", name="ctx_tool", input={})
+    executor = registry.get_executor()
+    executor(tool_use)
+
+    assert received, "call_tool was never called"
+    assert received[0].get("workspace_id") == "ws-123"
+    assert received[0].get("agent_id") == "agent-99"
+
+
+# ---------------------------------------------------------------------------
+# Execution context (contextvars isolation)
+# ---------------------------------------------------------------------------
+
+
+def test_execution_context_overrides_session_context(monkeypatch):
+    """Execution context values should win over session context for the same key."""
+    registry = ToolRegistry()
+    registry.set_session_context(workspace_id="session-ws")
+    received: list[dict] = []
+
+    class FakeClient:
+        def __init__(self, config):
+            self.config = config
+
+        def connect(self):
+            pass
+
+        def disconnect(self):
+            pass
+
+        def list_tools(self):
+            return [
+                SimpleNamespace(
+                    name="exec_tool",
+                    description="execution context tool",
+                    input_schema={
+                        "type": "object",
+                        "properties": {"workspace_id": {"type": "string"}},
+                        "required": [],
+                    },
+                )
+            ]
+
+        def call_tool(self, tool_name, arguments):
+            received.append(dict(arguments))
+            return {"result": "ok"}
+
+    monkeypatch.setattr("framework.runner.mcp_client.MCPClient", FakeClient)
+    registry.register_mcp_server(
+        {"name": "exec-server", "transport": "stdio", "command": "echo"},
+        use_connection_manager=False,
+    )
+
+    token = ToolRegistry.set_execution_context(workspace_id="exec-ws")
+    try:
+        tool_use = ToolUse(id="c2", name="exec_tool", input={})
+        executor = registry.get_executor()
+        executor(tool_use)
+    finally:
+        ToolRegistry.reset_execution_context(token)
+
+    assert received, "call_tool was never called"
+    assert received[0]["workspace_id"] == "exec-ws"
+
+
+# ---------------------------------------------------------------------------
+# _convert_mcp_tool_to_framework_tool — CONTEXT_PARAMS stripped
+# ---------------------------------------------------------------------------
+
+
+def test_convert_mcp_tool_strips_context_params():
+    """CONTEXT_PARAMS should be removed from the LLM-facing tool schema."""
+    registry = ToolRegistry()
+    mcp_tool = SimpleNamespace(
+        name="some_tool",
+        description="a tool",
+        input_schema={
+            "type": "object",
+            "properties": {
+                "workspace_id": {"type": "string"},  # context param → stripped
+                "agent_id": {"type": "string"},      # context param → stripped
+                "query": {"type": "string"},         # regular param → kept
+            },
+            "required": ["workspace_id", "query"],
+        },
+    )
+    tool = registry._convert_mcp_tool_to_framework_tool(mcp_tool)  # noqa: SLF001
+    props = tool.parameters["properties"]
+    assert "workspace_id" not in props
+    assert "agent_id" not in props
+    assert "query" in props
+    # workspace_id should also be stripped from required
+    assert "workspace_id" not in tool.parameters["required"]
+    assert "query" in tool.parameters["required"]
+
+
+# ---------------------------------------------------------------------------
+# load_mcp_config — both JSON config formats
+# ---------------------------------------------------------------------------
+
+
+def test_load_mcp_config_list_format(tmp_path, monkeypatch):
+    """load_mcp_config should accept the {\"servers\": [...]} list format."""
+    config_file = tmp_path / "mcp_servers.json"
+    config_file.write_text(
+        '{"servers": [{"name": "s1", "transport": "http", "url": "http://localhost:9000"}]}'
+    )
+
+    called_with = []
+
+    def fake_load_registry(server_list, **kwargs):
+        called_with.extend(server_list)
+        return []
+
+    registry = ToolRegistry()
+    monkeypatch.setattr(registry, "load_registry_servers", fake_load_registry)
+    registry.load_mcp_config(config_file)
+
+    assert len(called_with) == 1
+    assert called_with[0]["name"] == "s1"
+
+
+def test_load_mcp_config_dict_format(tmp_path, monkeypatch):
+    """load_mcp_config should accept the {\"server-name\": {...}} dict format."""
+    config_file = tmp_path / "mcp_servers.json"
+    config_file.write_text('{"my-server": {"transport": "http", "url": "http://localhost:9001"}}')
+
+    called_with = []
+
+    def fake_load_registry(server_list, **kwargs):
+        called_with.extend(server_list)
+        return []
+
+    registry = ToolRegistry()
+    monkeypatch.setattr(registry, "load_registry_servers", fake_load_registry)
+    registry.load_mcp_config(config_file)
+
+    assert len(called_with) == 1
+    assert called_with[0]["name"] == "my-server"
+
+
+def test_load_mcp_config_handles_invalid_json(tmp_path, caplog):
+    """load_mcp_config should log a warning and return gracefully on bad JSON."""
+    bad_file = tmp_path / "bad.json"
+    bad_file.write_text("{not valid json")
+
+    registry = ToolRegistry()
+    with caplog.at_level(logging.WARNING):
+        registry.load_mcp_config(bad_file)
+
+    assert any("Failed to load MCP config" in r.message for r in caplog.records)
+
+
+# ---------------------------------------------------------------------------
+# resync_mcp_servers_if_needed — no-op when nothing changed
+# ---------------------------------------------------------------------------
+
+
+def test_resync_returns_false_when_no_clients():
+    """resync_mcp_servers_if_needed should return False immediately with no clients."""
+    registry = ToolRegistry()
+    assert registry.resync_mcp_servers_if_needed() is False
+
+
+def test_resync_returns_false_when_credentials_unchanged(tmp_path, monkeypatch):
+    """Resync should return False when neither credentials nor ADEN_API_KEY changed."""
+    config_file = tmp_path / "mcp_servers.json"
+    config_file.write_text('{"servers": []}')
+
+    registry = ToolRegistry()
+    # Simulate that MCP was loaded (need at least one client and a config path)
+    registry._mcp_config_path = config_file  # noqa: SLF001
+
+    class _FakeClient:
+        config = SimpleNamespace(name="stub")
+
+        def disconnect(self):
+            pass
+
+    registry._mcp_clients.append(_FakeClient())  # noqa: SLF001
+    registry._mcp_cred_snapshot = set()  # noqa: SLF001
+    registry._mcp_aden_key_snapshot = None  # noqa: SLF001
+
+    # No credentials on disk and env var not set → nothing changed
+    monkeypatch.delenv("ADEN_API_KEY", raising=False)
+    monkeypatch.setattr(registry, "_snapshot_credentials", lambda: set())
+
+    assert registry.resync_mcp_servers_if_needed() is False
@@ -231,7 +231,6 @@ flowchart LR
         File1["web_search_1.txt"]
         File2["web_scrape_2.txt"]
         Conv1["conversation_1.md"]
-        Adapt["adapt.md"]
     end

     SaveFile --> SpilloverDir
@@ -256,7 +255,6 @@ flowchart LR
     subgraph SysPrompt [System Prompt Injection]
         FileList["DATA FILES:<br/> - web_search_1.txt<br/> - web_scrape_2.txt"]
        ConvList["CONVERSATION HISTORY:<br/> - conversation_1.md"]
-        AdaptInline["AGENT MEMORY:<br/>(adapt.md inlined)"]
     end

     SpilloverDir -->|"Listed on<br/>every turn"| SysPrompt
@@ -277,7 +275,7 @@ flowchart LR

 **4. File pointers survive compaction.** When the conversation exceeds the context budget, structure-preserving compaction (`compact_preserving_structure`) keeps tool-call messages (which are already tiny pointers) and spills freeform text (user/assistant prose) to numbered `conversation_N.md` files. A reference message replaces the removed text: `"[Previous conversation saved to 'conversation_1.md'. Use load_data('conversation_1.md') to review if needed.]"`. This means the agent retains exact knowledge of every tool it called and where each result is stored.

-**5. The system prompt lists all files** in the spillover directory on every turn. Data files (spilled tool results) and conversation history files are listed separately. `adapt.md` (agent memory / learned preferences) is inlined directly into the system prompt rather than listed — it survives even emergency compaction.
+**5. The system prompt lists all files** in the spillover directory on every turn. Data files (spilled tool results) and conversation history files are listed separately.

 ### Why This Pattern
@@ -291,7 +289,7 @@ flowchart LR

 ## Memory Reflection Logic

-Agents in Hive maintain memory through four interconnected mechanisms: a durable working memory file (`adapt.md`), the conversation history itself, a structured output accumulator, and a three-layer prompt composition system. Together they form a reflection loop where outputs, judge feedback, and execution state are continuously folded back into the agent's context.
+Agents in Hive maintain memory through three interconnected mechanisms: the conversation history itself, a structured output accumulator, and a three-layer prompt composition system. Together they form a reflection loop where outputs, judge feedback, and execution state are continuously folded back into the agent's context.

 ```mermaid
 flowchart TB
@@ -318,18 +316,6 @@ flowchart TB
     SetOutput --> OA_Mem
     OA_Mem --> OA_Cursor

-    %% =========================================
-    %% ADAPT.MD (AGENT WORKING MEMORY)
-    %% =========================================
-    subgraph AdaptMD [adapt.md — Agent Working Memory]
-        Seed["Seeded with<br/>identity + accounts"]
-        RecordLearning["_record_learning():<br/>append output entry<br/>(truncated to 500 chars)"]
-        AgentEdit["Agent calls<br/>save_data / edit_data<br/>to write rules,<br/>preferences, notes"]
-    end
-
-    SetOutput -->|"triggers"| RecordLearning
-    Seed -.->|"first run"| AdaptMD
-
     %% =========================================
     %% JUDGE EVALUATION PIPELINE
     %% =========================================
@@ -394,11 +380,9 @@ flowchart TB
         Layer1["Layer 1 — Identity<br/>(static, never changes)"]
         Layer2["Layer 2 — Narrative<br/>(auto-built from<br/>SharedMemory +<br/>execution path)"]
         Layer3["Layer 3 — Focus<br/>(current node's<br/>system_prompt)"]
-        InlinedAdapt["adapt.md inlined<br/>(survives compaction)"]
     end

     SharedMem -->|"read_all()"| Layer2
-    AdaptMD -->|"inlined every turn"| InlinedAdapt

     %% =========================================
     %% NEXT ITERATION
@@ -417,7 +401,6 @@ flowchart TB
     %% =========================================
     %% STYLING
     %% =========================================
-    style AdaptMD fill:#e8f5e9
     style PromptOnion fill:#e3f2fd
     style JudgePipeline fill:#fff3e0
     style ConvHistory fill:#f3e5f5
@@ -425,17 +408,15 @@ flowchart TB
|
||||
|
||||
### How It Works
|
||||
|
||||
**1. Outputs trigger dual persistence.** When the LLM calls `set_output(key, value)`, two things happen simultaneously: the `OutputAccumulator` stores the value in memory and writes through to the `ConversationStore` cursor (for crash recovery), and `_record_learning()` appends a truncated entry (≤500 chars) to `adapt.md` under an `## Outputs` section. Duplicate keys are updated in-place, not appended.
|
||||
**1. Outputs are persisted via the accumulator.** When the LLM calls `set_output(key, value)`, the `OutputAccumulator` stores the value in memory and writes through to the `ConversationStore` cursor (for crash recovery).

**2. adapt.md is the agent's durable working memory.** It is seeded on first run with identity and account info. The agent can also write to it directly via `save_data("adapt.md", ...)` or `edit_data("adapt.md", ...)` — storing user rules, behavioral constraints, preferences, and working notes. Unlike conversation history, `adapt.md` is inlined directly into the system prompt every turn, so it survives all compaction tiers including emergency compaction. It is the last thing standing when context is tight.
**2. Judge feedback becomes conversation memory.** When the judge issues a RETRY verdict with feedback, that feedback is injected as a `[Judge feedback]: ...` user message into the conversation. On the next LLM turn, the agent sees its prior attempt, the judge's critique, and can adjust. This is the core reflexion mechanism — in-context learning without model retraining.

**3. Judge feedback becomes conversation memory.** When the judge issues a RETRY verdict with feedback, that feedback is injected as a `[Judge feedback]: ...` user message into the conversation. On the next LLM turn, the agent sees its prior attempt, the judge's critique, and can adjust. This is the core reflexion mechanism — in-context learning without model retraining.
**3. The three-layer prompt onion refreshes each turn.** Layer 1 (identity) is static. Layer 2 (narrative) is rebuilt deterministically from `SharedMemory.read_all()` and the execution path — listing completed phases and current state values. Layer 3 (focus) is the current node's `system_prompt`. At phase transitions in continuous mode, Layer 3 swaps while Layers 1-2 and the full conversation history carry forward.
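
As a rough sketch of that composition (the `read_all()` call and node shape follow the description; everything else here is illustrative, not the real API):

```python
# Illustrative sketch of the three-layer composition described above.
def compose_system_prompt(identity: str, shared_memory, node) -> str:
    state = shared_memory.read_all()  # structured decisions from prior phases
    narrative = "\n".join(f"- {key}: {value}" for key, value in state.items())
    return "\n\n".join([
        identity,                      # Layer 1: static, never changes
        f"## Progress\n{narrative}",   # Layer 2: rebuilt deterministically each turn
        node.system_prompt,            # Layer 3: swapped at phase transitions
    ])
```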

**4. The three-layer prompt onion refreshes each turn.** Layer 1 (identity) is static. Layer 2 (narrative) is rebuilt deterministically from `SharedMemory.read_all()` and the execution path — listing completed phases and current state values. Layer 3 (focus) is the current node's `system_prompt`. At phase transitions in continuous mode, Layer 3 swaps while Layers 1-2 and the full conversation history carry forward.
**4. Phase transitions inject structured reflection.** When execution moves between nodes, a transition marker is inserted into the conversation containing: what phase completed, all outputs in memory, available data files, available tools, and an explicit reflection prompt: *"Before proceeding, briefly reflect: what went well in the previous phase? Are there any gaps or surprises worth noting?"* This engineered metacognition surfaces issues before they compound.

**5. Phase transitions inject structured reflection.** When execution moves between nodes, a transition marker is inserted into the conversation containing: what phase completed, all outputs in memory, available data files, agent memory content, available tools, and an explicit reflection prompt: *"Before proceeding, briefly reflect: what went well in the previous phase? Are there any gaps or surprises worth noting?"* This engineered metacognition surfaces issues before they compound.

**6. Shared memory connects phases.** On ACCEPT, the accumulator's outputs are written to `SharedMemory`. The narrative layer reads these values to describe progress. In continuous mode, subsequent nodes see both the conversation history (what was discussed) and the structured memory (what was decided). In isolated mode, a `ContextHandoff` summarizes the prior node's conversation for the next node's input.
**5. Shared memory connects phases.** On ACCEPT, the accumulator's outputs are written to `SharedMemory`. The narrative layer reads these values to describe progress. In continuous mode, subsequent nodes see both the conversation history (what was discussed) and the structured memory (what was decided). In isolated mode, a `ContextHandoff` summarizes the prior node's conversation for the next node's input.
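
Putting the judge-feedback and shared-memory points together, the loop looks roughly like this. The verdict names come from the text; the worker/judge call shapes are placeholders, not the framework's actual interfaces:

```python
# Illustrative reflexion loop; RETRY/ACCEPT come from the text, call shapes are assumed.
for _ in range(max_retries):
    attempt = worker.run(conversation)
    verdict = judge.evaluate(attempt)
    if verdict.kind == "ACCEPT":
        shared_memory.update(accumulator.values)  # outputs persist across phases
        break
    if verdict.kind == "RETRY":
        # The critique lands in the same context the next turn reads.
        conversation.append(
            {"role": "user", "content": f"[Judge feedback]: {verdict.feedback}"}
        )
```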

### The Judge Evaluation Pipeline

@@ -1078,7 +1059,7 @@ class SignalWeights:
| **Rule Generation** | Research | Transforming human decisions into deterministic rules (closing the loop) |
| **HybridJudge** | Engineering | Implementation of triangulation with priority-ordered evaluation |
| **Reflexion Loop** | Engineering | Worker-Judge architecture with RETRY/REPLAN/ESCALATE |
| **Memory Reflection** | Engineering | adapt.md durable memory, 3-layer prompt onion, judge feedback injection |
| **Memory Reflection** | Engineering | 3-layer prompt onion, judge feedback injection, shared memory |
| **Graph Execution** | Engineering | Node composition, shared memory, edge traversal, sub-agent delegation |
| **HITL Protocol** | Engineering | Pause/resume, approval workflows, escalation handling |

@@ -1096,7 +1077,7 @@ The Hive Agent Framework addresses the fundamental reliability crisis in agentic

4. **The Foundation**: Goal-driven architecture ensures we're optimizing for user intent, not metric gaming. The reflexion loop between Worker Bees and Judge enables learning from failure without expensive search.

5. **The Memory System**: Agents reflect through four mechanisms — `adapt.md` (durable working memory inlined into the system prompt, surviving all compaction), the conversation history (carrying judge feedback as injected user messages), the three-layer prompt onion (identity → narrative → focus, rebuilt each turn from shared memory), and structured phase transition markers with explicit reflection prompts at node boundaries.
5. **The Memory System**: Agents reflect through three mechanisms — the conversation history (carrying judge feedback as injected user messages), the three-layer prompt onion (identity → narrative → focus, rebuilt each turn from shared memory), and structured phase transition markers with explicit reflection prompts at node boundaries.

6. **The Learning Path**: Human escalations aren't just fallbacks—they're training signals. Confidence calibration tunes thresholds automatically. Rule generation transforms repeated human decisions into deterministic automation.


@@ -58,6 +58,25 @@ The framework supports 100+ LLM providers through [LiteLLM](https://docs.litellm

### Provider Examples

Natively supported providers (DeepSeek, Mistral, Together AI, xAI, Perplexity):

```json
{
  "llm": {
    "provider": "deepseek",
    "model": "deepseek-chat",
    "max_tokens": 8192,
    "api_key_env_var": "DEEPSEEK_API_KEY"
  }
}
```

Notes:

- Set `provider` to `deepseek` (or `mistral`, `together`, `xai`, `perplexity`)
- Use the standard model name in `model`, for example `deepseek-chat`
- **No `api_base` is required** for these natively supported providers
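
As a rough illustration of how such a config maps onto a LiteLLM call (the framework's actual plumbing is not shown in this diff; the `config.json` filename and the `provider/model` string convention are assumptions about LiteLLM usage):

```python
import json
import os

import litellm

# Illustrative only: mirrors the JSON config above.
with open("config.json") as f:
    cfg = json.load(f)["llm"]

response = litellm.completion(
    model=f"{cfg['provider']}/{cfg['model']}",   # e.g. "deepseek/deepseek-chat"
    api_key=os.environ[cfg["api_key_env_var"]],
    max_tokens=cfg["max_tokens"],
    messages=[{"role": "user", "content": "ping"}],
)
```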

OpenRouter:

```json

@@ -884,14 +884,13 @@ Phase 0 and Phase 1 can proceed in parallel — default skills depend on the pro
| Q1 | Should the registry repo live under `aden-hive` org or a shared `agentskills` org? | Platform | Open |
| Q2 | Should default skill protocols be adaptive (e.g., `hive.batch-ledger` adjusts checkpoint frequency based on item size)? | Engineering | Open |
| Q3 | Should default skills be tunable per-node (not just per-agent)? | Engineering | Open |
| Q4 | How should default skill protocols interact with existing `adapt.md` working memory? Should `_working_notes` replace or supplement it? | Engineering | Open |
| Q5 | Should `hive.quality-monitor` self-assessments feed into judge decisions (auto-trigger RETRY on self-reported degradation)? | Engineering | Open |
| Q6 | What is the right combined token budget for default skill prompts? 2000 tokens proposed — configurable or fixed? | Engineering | Open |
| Q7 | Should Hive support subagent delegation for skill execution (run skill in isolated session, return summary)? | Engineering | Open |
| Q8 | Should Hive also scan `.claude/skills/` for pragmatic compatibility with Claude Code's native skill location? | Engineering | Open |
| Q9 | What is the process for promoting a `community` skill to `verified`? | Platform + Security | Open |
| Q10 | Should the registry support private/enterprise skill indexes (`hive skill config --index-url`)? | Platform | Open |
| Q11 | Should `hive skill test` use the official `skills-ref` library or a Hive-native implementation? | Engineering | Open |
| Q4 | Should `hive.quality-monitor` self-assessments feed into judge decisions (auto-trigger RETRY on self-reported degradation)? | Engineering | Open |
| Q5 | What is the right combined token budget for default skill prompts? 2000 tokens proposed — configurable or fixed? | Engineering | Open |
| Q6 | Should Hive support subagent delegation for skill execution (run skill in isolated session, return summary)? | Engineering | Open |
| Q7 | Should Hive also scan `.claude/skills/` for pragmatic compatibility with Claude Code's native skill location? | Engineering | Open |
| Q8 | What is the process for promoting a `community` skill to `verified`? | Platform + Security | Open |
| Q9 | Should the registry support private/enterprise skill indexes (`hive skill config --index-url`)? | Platform | Open |
| Q10 | Should `hive skill test` use the official `skills-ref` library or a Hive-native implementation? | Engineering | Open |
| Q12 | How should skill-level telemetry (activation counts, eval pass rates) be collected without compromising privacy? | Product + Privacy | Open |

---

@@ -70,12 +70,14 @@ from .brevo import BREVO_CREDENTIALS
from .browser import get_aden_auth_url, get_aden_setup_url, open_browser
from .calcom import CALCOM_CREDENTIALS
from .calendly import CALENDLY_CREDENTIALS
from .cloudflare import CLOUDFLARE_CREDENTIALS
from .cloudinary import CLOUDINARY_CREDENTIALS
from .confluence import CONFLUENCE_CREDENTIALS
from .databricks import DATABRICKS_CREDENTIALS
from .discord import DISCORD_CREDENTIALS
from .docker_hub import DOCKER_HUB_CREDENTIALS
from .email import EMAIL_CREDENTIALS
from .freshdesk import FRESHDESK_CREDENTIALS
from .gcp_vision import GCP_VISION_CREDENTIALS
from .github import GITHUB_CREDENTIALS
from .gitlab import GITLAB_CREDENTIALS
@@ -159,6 +161,7 @@ CREDENTIAL_SPECS = {
    **BREVO_CREDENTIALS,
    **CALCOM_CREDENTIALS,
    **CALENDLY_CREDENTIALS,
    **CLOUDFLARE_CREDENTIALS,
    **CLOUDINARY_CREDENTIALS,
    **CONFLUENCE_CREDENTIALS,
    **DATABRICKS_CREDENTIALS,
@@ -219,6 +222,7 @@ CREDENTIAL_SPECS = {
    **ZENDESK_CREDENTIALS,
    **ZOHO_CRM_CREDENTIALS,
    **ZOOM_CREDENTIALS,
    **FRESHDESK_CREDENTIALS,
}

__all__ = [
@@ -252,6 +256,7 @@ __all__ = [
    "BREVO_CREDENTIALS",
    "CALCOM_CREDENTIALS",
    "CALENDLY_CREDENTIALS",
    "CLOUDFLARE_CREDENTIALS",
    "CLOUDINARY_CREDENTIALS",
    "CONFLUENCE_CREDENTIALS",
    "DATABRICKS_CREDENTIALS",
@@ -312,4 +317,5 @@ __all__ = [
    "ZENDESK_CREDENTIALS",
    "ZOHO_CRM_CREDENTIALS",
    "ZOOM_CREDENTIALS",
    "FRESHDESK_CREDENTIALS",
]

@@ -0,0 +1,75 @@
from aden_tools.credentials import CredentialSpec

CLOUDFLARE_CREDENTIALS = {
    "cloudflare": CredentialSpec(
        env_var="CLOUDFLARE_API_TOKEN",
        description=(
            "Cloudflare API token (DNS/Zone Read-Write). Provide via env var or credential store."
        ),
        required=True,
        help_url="https://dash.cloudflare.com/profile/api-tokens",
        credential_id="cloudflare_api_token",
        api_key_instructions=(
            "Create an API token with Zone/DNS read and edit permissions: "
            "https://dash.cloudflare.com/profile/api-tokens"
        ),
        direct_api_key_supported=True,
        health_check_endpoint="https://api.cloudflare.com/client/v4/user/tokens/verify",
        tools=[
            "cloudflare_list_zones",
            "cloudflare_get_zone",
            "cloudflare_get_zone_settings",
            "cloudflare_list_zone_custom_pages",
            "cloudflare_get_ssl_verification",
            "cloudflare_list_zone_certificates",
            "cloudflare_list_zone_subscriptions",
            "cloudflare_get_dnssec_status",
            "cloudflare_update_zone_setting",
            "cloudflare_list_dns_records",
            "cloudflare_get_dns_record",
            "cloudflare_list_dns_record_scan",
            "cloudflare_get_dns_settings",
            "cloudflare_list_dns_analytics_report",
            "cloudflare_check_domain_dns_health",
            "cloudflare_create_dns_record",
            "cloudflare_update_dns_record",
            "cloudflare_delete_dns_record",
            "cloudflare_get_zone_analytics",
            "cloudflare_get_top_analytics",
            "cloudflare_get_security_analytics",
            "cloudflare_get_cache_analytics",
            "cloudflare_get_performance_analytics",
            "cloudflare_get_http_analytics_report",
            "cloudflare_list_firewall_events",
            "cloudflare_get_security_settings",
            "cloudflare_list_page_rules",
            "cloudflare_list_waf_rulesets",
            "cloudflare_get_bot_management_settings",
            "cloudflare_list_managed_transforms",
            "cloudflare_get_ddos_protection_settings",
            "cloudflare_create_firewall_rule",
            "cloudflare_delete_firewall_rule",
            "cloudflare_get_speed_settings",
            "cloudflare_get_cache_settings",
            "cloudflare_get_http_config",
            "cloudflare_get_network_settings",
            "cloudflare_purge_cache_all",
            "cloudflare_purge_cache_files",
            "cloudflare_list_advanced_services",
            "cloudflare_list_accounts",
            "cloudflare_get_account_details",
            "cloudflare_list_account_members",
            "cloudflare_invite_account_member",
            "cloudflare_delete_account_member",
            "cloudflare_list_custom_hostnames",
            "cloudflare_list_audit_logs",
            "cloudflare_list_firewall_rules",
            "cloudflare_list_access_applications",
            "cloudflare_list_r2_buckets",
            "cloudflare_list_pages_projects",
            "cloudflare_create_access_policy",
            "cloudflare_create_worker_route",
            "cloudflare_set_ssl_mode",
        ],
    )
}
@@ -0,0 +1,67 @@
"""
Freshdesk credentials.

Contains credentials for Freshdesk API v2 (tickets, contacts, agents, groups).
Requires FRESHDESK_API_KEY and FRESHDESK_DOMAIN (or credential store equivalents).
"""

from .base import CredentialSpec

# Shared tool coverage for both Freshdesk credentials.
_FRESHDESK_TOOLS = [
    "freshdesk_list_tickets",
    "freshdesk_filter_tickets",
    "freshdesk_list_ticket_conversations",
    "freshdesk_get_ticket",
    "freshdesk_create_ticket",
    "freshdesk_update_ticket",
    "freshdesk_add_ticket_reply",
    "freshdesk_list_contacts",
    "freshdesk_get_contact",
    "freshdesk_create_contact",
    "freshdesk_update_contact",
    "freshdesk_list_agents",
    "freshdesk_get_agent",
    "freshdesk_list_groups",
    "freshdesk_get_group",
    "freshdesk_list_companies",
    "freshdesk_get_company",
]

# Credential specs for Freshdesk API key and domain.
FRESHDESK_CREDENTIALS = {
    # API key used as Basic auth username (`api_key:X`).
    "freshdesk": CredentialSpec(
        env_var="FRESHDESK_API_KEY",
        tools=_FRESHDESK_TOOLS,
        required=True,
        startup_required=False,
        help_url="https://support.freshdesk.com/en/support/solutions/articles/215517-how-to-find-your-api-key",
        description="Freshdesk API key for ticket, contact, agent, and group management",
        direct_api_key_supported=True,
        api_key_instructions="""To get a Freshdesk API key:
1. Log in to Freshdesk
2. Click your profile avatar (top-right) and select Profile Settings
3. Copy the value under 'Your API Key'
4. Set the environment variable:
export FRESHDESK_API_KEY=your-api-key""",
        health_check_endpoint="",
        credential_id="freshdesk",
        credential_key="api_key",
    ),
    # Freshdesk tenant hostname used to build API base URLs.
    "freshdesk_domain": CredentialSpec(
        env_var="FRESHDESK_DOMAIN",
        tools=_FRESHDESK_TOOLS,
        required=True,
        startup_required=False,
        help_url="https://developers.freshdesk.com/api/#introduction",
        description="Freshdesk hostname (e.g. acme.freshdesk.com). Required with API key.",
        direct_api_key_supported=True,
        api_key_instructions="""Set your Freshdesk hostname:
export FRESHDESK_DOMAIN=acme.freshdesk.com""",
        health_check_endpoint="",
        credential_id="freshdesk_domain",
        credential_key="domain",
    ),
}
@@ -1307,6 +1307,11 @@ class YouTubeHealthChecker(BaseHttpHealthChecker):
    AUTH_QUERY_PARAM_NAME = "key"


class CloudflareHealthChecker(BaseHttpHealthChecker):
    ENDPOINT = "https://api.cloudflare.com/client/v4/user/tokens/verify"
    SERVICE_NAME = "Cloudflare"


# Registry of health checkers
HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
    "apify": ApifyHealthChecker(),
@@ -1317,6 +1322,7 @@ HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
    "brevo": BrevoHealthChecker(),
    "calcom": CalcomHealthChecker(),
    "calendly_pat": CalendlyHealthChecker(),
    "cloudflare": CloudflareHealthChecker(),
    "discord": DiscordHealthChecker(),
    "docker_hub": DockerHubHealthChecker(),
    "exa_search": ExaSearchHealthChecker(),

@@ -44,6 +44,7 @@ from .brevo_tool import register_tools as register_brevo
from .calcom_tool import register_tools as register_calcom
from .calendar_tool import register_tools as register_calendar
from .calendly_tool import register_tools as register_calendly
from .cloudflare_tool import register_tools as register_cloudflare
from .cloudinary_tool import register_tools as register_cloudinary
from .confluence_tool import register_tools as register_confluence
from .csv_tool import register_tools as register_csv
@@ -70,6 +71,7 @@ from .file_system_toolkits.list_dir import register_tools as register_list_dir
from .file_system_toolkits.replace_file_content import (
    register_tools as register_replace_file_content,
)
from .freshdesk_tool import register_tools as register_freshdesk
from .github_tool import register_tools as register_github
from .gitlab_tool import register_tools as register_gitlab
from .gmail_tool import register_tools as register_gmail
@@ -252,6 +254,7 @@ def _register_unverified(
    register_stripe(mcp, credentials=credentials)
    register_postgres(mcp, credentials=credentials)
    register_calendly(mcp, credentials=credentials)
    register_cloudflare(mcp, credentials=credentials)
    register_cloudinary(mcp, credentials=credentials)
    register_confluence(mcp, credentials=credentials)
    register_databricks(mcp, credentials=credentials)
@@ -297,6 +300,7 @@ def _register_unverified(
    register_zendesk(mcp, credentials=credentials)
    register_zoho_crm(mcp, credentials=credentials)
    register_zoom(mcp, credentials=credentials)
    register_freshdesk(mcp, credentials=credentials)


def register_all_tools(

@@ -0,0 +1,271 @@
# Cloudflare DNS/Zone Management Tool

Provides comprehensive Cloudflare DNS/Zone management tools for agents to inspect domains and DNS records, manage infrastructure, and diagnose DNS configuration issues.

## Features

- **Zone Management**: List zones, get details, and manage 30+ settings (SSL, IPv6, WebSockets, etc.).
- **DNS Management**: Create, update, delete, and list DNS records (A, CNAME, TXT, MX, etc.).
- **Security & Firewall**: Manage firewall rules, WAF rulesets, Bot management, and Zero Trust Access policies.
- **Analytics & Metrics**: Get traffic (bandwidth), security (threats), cache, and performance analytics.
- **Performance & Cache**: Purge everything or specific files from cache, manage speed settings (Minify, Brotli).
- **Ecosystem Support**: List R2 Buckets, Pages Projects, and manage Workers routing.
- **Diagnostics**: Specialized DNS health diagnosis for domains with structured troubleshooting output.

## Authentication

Requires a Cloudflare API token with the following permissions:

### Recommended Permissions

- `Zone:Read`, `Zone:Edit` — zone information and settings
- `DNS:Read`, `DNS:Edit` — DNS records management
- `Account:Read`, `Account:Edit` — (Optional) account members and R2/Pages listing
- `Analytics:Read` — Analytics dashboards

### Setup

1. Navigate to [Cloudflare API Tokens](https://dash.cloudflare.com/profile/api-tokens)
2. Create/configure an API token with appropriate permissions.
3. Set the environment variable:
```bash
export CLOUDFLARE_API_TOKEN="your_api_token_here"
```
_Note: The token can also be configured via the Aden Credential Store._
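
To confirm the token works before wiring it into an agent, the verify endpoint (the same one used by the health checker) can be called directly. A minimal sketch using `httpx`, which this repo already depends on:

```python
import os

import httpx

# Token verification; endpoint taken from the health check configuration above.
resp = httpx.get(
    "https://api.cloudflare.com/client/v4/user/tokens/verify",
    headers={"Authorization": f"Bearer {os.environ['CLOUDFLARE_API_TOKEN']}"},
    timeout=30.0,
)
print(resp.json().get("success"))  # True for a valid, active token
```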

## Key Tools Summary

### Infrastructure & Zones

- `cloudflare_list_zones`: List zones in the account.
- `cloudflare_get_zone_settings`: Read 30+ common zone settings.
- `cloudflare_update_zone_setting`: Update specific settings (IPv6, settings IDs, etc.).
- `cloudflare_set_ssl_mode`: Quick toggle for SSL modes (Strict, Flexible, etc.).

### DNS Operations

- `cloudflare_list_dns_records`: List and filter DNS records.
- `cloudflare_create_dns_record`: Add new A, CNAME, TXT, etc. records.
- `cloudflare_update_dns_record`: Modify existing records.
- `cloudflare_delete_dns_record`: Remove records permanently.

### Analytics & Health

- `cloudflare_get_zone_analytics`: Get last 24h traffic/threat stats.
- `cloudflare_check_domain_dns_health`: Deep diagnostic check for domain misconfigurations.
- `cloudflare_get_http_analytics_report`: Detailed status code and content type distribution.

### Security

- `cloudflare_create_firewall_rule`: Create custom blocking/allow rules.
- `cloudflare_list_waf_rulesets`: View modern WAF configuration.
- `cloudflare_create_access_policy`: Set Zero Trust Access policies.

### Cache & Performance

- `cloudflare_purge_cache_all`: Clear the entire zone cache.
- `cloudflare_purge_cache_files`: Clear specific URLs from cache.
- `cloudflare_get_speed_settings`: Check Minify, Brotli, and Rocket Loader status.

### Account & Advanced

- `cloudflare_list_accounts`: List all accessible accounts.
- `cloudflare_invite_account_member`: Manage team access.
- `cloudflare_list_r2_buckets`: Overview of R2 storage.
- `cloudflare_create_worker_route`: Bind Worker scripts to URL patterns.

---

_Generated for the Model Context Protocol (MCP) as part of the Aden Tools suite._

### `cloudflare_get_zone`

Get details for a specific zone.

**Parameters:**

- `zone_id` (str, required): Zone ID (32-character hex string)

**Returns:**

```json
{
  "id": "023e105f4ecef8ad9ca31a8372d0c353",
  "name": "example.com",
  "status": "active",
  "name_servers": ["ns1.cloudflare.com", "ns2.cloudflare.com"],
  "created_on": "2014-01-01T23:27:06.000Z",
  "modified_on": "2014-07-10T05:35:15.000Z",
  "plan": "pro",
  "type": "full"
}
```

### `cloudflare_list_dns_records`

List DNS records for a zone.

**Parameters:**

- `zone_id` (str, required): Zone ID
- `name` (str, optional): Filter by DNS record name
- `type` (str, optional): Filter by record type (A, AAAA, CNAME, MX, TXT, etc.)
- `page` (int, default=1): Page number for pagination
- `per_page` (int, default=20): Results per page (max 100)

**Returns:**

```json
{
  "records": [
    {
      "id": "372e67954025e0ba6aaa6d586b9e0b59",
      "type": "A",
      "name": "example.com",
      "content": "192.0.2.1",
      "ttl": 3600,
      "proxied": true,
      "priority": null
    }
  ],
  "zone_id": "023e105f4ecef8ad9ca31a8372d0c353",
  "page": 1,
  "per_page": 20,
  "total": 1
}
```

### `cloudflare_get_dns_record`

Get a specific DNS record by ID.

**Parameters:**

- `zone_id` (str, required): Zone ID
- `record_id` (str, required): DNS record ID

**Returns:**

```json
{
  "id": "372e67954025e0ba6aaa6d586b9e0b59",
  "type": "A",
  "name": "example.com",
  "content": "192.0.2.1",
  "ttl": 3600,
  "proxied": true,
  "priority": null,
  "created_on": "2014-01-01T23:28:48.000Z",
  "modified_on": "2014-07-10T05:35:15.000Z"
}
```

### `cloudflare_check_domain_dns_health`

Perform a comprehensive DNS health check for a domain, identifying common configuration issues.

**Parameters:**

- `domain` (str, required): Domain name (e.g., "example.com")

**Returns:**

```json
{
  "domain": "example.com",
  "zone_found": true,
  "zone_id": "023e105f4ecef8ad9ca31a8372d0c353",
  "zone_status": "active",
  "root_records": [
    {
      "id": "372e67954025e0ba6aaa6d586b9e0b59",
      "type": "A",
      "name": "example.com",
      "content": "192.0.2.1",
      "ttl": 3600,
      "proxied": false
    }
  ],
  "www_records": [
    {
      "id": "372e67954025e0ba6aaa6d586b9e0b60",
      "type": "A",
      "name": "www.example.com",
      "content": "192.0.2.1",
      "ttl": 3600,
      "proxied": false
    }
  ],
  "mx_records": [],
  "ns_records": [],
  "total_records": 2,
  "issues": [
    {
      "code": "MX_MISSING",
      "message": "No MX records configured for example.com"
    }
  ],
  "summary": "Zone is active. Found 2 DNS records. Issues detected: MX_MISSING."
}
```

## Common Issue Codes

- `ZONE_NOT_FOUND` — Zone not found for the domain in Cloudflare
- `ZONE_INACTIVE` — Zone status is not "active"
- `ROOT_MISSING` — No A/AAAA records for root domain
- `WWW_MISSING` — No www subdomain DNS record
- `MX_MISSING` — No MX records configured
- `PROXY_INVALID` — Proxied record has no valid target
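
A sketch of how an agent might branch on these codes (the repair step is illustrative only; `192.0.2.1` is a documentation address, not a real origin, and the `create_dns_record` arguments are assumed from the tool summaries above):

```python
# Illustrative handling of the health check's issue codes.
health = mcp.tools["cloudflare_check_domain_dns_health"](domain="example.com")
for issue in health.get("issues", []):
    if issue["code"] == "ROOT_MISSING":
        # Example repair: point the apex at a known origin (placeholder IP).
        mcp.tools["cloudflare_create_dns_record"](
            zone_id=health["zone_id"],
            type="A",
            name="example.com",
            content="192.0.2.1",
        )
    elif issue["code"] == "ZONE_INACTIVE":
        print(f"Zone needs attention: {issue['message']}")
```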

## Usage Examples

```python
# List all zones
zones = mcp.tools["cloudflare_list_zones"](page=1, per_page=20)

# Get zone details
zone = mcp.tools["cloudflare_get_zone"](zone_id="023e105f4ecef8ad9ca31a8372d0c353")

# List DNS records for a zone
records = mcp.tools["cloudflare_list_dns_records"](
    zone_id="023e105f4ecef8ad9ca31a8372d0c353",
    type="A"
)

# Check DNS health for a domain
health = mcp.tools["cloudflare_check_domain_dns_health"](domain="example.com")
```

## Error Handling

All tools return structured error dictionaries on failure:

```json
{
  "error": "Unauthorized - invalid or missing CLOUDFLARE_API_TOKEN",
  "status_code": 401
}
```

Common error codes:

- `401` — Invalid or missing credentials
- `403` — Insufficient permissions
- `404` — Resource not found
- `429` — Rate limited (check `retry_after` header)
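
One way a caller might respond to these codes is a backoff wrapper around any of the tools. This helper is hypothetical and not part of the tool suite itself:

```python
import time

def call_with_backoff(tool, max_attempts: int = 3, **kwargs):
    """Hypothetical retry helper for the structured error dicts shown above."""
    result = {}
    for attempt in range(max_attempts):
        result = tool(**kwargs)
        if result.get("status_code") != 429:   # only retry on rate limiting
            return result
        time.sleep(2 ** attempt)               # simple exponential backoff
    return result
```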

## Implementation Notes

- Tools encompass both resource query (GET) and update/create (POST/PATCH/DELETE) operations
- Credentials are retrieved from the `CLOUDFLARE_API_TOKEN` environment variable
- Requests are validated and sanitized for security
- Responses are normalized into compact, agent-friendly objects
- Pagination defaults to 20 results, maximum 100 per page
- API timeout is 30 seconds

## Files

- `cloudflare_tool.py` — Main tool implementation
- `__init__.py` — Package export
- `README.md` — This documentation
@@ -0,0 +1,5 @@
"""Cloudflare DNS/Zone management tool package for Aden Tools."""

from .cloudflare_tool import register_tools

__all__ = ["register_tools"]
File diff suppressed because it is too large
@@ -0,0 +1,109 @@
# Freshdesk Tool

Ticket, contact, agent, and group management via the Freshdesk API v2.

## Description

This tool integrates with Freshdesk’s REST API v2 to list, create, and update tickets; manage contacts, agents, groups, and companies; and add public replies or private notes. Use it when an agent needs to query or modify support data in Freshdesk. Domain and API key are resolved from the credential store or environment; tools do not accept a domain parameter.

## Setup

Both API key and domain are required. Configure once via environment or credential store.

```bash
export FRESHDESK_API_KEY=your-freshdesk-api-key
export FRESHDESK_DOMAIN=acme.freshdesk.com
```

- **API key:** Profile → Profile Settings → Your API Key. Set `FRESHDESK_API_KEY` (or store as `freshdesk` in the credential store).
- **Domain:** Your Freshdesk hostname (e.g. `acme.freshdesk.com`). Set `FRESHDESK_DOMAIN` (or store as `freshdesk_domain`).

## Environment Variables

| Variable | Required | Description |
|----------|----------|-------------|
| `FRESHDESK_API_KEY` | Yes | API key from Freshdesk profile settings |
| `FRESHDESK_DOMAIN` | Yes | Freshdesk hostname (e.g. `acme.freshdesk.com`) |

## Tools (17)

| Tool | Description |
|------|-------------|
| `freshdesk_list_tickets` | List tickets with optional filter and `updated_since` |
| `freshdesk_filter_tickets` | Search tickets by query (e.g. priority:3, status:2; pages 1-10 only per Freshdesk search API) |
| `freshdesk_list_ticket_conversations` | List replies and notes for a ticket |
| `freshdesk_get_ticket` | Get a single ticket by ID |
| `freshdesk_create_ticket` | Create a ticket from requester email, subject, description |
| `freshdesk_update_ticket` | Update ticket status/priority/tags; optionally add a note (via Notes API) |
| `freshdesk_add_ticket_reply` | Add a public reply (customer-visible) or private note (internal only) |
| `freshdesk_list_contacts` | List contacts, optionally filtered by email |
| `freshdesk_get_contact` | Get a contact by ID or email |
| `freshdesk_create_contact` | Create a contact with email/name/phone/company_id |
| `freshdesk_update_contact` | Update a contact (name, email, phone, company_id) |
| `freshdesk_list_agents` | List agents for routing/assignment logic |
| `freshdesk_get_agent` | Get a single agent by ID |
| `freshdesk_list_groups` | List groups for routing/assignment logic |
| `freshdesk_get_group` | Get a single group by ID |
| `freshdesk_list_companies` | List companies (with optional updated_since) |
| `freshdesk_get_company` | Get a single company by ID |

## Usage Examples

```python
freshdesk_list_tickets(
    page=1,
    per_page=30,
    updated_since="2026-03-01T00:00:00Z",
)
freshdesk_get_ticket(ticket_id=123)
freshdesk_create_ticket(
    email="user@example.com",
    subject="Cannot log in",
    description="User reports login failure on Chrome",
    priority=2,
    status=2,
    tags="login,bug",
)
freshdesk_update_ticket(
    ticket_id=123,
    status=3,
    note="Waiting for user to confirm the fix",
    note_private=True,
)
freshdesk_add_ticket_reply(
    ticket_id=123,
    body="We've deployed a fix. Can you try again?",
    public=True,
)
freshdesk_list_contacts(email="user@example.com")
freshdesk_create_contact(
    email="user@example.com",
    name="Jane Doe",
    phone="+1-555-1234",
)
freshdesk_add_ticket_reply(
    ticket_id=123,
    body="Internal: escalated to L2.",
    public=False,
)
freshdesk_list_agents()
freshdesk_list_groups()
```

## API behavior (Freshdesk v2)

- Public replies use the Reply endpoint; private notes use the Notes endpoint (tool chooses by `public`).
- `freshdesk_update_ticket` with `note`: ticket fields via PUT, then note via POST `/tickets/{id}/notes` (PUT does not accept notes). If the note POST fails, the result includes `"note_error"`.
- `freshdesk_filter_tickets` page range is 1-10 (Freshdesk search API limit); values outside this range are clamped.
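
For reference, the raw calls these behaviors wrap look roughly like this (`acme.freshdesk.com` and the key are placeholders; the endpoints match the Freshdesk API v2 docs linked below):

```python
import base64

import httpx

# Placeholder credentials; Basic auth uses the API key as username, "X" as password.
auth = base64.b64encode(b"YOUR_API_KEY:X").decode()
headers = {"Authorization": f"Basic {auth}", "Content-Type": "application/json"}

# Public reply -> Reply endpoint
httpx.post(
    "https://acme.freshdesk.com/api/v2/tickets/123/reply",
    headers=headers,
    json={"body": "We've deployed a fix."},
)

# Private note -> Notes endpoint (also used by freshdesk_update_ticket's note step)
httpx.post(
    "https://acme.freshdesk.com/api/v2/tickets/123/notes",
    headers=headers,
    json={"body": "Escalated to L2.", "private": True},
)
```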

## Error Handling

Tools return a dict with an `"error"` key (and optional `"help"`) on failure. Common cases:

- **Missing API key:** `"error": "FRESHDESK_API_KEY environment variable not set"` — set `FRESHDESK_API_KEY` or configure `freshdesk` in the credential store.
- **Missing domain:** `"error": "FRESHDESK_DOMAIN is required"` — set `FRESHDESK_DOMAIN` or configure `freshdesk_domain`.
- **Missing ID:** `"error": "ticket_id is required"` (or similar) — required ID parameter omitted.
- **No updates:** `"error": "At least one field (status, priority, tags, note) is required"` — `freshdesk_update_ticket` called with no updates.
- **API error:** `"error": "Freshdesk API error <status>: <message>"` — upstream Freshdesk error; see status and message for details.

See the [Freshdesk API v2 docs](https://developers.freshdesk.com/api/) for field semantics.
@@ -0,0 +1,5 @@
"""Freshdesk tool package for Aden Tools."""

from .freshdesk_tool import register_tools

__all__ = ["register_tools"]
@@ -0,0 +1,890 @@
"""
Freshdesk tool — tickets, contacts, agents, groups via Freshdesk API v2.

Use when an agent needs to query or modify support data in Freshdesk.
Auth: Basic (API key as username, X as password). Domain from credentials/env.
API: https://developers.freshdesk.com/api/
"""

from __future__ import annotations

import base64
import os
from typing import TYPE_CHECKING, Any

import httpx
from fastmcp import FastMCP

if TYPE_CHECKING:
    from aden_tools.credentials import CredentialStoreAdapter


def _get_api_key(credentials: CredentialStoreAdapter | None) -> str | None:
    """Return Freshdesk API key from credential store or env."""
    if credentials is not None:
        return credentials.get("freshdesk")
    return os.getenv("FRESHDESK_API_KEY")


def _get_domain(credentials: CredentialStoreAdapter | None) -> str | None:
    """Return Freshdesk domain from credential store or env (e.g. acme.freshdesk.com)."""
    if credentials is not None:
        value = credentials.get("freshdesk_domain")
        if value:
            return value.strip()
    value = os.getenv("FRESHDESK_DOMAIN")
    return value.strip() if value else None


def _base_url(domain: str) -> str:
    """Build base API URL from domain (e.g. acme.freshdesk.com)."""
    domain = domain.strip()
    if domain.startswith("https://"):
        domain = domain[len("https://") :]
    if domain.startswith("http://"):
        domain = domain[len("http://") :]
    return f"https://{domain}/api/v2"


def _auth_header(api_key: str) -> str:
    """Build Basic auth header for Freshdesk API key."""
    encoded = base64.b64encode(f"{api_key}:X".encode()).decode()
    return f"Basic {encoded}"


def _request(
    method: str,
    url: str,
    api_key: str,
    **kwargs: Any,
) -> dict[str, Any] | list[Any]:
    """Make a request to the Freshdesk API with standard error handling."""
    headers = kwargs.pop("headers", {})
    headers["Authorization"] = _auth_header(api_key)
    headers.setdefault("Content-Type", "application/json")
    headers.setdefault("Accept", "application/json")
    try:
        if method == "get":
            resp = httpx.get(
                url,
                headers=headers,
                timeout=30.0,
                **kwargs,
            )
        elif method == "post":
            resp = httpx.post(
                url,
                headers=headers,
                timeout=30.0,
                **kwargs,
            )
        elif method == "put":
            resp = httpx.put(
                url,
                headers=headers,
                timeout=30.0,
                **kwargs,
            )
        else:
            raise ValueError(f"Unsupported HTTP method: {method}")
        if resp.status_code == 401:
            return {"error": "Unauthorized. Check your Freshdesk API key."}
        if resp.status_code == 403:
            return {"error": "Forbidden. Check your Freshdesk permissions."}
        if resp.status_code == 404:
            return {"error": "Not found."}
        if resp.status_code == 429:
            return {"error": "Rate limited. Try again shortly."}
        if resp.status_code not in (200, 201, 202):
            return {"error": f"Freshdesk API error {resp.status_code}: {resp.text[:500]}"}
        try:
            return resp.json()
        except Exception:
            return {"error": "Failed to parse Freshdesk response"}
    except httpx.TimeoutException:
        return {"error": "Request to Freshdesk timed out"}
    except Exception as e:  # pragma: no cover
        return {"error": f"Freshdesk request failed: {e!s}"}


def _auth_error() -> dict[str, Any]:
    """Return standardized missing-API-key error payload."""
    return {
        "error": "FRESHDESK_API_KEY environment variable not set",
        "help": "Get your API key from Freshdesk profile settings and set "
        "FRESHDESK_API_KEY, or configure it via the credential store.",
    }


def _domain_error() -> dict[str, Any]:
    """Return standardized missing-domain error payload."""
    return {
        "error": "FRESHDESK_DOMAIN is required",
        "help": "Set FRESHDESK_DOMAIN (e.g. acme.freshdesk.com) or configure freshdesk_domain.",
    }


def _extract_ticket(ticket: dict[str, Any]) -> dict[str, Any]:
    """Normalize Freshdesk ticket into a compact dict."""
    return {
        "id": ticket.get("id"),
        "subject": ticket.get("subject", ""),
        "description": (ticket.get("description") or "")[:500],
        "status": ticket.get("status"),
        "priority": ticket.get("priority"),
        "type": ticket.get("type"),
        "tags": ticket.get("tags", []),
        "requester_id": ticket.get("requester_id"),
        "responder_id": ticket.get("responder_id"),
        "created_at": ticket.get("created_at"),
        "updated_at": ticket.get("updated_at"),
    }


def _extract_contact(contact: dict[str, Any]) -> dict[str, Any]:
    """Normalize Freshdesk contact into a compact dict."""
    return {
        "id": contact.get("id"),
        "name": contact.get("name"),
        "email": contact.get("email"),
        "phone": contact.get("phone"),
        "mobile": contact.get("mobile"),
        "company_id": contact.get("company_id"),
        "active": contact.get("active"),
        "created_at": contact.get("created_at"),
        "updated_at": contact.get("updated_at"),
    }


def _extract_agent(agent: dict[str, Any]) -> dict[str, Any]:
    """Normalize Freshdesk agent into a compact dict."""
    return {
        "id": agent.get("id"),
        "contact_id": agent.get("contact_id"),
        "email": agent.get("email"),
        "occasional": agent.get("occasional"),
        "available": agent.get("available"),
        "name": agent.get("contact", {}).get("name"),
    }


def _extract_group(group: dict[str, Any]) -> dict[str, Any]:
    """Normalize Freshdesk group into a compact dict."""
    return {
        "id": group.get("id"),
        "name": group.get("name"),
        "description": group.get("description"),
        "unassigned_for": group.get("unassigned_for"),
        "created_at": group.get("created_at"),
        "updated_at": group.get("updated_at"),
    }


def _extract_company(company: dict[str, Any]) -> dict[str, Any]:
    """Normalize Freshdesk company into a compact dict."""
    return {
        "id": company.get("id"),
        "name": company.get("name"),
        "description": company.get("description"),
        "domains": company.get("domains", []),
        "note": company.get("note"),
        "created_at": company.get("created_at"),
        "updated_at": company.get("updated_at"),
    }


def register_tools(
    mcp: FastMCP,
    credentials: CredentialStoreAdapter | None = None,
) -> None:
    """Register Freshdesk tools with the MCP server."""

    @mcp.tool()
    def freshdesk_list_tickets(
        page: int = 1,
        per_page: int = 30,
        filter: str | None = None,
        updated_since: str | None = None,
    ) -> dict[str, Any]:
        """
        List tickets in Freshdesk.

        Use this to get a page of tickets for digests, triage, or reporting.

        Args:
            page: Page number (1-based, default 1)
            per_page: Tickets per page (1-100, default 30)
            filter: Optional built-in filter name (e.g. "new_and_my_open")
            updated_since: Optional ISO8601 timestamp to list tickets updated since

        Returns:
            Dict with tickets list and count.
        """
        api_key = _get_api_key(credentials)
        if not api_key:
            return _auth_error()
        domain = _get_domain(credentials)
        if not domain:
            return _domain_error()

        url = f"{_base_url(domain)}/tickets"
        per_page_clamped = max(1, min(per_page, 100))
        params: dict[str, Any] = {"page": max(1, page), "per_page": per_page_clamped}
        if filter:
            params["filter"] = filter
        if updated_since:
            params["updated_since"] = updated_since

        data = _request("get", url, api_key, params=params)
        if "error" in data:
            return data

        tickets = [_extract_ticket(t) for t in data]
        return {"tickets": tickets, "count": len(tickets)}

    @mcp.tool()
    def freshdesk_filter_tickets(
        query: str,
        page: int = 1,
    ) -> dict[str, Any]:
        """
        Search/filter Freshdesk tickets by query.

        Use Freshdesk query syntax (e.g. "priority:3", "status:2"). API requires
        the query enclosed in double quotes; the tool adds them if missing.

        Args:
            query: Search query (e.g. "priority:3", "status:2 OR status:3");
                tool wraps in double quotes for API.
            page: Page number (1-10 for search API, default 1)

        Returns:
            Dict with results list, total count, and count on this page.
        """
        api_key = _get_api_key(credentials)
        if not api_key:
            return _auth_error()
        domain = _get_domain(credentials)
        if not domain:
            return _domain_error()
        if not query:
            return {"error": "query is required"}

        query_value = query.strip()
        if not (query_value.startswith('"') and query_value.endswith('"')):
            query_value = f'"{query_value}"'

        url = f"{_base_url(domain)}/search/tickets"
        params = {"query": query_value, "page": max(1, min(page, 10))}
        data = _request("get", url, api_key, params=params)
        if "error" in data:
            return data

        results = data.get("results", [])
        total = data.get("total", len(results))
        tickets = [_extract_ticket(t) for t in results]
        return {"tickets": tickets, "count": len(tickets), "total": total}

    @mcp.tool()
    def freshdesk_list_ticket_conversations(
        ticket_id: int,
        page: int = 1,
        per_page: int = 30,
    ) -> dict[str, Any]:
        """
        List conversations (replies and notes) for a Freshdesk ticket.

        Args:
            ticket_id: Freshdesk ticket ID (required)
            page: Page number (default 1)
            per_page: Items per page (default 30)

        Returns:
            Dict with conversations list and count.
        """
        api_key = _get_api_key(credentials)
        if not api_key:
            return _auth_error()
        domain = _get_domain(credentials)
        if not domain:
            return _domain_error()
        if not ticket_id:
            return {"error": "ticket_id is required"}

        url = f"{_base_url(domain)}/tickets/{ticket_id}/conversations"
        params = {"page": max(1, page), "per_page": max(1, min(per_page, 100))}
        data = _request("get", url, api_key, params=params)
        if "error" in data:
            return data

        raw = data if isinstance(data, list) else data.get("conversations", []) or []
        convos = [
            {
                "id": c.get("id"),
                "body_text": (c.get("body_text") or "")[:500],
                "private": c.get("private"),
                "user_id": c.get("user_id"),
                "created_at": c.get("created_at"),
            }
            for c in raw
        ]
        return {"conversations": convos, "count": len(convos)}

    @mcp.tool()
    def freshdesk_get_ticket(
        ticket_id: int,
    ) -> dict[str, Any]:
        """
        Get details about a specific Freshdesk ticket.

        Args:
            ticket_id: Freshdesk ticket ID (required)

        Returns:
            Dict with ticket details.
        """
        api_key = _get_api_key(credentials)
        if not api_key:
            return _auth_error()
        domain = _get_domain(credentials)
        if not domain:
            return _domain_error()
        if not ticket_id:
            return {"error": "ticket_id is required"}

        url = f"{_base_url(domain)}/tickets/{ticket_id}"
        data = _request("get", url, api_key)
        if "error" in data:
            return data

        return _extract_ticket(data)

    @mcp.tool()
    def freshdesk_create_ticket(
        email: str,
        subject: str,
        description: str,
        priority: int | None = None,
        status: int | None = None,
        tags: str = "",
    ) -> dict[str, Any]:
        """
        Create a new Freshdesk ticket.

        Args:
            email: Requester email address (required)
            subject: Ticket subject (required)
            description: Ticket description/first message (required)
            priority: Optional priority (1-4) as defined in Freshdesk
            status: Optional status (e.g. 2=open, 3=pending, 4=resolved)
            tags: Optional comma-separated tags

        Returns:
            Dict with created ticket id, subject, status, and URL.
        """
        api_key = _get_api_key(credentials)
        if not api_key:
            return _auth_error()
        domain = _get_domain(credentials)
        if not domain:
            return _domain_error()
        if not email or not subject or not description:
            return {"error": "email, subject, and description are required"}

        payload: dict[str, Any] = {
            "email": email,
            "subject": subject,
            "description": description,
        }
        if priority is not None:
            payload["priority"] = priority
        if status is not None:
            payload["status"] = status
        if tags:
            payload["tags"] = [t.strip() for t in tags.split(",") if t.strip()]

        url = f"{_base_url(domain)}/tickets"
        data = _request("post", url, api_key, json=payload)
        if "error" in data:
            return data

        ticket_id = data.get("id")
        return {
            "id": ticket_id,
            "subject": data.get("subject", ""),
            "status": data.get("status"),
            "url": f"https://{domain}/a/tickets/{ticket_id}" if ticket_id else None,
            "result": "created",
        }

    @mcp.tool()
    def freshdesk_update_ticket(
        ticket_id: int,
        status: int | None = None,
        priority: int | None = None,
        tags: str = "",
        note: str = "",
        note_private: bool = True,
    ) -> dict[str, Any]:
        """
        Update a Freshdesk ticket and optionally add a note.

        Args:
            ticket_id: Freshdesk ticket ID (required)
            status: Optional new status code
            priority: Optional new priority code
            tags: Optional comma-separated tags (replaces existing tags)
            note: Optional note to add to the ticket
            note_private: Whether the note is private (default True)

        Returns:
            Dict with updated ticket details.
        """
        api_key = _get_api_key(credentials)
        if not api_key:
            return _auth_error()
        domain = _get_domain(credentials)
        if not domain:
            return _domain_error()
        if not ticket_id:
            return {"error": "ticket_id is required"}

        updates: dict[str, Any] = {}
        if status is not None:
            updates["status"] = status
        if priority is not None:
            updates["priority"] = priority
        if tags:
            updates["tags"] = [t.strip() for t in tags.split(",") if t.strip()]

        if not updates and not note:
            return {"error": "At least one field (status, priority, tags, note) is required"}

        if updates:
            url = f"{_base_url(domain)}/tickets/{ticket_id}"
            data = _request("put", url, api_key, json=updates)
            if "error" in data:
                return data
            ticket_data = data
        else:
            url = f"{_base_url(domain)}/tickets/{ticket_id}"
            ticket_data = _request("get", url, api_key)
            if "error" in ticket_data:
                return ticket_data

        result = _extract_ticket(ticket_data)

        if note:
            notes_url = f"{_base_url(domain)}/tickets/{ticket_id}/notes"
            note_payload = {"body": note, "private": note_private}
            note_result = _request("post", notes_url, api_key, json=note_payload)
            if "error" in note_result:
                result["note_error"] = note_result["error"]

        return result

    @mcp.tool()
    def freshdesk_add_ticket_reply(
        ticket_id: int,
        body: str,
        public: bool = True,
        from_email: str | None = None,
    ) -> dict[str, Any]:
        """
        Add a reply to a Freshdesk ticket.

        Args:
            ticket_id: Freshdesk ticket ID (required)
            body: Reply body (required)
            public: Whether reply is visible to requester (default True)
            from_email: Optional agent email; if omitted, API key user is used

        Returns:
            Dict with reply metadata or error.
        """
        api_key = _get_api_key(credentials)
        if not api_key:
            return _auth_error()
        domain = _get_domain(credentials)
        if not domain:
            return _domain_error()
        if not ticket_id:
            return {"error": "ticket_id is required"}
        if not body:
            return {"error": "body is required"}

        if public:
            payload: dict[str, Any] = {"body": body}
            if from_email is not None:
                payload["from_email"] = from_email
            url = f"{_base_url(domain)}/tickets/{ticket_id}/reply"
        else:
            payload = {"body": body, "private": True}
            url = f"{_base_url(domain)}/tickets/{ticket_id}/notes"

        data = _request("post", url, api_key, json=payload)
        if "error" in data:
            return data

        return {
            "id": data.get("id"),
            "body": data.get("body") or data.get("body_text", ""),
            "public": public,
            "created_at": data.get("created_at"),
        }
|
||||
|
||||
@mcp.tool()
|
||||
def freshdesk_list_contacts(
|
||||
page: int = 1,
|
||||
per_page: int = 30,
|
||||
email: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
List contacts in Freshdesk, optionally filtered by email.
|
||||
|
||||
Args:
|
||||
page: Page number (1-based, default 1)
|
||||
per_page: Contacts per page (1-100, default 30)
|
||||
email: Optional email filter to return matching contacts
|
||||
|
||||
Returns:
|
||||
Dict with contacts list and count.
|
||||
"""
|
||||
api_key = _get_api_key(credentials)
|
||||
if not api_key:
|
||||
return _auth_error()
|
||||
domain = _get_domain(credentials)
|
||||
if not domain:
|
||||
return _domain_error()
|
||||
|
||||
url = f"{_base_url(domain)}/contacts"
|
||||
per_page_clamped = max(1, min(per_page, 100))
|
||||
params: dict[str, Any] = {"page": max(1, page), "per_page": per_page_clamped}
|
||||
if email:
|
||||
params["email"] = email
|
||||
|
||||
data = _request("get", url, api_key, params=params)
|
||||
if "error" in data:
|
||||
return data
|
||||
|
||||
contacts = [_extract_contact(c) for c in data]
|
||||
return {"contacts": contacts, "count": len(contacts)}
|
||||
|
||||
@mcp.tool()
|
||||
def freshdesk_get_contact(
|
||||
contact_id: int | None = None,
|
||||
email: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Get a Freshdesk contact by ID or email.
|
||||
|
||||
Args:
|
||||
contact_id: Contact ID (preferred)
|
||||
email: Contact email (used when contact_id is not provided)
|
||||
|
||||
Returns:
|
||||
Dict with contact details.
|
||||
"""
|
||||
api_key = _get_api_key(credentials)
|
||||
if not api_key:
|
||||
return _auth_error()
|
||||
domain = _get_domain(credentials)
|
||||
if not domain:
|
||||
return _domain_error()
|
||||
if not contact_id and not email:
|
||||
return {"error": "contact_id or email is required"}
|
||||
|
||||
if contact_id:
|
||||
url = f"{_base_url(domain)}/contacts/{contact_id}"
|
||||
data = _request("get", url, api_key)
|
||||
if "error" in data:
|
||||
return data
|
||||
return _extract_contact(data)
|
||||
|
||||
url = f"{_base_url(domain)}/contacts"
|
||||
params = {"email": email}
|
||||
data = _request("get", url, api_key, params=params)
|
||||
if "error" in data:
|
||||
return data
|
||||
if not data:
|
||||
return {"error": "Contact not found"}
|
||||
|
||||
contact = data[0] if isinstance(data, list) else data
|
||||
return _extract_contact(contact)
|
||||
|
||||
@mcp.tool()
|
||||
def freshdesk_create_contact(
|
||||
email: str,
|
||||
name: str | None = None,
|
||||
phone: str | None = None,
|
||||
company_id: int | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Create a new Freshdesk contact.
|
||||
|
||||
Args:
|
||||
email: Contact email (required)
|
||||
name: Optional name
|
||||
phone: Optional phone number
|
||||
company_id: Optional company ID
|
||||
|
||||
Returns:
|
||||
Dict with created contact details.
|
||||
"""
|
||||
api_key = _get_api_key(credentials)
|
||||
if not api_key:
|
||||
return _auth_error()
|
||||
domain = _get_domain(credentials)
|
||||
if not domain:
|
||||
return _domain_error()
|
||||
if not email:
|
||||
return {"error": "email is required"}
|
||||
|
||||
payload: dict[str, Any] = {"email": email}
|
||||
if name:
|
||||
payload["name"] = name
|
||||
if phone:
|
||||
payload["phone"] = phone
|
||||
if company_id is not None:
|
||||
payload["company_id"] = company_id
|
||||
|
||||
url = f"{_base_url(domain)}/contacts"
|
||||
data = _request("post", url, api_key, json=payload)
|
||||
if "error" in data:
|
||||
return data
|
||||
|
||||
return _extract_contact(data)
|
||||
|
||||
@mcp.tool()
def freshdesk_update_contact(
    contact_id: int,
    name: str | None = None,
    email: str | None = None,
    phone: str | None = None,
    company_id: int | None = None,
) -> dict[str, Any]:
    """
    Update a Freshdesk contact.

    Args:
        contact_id: Freshdesk contact ID (required)
        name: Optional new name
        email: Optional new email
        phone: Optional new phone
        company_id: Optional company ID (set to -1 to clear)

    Returns:
        Dict with updated contact details.
    """
    api_key = _get_api_key(credentials)
    if not api_key:
        return _auth_error()
    domain = _get_domain(credentials)
    if not domain:
        return _domain_error()
    if not contact_id:
        return {"error": "contact_id is required"}

    payload: dict[str, Any] = {}
    if name is not None:
        payload["name"] = name
    if email is not None:
        payload["email"] = email
    if phone is not None:
        payload["phone"] = phone
    if company_id is not None:
        payload["company_id"] = company_id
    if not payload:
        return {"error": "At least one field (name, email, phone, company_id) is required"}

    url = f"{_base_url(domain)}/contacts/{contact_id}"
    data = _request("put", url, api_key, json=payload)
    if "error" in data:
        return data
    return _extract_contact(data)

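# Illustrative sketch (hypothetical helper, not part of this module):
# unlike the create path, the update path tests `is not None`, so an
# explicit empty string does reach the PUT payload and can blank out a
# text field; omitted arguments leave the field untouched.
def _build_update_payload(name=None, email=None, phone=None, company_id=None):
    payload = {}
    if name is not None:
        payload["name"] = name
    if email is not None:
        payload["email"] = email
    if phone is not None:
        payload["phone"] = phone
    if company_id is not None:
        payload["company_id"] = company_id
    return payload

assert _build_update_payload(phone="") == {"phone": ""}  # clears the phone
assert _build_update_payload() == {}  # empty -> rejected by the tool above
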
@mcp.tool()
def freshdesk_list_agents(
    page: int = 1,
    per_page: int = 30,
) -> dict[str, Any]:
    """
    List agents in Freshdesk.

    Args:
        page: Page number (1-based, default 1)
        per_page: Agents per page (1-100, default 30)

    Returns:
        Dict with agents list and count.
    """
    api_key = _get_api_key(credentials)
    if not api_key:
        return _auth_error()
    domain = _get_domain(credentials)
    if not domain:
        return _domain_error()

    url = f"{_base_url(domain)}/agents"
    per_page_clamped = max(1, min(per_page, 100))
    params = {"page": max(1, page), "per_page": per_page_clamped}

    data = _request("get", url, api_key, params=params)
    if "error" in data:
        return data

    agents = [_extract_agent(a) for a in data]
    return {"agents": agents, "count": len(agents)}

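# Illustrative sketch: the list_* tools clamp paging parameters into
# Freshdesk's 1-100 per_page window instead of returning an error;
# out-of-range values are coerced to the nearest legal bound.
def _clamp_per_page(per_page: int) -> int:
    return max(1, min(per_page, 100))

assert _clamp_per_page(0) == 1      # too small -> floor
assert _clamp_per_page(30) == 30    # in range -> unchanged
assert _clamp_per_page(500) == 100  # too large -> ceiling
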
@mcp.tool()
def freshdesk_get_agent(
    agent_id: int,
) -> dict[str, Any]:
    """
    Get a single Freshdesk agent by ID.

    Args:
        agent_id: Freshdesk agent ID (required)

    Returns:
        Dict with agent details.
    """
    api_key = _get_api_key(credentials)
    if not api_key:
        return _auth_error()
    domain = _get_domain(credentials)
    if not domain:
        return _domain_error()
    if not agent_id:
        return {"error": "agent_id is required"}

    url = f"{_base_url(domain)}/agents/{agent_id}"
    data = _request("get", url, api_key)
    if "error" in data:
        return data
    return _extract_agent(data)

@mcp.tool()
def freshdesk_list_groups(
    page: int = 1,
    per_page: int = 30,
) -> dict[str, Any]:
    """
    List groups in Freshdesk.

    Args:
        page: Page number (1-based, default 1)
        per_page: Groups per page (1-100, default 30)

    Returns:
        Dict with groups list and count.
    """
    api_key = _get_api_key(credentials)
    if not api_key:
        return _auth_error()
    domain = _get_domain(credentials)
    if not domain:
        return _domain_error()

    url = f"{_base_url(domain)}/groups"
    per_page_clamped = max(1, min(per_page, 100))
    params = {"page": max(1, page), "per_page": per_page_clamped}
    data = _request("get", url, api_key, params=params)
    if "error" in data:
        return data

    groups = [_extract_group(g) for g in data]
    return {"groups": groups, "count": len(groups)}

@mcp.tool()
def freshdesk_get_group(
    group_id: int,
) -> dict[str, Any]:
    """
    Get a single Freshdesk group by ID.

    Args:
        group_id: Freshdesk group ID (required)

    Returns:
        Dict with group details.
    """
    api_key = _get_api_key(credentials)
    if not api_key:
        return _auth_error()
    domain = _get_domain(credentials)
    if not domain:
        return _domain_error()
    if not group_id:
        return {"error": "group_id is required"}

    url = f"{_base_url(domain)}/groups/{group_id}"
    data = _request("get", url, api_key)
    if "error" in data:
        return data
    return _extract_group(data)

@mcp.tool()
def freshdesk_list_companies(
    page: int = 1,
    per_page: int = 30,
    updated_since: str | None = None,
) -> dict[str, Any]:
    """
    List companies in Freshdesk.

    Args:
        page: Page number (default 1)
        per_page: Companies per page (1-100, default 30)
        updated_since: Optional ISO8601 timestamp to list companies updated since

    Returns:
        Dict with companies list and count.
    """
    api_key = _get_api_key(credentials)
    if not api_key:
        return _auth_error()
    domain = _get_domain(credentials)
    if not domain:
        return _domain_error()

    url = f"{_base_url(domain)}/companies"
    per_page_clamped = max(1, min(per_page, 100))
    params: dict[str, Any] = {"page": max(1, page), "per_page": per_page_clamped}
    if updated_since:
        params["updated_since"] = updated_since

    data = _request("get", url, api_key, params=params)
    if "error" in data:
        return data

    companies = [_extract_company(c) for c in data]
    return {"companies": companies, "count": len(companies)}

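# Illustrative sketch (hypothetical helper; the timestamp is made up):
# freshdesk_list_companies layers an optional updated_since filter on
# the shared paging logic, forwarding it only when the caller set it.
def _build_company_params(page=1, per_page=30, updated_since=None):
    params = {"page": max(1, page), "per_page": max(1, min(per_page, 100))}
    if updated_since:
        params["updated_since"] = updated_since
    return params

assert _build_company_params() == {"page": 1, "per_page": 30}
assert _build_company_params(updated_since="2024-01-01T00:00:00Z") == {
    "page": 1,
    "per_page": 30,
    "updated_since": "2024-01-01T00:00:00Z",
}
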
@mcp.tool()
def freshdesk_get_company(
    company_id: int,
) -> dict[str, Any]:
    """
    Get a single Freshdesk company by ID.

    Args:
        company_id: Freshdesk company ID (required)

    Returns:
        Dict with company details.
    """
    api_key = _get_api_key(credentials)
    if not api_key:
        return _auth_error()
    domain = _get_domain(credentials)
    if not domain:
        return _domain_error()
    if not company_id:
        return {"error": "company_id is required"}

    url = f"{_base_url(domain)}/companies/{company_id}"
    data = _request("get", url, api_key)
    if "error" in data:
        return data
    return _extract_company(data)

@@ -66,6 +66,7 @@ class TestHealthCheckerRegistry:
            "brevo",
            "calcom",
            "calendly_pat",
            "cloudflare",
            "discord",
            "docker_hub",
            "exa_search",