Compare commits
41 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 7052978a43 | |||
| d9f7f658be | |||
| a55de566b9 | |||
| 9dc25987e0 | |||
| 8a044142cb | |||
| 410f0c48b5 | |||
| 1f59e945af | |||
| f394c0d8c8 | |||
| 950821cb9b | |||
| 2bb1a2dfa2 | |||
| b970993425 | |||
| ec8a8cae38 | |||
| d78ed5c8f2 | |||
| f9ff3a698d | |||
| c2332bb790 | |||
| 3a61126824 | |||
| 11f557a2c6 | |||
| e8572b9d0c | |||
| 80a7446fd6 | |||
| cd12821134 | |||
| 30d619de08 | |||
| 4e72410154 | |||
| c42ae3af79 | |||
| bd35cd39aa | |||
| b90f219bd1 | |||
| 96d00f6073 | |||
| c43c803f66 | |||
| dbd777fe62 | |||
| 1ca2621285 | |||
| 5ba1dacf25 | |||
| 085c13edc7 | |||
| ef04174194 | |||
| 6dce26a52e | |||
| fc94e90f6c | |||
| f2013f47aa | |||
| 4be857f64b | |||
| c99865f53d | |||
| 05f1da03e5 | |||
| a62ca5dd47 | |||
| f514e35a36 | |||
| 7c87dc5bca |
@@ -40,6 +40,7 @@ coverage/
|
||||
skills/custom/*
|
||||
logs/
|
||||
log/
|
||||
debug.log
|
||||
|
||||
# Local git hooks (keep only on this machine, do not push)
|
||||
.githooks/
|
||||
|
||||
@@ -0,0 +1,33 @@
|
||||
repos:
|
||||
# Backend: ruff lint + format via uv (uses the same ruff version as backend deps)
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: ruff
|
||||
name: ruff lint
|
||||
entry: bash -c 'cd backend && uv run ruff check --fix "${@/#backend\//}"' --
|
||||
language: system
|
||||
types_or: [python]
|
||||
files: ^backend/
|
||||
- id: ruff-format
|
||||
name: ruff format
|
||||
entry: bash -c 'cd backend && uv run ruff format "${@/#backend\//}"' --
|
||||
language: system
|
||||
types_or: [python]
|
||||
files: ^backend/
|
||||
|
||||
# Frontend: eslint + prettier (must run from frontend/ for node_modules resolution)
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: frontend-eslint
|
||||
name: eslint (frontend)
|
||||
entry: bash -c 'cd frontend && npx eslint --fix "${@/#frontend\//}"' --
|
||||
language: system
|
||||
types_or: [javascript, tsx, ts]
|
||||
files: ^frontend/
|
||||
|
||||
- id: frontend-prettier
|
||||
name: prettier (frontend)
|
||||
entry: bash -c 'cd frontend && npx prettier --write "${@/#frontend\//}"' --
|
||||
language: system
|
||||
files: ^frontend/
|
||||
types_or: [javascript, tsx, ts, json, css]
|
||||
+1
-1
@@ -166,7 +166,7 @@ Required tools:
|
||||
|
||||
1. **Configure the application** (same as Docker setup above)
|
||||
|
||||
2. **Install dependencies**:
|
||||
2. **Install dependencies** (this also sets up pre-commit hooks):
|
||||
```bash
|
||||
make install
|
||||
```
|
||||
|
||||
@@ -23,7 +23,7 @@ help:
|
||||
@echo " make config - Generate local config files (aborts if config already exists)"
|
||||
@echo " make config-upgrade - Merge new fields from config.example.yaml into config.yaml"
|
||||
@echo " make check - Check if all required tools are installed"
|
||||
@echo " make install - Install all dependencies (frontend + backend)"
|
||||
@echo " make install - Install all dependencies (frontend + backend + pre-commit hooks)"
|
||||
@echo " make setup-sandbox - Pre-pull sandbox container image (recommended)"
|
||||
@echo " make dev - Start all services in development mode (with hot-reloading)"
|
||||
@echo " make dev-pro - Start in dev + Gateway mode (experimental, no LangGraph server)"
|
||||
@@ -73,6 +73,8 @@ install:
|
||||
@cd backend && uv sync
|
||||
@echo "Installing frontend dependencies..."
|
||||
@cd frontend && pnpm install
|
||||
@echo "Installing pre-commit hooks..."
|
||||
@$(BACKEND_UV_RUN) --with pre-commit pre-commit install
|
||||
@echo "✓ All dependencies installed"
|
||||
@echo ""
|
||||
@echo "=========================================="
|
||||
@@ -99,7 +101,7 @@ setup-sandbox:
|
||||
echo ""; \
|
||||
if command -v container >/dev/null 2>&1 && [ "$$(uname)" = "Darwin" ]; then \
|
||||
echo "Detected Apple Container on macOS, pulling image..."; \
|
||||
container pull "$$IMAGE" || echo "⚠ Apple Container pull failed, will try Docker"; \
|
||||
container image pull "$$IMAGE" || echo "⚠ Apple Container pull failed, will try Docker"; \
|
||||
fi; \
|
||||
if command -v docker >/dev/null 2>&1; then \
|
||||
echo "Pulling image using Docker..."; \
|
||||
|
||||
@@ -264,7 +264,7 @@ On Windows, run the local development flow from Git Bash. Native `cmd.exe` and P
|
||||
|
||||
2. **Install dependencies**:
|
||||
```bash
|
||||
make install # Install backend + frontend dependencies
|
||||
make install # Install backend + frontend dependencies + pre-commit hooks
|
||||
```
|
||||
|
||||
3. **(Optional) Pre-pull sandbox image**:
|
||||
|
||||
@@ -23,6 +23,16 @@ _CHANNEL_REGISTRY: dict[str, str] = {
|
||||
"wecom": "app.channels.wecom:WeComChannel",
|
||||
}
|
||||
|
||||
# Keys that indicate a user has configured credentials for a channel.
|
||||
_CHANNEL_CREDENTIAL_KEYS: dict[str, list[str]] = {
|
||||
"discord": ["bot_token"],
|
||||
"feishu": ["app_id", "app_secret"],
|
||||
"slack": ["bot_token", "app_token"],
|
||||
"telegram": ["bot_token"],
|
||||
"wecom": ["bot_id", "bot_secret"],
|
||||
"wechat": ["bot_token"],
|
||||
}
|
||||
|
||||
_CHANNELS_LANGGRAPH_URL_ENV = "DEER_FLOW_CHANNELS_LANGGRAPH_URL"
|
||||
_CHANNELS_GATEWAY_URL_ENV = "DEER_FLOW_CHANNELS_GATEWAY_URL"
|
||||
|
||||
@@ -88,7 +98,16 @@ class ChannelService:
|
||||
if not isinstance(channel_config, dict):
|
||||
continue
|
||||
if not channel_config.get("enabled", False):
|
||||
logger.info("Channel %s is disabled, skipping", name)
|
||||
cred_keys = _CHANNEL_CREDENTIAL_KEYS.get(name, [])
|
||||
has_creds = any(not isinstance(channel_config.get(k), bool) and channel_config.get(k) is not None and str(channel_config[k]).strip() for k in cred_keys)
|
||||
if has_creds:
|
||||
logger.warning(
|
||||
"Channel '%s' has credentials configured but is disabled. Set enabled: true under channels.%s in config.yaml to activate it.",
|
||||
name,
|
||||
name,
|
||||
)
|
||||
else:
|
||||
logger.info("Channel %s is disabled, skipping", name)
|
||||
continue
|
||||
|
||||
await self._start_channel(name, channel_config)
|
||||
|
||||
@@ -16,13 +16,31 @@ logger = logging.getLogger(__name__)
|
||||
_slack_md_converter = SlackMarkdownConverter()
|
||||
|
||||
|
||||
def _normalize_allowed_users(allowed_users: Any) -> set[str]:
|
||||
if allowed_users is None:
|
||||
return set()
|
||||
if isinstance(allowed_users, str):
|
||||
values = [allowed_users]
|
||||
elif isinstance(allowed_users, list | tuple | set):
|
||||
values = allowed_users
|
||||
else:
|
||||
logger.warning(
|
||||
"Slack allowed_users should be a list of Slack user IDs or a single Slack user ID string; treating %s as one string value",
|
||||
type(allowed_users).__name__,
|
||||
)
|
||||
values = [allowed_users]
|
||||
return {str(user_id) for user_id in values if str(user_id)}
|
||||
|
||||
|
||||
class SlackChannel(Channel):
|
||||
"""Slack IM channel using Socket Mode (WebSocket, no public IP).
|
||||
|
||||
Configuration keys (in ``config.yaml`` under ``channels.slack``):
|
||||
- ``bot_token``: Slack Bot User OAuth Token (xoxb-...).
|
||||
- ``app_token``: Slack App-Level Token (xapp-...) for Socket Mode.
|
||||
- ``allowed_users``: (optional) List of allowed Slack user IDs. Empty = allow all.
|
||||
- ``allowed_users``: (optional) List of allowed Slack user IDs, or a
|
||||
single Slack user ID string as shorthand. Empty = allow all. Other
|
||||
scalar values are treated as a single string with a warning.
|
||||
"""
|
||||
|
||||
def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
|
||||
@@ -30,7 +48,7 @@ class SlackChannel(Channel):
|
||||
self._socket_client = None
|
||||
self._web_client = None
|
||||
self._loop: asyncio.AbstractEventLoop | None = None
|
||||
self._allowed_users: set[str] = {str(user_id) for user_id in config.get("allowed_users", [])}
|
||||
self._allowed_users = _normalize_allowed_users(config.get("allowed_users", []))
|
||||
|
||||
async def start(self) -> None:
|
||||
if self._running:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from collections.abc import AsyncGenerator
|
||||
from contextlib import asynccontextmanager
|
||||
@@ -32,6 +33,11 @@ logging.basicConfig(
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Upper bound (seconds) each lifespan shutdown hook is allowed to run.
|
||||
# Bounds worker exit time so uvicorn's reload supervisor does not keep
|
||||
# firing signals into a worker that is stuck waiting for shutdown cleanup.
|
||||
_SHUTDOWN_HOOK_TIMEOUT_SECONDS = 5.0
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
||||
@@ -63,11 +69,19 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
||||
|
||||
yield
|
||||
|
||||
# Stop channel service on shutdown
|
||||
# Stop channel service on shutdown (bounded to prevent worker hang)
|
||||
try:
|
||||
from app.channels.service import stop_channel_service
|
||||
|
||||
await stop_channel_service()
|
||||
await asyncio.wait_for(
|
||||
stop_channel_service(),
|
||||
timeout=_SHUTDOWN_HOOK_TIMEOUT_SECONDS,
|
||||
)
|
||||
except TimeoutError:
|
||||
logger.warning(
|
||||
"Channel service shutdown exceeded %.1fs; proceeding with worker exit.",
|
||||
_SHUTDOWN_HOOK_TIMEOUT_SECONDS,
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to stop channel service")
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ class AgentResponse(BaseModel):
|
||||
description: str = Field(default="", description="Agent description")
|
||||
model: str | None = Field(default=None, description="Optional model override")
|
||||
tool_groups: list[str] | None = Field(default=None, description="Optional tool group whitelist")
|
||||
skills: list[str] | None = Field(default=None, description="Optional skill whitelist (None=all, []=none)")
|
||||
soul: str | None = Field(default=None, description="SOUL.md content")
|
||||
|
||||
|
||||
@@ -41,6 +42,7 @@ class AgentCreateRequest(BaseModel):
|
||||
description: str = Field(default="", description="Agent description")
|
||||
model: str | None = Field(default=None, description="Optional model override")
|
||||
tool_groups: list[str] | None = Field(default=None, description="Optional tool group whitelist")
|
||||
skills: list[str] | None = Field(default=None, description="Optional skill whitelist (None=all enabled, []=none)")
|
||||
soul: str = Field(default="", description="SOUL.md content — agent personality and behavioral guardrails")
|
||||
|
||||
|
||||
@@ -50,6 +52,7 @@ class AgentUpdateRequest(BaseModel):
|
||||
description: str | None = Field(default=None, description="Updated description")
|
||||
model: str | None = Field(default=None, description="Updated model override")
|
||||
tool_groups: list[str] | None = Field(default=None, description="Updated tool group whitelist")
|
||||
skills: list[str] | None = Field(default=None, description="Updated skill whitelist (None=all, []=none)")
|
||||
soul: str | None = Field(default=None, description="Updated SOUL.md content")
|
||||
|
||||
|
||||
@@ -94,6 +97,7 @@ def _agent_config_to_response(agent_cfg: AgentConfig, include_soul: bool = False
|
||||
description=agent_cfg.description,
|
||||
model=agent_cfg.model,
|
||||
tool_groups=agent_cfg.tool_groups,
|
||||
skills=agent_cfg.skills,
|
||||
soul=soul,
|
||||
)
|
||||
|
||||
@@ -215,6 +219,8 @@ async def create_agent_endpoint(request: AgentCreateRequest) -> AgentResponse:
|
||||
config_data["model"] = request.model
|
||||
if request.tool_groups is not None:
|
||||
config_data["tool_groups"] = request.tool_groups
|
||||
if request.skills is not None:
|
||||
config_data["skills"] = request.skills
|
||||
|
||||
config_file = agent_dir / "config.yaml"
|
||||
with open(config_file, "w", encoding="utf-8") as f:
|
||||
@@ -271,21 +277,32 @@ async def update_agent(name: str, request: AgentUpdateRequest) -> AgentResponse:
|
||||
|
||||
try:
|
||||
# Update config if any config fields changed
|
||||
config_changed = any(v is not None for v in [request.description, request.model, request.tool_groups])
|
||||
# Use model_fields_set to distinguish "field omitted" from "explicitly set to null".
|
||||
# This is critical for skills where None means "inherit all" (not "don't change").
|
||||
fields_set = request.model_fields_set
|
||||
config_changed = bool(fields_set & {"description", "model", "tool_groups", "skills"})
|
||||
|
||||
if config_changed:
|
||||
updated: dict = {
|
||||
"name": agent_cfg.name,
|
||||
"description": request.description if request.description is not None else agent_cfg.description,
|
||||
"description": request.description if "description" in fields_set else agent_cfg.description,
|
||||
}
|
||||
new_model = request.model if request.model is not None else agent_cfg.model
|
||||
new_model = request.model if "model" in fields_set else agent_cfg.model
|
||||
if new_model is not None:
|
||||
updated["model"] = new_model
|
||||
|
||||
new_tool_groups = request.tool_groups if request.tool_groups is not None else agent_cfg.tool_groups
|
||||
new_tool_groups = request.tool_groups if "tool_groups" in fields_set else agent_cfg.tool_groups
|
||||
if new_tool_groups is not None:
|
||||
updated["tool_groups"] = new_tool_groups
|
||||
|
||||
# skills: None = inherit all, [] = no skills, ["a","b"] = whitelist
|
||||
if "skills" in fields_set:
|
||||
new_skills = request.skills
|
||||
else:
|
||||
new_skills = agent_cfg.skills
|
||||
if new_skills is not None:
|
||||
updated["skills"] = new_skills
|
||||
|
||||
config_file = agent_dir / "config.yaml"
|
||||
with open(config_file, "w", encoding="utf-8") as f:
|
||||
yaml.dump(updated, f, default_flow_style=False, allow_unicode=True)
|
||||
|
||||
@@ -121,7 +121,7 @@ async def generate_suggestions(thread_id: str, request: SuggestionsRequest) -> S
|
||||
|
||||
try:
|
||||
model = create_chat_model(name=request.model_name, thinking_enabled=False)
|
||||
response = await model.ainvoke([SystemMessage(content=system_instruction), HumanMessage(content=user_content)])
|
||||
response = await model.ainvoke([SystemMessage(content=system_instruction), HumanMessage(content=user_content)], config={"run_name": "suggest_agent"})
|
||||
raw = _extract_response_text(response.content)
|
||||
suggestions = _parse_json_string_list(raw) or []
|
||||
cleaned = [s.replace("\n", " ").strip() for s in suggestions if s.strip()]
|
||||
|
||||
@@ -12,6 +12,7 @@ import json
|
||||
import logging
|
||||
import re
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
from typing import Any
|
||||
|
||||
from fastapi import HTTPException, Request
|
||||
@@ -101,9 +102,10 @@ def resolve_agent_factory(assistant_id: str | None):
|
||||
"""Resolve the agent factory callable from config.
|
||||
|
||||
Custom agents are implemented as ``lead_agent`` + an ``agent_name``
|
||||
injected into ``configurable`` — see :func:`build_run_config`. All
|
||||
``assistant_id`` values therefore map to the same factory; the routing
|
||||
happens inside ``make_lead_agent`` when it reads ``cfg["agent_name"]``.
|
||||
injected into ``configurable`` or ``context`` — see
|
||||
:func:`build_run_config`. All ``assistant_id`` values therefore map to the
|
||||
same factory; the routing happens inside ``make_lead_agent`` when it reads
|
||||
``cfg["agent_name"]``.
|
||||
"""
|
||||
from deerflow.agents.lead_agent.agent import make_lead_agent
|
||||
|
||||
@@ -120,10 +122,12 @@ def build_run_config(
|
||||
"""Build a RunnableConfig dict for the agent.
|
||||
|
||||
When *assistant_id* refers to a custom agent (anything other than
|
||||
``"lead_agent"`` / ``None``), the name is forwarded as
|
||||
``configurable["agent_name"]``. ``make_lead_agent`` reads this key to
|
||||
load the matching ``agents/<name>/SOUL.md`` and per-agent config —
|
||||
without it the agent silently runs as the default lead agent.
|
||||
``"lead_agent"`` / ``None``), the name is forwarded as ``agent_name`` in
|
||||
whichever runtime options container is active: ``context`` for
|
||||
LangGraph >= 0.6.0 requests, otherwise ``configurable``.
|
||||
``make_lead_agent`` reads this key to load the matching
|
||||
``agents/<name>/SOUL.md`` and per-agent config — without it the agent
|
||||
silently runs as the default lead agent.
|
||||
|
||||
This mirrors the channel manager's ``_resolve_run_params`` logic so that
|
||||
the LangGraph Platform-compatible HTTP API and the IM channel path behave
|
||||
@@ -142,7 +146,14 @@ def build_run_config(
|
||||
thread_id,
|
||||
list(request_config.get("configurable", {}).keys()),
|
||||
)
|
||||
config["context"] = request_config["context"]
|
||||
context_value = request_config["context"]
|
||||
if context_value is None:
|
||||
context = {}
|
||||
elif isinstance(context_value, Mapping):
|
||||
context = dict(context_value)
|
||||
else:
|
||||
raise ValueError("request config 'context' must be a mapping or null.")
|
||||
config["context"] = context
|
||||
else:
|
||||
configurable = {"thread_id": thread_id}
|
||||
configurable.update(request_config.get("configurable", {}))
|
||||
@@ -154,13 +165,19 @@ def build_run_config(
|
||||
config["configurable"] = {"thread_id": thread_id}
|
||||
|
||||
# Inject custom agent name when the caller specified a non-default assistant.
|
||||
# Honour an explicit configurable["agent_name"] in the request if already set.
|
||||
if assistant_id and assistant_id != _DEFAULT_ASSISTANT_ID and "configurable" in config:
|
||||
if "agent_name" not in config["configurable"]:
|
||||
normalized = assistant_id.strip().lower().replace("_", "-")
|
||||
if not normalized or not re.fullmatch(r"[a-z0-9-]+", normalized):
|
||||
raise ValueError(f"Invalid assistant_id {assistant_id!r}: must contain only letters, digits, and hyphens after normalization.")
|
||||
config["configurable"]["agent_name"] = normalized
|
||||
# Honour an explicit agent_name in the active runtime options container.
|
||||
if assistant_id and assistant_id != _DEFAULT_ASSISTANT_ID:
|
||||
normalized = assistant_id.strip().lower().replace("_", "-")
|
||||
if not normalized or not re.fullmatch(r"[a-z0-9-]+", normalized):
|
||||
raise ValueError(f"Invalid assistant_id {assistant_id!r}: must contain only letters, digits, and hyphens after normalization.")
|
||||
if "configurable" in config:
|
||||
target = config["configurable"]
|
||||
elif "context" in config:
|
||||
target = config["context"]
|
||||
else:
|
||||
target = config.setdefault("configurable", {})
|
||||
if target is not None and "agent_name" not in target:
|
||||
target["agent_name"] = normalized
|
||||
if metadata:
|
||||
config.setdefault("metadata", {}).update(metadata)
|
||||
return config
|
||||
|
||||
+78
-13
@@ -19,24 +19,78 @@ import asyncio
|
||||
import logging
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from langchain_core.messages import HumanMessage
|
||||
|
||||
from deerflow.agents import make_lead_agent
|
||||
try:
|
||||
from prompt_toolkit import PromptSession
|
||||
from prompt_toolkit.history import InMemoryHistory
|
||||
|
||||
_HAS_PROMPT_TOOLKIT = True
|
||||
except ImportError:
|
||||
_HAS_PROMPT_TOOLKIT = False
|
||||
|
||||
load_dotenv()
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
||||
datefmt="%Y-%m-%d %H:%M:%S",
|
||||
)
|
||||
_LOG_FMT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
_LOG_DATEFMT = "%Y-%m-%d %H:%M:%S"
|
||||
|
||||
|
||||
def _logging_level_from_config(name: str) -> int:
|
||||
"""Map ``config.yaml`` ``log_level`` string to a ``logging`` level constant."""
|
||||
mapping = logging.getLevelNamesMapping()
|
||||
return mapping.get((name or "info").strip().upper(), logging.INFO)
|
||||
|
||||
|
||||
def _setup_logging(log_level: str) -> None:
|
||||
"""Send application logs to ``debug.log`` at *log_level*; do not print them on the console.
|
||||
|
||||
Idempotent: any pre-existing handlers on the root logger (e.g. installed by
|
||||
``logging.basicConfig`` in transitively imported modules) are removed so the
|
||||
debug session output only lands in ``debug.log``.
|
||||
"""
|
||||
level = _logging_level_from_config(log_level)
|
||||
root = logging.root
|
||||
for h in list(root.handlers):
|
||||
root.removeHandler(h)
|
||||
h.close()
|
||||
root.setLevel(level)
|
||||
|
||||
file_handler = logging.FileHandler("debug.log", mode="a", encoding="utf-8")
|
||||
file_handler.setLevel(level)
|
||||
file_handler.setFormatter(logging.Formatter(_LOG_FMT, datefmt=_LOG_DATEFMT))
|
||||
root.addHandler(file_handler)
|
||||
|
||||
|
||||
def _update_logging_level(log_level: str) -> None:
|
||||
"""Update the root logger and existing handlers to *log_level*."""
|
||||
level = _logging_level_from_config(log_level)
|
||||
root = logging.root
|
||||
root.setLevel(level)
|
||||
for handler in root.handlers:
|
||||
handler.setLevel(level)
|
||||
|
||||
|
||||
async def main():
|
||||
# Install file logging first so warnings emitted while loading config do not
|
||||
# leak onto the interactive terminal via Python's lastResort handler.
|
||||
_setup_logging("info")
|
||||
|
||||
from deerflow.config import get_app_config
|
||||
|
||||
app_config = get_app_config()
|
||||
_update_logging_level(app_config.log_level)
|
||||
|
||||
# Delay the rest of the deerflow imports until *after* logging is installed
|
||||
# so that any import-time side effects (e.g. deerflow.agents starts a
|
||||
# background skill-loader thread on import) emit logs to debug.log instead
|
||||
# of leaking onto the interactive terminal via Python's lastResort handler.
|
||||
from langchain_core.messages import HumanMessage
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from deerflow.agents import make_lead_agent
|
||||
from deerflow.mcp import initialize_mcp_tools
|
||||
|
||||
# Initialize MCP tools at startup
|
||||
try:
|
||||
from deerflow.mcp import initialize_mcp_tools
|
||||
|
||||
await initialize_mcp_tools()
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to initialize MCP tools: {e}")
|
||||
@@ -52,16 +106,27 @@ async def main():
|
||||
}
|
||||
}
|
||||
|
||||
runtime = Runtime(context={"thread_id": config["configurable"]["thread_id"]})
|
||||
config["configurable"]["__pregel_runtime"] = runtime
|
||||
|
||||
agent = make_lead_agent(config)
|
||||
|
||||
session = PromptSession(history=InMemoryHistory()) if _HAS_PROMPT_TOOLKIT else None
|
||||
|
||||
print("=" * 50)
|
||||
print("Lead Agent Debug Mode")
|
||||
print("Type 'quit' or 'exit' to stop")
|
||||
print(f"Logs: debug.log (log_level={app_config.log_level})")
|
||||
if not _HAS_PROMPT_TOOLKIT:
|
||||
print("Tip: `uv sync --group dev` to enable arrow-key & history support")
|
||||
print("=" * 50)
|
||||
|
||||
while True:
|
||||
try:
|
||||
user_input = input("\nYou: ").strip()
|
||||
if session:
|
||||
user_input = (await session.prompt_async("\nYou: ")).strip()
|
||||
else:
|
||||
user_input = input("\nYou: ").strip()
|
||||
if not user_input:
|
||||
continue
|
||||
if user_input.lower() in ("quit", "exit"):
|
||||
@@ -70,15 +135,15 @@ async def main():
|
||||
|
||||
# Invoke the agent
|
||||
state = {"messages": [HumanMessage(content=user_input)]}
|
||||
result = await agent.ainvoke(state, config=config, context={"thread_id": "debug-thread-001"})
|
||||
result = await agent.ainvoke(state, config=config)
|
||||
|
||||
# Print the response
|
||||
if result.get("messages"):
|
||||
last_message = result["messages"][-1]
|
||||
print(f"\nAgent: {last_message.content}")
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\nInterrupted. Goodbye!")
|
||||
except (KeyboardInterrupt, EOFError):
|
||||
print("\nGoodbye!")
|
||||
break
|
||||
except Exception as e:
|
||||
print(f"\nError: {e}")
|
||||
|
||||
@@ -199,7 +199,7 @@ class ThreadState(AgentState):
|
||||
│ Built-in Tools │ │ Configured Tools │ │ MCP Tools │
|
||||
│ (packages/harness/deerflow/tools/) │ │ (config.yaml) │ │ (extensions.json) │
|
||||
├─────────────────────┤ ├─────────────────────┤ ├─────────────────────┤
|
||||
│ - present_file │ │ - web_search │ │ - github │
|
||||
│ - present_files │ │ - web_search │ │ - github │
|
||||
│ - ask_clarification │ │ - web_fetch │ │ - filesystem │
|
||||
│ - view_image │ │ - bash │ │ - postgres │
|
||||
│ │ │ - read_file │ │ - brave-search │
|
||||
|
||||
@@ -296,7 +296,7 @@ These are the tool names your provider will see in `request.tool_name`:
|
||||
| `web_search` | Web search query |
|
||||
| `web_fetch` | Fetch URL content |
|
||||
| `image_search` | Image search |
|
||||
| `present_file` | Present file to user |
|
||||
| `present_files` | Present file to user |
|
||||
| `view_image` | Display image |
|
||||
| `ask_clarification` | Ask user a question |
|
||||
| `task` | Delegate to subagent |
|
||||
|
||||
@@ -45,6 +45,41 @@ Example:
|
||||
}
|
||||
```
|
||||
|
||||
## Custom Tool Interceptors
|
||||
|
||||
You can register custom interceptors that run before every MCP tool call. This is useful for injecting per-request headers (e.g., user auth tokens from the LangGraph execution context), logging, or metrics.
|
||||
|
||||
Declare interceptors in `extensions_config.json` using the `mcpInterceptors` field:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpInterceptors": [
|
||||
"my_package.mcp.auth:build_auth_interceptor"
|
||||
],
|
||||
"mcpServers": { ... }
|
||||
}
|
||||
```
|
||||
|
||||
Each entry is a Python import path in `module:variable` format (resolved via `resolve_variable`). The variable must be a **no-arg builder function** that returns an async interceptor compatible with `MultiServerMCPClient`’s `tool_interceptors` interface, or `None` to skip.
|
||||
|
||||
Example interceptor that injects auth headers from LangGraph metadata:
|
||||
|
||||
```python
|
||||
def build_auth_interceptor():
|
||||
async def interceptor(request, handler):
|
||||
from langgraph.config import get_config
|
||||
metadata = get_config().get("metadata", {})
|
||||
headers = dict(request.headers or {})
|
||||
if token := metadata.get("auth_token"):
|
||||
headers["X-Auth-Token"] = token
|
||||
return await handler(request.override(headers=headers))
|
||||
return interceptor
|
||||
```
|
||||
|
||||
- A single string value is accepted and normalized to a one-element list.
|
||||
- Invalid paths or builder failures are logged as warnings without blocking other interceptors.
|
||||
- The builder return value must be `callable`; non-callable values are skipped with a warning.
|
||||
|
||||
## How It Works
|
||||
|
||||
MCP servers expose tools that are automatically discovered and integrated into DeerFlow’s agent system at runtime. Once enabled, these tools become available to agents without additional code changes.
|
||||
|
||||
@@ -41,6 +41,13 @@ summarization:
|
||||
|
||||
# Custom summary prompt (optional)
|
||||
summary_prompt: null
|
||||
|
||||
# Tool names treated as skill file reads for skill rescue
|
||||
skill_file_read_tool_names:
|
||||
- read_file
|
||||
- read
|
||||
- view
|
||||
- cat
|
||||
```
|
||||
|
||||
### Configuration Options
|
||||
@@ -125,6 +132,26 @@ keep:
|
||||
- **Default**: `null` (uses LangChain's default prompt)
|
||||
- **Description**: Custom prompt template for generating summaries. The prompt should guide the model to extract the most important context.
|
||||
|
||||
#### `preserve_recent_skill_count`
|
||||
- **Type**: Integer (≥ 0)
|
||||
- **Default**: `5`
|
||||
- **Description**: Number of most-recently-loaded skill files (tool results whose tool name is in `skill_file_read_tool_names` and whose target path is under `skills.container_path`, e.g. `/mnt/skills/...`) that are rescued from summarization. Prevents the agent from losing skill instructions after compression. Set to `0` to disable skill rescue entirely.
|
||||
|
||||
#### `preserve_recent_skill_tokens`
|
||||
- **Type**: Integer (≥ 0)
|
||||
- **Default**: `25000`
|
||||
- **Description**: Total token budget reserved for rescued skill reads. Once this budget is exhausted, older skill bundles are allowed to be summarized.
|
||||
|
||||
#### `preserve_recent_skill_tokens_per_skill`
|
||||
- **Type**: Integer (≥ 0)
|
||||
- **Default**: `5000`
|
||||
- **Description**: Per-skill token cap. Any individual skill read whose tool result exceeds this size is not rescued (it falls through to the summarizer like ordinary content).
|
||||
|
||||
#### `skill_file_read_tool_names`
|
||||
- **Type**: List of strings
|
||||
- **Default**: `["read_file", "read", "view", "cat"]`
|
||||
- **Description**: Tool names treated as skill file reads during summarization rescue. A tool call is only eligible for skill rescue when its name appears in this list and its target path is under `skills.container_path`.
|
||||
|
||||
**Default Prompt Behavior:**
|
||||
The default LangChain prompt instructs the model to:
|
||||
- Extract highest quality/most relevant context
|
||||
@@ -147,6 +174,7 @@ The default LangChain prompt instructs the model to:
|
||||
- A single summary message is added
|
||||
- Recent messages are preserved
|
||||
6. **AI/Tool Pair Protection**: The system ensures AI messages and their corresponding tool messages stay together
|
||||
7. **Skill Rescue**: Before the summary is generated, the most recently loaded skill files (tool results whose tool name is in `skill_file_read_tool_names` and whose target path is under `skills.container_path`) are lifted out of the summarization set and prepended to the preserved tail. Selection walks newest-first under three budgets: `preserve_recent_skill_count`, `preserve_recent_skill_tokens`, and `preserve_recent_skill_tokens_per_skill`. The triggering AIMessage and all of its paired ToolMessages move together so tool_call ↔ tool_result pairing stays intact.
|
||||
|
||||
### Token Counting
|
||||
|
||||
|
||||
@@ -26,6 +26,15 @@ from deerflow.models import create_chat_model
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_runtime_config(config: RunnableConfig) -> dict:
|
||||
"""Merge legacy configurable options with LangGraph runtime context."""
|
||||
cfg = dict(config.get("configurable", {}) or {})
|
||||
context = config.get("context", {}) or {}
|
||||
if isinstance(context, dict):
|
||||
cfg.update(context)
|
||||
return cfg
|
||||
|
||||
|
||||
def _resolve_model_name(requested_model_name: str | None = None) -> str:
|
||||
"""Resolve a runtime model name safely, falling back to default if invalid. Returns None if no models are configured."""
|
||||
app_config = get_app_config()
|
||||
@@ -84,7 +93,24 @@ def _create_summarization_middleware() -> DeerFlowSummarizationMiddleware | None
|
||||
if get_memory_config().enabled:
|
||||
hooks.append(memory_flush_hook)
|
||||
|
||||
return DeerFlowSummarizationMiddleware(**kwargs, before_summarization=hooks)
|
||||
# The logic below relies on two assumptions holding true: this factory is
|
||||
# the sole entry point for DeerFlowSummarizationMiddleware, and the runtime
|
||||
# config is not expected to change after startup.
|
||||
try:
|
||||
skills_container_path = get_app_config().skills.container_path or "/mnt/skills"
|
||||
except Exception:
|
||||
logger.exception("Failed to resolve skills container path; falling back to default")
|
||||
skills_container_path = "/mnt/skills"
|
||||
|
||||
return DeerFlowSummarizationMiddleware(
|
||||
**kwargs,
|
||||
skills_container_path=skills_container_path,
|
||||
skill_file_read_tool_names=config.skill_file_read_tool_names,
|
||||
before_summarization=hooks,
|
||||
preserve_recent_skill_count=config.preserve_recent_skill_count,
|
||||
preserve_recent_skill_tokens=config.preserve_recent_skill_tokens,
|
||||
preserve_recent_skill_tokens_per_skill=config.preserve_recent_skill_tokens_per_skill,
|
||||
)
|
||||
|
||||
|
||||
def _create_todo_list_middleware(is_plan_mode: bool) -> TodoMiddleware | None:
|
||||
@@ -231,7 +257,8 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
|
||||
middlewares.append(summarization_middleware)
|
||||
|
||||
# Add TodoList middleware if plan mode is enabled
|
||||
is_plan_mode = config.get("configurable", {}).get("is_plan_mode", False)
|
||||
cfg = _get_runtime_config(config)
|
||||
is_plan_mode = cfg.get("is_plan_mode", False)
|
||||
todo_list_middleware = _create_todo_list_middleware(is_plan_mode)
|
||||
if todo_list_middleware is not None:
|
||||
middlewares.append(todo_list_middleware)
|
||||
@@ -260,9 +287,9 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
|
||||
middlewares.append(DeferredToolFilterMiddleware())
|
||||
|
||||
# Add SubagentLimitMiddleware to truncate excess parallel task calls
|
||||
subagent_enabled = config.get("configurable", {}).get("subagent_enabled", False)
|
||||
subagent_enabled = cfg.get("subagent_enabled", False)
|
||||
if subagent_enabled:
|
||||
max_concurrent_subagents = config.get("configurable", {}).get("max_concurrent_subagents", 3)
|
||||
max_concurrent_subagents = cfg.get("max_concurrent_subagents", 3)
|
||||
middlewares.append(SubagentLimitMiddleware(max_concurrent=max_concurrent_subagents))
|
||||
|
||||
# LoopDetectionMiddleware — detect and break repetitive tool call loops
|
||||
@@ -282,7 +309,7 @@ def make_lead_agent(config: RunnableConfig):
|
||||
from deerflow.tools import get_available_tools
|
||||
from deerflow.tools.builtins import setup_agent
|
||||
|
||||
cfg = config.get("configurable", {})
|
||||
cfg = _get_runtime_config(config)
|
||||
|
||||
thinking_enabled = cfg.get("thinking_enabled", True)
|
||||
reasoning_effort = cfg.get("reasoning_effort", None)
|
||||
@@ -333,6 +360,7 @@ def make_lead_agent(config: RunnableConfig):
|
||||
"is_plan_mode": is_plan_mode,
|
||||
"subagent_enabled": subagent_enabled,
|
||||
"tool_groups": agent_config.tool_groups if agent_config else None,
|
||||
"available_skills": ["bootstrap"] if is_bootstrap else (agent_config.skills if agent_config and agent_config.skills is not None else None),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@@ -164,6 +164,36 @@ Skip simple one-off tasks.
|
||||
"""
|
||||
|
||||
|
||||
def _build_available_subagents_description(available_names: list[str], bash_available: bool) -> str:
|
||||
"""Dynamically build subagent type descriptions from registry.
|
||||
|
||||
Mirrors Codex's pattern where agent_type_description is dynamically generated
|
||||
from all registered roles, so the LLM knows about every available type.
|
||||
"""
|
||||
# Built-in descriptions (kept for backward compatibility with existing prompt quality)
|
||||
builtin_descriptions = {
|
||||
"general-purpose": "For ANY non-trivial task - web research, code exploration, file operations, analysis, etc.",
|
||||
"bash": (
|
||||
"For command execution (git, build, test, deploy operations)" if bash_available else "Not available in the current sandbox configuration. Use direct file/web tools or switch to AioSandboxProvider for isolated shell access."
|
||||
),
|
||||
}
|
||||
|
||||
# Lazy import moved outside loop to avoid repeated import overhead
|
||||
from deerflow.subagents.registry import get_subagent_config
|
||||
|
||||
lines = []
|
||||
for name in available_names:
|
||||
if name in builtin_descriptions:
|
||||
lines.append(f"- **{name}**: {builtin_descriptions[name]}")
|
||||
else:
|
||||
config = get_subagent_config(name)
|
||||
if config is not None:
|
||||
desc = config.description.split("\n")[0].strip() # First line only for brevity
|
||||
lines.append(f"- **{name}**: {desc}")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def _build_subagent_section(max_concurrent: int) -> str:
|
||||
"""Build the subagent system prompt section with dynamic concurrency limit.
|
||||
|
||||
@@ -174,13 +204,12 @@ def _build_subagent_section(max_concurrent: int) -> str:
|
||||
Formatted subagent section string.
|
||||
"""
|
||||
n = max_concurrent
|
||||
bash_available = "bash" in get_available_subagent_names()
|
||||
available_subagents = (
|
||||
"- **general-purpose**: For ANY non-trivial task - web research, code exploration, file operations, analysis, etc.\n- **bash**: For command execution (git, build, test, deploy operations)"
|
||||
if bash_available
|
||||
else "- **general-purpose**: For ANY non-trivial task - web research, code exploration, file operations, analysis, etc.\n"
|
||||
"- **bash**: Not available in the current sandbox configuration. Use direct file/web tools or switch to AioSandboxProvider for isolated shell access."
|
||||
)
|
||||
available_names = get_available_subagent_names()
|
||||
bash_available = "bash" in available_names
|
||||
|
||||
# Dynamically build subagent type descriptions from registry (aligned with Codex's
|
||||
# agent_type_description pattern where all registered roles are listed in the tool spec).
|
||||
available_subagents = _build_available_subagents_description(available_names, bash_available)
|
||||
direct_tool_examples = "bash, ls, read_file, web_search, etc." if bash_available else "ls, read_file, web_search, etc."
|
||||
direct_execution_example = (
|
||||
'# User asks: "Run the tests"\n# Thinking: Cannot decompose into parallel sub-tasks\n# → Execute directly\n\nbash("npm test") # Direct execution, not task()'
|
||||
@@ -420,7 +449,7 @@ You: "Deploying to staging..." [proceed]
|
||||
- Treat `/mnt/user-data/workspace` as your default current working directory for coding and file-editing tasks
|
||||
- When writing scripts or commands that create/read files from the workspace, prefer relative paths such as `hello.txt`, `../uploads/data.csv`, and `../outputs/report.md`
|
||||
- Avoid hardcoding `/mnt/user-data/...` inside generated scripts when a relative path from the workspace is enough
|
||||
- Final deliverables must be copied to `/mnt/user-data/outputs` and presented using `present_file` tool
|
||||
- Final deliverables must be copied to `/mnt/user-data/outputs` and presented using `present_files` tool
|
||||
{acp_section}
|
||||
</working_directory>
|
||||
|
||||
@@ -648,7 +677,7 @@ def _build_acp_section() -> str:
|
||||
"- ACP agents (e.g. codex, claude_code) run in their own independent workspace — NOT in `/mnt/user-data/`\n"
|
||||
"- When writing prompts for ACP agents, describe the task only — do NOT reference `/mnt/user-data` paths\n"
|
||||
"- ACP agent results are accessible at `/mnt/acp-workspace/` (read-only) — use `ls`, `read_file`, or `bash cp` to retrieve output files\n"
|
||||
"- To deliver ACP output to the user: copy from `/mnt/acp-workspace/<file>` to `/mnt/user-data/outputs/<file>`, then use `present_file`"
|
||||
"- To deliver ACP output to the user: copy from `/mnt/acp-workspace/<file>` to `/mnt/user-data/outputs/<file>`, then use `present_files`"
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -409,7 +409,7 @@ class MemoryUpdater:
|
||||
|
||||
current_memory, prompt = prepared
|
||||
model = self._get_model()
|
||||
response = await model.ainvoke(prompt)
|
||||
response = await model.ainvoke(prompt, config={"run_name": "memory_agent"})
|
||||
return await asyncio.to_thread(
|
||||
self._finalize_update,
|
||||
current_memory=current_memory,
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
import json
|
||||
import logging
|
||||
from collections.abc import Callable
|
||||
from hashlib import sha256
|
||||
from typing import override
|
||||
|
||||
from langchain.agents import AgentState
|
||||
@@ -36,6 +37,13 @@ class ClarificationMiddleware(AgentMiddleware[ClarificationMiddlewareState]):
|
||||
|
||||
state_schema = ClarificationMiddlewareState
|
||||
|
||||
def _stable_message_id(self, tool_call_id: str, formatted_message: str) -> str:
|
||||
"""Build a deterministic message ID so retried clarification calls replace, not append."""
|
||||
if tool_call_id:
|
||||
return f"clarification:{tool_call_id}"
|
||||
digest = sha256(formatted_message.encode("utf-8")).hexdigest()[:16]
|
||||
return f"clarification:{digest}"
|
||||
|
||||
def _is_chinese(self, text: str) -> bool:
|
||||
"""Check if text contains Chinese characters.
|
||||
|
||||
@@ -131,6 +139,7 @@ class ClarificationMiddleware(AgentMiddleware[ClarificationMiddlewareState]):
|
||||
# Create a ToolMessage with the formatted question
|
||||
# This will be added to the message history
|
||||
tool_message = ToolMessage(
|
||||
id=self._stable_message_id(tool_call_id, formatted_message),
|
||||
content=formatted_message,
|
||||
tool_call_id=tool_call_id,
|
||||
name="ask_clarification",
|
||||
|
||||
+48
-1
@@ -16,6 +16,9 @@ from typing import override
|
||||
from langchain.agents import AgentState
|
||||
from langchain.agents.middleware import AgentMiddleware
|
||||
from langchain.agents.middleware.types import ModelCallResult, ModelRequest, ModelResponse
|
||||
from langchain_core.messages import ToolMessage
|
||||
from langgraph.prebuilt.tool_node import ToolCallRequest
|
||||
from langgraph.types import Command
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -35,7 +38,7 @@ class DeferredToolFilterMiddleware(AgentMiddleware[AgentState]):
|
||||
if not registry:
|
||||
return request
|
||||
|
||||
deferred_names = {e.name for e in registry.entries}
|
||||
deferred_names = registry.deferred_names
|
||||
active_tools = [t for t in request.tools if getattr(t, "name", None) not in deferred_names]
|
||||
|
||||
if len(active_tools) < len(request.tools):
|
||||
@@ -43,6 +46,28 @@ class DeferredToolFilterMiddleware(AgentMiddleware[AgentState]):
|
||||
|
||||
return request.override(tools=active_tools)
|
||||
|
||||
def _blocked_tool_message(self, request: ToolCallRequest) -> ToolMessage | None:
|
||||
from deerflow.tools.builtins.tool_search import get_deferred_registry
|
||||
|
||||
registry = get_deferred_registry()
|
||||
if not registry:
|
||||
return None
|
||||
|
||||
tool_name = str(request.tool_call.get("name") or "")
|
||||
if not tool_name:
|
||||
return None
|
||||
|
||||
if not registry.contains(tool_name):
|
||||
return None
|
||||
|
||||
tool_call_id = str(request.tool_call.get("id") or "missing_tool_call_id")
|
||||
return ToolMessage(
|
||||
content=(f"Error: Tool '{tool_name}' is deferred and has not been promoted yet. Call tool_search first to expose and promote this tool's schema, then retry."),
|
||||
tool_call_id=tool_call_id,
|
||||
name=tool_name,
|
||||
status="error",
|
||||
)
|
||||
|
||||
@override
|
||||
def wrap_model_call(
|
||||
self,
|
||||
@@ -51,6 +76,17 @@ class DeferredToolFilterMiddleware(AgentMiddleware[AgentState]):
|
||||
) -> ModelCallResult:
|
||||
return handler(self._filter_tools(request))
|
||||
|
||||
@override
|
||||
def wrap_tool_call(
|
||||
self,
|
||||
request: ToolCallRequest,
|
||||
handler: Callable[[ToolCallRequest], ToolMessage | Command],
|
||||
) -> ToolMessage | Command:
|
||||
blocked = self._blocked_tool_message(request)
|
||||
if blocked is not None:
|
||||
return blocked
|
||||
return handler(request)
|
||||
|
||||
@override
|
||||
async def awrap_model_call(
|
||||
self,
|
||||
@@ -58,3 +94,14 @@ class DeferredToolFilterMiddleware(AgentMiddleware[AgentState]):
|
||||
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
|
||||
) -> ModelCallResult:
|
||||
return await handler(self._filter_tools(request))
|
||||
|
||||
@override
|
||||
async def awrap_tool_call(
|
||||
self,
|
||||
request: ToolCallRequest,
|
||||
handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]],
|
||||
) -> ToolMessage | Command:
|
||||
blocked = self._blocked_tool_message(request)
|
||||
if blocked is not None:
|
||||
return blocked
|
||||
return await handler(request)
|
||||
|
||||
@@ -160,6 +160,8 @@ class LLMErrorHandlingMiddleware(AgentMiddleware[AgentState]):
|
||||
"APITimeoutError",
|
||||
"APIConnectionError",
|
||||
"InternalServerError",
|
||||
"ReadError", # httpx.ReadError: connection dropped mid-stream
|
||||
"RemoteProtocolError", # httpx: server closed connection unexpectedly
|
||||
}:
|
||||
return True, "transient"
|
||||
if status_code in _RETRIABLE_STATUS_CODES:
|
||||
|
||||
@@ -25,6 +25,8 @@ from langchain.agents.middleware import AgentMiddleware
|
||||
from langchain_core.messages import HumanMessage
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Defaults — can be overridden via constructor
|
||||
@@ -183,10 +185,7 @@ class LoopDetectionMiddleware(AgentMiddleware[AgentState]):
|
||||
|
||||
def _get_thread_id(self, runtime: Runtime) -> str:
|
||||
"""Extract thread_id from runtime context for per-thread tracking."""
|
||||
thread_id = runtime.context.get("thread_id") if runtime.context else None
|
||||
if thread_id:
|
||||
return thread_id
|
||||
return "default"
|
||||
return get_thread_id(runtime) or "default"
|
||||
|
||||
def _evict_if_needed(self) -> None:
|
||||
"""Evict least recently used threads if over the limit.
|
||||
|
||||
@@ -5,12 +5,12 @@ from typing import override
|
||||
|
||||
from langchain.agents import AgentState
|
||||
from langchain.agents.middleware import AgentMiddleware
|
||||
from langgraph.config import get_config
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from deerflow.agents.memory.message_processing import detect_correction, detect_reinforcement, filter_messages_for_memory
|
||||
from deerflow.agents.memory.queue import get_memory_queue
|
||||
from deerflow.config.memory_config import get_memory_config
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -57,13 +57,10 @@ class MemoryMiddleware(AgentMiddleware[MemoryMiddlewareState]):
|
||||
if not config.enabled:
|
||||
return None
|
||||
|
||||
# Get thread ID from runtime context first, then fall back to LangGraph's configurable metadata
|
||||
thread_id = runtime.context.get("thread_id") if runtime.context else None
|
||||
if thread_id is None:
|
||||
config_data = get_config()
|
||||
thread_id = config_data.get("configurable", {}).get("thread_id")
|
||||
# Resolve thread ID from the runtime or configured fallback sources
|
||||
thread_id = get_thread_id(runtime)
|
||||
if not thread_id:
|
||||
logger.debug("No thread_id in context, skipping memory update")
|
||||
logger.debug("No thread_id could be resolved from runtime/config, skipping memory update")
|
||||
return None
|
||||
|
||||
# Get messages from state
|
||||
|
||||
@@ -14,6 +14,7 @@ from langgraph.prebuilt.tool_node import ToolCallRequest
|
||||
from langgraph.types import Command
|
||||
|
||||
from deerflow.agents.thread_state import ThreadState
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -218,15 +219,7 @@ class SandboxAuditMiddleware(AgentMiddleware[ThreadState]):
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _get_thread_id(self, request: ToolCallRequest) -> str | None:
|
||||
runtime = request.runtime # ToolRuntime; may be None-like in tests
|
||||
if runtime is None:
|
||||
return None
|
||||
ctx = getattr(runtime, "context", None) or {}
|
||||
thread_id = ctx.get("thread_id") if isinstance(ctx, dict) else None
|
||||
if thread_id is None:
|
||||
cfg = getattr(runtime, "config", None) or {}
|
||||
thread_id = cfg.get("configurable", {}).get("thread_id")
|
||||
return thread_id
|
||||
return get_thread_id(request.runtime)
|
||||
|
||||
_AUDIT_COMMAND_LIMIT = 200
|
||||
|
||||
|
||||
@@ -3,16 +3,19 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from collections.abc import Collection
|
||||
from dataclasses import dataclass
|
||||
from typing import Protocol, runtime_checkable
|
||||
from typing import Any, Protocol, runtime_checkable
|
||||
|
||||
from langchain.agents import AgentState
|
||||
from langchain.agents.middleware import SummarizationMiddleware
|
||||
from langchain_core.messages import AnyMessage, RemoveMessage
|
||||
from langchain_core.messages import AIMessage, AnyMessage, RemoveMessage, ToolMessage
|
||||
from langgraph.config import get_config
|
||||
from langgraph.graph.message import REMOVE_ALL_MESSAGES
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -34,18 +37,6 @@ class BeforeSummarizationHook(Protocol):
|
||||
def __call__(self, event: SummarizationEvent) -> None: ...
|
||||
|
||||
|
||||
def _resolve_thread_id(runtime: Runtime) -> str | None:
|
||||
"""Resolve the current thread ID from runtime context or LangGraph config."""
|
||||
thread_id = runtime.context.get("thread_id") if runtime.context else None
|
||||
if thread_id is None:
|
||||
try:
|
||||
config_data = get_config()
|
||||
except RuntimeError:
|
||||
return None
|
||||
thread_id = config_data.get("configurable", {}).get("thread_id")
|
||||
return thread_id
|
||||
|
||||
|
||||
def _resolve_agent_name(runtime: Runtime) -> str | None:
|
||||
"""Resolve the current agent name from runtime context or LangGraph config."""
|
||||
agent_name = runtime.context.get("agent_name") if runtime.context else None
|
||||
@@ -58,17 +49,63 @@ def _resolve_agent_name(runtime: Runtime) -> str | None:
|
||||
return agent_name
|
||||
|
||||
|
||||
def _tool_call_path(tool_call: dict[str, Any]) -> str | None:
|
||||
"""Best-effort extraction of a file path argument from a read_file-like tool call."""
|
||||
args = tool_call.get("args") or {}
|
||||
if not isinstance(args, dict):
|
||||
return None
|
||||
for key in ("path", "file_path", "filepath"):
|
||||
value = args.get(key)
|
||||
if isinstance(value, str) and value:
|
||||
return value
|
||||
return None
|
||||
|
||||
|
||||
def _clone_ai_message(
|
||||
message: AIMessage,
|
||||
tool_calls: list[dict[str, Any]],
|
||||
*,
|
||||
content: Any | None = None,
|
||||
) -> AIMessage:
|
||||
"""Clone an AIMessage while replacing its tool_calls list and optional content."""
|
||||
update: dict[str, Any] = {"tool_calls": tool_calls}
|
||||
if content is not None:
|
||||
update["content"] = content
|
||||
return message.model_copy(update=update)
|
||||
|
||||
|
||||
@dataclass
|
||||
class _SkillBundle:
|
||||
"""Skill-related tool calls and tool results associated with one AIMessage."""
|
||||
|
||||
ai_index: int
|
||||
skill_tool_indices: tuple[int, ...]
|
||||
skill_tool_call_ids: frozenset[str]
|
||||
skill_tool_tokens: int
|
||||
skill_key: str
|
||||
|
||||
|
||||
class DeerFlowSummarizationMiddleware(SummarizationMiddleware):
|
||||
"""Summarization middleware with pre-compression hook dispatch."""
|
||||
"""Summarization middleware with pre-compression hook dispatch and skill rescue."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*args,
|
||||
skills_container_path: str | None = None,
|
||||
skill_file_read_tool_names: Collection[str] | None = None,
|
||||
before_summarization: list[BeforeSummarizationHook] | None = None,
|
||||
preserve_recent_skill_count: int = 5,
|
||||
preserve_recent_skill_tokens: int = 25_000,
|
||||
preserve_recent_skill_tokens_per_skill: int = 5_000,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
super().__init__(*args, **kwargs)
|
||||
self._skills_container_path = skills_container_path or "/mnt/skills"
|
||||
self._skill_file_read_tool_names = frozenset(skill_file_read_tool_names or {"read_file", "read", "view", "cat"})
|
||||
self._before_summarization_hooks = before_summarization or []
|
||||
self._preserve_recent_skill_count = max(0, preserve_recent_skill_count)
|
||||
self._preserve_recent_skill_tokens = max(0, preserve_recent_skill_tokens)
|
||||
self._preserve_recent_skill_tokens_per_skill = max(0, preserve_recent_skill_tokens_per_skill)
|
||||
|
||||
def before_model(self, state: AgentState, runtime: Runtime) -> dict | None:
|
||||
return self._maybe_summarize(state, runtime)
|
||||
@@ -88,7 +125,7 @@ class DeerFlowSummarizationMiddleware(SummarizationMiddleware):
|
||||
if cutoff_index <= 0:
|
||||
return None
|
||||
|
||||
messages_to_summarize, preserved_messages = self._partition_messages(messages, cutoff_index)
|
||||
messages_to_summarize, preserved_messages = self._partition_with_skill_rescue(messages, cutoff_index)
|
||||
self._fire_hooks(messages_to_summarize, preserved_messages, runtime)
|
||||
summary = self._create_summary(messages_to_summarize)
|
||||
new_messages = self._build_new_messages(summary)
|
||||
@@ -113,7 +150,7 @@ class DeerFlowSummarizationMiddleware(SummarizationMiddleware):
|
||||
if cutoff_index <= 0:
|
||||
return None
|
||||
|
||||
messages_to_summarize, preserved_messages = self._partition_messages(messages, cutoff_index)
|
||||
messages_to_summarize, preserved_messages = self._partition_with_skill_rescue(messages, cutoff_index)
|
||||
self._fire_hooks(messages_to_summarize, preserved_messages, runtime)
|
||||
summary = await self._acreate_summary(messages_to_summarize)
|
||||
new_messages = self._build_new_messages(summary)
|
||||
@@ -126,6 +163,155 @@ class DeerFlowSummarizationMiddleware(SummarizationMiddleware):
|
||||
]
|
||||
}
|
||||
|
||||
def _partition_with_skill_rescue(
|
||||
self,
|
||||
messages: list[AnyMessage],
|
||||
cutoff_index: int,
|
||||
) -> tuple[list[AnyMessage], list[AnyMessage]]:
|
||||
"""Partition like the parent, then rescue recently-loaded skill bundles."""
|
||||
to_summarize, preserved = self._partition_messages(messages, cutoff_index)
|
||||
|
||||
if self._preserve_recent_skill_count == 0 or self._preserve_recent_skill_tokens == 0 or not to_summarize:
|
||||
return to_summarize, preserved
|
||||
|
||||
try:
|
||||
bundles = self._find_skill_bundles(to_summarize, self._skills_container_path)
|
||||
except Exception:
|
||||
logger.exception("Skill-preserving summarization rescue failed; falling back to default partition")
|
||||
return to_summarize, preserved
|
||||
|
||||
if not bundles:
|
||||
return to_summarize, preserved
|
||||
|
||||
rescue_bundles = self._select_bundles_to_rescue(bundles)
|
||||
if not rescue_bundles:
|
||||
return to_summarize, preserved
|
||||
|
||||
bundles_by_ai_index = {bundle.ai_index: bundle for bundle in rescue_bundles}
|
||||
rescue_tool_indices = {idx for bundle in rescue_bundles for idx in bundle.skill_tool_indices}
|
||||
rescued: list[AnyMessage] = []
|
||||
remaining: list[AnyMessage] = []
|
||||
for i, msg in enumerate(to_summarize):
|
||||
bundle = bundles_by_ai_index.get(i)
|
||||
if bundle is not None and isinstance(msg, AIMessage):
|
||||
rescued_tool_calls = [tc for tc in msg.tool_calls if tc.get("id") in bundle.skill_tool_call_ids]
|
||||
remaining_tool_calls = [tc for tc in msg.tool_calls if tc.get("id") not in bundle.skill_tool_call_ids]
|
||||
|
||||
if rescued_tool_calls:
|
||||
rescued.append(_clone_ai_message(msg, rescued_tool_calls, content=""))
|
||||
if remaining_tool_calls or msg.content:
|
||||
remaining.append(_clone_ai_message(msg, remaining_tool_calls))
|
||||
continue
|
||||
|
||||
if i in rescue_tool_indices:
|
||||
rescued.append(msg)
|
||||
continue
|
||||
|
||||
remaining.append(msg)
|
||||
|
||||
return remaining, rescued + preserved
|
||||
|
||||
def _find_skill_bundles(
|
||||
self,
|
||||
messages: list[AnyMessage],
|
||||
skills_root: str,
|
||||
) -> list[_SkillBundle]:
|
||||
"""Locate AIMessage + paired ToolMessage groups that load skill files."""
|
||||
bundles: list[_SkillBundle] = []
|
||||
n = len(messages)
|
||||
i = 0
|
||||
while i < n:
|
||||
msg = messages[i]
|
||||
if not (isinstance(msg, AIMessage) and msg.tool_calls):
|
||||
i += 1
|
||||
continue
|
||||
|
||||
tool_calls = list(msg.tool_calls)
|
||||
skill_paths_by_id: dict[str, str] = {}
|
||||
for tc in tool_calls:
|
||||
if self._is_skill_tool_call(tc, skills_root):
|
||||
tc_id = tc.get("id")
|
||||
path = _tool_call_path(tc)
|
||||
if tc_id and path:
|
||||
skill_paths_by_id[tc_id] = path
|
||||
|
||||
if not skill_paths_by_id:
|
||||
i += 1
|
||||
continue
|
||||
|
||||
skill_tool_tokens = 0
|
||||
skill_key_parts: list[str] = []
|
||||
skill_tool_indices: list[int] = []
|
||||
matched_skill_call_ids: set[str] = set()
|
||||
|
||||
j = i + 1
|
||||
while j < n and isinstance(messages[j], ToolMessage):
|
||||
j += 1
|
||||
|
||||
for k in range(i + 1, j):
|
||||
tool_msg = messages[k]
|
||||
if isinstance(tool_msg, ToolMessage) and tool_msg.tool_call_id in skill_paths_by_id:
|
||||
skill_tool_tokens += self.token_counter([tool_msg])
|
||||
skill_key_parts.append(skill_paths_by_id[tool_msg.tool_call_id])
|
||||
skill_tool_indices.append(k)
|
||||
matched_skill_call_ids.add(tool_msg.tool_call_id)
|
||||
|
||||
if not skill_tool_indices:
|
||||
i = j
|
||||
continue
|
||||
|
||||
bundles.append(
|
||||
_SkillBundle(
|
||||
ai_index=i,
|
||||
skill_tool_indices=tuple(skill_tool_indices),
|
||||
skill_tool_call_ids=frozenset(matched_skill_call_ids),
|
||||
skill_tool_tokens=skill_tool_tokens,
|
||||
skill_key="|".join(sorted(skill_key_parts)),
|
||||
)
|
||||
)
|
||||
i = j
|
||||
|
||||
return bundles
|
||||
|
||||
def _select_bundles_to_rescue(self, bundles: list[_SkillBundle]) -> list[_SkillBundle]:
|
||||
"""Pick bundles to keep, walking newest-first under count/token budgets."""
|
||||
selected: list[_SkillBundle] = []
|
||||
if not bundles:
|
||||
return selected
|
||||
|
||||
seen_skill_keys: set[str] = set()
|
||||
total_tokens = 0
|
||||
kept = 0
|
||||
|
||||
for bundle in reversed(bundles):
|
||||
if kept >= self._preserve_recent_skill_count:
|
||||
break
|
||||
if bundle.skill_key in seen_skill_keys:
|
||||
continue
|
||||
if bundle.skill_tool_tokens > self._preserve_recent_skill_tokens_per_skill:
|
||||
continue
|
||||
if total_tokens + bundle.skill_tool_tokens > self._preserve_recent_skill_tokens:
|
||||
continue
|
||||
|
||||
selected.append(bundle)
|
||||
total_tokens += bundle.skill_tool_tokens
|
||||
kept += 1
|
||||
seen_skill_keys.add(bundle.skill_key)
|
||||
|
||||
selected.reverse()
|
||||
return selected
|
||||
|
||||
def _is_skill_tool_call(self, tool_call: dict[str, Any], skills_root: str) -> bool:
|
||||
"""Return True when ``tool_call`` reads a file under the configured skills root."""
|
||||
name = tool_call.get("name") or ""
|
||||
if name not in self._skill_file_read_tool_names:
|
||||
return False
|
||||
path = _tool_call_path(tool_call)
|
||||
if not path:
|
||||
return False
|
||||
normalized_root = skills_root.rstrip("/")
|
||||
return path == normalized_root or path.startswith(normalized_root + "/")
|
||||
|
||||
def _fire_hooks(
|
||||
self,
|
||||
messages_to_summarize: list[AnyMessage],
|
||||
@@ -138,7 +324,7 @@ class DeerFlowSummarizationMiddleware(SummarizationMiddleware):
|
||||
event = SummarizationEvent(
|
||||
messages_to_summarize=tuple(messages_to_summarize),
|
||||
preserved_messages=tuple(preserved_messages),
|
||||
thread_id=_resolve_thread_id(runtime),
|
||||
thread_id=get_thread_id(runtime),
|
||||
agent_name=_resolve_agent_name(runtime),
|
||||
runtime=runtime,
|
||||
)
|
||||
|
||||
@@ -3,11 +3,11 @@ from typing import NotRequired, override
|
||||
|
||||
from langchain.agents import AgentState
|
||||
from langchain.agents.middleware import AgentMiddleware
|
||||
from langgraph.config import get_config
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from deerflow.agents.thread_state import ThreadDataState
|
||||
from deerflow.config.paths import Paths, get_paths
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -75,11 +75,7 @@ class ThreadDataMiddleware(AgentMiddleware[ThreadDataMiddlewareState]):
|
||||
|
||||
@override
|
||||
def before_agent(self, state: ThreadDataMiddlewareState, runtime: Runtime) -> dict | None:
|
||||
context = runtime.context or {}
|
||||
thread_id = context.get("thread_id")
|
||||
if thread_id is None:
|
||||
config = get_config()
|
||||
thread_id = config.get("configurable", {}).get("thread_id")
|
||||
thread_id = get_thread_id(runtime)
|
||||
|
||||
if thread_id is None:
|
||||
raise ValueError("Thread ID is required in runtime context or config.configurable")
|
||||
|
||||
@@ -127,7 +127,7 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
|
||||
model = create_chat_model(name=config.model_name, thinking_enabled=False)
|
||||
else:
|
||||
model = create_chat_model(thinking_enabled=False)
|
||||
response = await model.ainvoke(prompt)
|
||||
response = await model.ainvoke(prompt, config={"run_name": "title_agent"})
|
||||
title = self._parse_title(response.content)
|
||||
if title:
|
||||
return {"title": title}
|
||||
|
||||
@@ -11,6 +11,7 @@ from langgraph.runtime import Runtime
|
||||
|
||||
from deerflow.config.paths import Paths, get_paths
|
||||
from deerflow.utils.file_conversion import extract_outline
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -213,14 +214,7 @@ class UploadsMiddleware(AgentMiddleware[UploadsMiddlewareState]):
|
||||
return None
|
||||
|
||||
# Resolve uploads directory for existence checks
|
||||
thread_id = (runtime.context or {}).get("thread_id")
|
||||
if thread_id is None:
|
||||
try:
|
||||
from langgraph.config import get_config
|
||||
|
||||
thread_id = get_config().get("configurable", {}).get("thread_id")
|
||||
except RuntimeError:
|
||||
pass # get_config() raises outside a runnable context (e.g. unit tests)
|
||||
thread_id = get_thread_id(runtime)
|
||||
uploads_dir = self._paths.sandbox_uploads_dir(thread_id) if thread_id else None
|
||||
|
||||
# Get newly uploaded files from the current message's additional_kwargs.files
|
||||
|
||||
@@ -38,6 +38,6 @@ class JinaClient:
|
||||
|
||||
return response.text
|
||||
except Exception as e:
|
||||
error_message = f"Request to Jina API failed: {str(e)}"
|
||||
logger.exception(error_message)
|
||||
error_message = f"Request to Jina API failed: {type(e).__name__}: {e}"
|
||||
logger.warning(error_message)
|
||||
return f"Error: {error_message}"
|
||||
|
||||
@@ -25,6 +25,47 @@ class SubagentOverrideConfig(BaseModel):
|
||||
min_length=1,
|
||||
description="Model name for this subagent (None = inherit from parent agent)",
|
||||
)
|
||||
skills: list[str] | None = Field(
|
||||
default=None,
|
||||
description="Skill names whitelist for this subagent (None = inherit all enabled skills, [] = no skills)",
|
||||
)
|
||||
|
||||
|
||||
class CustomSubagentConfig(BaseModel):
|
||||
"""User-defined subagent type declared in config.yaml."""
|
||||
|
||||
description: str = Field(
|
||||
description="When the lead agent should delegate to this subagent",
|
||||
)
|
||||
system_prompt: str = Field(
|
||||
description="System prompt that guides the subagent's behavior",
|
||||
)
|
||||
tools: list[str] | None = Field(
|
||||
default=None,
|
||||
description="Tool names whitelist (None = inherit all tools from parent)",
|
||||
)
|
||||
disallowed_tools: list[str] | None = Field(
|
||||
default_factory=lambda: ["task", "ask_clarification", "present_files"],
|
||||
description="Tool names to deny",
|
||||
)
|
||||
skills: list[str] | None = Field(
|
||||
default=None,
|
||||
description="Skill names whitelist (None = inherit all enabled skills, [] = no skills)",
|
||||
)
|
||||
model: str = Field(
|
||||
default="inherit",
|
||||
description="Model to use - 'inherit' uses parent's model",
|
||||
)
|
||||
max_turns: int = Field(
|
||||
default=50,
|
||||
ge=1,
|
||||
description="Maximum number of agent turns before stopping",
|
||||
)
|
||||
timeout_seconds: int = Field(
|
||||
default=900,
|
||||
ge=1,
|
||||
description="Maximum execution time in seconds",
|
||||
)
|
||||
|
||||
|
||||
class SubagentsAppConfig(BaseModel):
|
||||
@@ -44,6 +85,10 @@ class SubagentsAppConfig(BaseModel):
|
||||
default_factory=dict,
|
||||
description="Per-agent configuration overrides keyed by agent name",
|
||||
)
|
||||
custom_agents: dict[str, CustomSubagentConfig] = Field(
|
||||
default_factory=dict,
|
||||
description="User-defined subagent types keyed by agent name",
|
||||
)
|
||||
|
||||
def get_timeout_for(self, agent_name: str) -> int:
|
||||
"""Get the effective timeout for a specific agent.
|
||||
@@ -82,6 +127,20 @@ class SubagentsAppConfig(BaseModel):
|
||||
return self.max_turns
|
||||
return builtin_default
|
||||
|
||||
def get_skills_for(self, agent_name: str) -> list[str] | None:
|
||||
"""Get the skills override for a specific agent.
|
||||
|
||||
Args:
|
||||
agent_name: The name of the subagent.
|
||||
|
||||
Returns:
|
||||
Skill names whitelist if overridden, None otherwise (subagent will inherit all enabled skills).
|
||||
"""
|
||||
override = self.agents.get(agent_name)
|
||||
if override is not None and override.skills is not None:
|
||||
return override.skills
|
||||
return None
|
||||
|
||||
|
||||
_subagents_config: SubagentsAppConfig = SubagentsAppConfig()
|
||||
|
||||
@@ -105,15 +164,20 @@ def load_subagents_config_from_dict(config_dict: dict) -> None:
|
||||
parts.append(f"max_turns={override.max_turns}")
|
||||
if override.model is not None:
|
||||
parts.append(f"model={override.model}")
|
||||
if override.skills is not None:
|
||||
parts.append(f"skills={override.skills}")
|
||||
if parts:
|
||||
overrides_summary[name] = ", ".join(parts)
|
||||
|
||||
if overrides_summary:
|
||||
custom_agents_names = list(_subagents_config.custom_agents.keys())
|
||||
|
||||
if overrides_summary or custom_agents_names:
|
||||
logger.info(
|
||||
"Subagents config loaded: default timeout=%ss, default max_turns=%s, per-agent overrides=%s",
|
||||
"Subagents config loaded: default timeout=%ss, default max_turns=%s, per-agent overrides=%s, custom_agents=%s",
|
||||
_subagents_config.timeout_seconds,
|
||||
_subagents_config.max_turns,
|
||||
overrides_summary,
|
||||
overrides_summary or "none",
|
||||
custom_agents_names or "none",
|
||||
)
|
||||
else:
|
||||
logger.info(
|
||||
|
||||
@@ -51,6 +51,25 @@ class SummarizationConfig(BaseModel):
|
||||
default=None,
|
||||
description="Custom prompt template for generating summaries. If not provided, uses the default LangChain prompt.",
|
||||
)
|
||||
preserve_recent_skill_count: int = Field(
|
||||
default=5,
|
||||
ge=0,
|
||||
description="Number of most-recently-loaded skill files to exclude from summarization. Set to 0 to disable skill preservation.",
|
||||
)
|
||||
preserve_recent_skill_tokens: int = Field(
|
||||
default=25000,
|
||||
ge=0,
|
||||
description="Total token budget reserved for recently-loaded skill files that must be preserved across summarization.",
|
||||
)
|
||||
preserve_recent_skill_tokens_per_skill: int = Field(
|
||||
default=5000,
|
||||
ge=0,
|
||||
description="Per-skill token cap when preserving skill files across summarization. Skill reads above this size are not rescued.",
|
||||
)
|
||||
skill_file_read_tool_names: list[str] = Field(
|
||||
default_factory=lambda: ["read_file", "read", "view", "cat"],
|
||||
description="Tool names treated as skill file reads when preserving recently-loaded skills across summarization.",
|
||||
)
|
||||
|
||||
|
||||
# Global configuration instance
|
||||
|
||||
@@ -12,6 +12,7 @@ from langchain_core.tools import BaseTool
|
||||
from deerflow.config.extensions_config import ExtensionsConfig
|
||||
from deerflow.mcp.client import build_servers_config
|
||||
from deerflow.mcp.oauth import build_oauth_tool_interceptor, get_initial_oauth_headers
|
||||
from deerflow.reflection import resolve_variable
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -95,6 +96,27 @@ async def get_mcp_tools() -> list[BaseTool]:
|
||||
if oauth_interceptor is not None:
|
||||
tool_interceptors.append(oauth_interceptor)
|
||||
|
||||
# Load custom interceptors declared in extensions_config.json
|
||||
# Format: "mcpInterceptors": ["pkg.module:builder_func", ...]
|
||||
raw_interceptor_paths = (extensions_config.model_extra or {}).get("mcpInterceptors")
|
||||
if isinstance(raw_interceptor_paths, str):
|
||||
raw_interceptor_paths = [raw_interceptor_paths]
|
||||
elif not isinstance(raw_interceptor_paths, list):
|
||||
if raw_interceptor_paths is not None:
|
||||
logger.warning(f"mcpInterceptors must be a list of strings, got {type(raw_interceptor_paths).__name__}; skipping")
|
||||
raw_interceptor_paths = []
|
||||
for interceptor_path in raw_interceptor_paths:
|
||||
try:
|
||||
builder = resolve_variable(interceptor_path)
|
||||
interceptor = builder()
|
||||
if callable(interceptor):
|
||||
tool_interceptors.append(interceptor)
|
||||
logger.info(f"Loaded MCP interceptor: {interceptor_path}")
|
||||
elif interceptor is not None:
|
||||
logger.warning(f"Builder {interceptor_path} returned non-callable {type(interceptor).__name__}; skipping")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to load MCP interceptor {interceptor_path}: {e}", exc_info=True)
|
||||
|
||||
client = MultiServerMCPClient(servers_config, tool_interceptors=tool_interceptors, tool_name_prefix=True)
|
||||
|
||||
# Get all tools from all servers
|
||||
|
||||
@@ -190,23 +190,33 @@ class ClaudeChatModel(ChatAnthropic):
|
||||
)
|
||||
|
||||
def _apply_prompt_caching(self, payload: dict) -> None:
|
||||
"""Apply ephemeral cache_control to system and recent messages."""
|
||||
# Cache system messages
|
||||
"""Apply ephemeral cache_control to system, recent messages, and last tool definition.
|
||||
|
||||
Uses a budget of MAX_CACHE_BREAKPOINTS (4) breakpoints — the hard limit
|
||||
enforced by both the Anthropic API and AWS Bedrock. Breakpoints are
|
||||
placed on the *last* eligible blocks because later breakpoints cover a
|
||||
larger prefix and yield better cache hit rates.
|
||||
"""
|
||||
MAX_CACHE_BREAKPOINTS = 4
|
||||
|
||||
# Collect candidate blocks in document order:
|
||||
# 1. system text blocks
|
||||
# 2. content blocks of the last prompt_cache_size messages
|
||||
# 3. the last tool definition
|
||||
candidates: list[dict] = []
|
||||
|
||||
# 1. System blocks
|
||||
system = payload.get("system")
|
||||
if system and isinstance(system, list):
|
||||
for block in system:
|
||||
if isinstance(block, dict) and block.get("type") == "text":
|
||||
block["cache_control"] = {"type": "ephemeral"}
|
||||
candidates.append(block)
|
||||
elif system and isinstance(system, str):
|
||||
payload["system"] = [
|
||||
{
|
||||
"type": "text",
|
||||
"text": system,
|
||||
"cache_control": {"type": "ephemeral"},
|
||||
}
|
||||
]
|
||||
new_block: dict = {"type": "text", "text": system}
|
||||
payload["system"] = [new_block]
|
||||
candidates.append(new_block)
|
||||
|
||||
# Cache recent messages
|
||||
# 2. Recent message blocks
|
||||
messages = payload.get("messages", [])
|
||||
cache_start = max(0, len(messages) - self.prompt_cache_size)
|
||||
for i in range(cache_start, len(messages)):
|
||||
@@ -217,20 +227,21 @@ class ClaudeChatModel(ChatAnthropic):
|
||||
if isinstance(content, list):
|
||||
for block in content:
|
||||
if isinstance(block, dict):
|
||||
block["cache_control"] = {"type": "ephemeral"}
|
||||
candidates.append(block)
|
||||
elif isinstance(content, str) and content:
|
||||
msg["content"] = [
|
||||
{
|
||||
"type": "text",
|
||||
"text": content,
|
||||
"cache_control": {"type": "ephemeral"},
|
||||
}
|
||||
]
|
||||
new_block = {"type": "text", "text": content}
|
||||
msg["content"] = [new_block]
|
||||
candidates.append(new_block)
|
||||
|
||||
# Cache the last tool definition
|
||||
# 3. Last tool definition
|
||||
tools = payload.get("tools", [])
|
||||
if tools and isinstance(tools[-1], dict):
|
||||
tools[-1]["cache_control"] = {"type": "ephemeral"}
|
||||
candidates.append(tools[-1])
|
||||
|
||||
# Apply cache_control only to the last MAX_CACHE_BREAKPOINTS candidates
|
||||
# to stay within the API limit.
|
||||
for block in candidates[-MAX_CACHE_BREAKPOINTS:]:
|
||||
block["cache_control"] = {"type": "ephemeral"}
|
||||
|
||||
def _apply_thinking_budget(self, payload: dict) -> None:
|
||||
"""Auto-allocate thinking budget (80% of max_tokens)."""
|
||||
|
||||
@@ -30,6 +30,22 @@ def _vllm_disable_chat_template_kwargs(chat_template_kwargs: dict) -> dict:
|
||||
return disable_kwargs
|
||||
|
||||
|
||||
def _enable_stream_usage_by_default(model_use_path: str, model_settings_from_config: dict) -> None:
|
||||
"""Enable stream usage for OpenAI-compatible models unless explicitly configured.
|
||||
|
||||
LangChain only auto-enables ``stream_usage`` for OpenAI models when no custom
|
||||
base URL or client is configured. DeerFlow frequently uses OpenAI-compatible
|
||||
gateways, so token usage tracking would otherwise stay empty and the
|
||||
TokenUsageMiddleware would have nothing to log.
|
||||
"""
|
||||
if model_use_path != "langchain_openai:ChatOpenAI":
|
||||
return
|
||||
if "stream_usage" in model_settings_from_config:
|
||||
return
|
||||
if "base_url" in model_settings_from_config or "openai_api_base" in model_settings_from_config:
|
||||
model_settings_from_config["stream_usage"] = True
|
||||
|
||||
|
||||
def create_chat_model(name: str | None = None, thinking_enabled: bool = False, **kwargs) -> BaseChatModel:
|
||||
"""Create a chat model instance from the config.
|
||||
|
||||
@@ -97,6 +113,8 @@ def create_chat_model(name: str | None = None, thinking_enabled: bool = False, *
|
||||
kwargs.pop("reasoning_effort", None)
|
||||
model_settings_from_config.pop("reasoning_effort", None)
|
||||
|
||||
_enable_stream_usage_by_default(model_config.use, model_settings_from_config)
|
||||
|
||||
# For Codex Responses API models: map thinking mode to reasoning_effort
|
||||
from deerflow.models.openai_codex_provider import CodexChatModel
|
||||
|
||||
@@ -113,6 +131,12 @@ def create_chat_model(name: str | None = None, thinking_enabled: bool = False, *
|
||||
elif "reasoning_effort" not in model_settings_from_config:
|
||||
model_settings_from_config["reasoning_effort"] = "medium"
|
||||
|
||||
# For MindIE models: enforce conservative retry defaults.
|
||||
# Timeout normalization is handled inside MindIEChatModel itself.
|
||||
if getattr(model_class, "__name__", "") == "MindIEChatModel":
|
||||
# Enforce max_retries constraint to prevent cascading timeouts.
|
||||
model_settings_from_config["max_retries"] = model_settings_from_config.get("max_retries", 1)
|
||||
|
||||
model_instance = model_class(**{**model_settings_from_config, **kwargs})
|
||||
|
||||
callbacks = build_tracing_callbacks()
|
||||
|
||||
@@ -0,0 +1,237 @@
|
||||
import ast
|
||||
import json
|
||||
import re
|
||||
import uuid
|
||||
from collections.abc import Iterator
|
||||
|
||||
import httpx
|
||||
from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage, ToolMessage
|
||||
from langchain_core.outputs import ChatGenerationChunk, ChatResult
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
|
||||
def _fix_messages(messages: list) -> list:
|
||||
"""Sanitize incoming messages for MindIE compatibility.
|
||||
|
||||
MindIE's chat template may fail to parse LangChain's native tool_calls
|
||||
or ToolMessage roles, resulting in 0-token generation errors. This function
|
||||
flattens multi-modal list contents into strings and converts tool-related
|
||||
messages into raw text with XML tags expected by the underlying model.
|
||||
"""
|
||||
fixed = []
|
||||
for msg in messages:
|
||||
# Flatten content if it's a list of blocks
|
||||
if isinstance(msg.content, list):
|
||||
parts = []
|
||||
for block in msg.content:
|
||||
if isinstance(block, str):
|
||||
parts.append(block)
|
||||
elif isinstance(block, dict) and block.get("type") == "text":
|
||||
parts.append(block.get("text", ""))
|
||||
text = "".join(parts)
|
||||
else:
|
||||
text = msg.content or ""
|
||||
|
||||
# Convert AIMessage with tool_calls to raw XML text format
|
||||
if isinstance(msg, AIMessage) and getattr(msg, "tool_calls", []):
|
||||
xml_parts = []
|
||||
for tool in msg.tool_calls:
|
||||
args_xml = " ".join(f"<parameter={k}>{json.dumps(v, ensure_ascii=False)}</parameter>" for k, v in tool.get("args", {}).items())
|
||||
xml_parts.append(f"<tool_call> <function={tool['name']}> {args_xml} </function> </tool_call>")
|
||||
full_text = f"{text}\n" + "\n".join(xml_parts) if text else "\n".join(xml_parts)
|
||||
fixed.append(AIMessage(content=full_text.strip() or " "))
|
||||
continue
|
||||
|
||||
# Wrap tool execution results in XML tags and convert to HumanMessage
|
||||
if isinstance(msg, ToolMessage):
|
||||
tool_result_text = f"<tool_response>\n{text}\n</tool_response>"
|
||||
fixed.append(HumanMessage(content=tool_result_text))
|
||||
continue
|
||||
|
||||
# Fallback to prevent completely empty message content
|
||||
if not text.strip():
|
||||
text = " "
|
||||
|
||||
fixed.append(msg.model_copy(update={"content": text}))
|
||||
|
||||
return fixed
|
||||
|
||||
|
||||
def _parse_xml_tool_call_to_dict(content: str) -> tuple[str, list[dict]]:
|
||||
"""Parse XML-style tool calls from model output into LangChain dicts.
|
||||
|
||||
Args:
|
||||
content: The raw text output from the model.
|
||||
|
||||
Returns:
|
||||
A tuple containing the cleaned text (with XML blocks removed) and
|
||||
a list of tool call dictionaries formatted for LangChain.
|
||||
"""
|
||||
if not isinstance(content, str) or "<tool_call>" not in content:
|
||||
return content, []
|
||||
|
||||
tool_calls = []
|
||||
clean_parts: list[str] = []
|
||||
cursor = 0
|
||||
for start, end, inner_content in _iter_tool_call_blocks(content):
|
||||
clean_parts.append(content[cursor:start])
|
||||
cursor = end
|
||||
|
||||
func_match = re.search(r"<function=([^>]+)>", inner_content)
|
||||
if not func_match:
|
||||
continue
|
||||
function_name = func_match.group(1).strip()
|
||||
|
||||
args = {}
|
||||
param_pattern = re.compile(r"<parameter=([^>]+)>(.*?)</parameter>", re.DOTALL)
|
||||
for param_match in param_pattern.finditer(inner_content):
|
||||
key = param_match.group(1).strip()
|
||||
raw_value = param_match.group(2).strip()
|
||||
|
||||
# Attempt to deserialize string values into native Python types
|
||||
# to satisfy downstream Pydantic validation.
|
||||
parsed_value = raw_value
|
||||
if raw_value.startswith(("[", "{")) or raw_value in ("true", "false", "null") or raw_value.isdigit():
|
||||
try:
|
||||
parsed_value = json.loads(raw_value)
|
||||
except json.JSONDecodeError:
|
||||
try:
|
||||
parsed_value = ast.literal_eval(raw_value)
|
||||
except (ValueError, SyntaxError):
|
||||
pass
|
||||
|
||||
args[key] = parsed_value
|
||||
|
||||
tool_calls.append({"name": function_name, "args": args, "id": f"call_{uuid.uuid4().hex[:10]}"})
|
||||
clean_parts.append(content[cursor:])
|
||||
|
||||
return "".join(clean_parts).strip(), tool_calls
|
||||
|
||||
|
||||
def _iter_tool_call_blocks(content: str) -> Iterator[tuple[int, int, str]]:
|
||||
"""Iterate `<tool_call>...</tool_call>` blocks and tolerate nesting."""
|
||||
token_pattern = re.compile(r"</?tool_call>")
|
||||
depth = 0
|
||||
block_start = -1
|
||||
|
||||
for match in token_pattern.finditer(content):
|
||||
token = match.group(0)
|
||||
if token == "<tool_call>":
|
||||
if depth == 0:
|
||||
block_start = match.start()
|
||||
depth += 1
|
||||
continue
|
||||
|
||||
if depth == 0:
|
||||
continue
|
||||
|
||||
depth -= 1
|
||||
if depth == 0 and block_start != -1:
|
||||
block_end = match.end()
|
||||
inner_start = block_start + len("<tool_call>")
|
||||
inner_end = match.start()
|
||||
yield block_start, block_end, content[inner_start:inner_end]
|
||||
block_start = -1
|
||||
|
||||
|
||||
def _decode_escaped_newlines_outside_fences(content: str) -> str:
|
||||
"""Decode literal `\\n` outside fenced code blocks."""
|
||||
if "\\n" not in content:
|
||||
return content
|
||||
|
||||
parts = re.split(r"(```[\s\S]*?```)", content)
|
||||
for idx, part in enumerate(parts):
|
||||
if part.startswith("```"):
|
||||
continue
|
||||
parts[idx] = part.replace("\\n", "\n")
|
||||
return "".join(parts)
|
||||
|
||||
|
||||
class MindIEChatModel(ChatOpenAI):
|
||||
"""Chat model adapter for MindIE engine.
|
||||
|
||||
Addresses compatibility issues including:
|
||||
- Flattening multimodal list contents to strings.
|
||||
- Intercepting and parsing hardcoded XML tool calls into LangChain standard.
|
||||
- Handling stream=True dropping choices when tools are present by falling back
|
||||
to non-streaming generation and yielding simulated chunks.
|
||||
- Fixing over-escaped newline characters from gateway responses.
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
"""Normalize timeout kwargs without creating long-lived clients."""
|
||||
connect_timeout = kwargs.pop("connect_timeout", 30.0)
|
||||
read_timeout = kwargs.pop("read_timeout", 900.0)
|
||||
write_timeout = kwargs.pop("write_timeout", 60.0)
|
||||
pool_timeout = kwargs.pop("pool_timeout", 30.0)
|
||||
|
||||
kwargs.setdefault(
|
||||
"timeout",
|
||||
httpx.Timeout(
|
||||
connect=connect_timeout,
|
||||
read=read_timeout,
|
||||
write=write_timeout,
|
||||
pool=pool_timeout,
|
||||
),
|
||||
)
|
||||
super().__init__(**kwargs)
|
||||
|
||||
def _patch_result_with_tools(self, result: ChatResult) -> ChatResult:
|
||||
"""Apply post-generation fixes to the model result."""
|
||||
for gen in result.generations:
|
||||
msg = gen.message
|
||||
|
||||
if isinstance(msg.content, str):
|
||||
# Keep escaped newlines inside fenced code blocks untouched.
|
||||
msg.content = _decode_escaped_newlines_outside_fences(msg.content)
|
||||
|
||||
if "<tool_call>" in msg.content:
|
||||
clean_content, extracted_tools = _parse_xml_tool_call_to_dict(msg.content)
|
||||
|
||||
if extracted_tools:
|
||||
msg.content = clean_content
|
||||
if getattr(msg, "tool_calls", None) is None:
|
||||
msg.tool_calls = []
|
||||
msg.tool_calls.extend(extracted_tools)
|
||||
return result
|
||||
|
||||
def _generate(self, messages, stop=None, run_manager=None, **kwargs):
|
||||
result = super()._generate(_fix_messages(messages), stop=stop, run_manager=run_manager, **kwargs)
|
||||
return self._patch_result_with_tools(result)
|
||||
|
||||
async def _agenerate(self, messages, stop=None, run_manager=None, **kwargs):
|
||||
result = await super()._agenerate(_fix_messages(messages), stop=stop, run_manager=run_manager, **kwargs)
|
||||
return self._patch_result_with_tools(result)
|
||||
|
||||
async def _astream(self, messages, stop=None, run_manager=None, **kwargs):
|
||||
# Route standard queries to native streaming for lower TTFB
|
||||
if not kwargs.get("tools"):
|
||||
async for chunk in super()._astream(_fix_messages(messages), stop=stop, run_manager=run_manager, **kwargs):
|
||||
if isinstance(chunk.message.content, str):
|
||||
chunk.message.content = _decode_escaped_newlines_outside_fences(chunk.message.content)
|
||||
yield chunk
|
||||
return
|
||||
|
||||
# Fallback for tool-enabled requests:
|
||||
# MindIE currently drops choices when stream=True and tools are present.
|
||||
# We await the full generation and yield chunks to simulate streaming.
|
||||
result = await self._agenerate(messages, stop=stop, run_manager=run_manager, **kwargs)
|
||||
|
||||
for gen in result.generations:
|
||||
msg = gen.message
|
||||
content = msg.content
|
||||
standard_tool_calls = getattr(msg, "tool_calls", [])
|
||||
|
||||
# Yield text in chunks to allow downstream UI/Markdown parsers to render smoothly
|
||||
if isinstance(content, str) and content:
|
||||
chunk_size = 15
|
||||
for i in range(0, len(content), chunk_size):
|
||||
chunk_text = content[i : i + chunk_size]
|
||||
chunk_msg = AIMessageChunk(content=chunk_text, id=msg.id, response_metadata=msg.response_metadata if i == 0 else {})
|
||||
yield ChatGenerationChunk(message=chunk_msg, generation_info=gen.generation_info if i == 0 else None)
|
||||
|
||||
if standard_tool_calls:
|
||||
yield ChatGenerationChunk(message=AIMessageChunk(content="", id=msg.id, tool_calls=standard_tool_calls, invalid_tool_calls=getattr(msg, "invalid_tool_calls", [])))
|
||||
else:
|
||||
chunk_msg = AIMessageChunk(content=content, id=msg.id, tool_calls=standard_tool_calls, invalid_tool_calls=getattr(msg, "invalid_tool_calls", []))
|
||||
yield ChatGenerationChunk(message=chunk_msg, generation_info=gen.generation_info)
|
||||
@@ -288,10 +288,10 @@ class LocalSandbox(Sandbox):
|
||||
timeout=600,
|
||||
)
|
||||
else:
|
||||
args = [shell, "-c", resolved_command]
|
||||
result = subprocess.run(
|
||||
resolved_command,
|
||||
executable=shell,
|
||||
shell=True,
|
||||
args,
|
||||
shell=False,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=600,
|
||||
|
||||
@@ -7,6 +7,7 @@ from langgraph.runtime import Runtime
|
||||
|
||||
from deerflow.agents.thread_state import SandboxState, ThreadDataState
|
||||
from deerflow.sandbox import get_sandbox_provider
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -56,7 +57,7 @@ class SandboxMiddleware(AgentMiddleware[SandboxMiddlewareState]):
|
||||
|
||||
# Eager initialization (original behavior)
|
||||
if "sandbox" not in state or state["sandbox"] is None:
|
||||
thread_id = (runtime.context or {}).get("thread_id")
|
||||
thread_id = get_thread_id(runtime)
|
||||
if thread_id is None:
|
||||
return super().before_agent(state, runtime)
|
||||
sandbox_id = self._acquire_sandbox(thread_id)
|
||||
|
||||
@@ -19,6 +19,7 @@ from deerflow.sandbox.sandbox import Sandbox
|
||||
from deerflow.sandbox.sandbox_provider import get_sandbox_provider
|
||||
from deerflow.sandbox.search import GrepMatch
|
||||
from deerflow.sandbox.security import LOCAL_HOST_BASH_DISABLED_MESSAGE, is_host_bash_allowed
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
_ABSOLUTE_PATH_PATTERN = re.compile(r"(?<![:\w])(?<!:/)/(?:[^\s\"'`;&|<>()]+)")
|
||||
_FILE_URL_PATTERN = re.compile(r"\bfile://\S+", re.IGNORECASE)
|
||||
@@ -851,11 +852,9 @@ def ensure_sandbox_initialized(runtime: ToolRuntime[ContextT, ThreadState] | Non
|
||||
# Sandbox was released, fall through to acquire new one
|
||||
|
||||
# Lazy acquisition: get thread_id and acquire sandbox
|
||||
thread_id = runtime.context.get("thread_id") if runtime.context else None
|
||||
thread_id = get_thread_id(runtime)
|
||||
if thread_id is None:
|
||||
thread_id = runtime.config.get("configurable", {}).get("thread_id") if runtime.config else None
|
||||
if thread_id is None:
|
||||
raise SandboxRuntimeError("Thread ID not available in runtime context")
|
||||
raise SandboxRuntimeError("Thread ID not available in runtime context, runtime config, or LangGraph config")
|
||||
|
||||
provider = get_sandbox_provider()
|
||||
sandbox_id = provider.acquire(thread_id)
|
||||
|
||||
@@ -2,21 +2,24 @@ import logging
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
import yaml
|
||||
|
||||
from .types import Skill
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def parse_skill_file(skill_file: Path, category: str, relative_path: Path | None = None) -> Skill | None:
|
||||
"""
|
||||
Parse a SKILL.md file and extract metadata.
|
||||
"""Parse a SKILL.md file and extract metadata.
|
||||
|
||||
Args:
|
||||
skill_file: Path to the SKILL.md file
|
||||
category: Category of the skill ('public' or 'custom')
|
||||
skill_file: Path to the SKILL.md file.
|
||||
category: Category of the skill ('public' or 'custom').
|
||||
relative_path: Relative path from the category root to the skill
|
||||
directory. Defaults to the skill directory name when omitted.
|
||||
|
||||
Returns:
|
||||
Skill object if parsing succeeds, None otherwise
|
||||
Skill object if parsing succeeds, None otherwise.
|
||||
"""
|
||||
if not skill_file.exists() or skill_file.name != "SKILL.md":
|
||||
return None
|
||||
@@ -24,90 +27,42 @@ def parse_skill_file(skill_file: Path, category: str, relative_path: Path | None
|
||||
try:
|
||||
content = skill_file.read_text(encoding="utf-8")
|
||||
|
||||
# Extract YAML front matter
|
||||
# Pattern: ---\nkey: value\n---
|
||||
# Extract YAML front-matter block between leading ``---`` fences.
|
||||
front_matter_match = re.match(r"^---\s*\n(.*?)\n---\s*\n", content, re.DOTALL)
|
||||
|
||||
if not front_matter_match:
|
||||
return None
|
||||
|
||||
front_matter = front_matter_match.group(1)
|
||||
front_matter_text = front_matter_match.group(1)
|
||||
|
||||
# Parse YAML front matter with basic multiline string support
|
||||
metadata = {}
|
||||
lines = front_matter.split("\n")
|
||||
current_key = None
|
||||
current_value = []
|
||||
is_multiline = False
|
||||
multiline_style = None
|
||||
indent_level = None
|
||||
try:
|
||||
metadata = yaml.safe_load(front_matter_text)
|
||||
except yaml.YAMLError as exc:
|
||||
logger.error("Invalid YAML front-matter in %s: %s", skill_file, exc)
|
||||
return None
|
||||
|
||||
for line in lines:
|
||||
if is_multiline:
|
||||
if not line.strip():
|
||||
current_value.append("")
|
||||
continue
|
||||
if not isinstance(metadata, dict):
|
||||
logger.error("Front-matter in %s is not a YAML mapping", skill_file)
|
||||
return None
|
||||
|
||||
current_indent = len(line) - len(line.lstrip())
|
||||
|
||||
if indent_level is None:
|
||||
if current_indent > 0:
|
||||
indent_level = current_indent
|
||||
current_value.append(line[indent_level:])
|
||||
continue
|
||||
elif current_indent >= indent_level:
|
||||
current_value.append(line[indent_level:])
|
||||
continue
|
||||
|
||||
# If we reach here, it's either a new key or the end of multiline
|
||||
if current_key and is_multiline:
|
||||
if multiline_style == "|":
|
||||
metadata[current_key] = "\n".join(current_value).rstrip()
|
||||
else:
|
||||
text = "\n".join(current_value).rstrip()
|
||||
# Replace single newlines with spaces for folded blocks
|
||||
metadata[current_key] = re.sub(r"(?<!\n)\n(?!\n)", " ", text)
|
||||
|
||||
current_key = None
|
||||
current_value = []
|
||||
is_multiline = False
|
||||
multiline_style = None
|
||||
indent_level = None
|
||||
|
||||
if not line.strip():
|
||||
continue
|
||||
|
||||
if ":" in line:
|
||||
# Handle nested dicts simply by ignoring indentation for now,
|
||||
# or just extracting top-level keys
|
||||
key, value = line.split(":", 1)
|
||||
key = key.strip()
|
||||
value = value.strip()
|
||||
|
||||
if value in (">", "|"):
|
||||
current_key = key
|
||||
is_multiline = True
|
||||
multiline_style = value
|
||||
current_value = []
|
||||
indent_level = None
|
||||
else:
|
||||
metadata[key] = value
|
||||
|
||||
if current_key and is_multiline:
|
||||
if multiline_style == "|":
|
||||
metadata[current_key] = "\n".join(current_value).rstrip()
|
||||
else:
|
||||
text = "\n".join(current_value).rstrip()
|
||||
metadata[current_key] = re.sub(r"(?<!\n)\n(?!\n)", " ", text)
|
||||
|
||||
# Extract required fields
|
||||
# Extract required fields. Both must be non-empty strings.
|
||||
name = metadata.get("name")
|
||||
description = metadata.get("description")
|
||||
|
||||
if not name or not isinstance(name, str):
|
||||
return None
|
||||
if not description or not isinstance(description, str):
|
||||
return None
|
||||
|
||||
# Normalise: strip surrounding whitespace that YAML may preserve.
|
||||
name = name.strip()
|
||||
description = description.strip()
|
||||
|
||||
if not name or not description:
|
||||
return None
|
||||
|
||||
license_text = metadata.get("license")
|
||||
if license_text is not None:
|
||||
license_text = str(license_text).strip() or None
|
||||
|
||||
return Skill(
|
||||
name=name,
|
||||
@@ -117,9 +72,9 @@ def parse_skill_file(skill_file: Path, category: str, relative_path: Path | None
|
||||
skill_file=skill_file,
|
||||
relative_path=relative_path or Path(skill_file.parent.name),
|
||||
category=category,
|
||||
enabled=True, # Default to enabled, actual state comes from config file
|
||||
enabled=True, # Actual state comes from the extensions config file.
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error parsing skill file %s: %s", skill_file, e)
|
||||
except Exception:
|
||||
logger.exception("Unexpected error parsing skill file %s", skill_file)
|
||||
return None
|
||||
|
||||
@@ -54,7 +54,8 @@ async def scan_skill_content(content: str, *, executable: bool = False, location
|
||||
[
|
||||
{"role": "system", "content": rubric},
|
||||
{"role": "user", "content": prompt},
|
||||
]
|
||||
],
|
||||
config={"run_name": "security_agent"},
|
||||
)
|
||||
parsed = _extract_json_object(str(getattr(response, "content", "") or ""))
|
||||
if parsed and parsed.get("decision") in {"allow", "warn", "block"}:
|
||||
|
||||
@@ -13,6 +13,8 @@ class SubagentConfig:
|
||||
system_prompt: The system prompt that guides the subagent's behavior.
|
||||
tools: Optional list of tool names to allow. If None, inherits all tools.
|
||||
disallowed_tools: Optional list of tool names to deny.
|
||||
skills: Optional list of skill names to load. If None, inherits all enabled skills.
|
||||
If an empty list, no skills are loaded.
|
||||
model: Model to use - 'inherit' uses parent's model.
|
||||
max_turns: Maximum number of agent turns before stopping.
|
||||
timeout_seconds: Maximum execution time in seconds (default: 900 = 15 minutes).
|
||||
@@ -23,6 +25,7 @@ class SubagentConfig:
|
||||
system_prompt: str
|
||||
tools: list[str] | None = None
|
||||
disallowed_tools: list[str] | None = field(default_factory=lambda: ["task"])
|
||||
skills: list[str] | None = None
|
||||
model: str = "inherit"
|
||||
max_turns: int = 50
|
||||
timeout_seconds: int = 900
|
||||
|
||||
@@ -13,7 +13,7 @@ from typing import Any
|
||||
|
||||
from langchain.agents import create_agent
|
||||
from langchain.tools import BaseTool
|
||||
from langchain_core.messages import AIMessage, HumanMessage
|
||||
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
|
||||
from langchain_core.runnables import RunnableConfig
|
||||
|
||||
from deerflow.agents.thread_state import SandboxState, ThreadDataState, ThreadState
|
||||
@@ -184,7 +184,63 @@ class SubagentExecutor:
|
||||
state_schema=ThreadState,
|
||||
)
|
||||
|
||||
def _build_initial_state(self, task: str) -> dict[str, Any]:
|
||||
async def _load_skill_messages(self) -> list[SystemMessage]:
|
||||
"""Load skill content as conversation items based on config.skills.
|
||||
|
||||
Aligned with Codex's pattern: each subagent loads its own skills
|
||||
per-session and injects them as conversation items (developer messages),
|
||||
not as system prompt text. The config.skills whitelist controls which
|
||||
skills are loaded:
|
||||
- None: load all enabled skills
|
||||
- []: no skills
|
||||
- ["skill-a", "skill-b"]: only these skills
|
||||
|
||||
Returns:
|
||||
List of SystemMessages containing skill content.
|
||||
"""
|
||||
if self.config.skills is not None and len(self.config.skills) == 0:
|
||||
logger.info(f"[trace={self.trace_id}] Subagent {self.config.name} skills=[] — skipping skill loading")
|
||||
return []
|
||||
|
||||
try:
|
||||
from deerflow.skills.loader import load_skills
|
||||
|
||||
# Use asyncio.to_thread to avoid blocking the event loop (LangGraph ASGI requirement)
|
||||
all_skills = await asyncio.to_thread(load_skills, enabled_only=True)
|
||||
logger.info(f"[trace={self.trace_id}] Subagent {self.config.name} loaded {len(all_skills)} enabled skills from disk")
|
||||
except Exception:
|
||||
logger.warning(f"[trace={self.trace_id}] Failed to load skills for subagent {self.config.name}", exc_info=True)
|
||||
return []
|
||||
|
||||
if not all_skills:
|
||||
logger.info(f"[trace={self.trace_id}] Subagent {self.config.name} no enabled skills found")
|
||||
return []
|
||||
|
||||
# Filter by config.skills whitelist
|
||||
if self.config.skills is not None:
|
||||
allowed = set(self.config.skills)
|
||||
skills = [s for s in all_skills if s.name in allowed]
|
||||
else:
|
||||
skills = all_skills
|
||||
|
||||
if not skills:
|
||||
return []
|
||||
|
||||
# Read each skill's SKILL.md content and create conversation items
|
||||
messages = []
|
||||
for skill in skills:
|
||||
try:
|
||||
content = await asyncio.to_thread(skill.skill_file.read_text, encoding="utf-8")
|
||||
content = content.strip()
|
||||
if content:
|
||||
messages.append(SystemMessage(content=f'<skill name="{skill.name}">\n{content}\n</skill>'))
|
||||
logger.info(f"[trace={self.trace_id}] Subagent {self.config.name} loaded skill: {skill.name}")
|
||||
except Exception:
|
||||
logger.debug(f"[trace={self.trace_id}] Failed to read skill {skill.name}", exc_info=True)
|
||||
|
||||
return messages
|
||||
|
||||
async def _build_initial_state(self, task: str) -> dict[str, Any]:
|
||||
"""Build the initial state for agent execution.
|
||||
|
||||
Args:
|
||||
@@ -193,8 +249,17 @@ class SubagentExecutor:
|
||||
Returns:
|
||||
Initial state dictionary.
|
||||
"""
|
||||
# Load skills as conversation items (Codex pattern)
|
||||
skill_messages = await self._load_skill_messages()
|
||||
|
||||
messages: list = []
|
||||
# Skill content injected as developer/system messages before the task
|
||||
messages.extend(skill_messages)
|
||||
# Then the actual task
|
||||
messages.append(HumanMessage(content=task))
|
||||
|
||||
state: dict[str, Any] = {
|
||||
"messages": [HumanMessage(content=task)],
|
||||
"messages": messages,
|
||||
}
|
||||
|
||||
# Pass through sandbox and thread data from parent
|
||||
@@ -230,7 +295,7 @@ class SubagentExecutor:
|
||||
|
||||
try:
|
||||
agent = self._create_agent()
|
||||
state = self._build_initial_state(task)
|
||||
state = await self._build_initial_state(task)
|
||||
|
||||
# Build config with thread_id for sandbox access and recursion limit
|
||||
run_config: RunnableConfig = {
|
||||
|
||||
@@ -10,53 +10,100 @@ from deerflow.subagents.config import SubagentConfig
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _build_custom_subagent_config(name: str) -> SubagentConfig | None:
|
||||
"""Build a SubagentConfig from config.yaml custom_agents section.
|
||||
|
||||
Args:
|
||||
name: The name of the custom subagent.
|
||||
|
||||
Returns:
|
||||
SubagentConfig if found in custom_agents, None otherwise.
|
||||
"""
|
||||
from deerflow.config.subagents_config import get_subagents_app_config
|
||||
|
||||
app_config = get_subagents_app_config()
|
||||
custom = app_config.custom_agents.get(name)
|
||||
if custom is None:
|
||||
return None
|
||||
|
||||
return SubagentConfig(
|
||||
name=name,
|
||||
description=custom.description,
|
||||
system_prompt=custom.system_prompt,
|
||||
tools=custom.tools,
|
||||
disallowed_tools=custom.disallowed_tools,
|
||||
skills=custom.skills,
|
||||
model=custom.model,
|
||||
max_turns=custom.max_turns,
|
||||
timeout_seconds=custom.timeout_seconds,
|
||||
)
|
||||
|
||||
|
||||
def get_subagent_config(name: str) -> SubagentConfig | None:
|
||||
"""Get a subagent configuration by name, with config.yaml overrides applied.
|
||||
|
||||
Resolution order (mirrors Codex's config layering):
|
||||
1. Built-in subagents (general-purpose, bash)
|
||||
2. Custom subagents from config.yaml custom_agents section
|
||||
3. Per-agent overrides from config.yaml agents section (timeout, max_turns, model, skills)
|
||||
|
||||
Args:
|
||||
name: The name of the subagent.
|
||||
|
||||
Returns:
|
||||
SubagentConfig if found (with any config.yaml overrides applied), None otherwise.
|
||||
"""
|
||||
# Step 1: Look up built-in, then fall back to custom_agents
|
||||
config = BUILTIN_SUBAGENTS.get(name)
|
||||
if config is None:
|
||||
config = _build_custom_subagent_config(name)
|
||||
if config is None:
|
||||
return None
|
||||
|
||||
# Apply runtime overrides (timeout, max_turns, model) from config.yaml
|
||||
# Step 2: Apply per-agent overrides from config.yaml agents section.
|
||||
# Only explicit per-agent overrides are applied here. Global defaults
|
||||
# (timeout_seconds, max_turns at the top level) apply to built-in agents
|
||||
# but must NOT override custom agents' own values — custom agents define
|
||||
# their own defaults in the custom_agents section.
|
||||
# Lazy import to avoid circular deps.
|
||||
from deerflow.config.subagents_config import get_subagents_app_config
|
||||
|
||||
app_config = get_subagents_app_config()
|
||||
effective_timeout = app_config.get_timeout_for(name)
|
||||
effective_max_turns = app_config.get_max_turns_for(name, config.max_turns)
|
||||
is_builtin = name in BUILTIN_SUBAGENTS
|
||||
agent_override = app_config.agents.get(name)
|
||||
|
||||
overrides = {}
|
||||
if effective_timeout != config.timeout_seconds:
|
||||
logger.debug(
|
||||
"Subagent '%s': timeout overridden by config.yaml (%ss -> %ss)",
|
||||
name,
|
||||
config.timeout_seconds,
|
||||
effective_timeout,
|
||||
)
|
||||
overrides["timeout_seconds"] = effective_timeout
|
||||
if effective_max_turns != config.max_turns:
|
||||
logger.debug(
|
||||
"Subagent '%s': max_turns overridden by config.yaml (%s -> %s)",
|
||||
name,
|
||||
config.max_turns,
|
||||
effective_max_turns,
|
||||
)
|
||||
overrides["max_turns"] = effective_max_turns
|
||||
|
||||
# Timeout: per-agent override > global default (builtins only) > config's own value
|
||||
if agent_override is not None and agent_override.timeout_seconds is not None:
|
||||
if agent_override.timeout_seconds != config.timeout_seconds:
|
||||
logger.debug("Subagent '%s': timeout overridden (%ss -> %ss)", name, config.timeout_seconds, agent_override.timeout_seconds)
|
||||
overrides["timeout_seconds"] = agent_override.timeout_seconds
|
||||
elif is_builtin and app_config.timeout_seconds != config.timeout_seconds:
|
||||
logger.debug("Subagent '%s': timeout from global default (%ss -> %ss)", name, config.timeout_seconds, app_config.timeout_seconds)
|
||||
overrides["timeout_seconds"] = app_config.timeout_seconds
|
||||
|
||||
# Max turns: per-agent override > global default (builtins only) > config's own value
|
||||
if agent_override is not None and agent_override.max_turns is not None:
|
||||
if agent_override.max_turns != config.max_turns:
|
||||
logger.debug("Subagent '%s': max_turns overridden (%s -> %s)", name, config.max_turns, agent_override.max_turns)
|
||||
overrides["max_turns"] = agent_override.max_turns
|
||||
elif is_builtin and app_config.max_turns is not None and app_config.max_turns != config.max_turns:
|
||||
logger.debug("Subagent '%s': max_turns from global default (%s -> %s)", name, config.max_turns, app_config.max_turns)
|
||||
overrides["max_turns"] = app_config.max_turns
|
||||
|
||||
# Model: per-agent override only (no global default for model)
|
||||
effective_model = app_config.get_model_for(name)
|
||||
if effective_model is not None and effective_model != config.model:
|
||||
logger.debug(
|
||||
"Subagent '%s': model overridden by config.yaml (%s -> %s)",
|
||||
name,
|
||||
config.model,
|
||||
effective_model,
|
||||
)
|
||||
logger.debug("Subagent '%s': model overridden (%s -> %s)", name, config.model, effective_model)
|
||||
overrides["model"] = effective_model
|
||||
|
||||
# Skills: per-agent override only (no global default for skills)
|
||||
effective_skills = app_config.get_skills_for(name)
|
||||
if effective_skills is not None and effective_skills != config.skills:
|
||||
logger.debug("Subagent '%s': skills overridden (%s -> %s)", name, config.skills, effective_skills)
|
||||
overrides["skills"] = effective_skills
|
||||
|
||||
if overrides:
|
||||
config = replace(config, **overrides)
|
||||
|
||||
@@ -67,18 +114,33 @@ def list_subagents() -> list[SubagentConfig]:
|
||||
"""List all available subagent configurations (with config.yaml overrides applied).
|
||||
|
||||
Returns:
|
||||
List of all registered SubagentConfig instances.
|
||||
List of all registered SubagentConfig instances (built-in + custom).
|
||||
"""
|
||||
return [get_subagent_config(name) for name in BUILTIN_SUBAGENTS]
|
||||
configs = []
|
||||
for name in get_subagent_names():
|
||||
config = get_subagent_config(name)
|
||||
if config is not None:
|
||||
configs.append(config)
|
||||
return configs
|
||||
|
||||
|
||||
def get_subagent_names() -> list[str]:
|
||||
"""Get all available subagent names.
|
||||
"""Get all available subagent names (built-in + custom).
|
||||
|
||||
Returns:
|
||||
List of subagent names.
|
||||
"""
|
||||
return list(BUILTIN_SUBAGENTS.keys())
|
||||
names = list(BUILTIN_SUBAGENTS.keys())
|
||||
|
||||
# Merge custom_agents from config.yaml
|
||||
from deerflow.config.subagents_config import get_subagents_app_config
|
||||
|
||||
app_config = get_subagents_app_config()
|
||||
for custom_name in app_config.custom_agents:
|
||||
if custom_name not in names:
|
||||
names.append(custom_name)
|
||||
|
||||
return names
|
||||
|
||||
|
||||
def get_available_subagent_names() -> list[str]:
|
||||
@@ -87,11 +149,11 @@ def get_available_subagent_names() -> list[str]:
|
||||
Returns:
|
||||
List of subagent names visible to the current sandbox configuration.
|
||||
"""
|
||||
names = list(BUILTIN_SUBAGENTS.keys())
|
||||
names = get_subagent_names()
|
||||
try:
|
||||
host_bash_allowed = is_host_bash_allowed()
|
||||
except Exception:
|
||||
logger.debug("Could not determine host bash availability; exposing all built-in subagents")
|
||||
logger.debug("Could not determine host bash availability; exposing all subagents")
|
||||
return names
|
||||
|
||||
if not host_bash_allowed:
|
||||
|
||||
@@ -3,33 +3,16 @@ from typing import Annotated
|
||||
|
||||
from langchain.tools import InjectedToolCallId, ToolRuntime, tool
|
||||
from langchain_core.messages import ToolMessage
|
||||
from langgraph.config import get_config
|
||||
from langgraph.types import Command
|
||||
from langgraph.typing import ContextT
|
||||
|
||||
from deerflow.agents.thread_state import ThreadState
|
||||
from deerflow.config.paths import VIRTUAL_PATH_PREFIX, get_paths
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
OUTPUTS_VIRTUAL_PREFIX = f"{VIRTUAL_PATH_PREFIX}/outputs"
|
||||
|
||||
|
||||
def _get_thread_id(runtime: ToolRuntime[ContextT, ThreadState]) -> str | None:
|
||||
"""Resolve the current thread id from runtime context or RunnableConfig."""
|
||||
thread_id = runtime.context.get("thread_id") if runtime.context else None
|
||||
if thread_id:
|
||||
return thread_id
|
||||
|
||||
runtime_config = getattr(runtime, "config", None) or {}
|
||||
thread_id = runtime_config.get("configurable", {}).get("thread_id")
|
||||
if thread_id:
|
||||
return thread_id
|
||||
|
||||
try:
|
||||
return get_config().get("configurable", {}).get("thread_id")
|
||||
except RuntimeError:
|
||||
return None
|
||||
|
||||
|
||||
def _normalize_presented_filepath(
|
||||
runtime: ToolRuntime[ContextT, ThreadState],
|
||||
filepath: str,
|
||||
@@ -51,9 +34,9 @@ def _normalize_presented_filepath(
|
||||
if runtime.state is None:
|
||||
raise ValueError("Thread runtime state is not available")
|
||||
|
||||
thread_id = _get_thread_id(runtime)
|
||||
thread_id = get_thread_id(runtime)
|
||||
if not thread_id:
|
||||
raise ValueError("Thread ID is not available in runtime context or runtime config")
|
||||
raise ValueError("Thread ID is not available in runtime context, runtime config, or LangGraph thread-local config")
|
||||
|
||||
thread_data = runtime.state.get("thread_data") or {}
|
||||
outputs_path = thread_data.get("outputs_path")
|
||||
|
||||
@@ -17,21 +17,25 @@ def setup_agent(
|
||||
soul: str,
|
||||
description: str,
|
||||
runtime: ToolRuntime,
|
||||
skills: list[str] | None = None,
|
||||
) -> Command:
|
||||
"""Setup the custom DeerFlow agent.
|
||||
|
||||
Args:
|
||||
soul: Full SOUL.md content defining the agent's personality and behavior.
|
||||
description: One-line description of what the agent does.
|
||||
skills: Optional list of skill names this agent should use. None means use all enabled skills, empty list means no skills.
|
||||
"""
|
||||
|
||||
agent_name: str | None = runtime.context.get("agent_name") if runtime.context else None
|
||||
agent_dir = None
|
||||
is_new_dir = False
|
||||
|
||||
try:
|
||||
agent_name = validate_agent_name(agent_name)
|
||||
paths = get_paths()
|
||||
agent_dir = paths.agent_dir(agent_name) if agent_name else paths.base_dir
|
||||
is_new_dir = not agent_dir.exists()
|
||||
agent_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if agent_name:
|
||||
@@ -39,6 +43,8 @@ def setup_agent(
|
||||
config_data: dict = {"name": agent_name}
|
||||
if description:
|
||||
config_data["description"] = description
|
||||
if skills is not None:
|
||||
config_data["skills"] = skills
|
||||
|
||||
config_file = agent_dir / "config.yaml"
|
||||
with open(config_file, "w", encoding="utf-8") as f:
|
||||
@@ -58,8 +64,8 @@ def setup_agent(
|
||||
except Exception as e:
|
||||
import shutil
|
||||
|
||||
if agent_name and agent_dir is not None and agent_dir.exists():
|
||||
# Cleanup the custom agent directory only if it was created but an error occurred during setup
|
||||
if agent_name and is_new_dir and agent_dir is not None and agent_dir.exists():
|
||||
# Cleanup the custom agent directory only if it was newly created during this call
|
||||
shutil.rmtree(agent_dir)
|
||||
logger.error(f"[agent_creator] Failed to create agent '{agent_name}': {e}", exc_info=True)
|
||||
return Command(update={"messages": [ToolMessage(content=f"Error: {e}", tool_call_id=runtime.tool_call_id)]})
|
||||
|
||||
@@ -10,15 +10,26 @@ from langchain.tools import InjectedToolCallId, ToolRuntime, tool
|
||||
from langgraph.config import get_stream_writer
|
||||
from langgraph.typing import ContextT
|
||||
|
||||
from deerflow.agents.lead_agent.prompt import get_skills_prompt_section
|
||||
from deerflow.agents.thread_state import ThreadState
|
||||
from deerflow.sandbox.security import LOCAL_BASH_SUBAGENT_DISABLED_MESSAGE, is_host_bash_allowed
|
||||
from deerflow.subagents import SubagentExecutor, get_available_subagent_names, get_subagent_config
|
||||
from deerflow.subagents.executor import SubagentStatus, cleanup_background_task, get_background_task_result, request_cancel_background_task
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _merge_skill_allowlists(parent: list[str] | None, child: list[str] | None) -> list[str] | None:
|
||||
"""Return the effective subagent skill allowlist under the parent policy."""
|
||||
if parent is None:
|
||||
return child
|
||||
if child is None:
|
||||
return list(parent)
|
||||
|
||||
parent_set = set(parent)
|
||||
return [skill for skill in child if skill in parent_set]
|
||||
|
||||
|
||||
@tool("task", parse_docstring=True)
|
||||
async def task_tool(
|
||||
runtime: ToolRuntime[ContextT, ThreadState],
|
||||
@@ -35,7 +46,7 @@ async def task_tool(
|
||||
- Handle complex multi-step tasks autonomously
|
||||
- Execute commands or operations in isolated contexts
|
||||
|
||||
Available subagent types depend on the active sandbox configuration:
|
||||
Built-in subagent types:
|
||||
- **general-purpose**: A capable agent for complex, multi-step tasks that require
|
||||
both exploration and action. Use when the task requires complex reasoning,
|
||||
multiple dependent steps, or would benefit from isolated context.
|
||||
@@ -43,6 +54,11 @@ async def task_tool(
|
||||
available when host bash is explicitly allowed or when using an isolated shell
|
||||
sandbox such as `AioSandboxProvider`.
|
||||
|
||||
Additional custom subagent types may be defined in config.yaml under
|
||||
`subagents.custom_agents`. Each custom type can have its own system prompt,
|
||||
tools, skills, model, and timeout configuration. If an unknown subagent_type
|
||||
is provided, the error message will list all available types.
|
||||
|
||||
When to use this tool:
|
||||
- Complex tasks requiring multiple steps or tools
|
||||
- Tasks that produce verbose output
|
||||
@@ -72,16 +88,13 @@ async def task_tool(
|
||||
# Build config overrides
|
||||
overrides: dict = {}
|
||||
|
||||
skills_section = get_skills_prompt_section()
|
||||
if skills_section:
|
||||
overrides["system_prompt"] = config.system_prompt + "\n\n" + skills_section
|
||||
# Skills are loaded by SubagentExecutor per-session (aligned with Codex's pattern:
|
||||
# each subagent loads its own skills based on config, injected as conversation items).
|
||||
# No longer appended to system_prompt here.
|
||||
|
||||
if max_turns is not None:
|
||||
overrides["max_turns"] = max_turns
|
||||
|
||||
if overrides:
|
||||
config = replace(config, **overrides)
|
||||
|
||||
# Extract parent context from runtime
|
||||
sandbox_state = None
|
||||
thread_data = None
|
||||
@@ -93,9 +106,7 @@ async def task_tool(
|
||||
if runtime is not None:
|
||||
sandbox_state = runtime.state.get("sandbox")
|
||||
thread_data = runtime.state.get("thread_data")
|
||||
thread_id = runtime.context.get("thread_id") if runtime.context else None
|
||||
if thread_id is None:
|
||||
thread_id = runtime.config.get("configurable", {}).get("thread_id")
|
||||
thread_id = get_thread_id(runtime)
|
||||
|
||||
# Try to get parent model from configurable
|
||||
metadata = runtime.config.get("metadata", {})
|
||||
@@ -104,6 +115,13 @@ async def task_tool(
|
||||
# Get or generate trace_id for distributed tracing
|
||||
trace_id = metadata.get("trace_id") or str(uuid.uuid4())[:8]
|
||||
|
||||
parent_available_skills = metadata.get("available_skills")
|
||||
if parent_available_skills is not None:
|
||||
overrides["skills"] = _merge_skill_allowlists(list(parent_available_skills), config.skills)
|
||||
|
||||
if overrides:
|
||||
config = replace(config, **overrides)
|
||||
|
||||
# Get available tools (excluding task tool to prevent nesting)
|
||||
# Lazy import to avoid circular dependency
|
||||
from deerflow.tools import get_available_tools
|
||||
|
||||
@@ -112,6 +112,15 @@ class DeferredToolRegistry:
|
||||
def entries(self) -> list[DeferredToolEntry]:
|
||||
return list(self._entries)
|
||||
|
||||
@property
|
||||
def deferred_names(self) -> set[str]:
|
||||
"""Names of tools that are still hidden from model binding."""
|
||||
return {entry.name for entry in self._entries}
|
||||
|
||||
def contains(self, name: str) -> bool:
|
||||
"""Return whether *name* is still deferred."""
|
||||
return any(entry.name == name for entry in self._entries)
|
||||
|
||||
def __len__(self) -> int:
|
||||
return len(self._entries)
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@ from deerflow.skills.manager import (
|
||||
validate_skill_name,
|
||||
)
|
||||
from deerflow.skills.security_scanner import scan_skill_content
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -42,14 +43,6 @@ def _get_lock(name: str) -> asyncio.Lock:
|
||||
return lock
|
||||
|
||||
|
||||
def _get_thread_id(runtime: ToolRuntime[ContextT, ThreadState] | None) -> str | None:
|
||||
if runtime is None:
|
||||
return None
|
||||
if runtime.context and runtime.context.get("thread_id"):
|
||||
return runtime.context.get("thread_id")
|
||||
return runtime.config.get("configurable", {}).get("thread_id")
|
||||
|
||||
|
||||
def _history_record(*, action: str, file_path: str, prev_content: str | None, new_content: str | None, thread_id: str | None, scanner: dict[str, Any]) -> dict[str, Any]:
|
||||
return {
|
||||
"action": action,
|
||||
@@ -98,7 +91,7 @@ async def _skill_manage_impl(
|
||||
"""
|
||||
name = validate_skill_name(name)
|
||||
lock = _get_lock(name)
|
||||
thread_id = _get_thread_id(runtime)
|
||||
thread_id = get_thread_id(runtime)
|
||||
|
||||
async with lock:
|
||||
if action == "create":
|
||||
|
||||
@@ -59,7 +59,22 @@ def get_available_tools(
|
||||
if not is_host_bash_allowed(config):
|
||||
tool_configs = [tool for tool in tool_configs if not _is_host_bash_tool(tool)]
|
||||
|
||||
loaded_tools = [resolve_variable(tool.use, BaseTool) for tool in tool_configs]
|
||||
loaded_tools_raw = [(cfg, resolve_variable(cfg.use, BaseTool)) for cfg in tool_configs]
|
||||
|
||||
# Warn when the config ``name`` field and the tool object's ``.name``
|
||||
# attribute diverge — this mismatch is the root cause of issue #1803 where
|
||||
# the LLM receives one name in its tool schema but the runtime router
|
||||
# recognises a different name, producing "not a valid tool" errors.
|
||||
for cfg, loaded in loaded_tools_raw:
|
||||
if cfg.name != loaded.name:
|
||||
logger.warning(
|
||||
"Tool name mismatch: config name %r does not match tool .name %r (use: %s). The tool's own .name will be used for binding.",
|
||||
cfg.name,
|
||||
loaded.name,
|
||||
cfg.use,
|
||||
)
|
||||
|
||||
loaded_tools = [t for _, t in loaded_tools_raw]
|
||||
|
||||
# Conditionally add tools based on config
|
||||
builtin_tools = BUILTIN_TOOLS.copy()
|
||||
@@ -134,4 +149,20 @@ def get_available_tools(
|
||||
logger.warning(f"Failed to load ACP tool: {e}")
|
||||
|
||||
logger.info(f"Total tools loaded: {len(loaded_tools)}, built-in tools: {len(builtin_tools)}, MCP tools: {len(mcp_tools)}, ACP tools: {len(acp_tools)}")
|
||||
return loaded_tools + builtin_tools + mcp_tools + acp_tools
|
||||
|
||||
# Deduplicate by tool name — config-loaded tools take priority, followed by
|
||||
# built-ins, MCP tools, and ACP tools. Duplicate names cause the LLM to
|
||||
# receive ambiguous or concatenated function schemas (issue #1803).
|
||||
all_tools = loaded_tools + builtin_tools + mcp_tools + acp_tools
|
||||
seen_names: set[str] = set()
|
||||
unique_tools: list[BaseTool] = []
|
||||
for t in all_tools:
|
||||
if t.name not in seen_names:
|
||||
unique_tools.append(t)
|
||||
seen_names.add(t.name)
|
||||
else:
|
||||
logger.warning(
|
||||
"Duplicate tool name %r detected and skipped — check your config.yaml and MCP server registrations (issue #1803).",
|
||||
t.name,
|
||||
)
|
||||
return unique_tools
|
||||
|
||||
@@ -0,0 +1,90 @@
|
||||
"""Runtime utilities for thread_id resolution and context access.
|
||||
|
||||
Thread ID Resolution Strategy
|
||||
=============================
|
||||
|
||||
DeerFlow resolves the current ``thread_id`` from a three-level cascade:
|
||||
|
||||
1. **runtime.context["thread_id"]** -- Set by ``worker.py`` (gateway mode)
|
||||
or by LangGraph Server (standard mode) when constructing the Runtime.
|
||||
2. **runtime.config["configurable"]["thread_id"]** -- Available on
|
||||
``ToolRuntime`` instances passed to tools via the ``@tool`` decorator.
|
||||
Not available on ``Runtime`` instances received by middlewares.
|
||||
3. **get_config()["configurable"]["thread_id"]** -- LangGraph's thread-local
|
||||
config, available when executing inside a graph's runnable context.
|
||||
|
||||
About ``__pregel_runtime``
|
||||
===========================
|
||||
|
||||
In gateway mode (``run_agent()`` in ``worker.py``), the agent graph does not
|
||||
run inside the LangGraph Server. The server normally injects a ``Runtime``
|
||||
object automatically. Since we run the graph ourselves, we must inject the
|
||||
Runtime manually via ``config["configurable"]["__pregel_runtime"]``. This is
|
||||
the standard mechanism provided by LangGraph's Pregel engine for injecting
|
||||
runtime context into graph nodes. It is not a private/internal hack -- it is
|
||||
the documented way to pass Runtime when running a graph outside the server.
|
||||
|
||||
Duck Typing
|
||||
===========
|
||||
|
||||
Both ``langgraph.runtime.Runtime`` (middlewares) and
|
||||
``langchain.tools.ToolRuntime`` (tools) expose a ``.context`` attribute (a
|
||||
dict or None). ``ToolRuntime`` additionally exposes ``.config``. The
|
||||
function below uses ``getattr`` with safe defaults so it works with either
|
||||
type, with ``SimpleNamespace`` in tests, or with ``None``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def get_thread_id(runtime: Any | None) -> str | None:
|
||||
"""Resolve the current thread_id from a runtime object.
|
||||
|
||||
Follows a three-level fallback chain:
|
||||
|
||||
1. ``runtime.context.get("thread_id")`` -- if context is a non-empty dict.
|
||||
2. ``runtime.config.get("configurable", {}).get("thread_id")`` -- if
|
||||
the runtime has a config dict (ToolRuntime).
|
||||
3. ``get_config().get("configurable", {}).get("thread_id")`` -- LangGraph's
|
||||
thread-local config. Wrapped in ``try/except RuntimeError`` because it
|
||||
raises outside a runnable context (e.g., unit tests).
|
||||
|
||||
Args:
|
||||
runtime: A Runtime, ToolRuntime, SimpleNamespace, or None.
|
||||
|
||||
Returns:
|
||||
The thread_id string, or None if it cannot be resolved.
|
||||
"""
|
||||
if runtime is None:
|
||||
return None
|
||||
|
||||
# Level 1: runtime.context["thread_id"]
|
||||
context = getattr(runtime, "context", None)
|
||||
if context and isinstance(context, dict):
|
||||
thread_id = context.get("thread_id")
|
||||
if thread_id:
|
||||
return thread_id
|
||||
|
||||
# Level 2: runtime.config["configurable"]["thread_id"]
|
||||
config = getattr(runtime, "config", None)
|
||||
if config and isinstance(config, dict):
|
||||
thread_id = config.get("configurable", {}).get("thread_id")
|
||||
if thread_id:
|
||||
return thread_id
|
||||
|
||||
# Level 3: langgraph.config.get_config() -- only works inside runnable context
|
||||
try:
|
||||
from langgraph.config import get_config
|
||||
|
||||
config_data = get_config()
|
||||
thread_id = config_data.get("configurable", {}).get("thread_id")
|
||||
if thread_id:
|
||||
return thread_id
|
||||
except RuntimeError:
|
||||
# Expected when not running inside a LangGraph runnable context (e.g., unit tests).
|
||||
# In that case, thread_id cannot be resolved from thread-local config, so fall through.
|
||||
pass
|
||||
|
||||
return None
|
||||
@@ -20,7 +20,12 @@ dependencies = [
|
||||
]
|
||||
|
||||
[dependency-groups]
|
||||
dev = ["pytest>=9.0.3", "ruff>=0.14.11"]
|
||||
dev = [
|
||||
"prompt-toolkit>=3.0.0",
|
||||
"pytest>=9.0.3",
|
||||
"pytest-asyncio>=1.3.0",
|
||||
"ruff>=0.14.11",
|
||||
]
|
||||
|
||||
[tool.uv.workspace]
|
||||
members = ["packages/harness"]
|
||||
|
||||
@@ -2011,6 +2011,65 @@ class TestChannelService:
|
||||
assert service.manager._langgraph_url == "http://custom-langgraph:2024"
|
||||
assert service.manager._gateway_url == "http://custom-gateway:8001"
|
||||
|
||||
def test_disabled_channel_with_string_creds_emits_warning(self, caplog):
|
||||
"""Warning is emitted when a channel has string credentials but enabled=false."""
|
||||
import logging
|
||||
|
||||
from app.channels.service import ChannelService
|
||||
|
||||
async def go():
|
||||
service = ChannelService(
|
||||
channels_config={
|
||||
"wecom": {"enabled": False, "bot_id": "corp123", "bot_secret": "secret"},
|
||||
}
|
||||
)
|
||||
with caplog.at_level(logging.WARNING, logger="app.channels.service"):
|
||||
await service.start()
|
||||
await service.stop()
|
||||
|
||||
_run(go())
|
||||
assert any("wecom" in r.message and r.levelno == logging.WARNING for r in caplog.records)
|
||||
|
||||
def test_disabled_channel_with_int_creds_emits_warning(self, caplog):
|
||||
"""Warning is emitted even when YAML-parsed integer credentials are present."""
|
||||
import logging
|
||||
|
||||
from app.channels.service import ChannelService
|
||||
|
||||
async def go():
|
||||
# Simulate YAML parsing a numeric token/ID as an int
|
||||
service = ChannelService(
|
||||
channels_config={
|
||||
"telegram": {"enabled": False, "bot_token": 123456789},
|
||||
}
|
||||
)
|
||||
with caplog.at_level(logging.WARNING, logger="app.channels.service"):
|
||||
await service.start()
|
||||
await service.stop()
|
||||
|
||||
_run(go())
|
||||
assert any("telegram" in r.message and r.levelno == logging.WARNING for r in caplog.records)
|
||||
|
||||
def test_disabled_channel_without_creds_emits_info(self, caplog):
|
||||
"""Only an info log (no warning) is emitted when a channel is disabled with no credentials."""
|
||||
import logging
|
||||
|
||||
from app.channels.service import ChannelService
|
||||
|
||||
async def go():
|
||||
service = ChannelService(
|
||||
channels_config={
|
||||
"telegram": {"enabled": False},
|
||||
}
|
||||
)
|
||||
with caplog.at_level(logging.DEBUG, logger="app.channels.service"):
|
||||
await service.start()
|
||||
await service.stop()
|
||||
|
||||
_run(go())
|
||||
warning_records = [r for r in caplog.records if "telegram" in r.message and r.levelno == logging.WARNING]
|
||||
assert not warning_records
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Slack send retry tests
|
||||
@@ -2046,6 +2105,11 @@ class TestSlackSendRetry:
|
||||
|
||||
|
||||
class TestSlackAllowedUsers:
|
||||
@staticmethod
|
||||
def _submit_coro(coro, loop):
|
||||
coro.close()
|
||||
return MagicMock()
|
||||
|
||||
def test_numeric_allowed_users_match_string_event_user_id(self):
|
||||
from app.channels.slack import SlackChannel
|
||||
|
||||
@@ -2067,13 +2131,9 @@ class TestSlackAllowedUsers:
|
||||
"ts": "1710000000.000100",
|
||||
}
|
||||
|
||||
def submit_coro(coro, loop):
|
||||
coro.close()
|
||||
return MagicMock()
|
||||
|
||||
with patch(
|
||||
"app.channels.slack.asyncio.run_coroutine_threadsafe",
|
||||
side_effect=submit_coro,
|
||||
side_effect=self._submit_coro,
|
||||
) as submit:
|
||||
channel._handle_message_event(event)
|
||||
|
||||
@@ -2085,6 +2145,74 @@ class TestSlackAllowedUsers:
|
||||
assert inbound.chat_id == "C123"
|
||||
assert inbound.text == "hello from slack"
|
||||
|
||||
def test_string_allowed_users_match_event_user_id(self):
|
||||
from app.channels.slack import SlackChannel
|
||||
|
||||
bus = MessageBus()
|
||||
bus.publish_inbound = AsyncMock()
|
||||
channel = SlackChannel(
|
||||
bus=bus,
|
||||
config={"allowed_users": "U123456"},
|
||||
)
|
||||
channel._loop = MagicMock()
|
||||
channel._loop.is_running.return_value = True
|
||||
channel._add_reaction = MagicMock()
|
||||
channel._send_running_reply = MagicMock()
|
||||
|
||||
event = {
|
||||
"user": "U123456",
|
||||
"text": "hello from slack",
|
||||
"channel": "C123",
|
||||
"ts": "1710000000.000100",
|
||||
}
|
||||
|
||||
with patch(
|
||||
"app.channels.slack.asyncio.run_coroutine_threadsafe",
|
||||
side_effect=self._submit_coro,
|
||||
) as submit:
|
||||
channel._handle_message_event(event)
|
||||
|
||||
channel._add_reaction.assert_called_once_with("C123", "1710000000.000100", "eyes")
|
||||
channel._send_running_reply.assert_called_once_with("C123", "1710000000.000100")
|
||||
submit.assert_called_once()
|
||||
inbound = bus.publish_inbound.call_args.args[0]
|
||||
assert inbound.user_id == "U123456"
|
||||
assert inbound.chat_id == "C123"
|
||||
assert inbound.text == "hello from slack"
|
||||
|
||||
def test_scalar_allowed_users_warns_and_matches_stringified_event_user_id(self, caplog):
|
||||
from app.channels.slack import SlackChannel
|
||||
|
||||
bus = MessageBus()
|
||||
bus.publish_inbound = AsyncMock()
|
||||
with caplog.at_level("WARNING"):
|
||||
channel = SlackChannel(
|
||||
bus=bus,
|
||||
config={"allowed_users": 123456},
|
||||
)
|
||||
channel._loop = MagicMock()
|
||||
channel._loop.is_running.return_value = True
|
||||
channel._add_reaction = MagicMock()
|
||||
channel._send_running_reply = MagicMock()
|
||||
|
||||
event = {
|
||||
"user": "123456",
|
||||
"text": "hello from slack",
|
||||
"channel": "C123",
|
||||
"ts": "1710000000.000100",
|
||||
}
|
||||
|
||||
with patch(
|
||||
"app.channels.slack.asyncio.run_coroutine_threadsafe",
|
||||
side_effect=self._submit_coro,
|
||||
) as submit:
|
||||
channel._handle_message_event(event)
|
||||
|
||||
assert "Slack allowed_users should be a list" in caplog.text
|
||||
submit.assert_called_once()
|
||||
inbound = bus.publish_inbound.call_args.args[0]
|
||||
assert inbound.user_id == "123456"
|
||||
|
||||
def test_raises_after_all_retries_exhausted(self):
|
||||
from app.channels.slack import SlackChannel
|
||||
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
"""Tests for ClarificationMiddleware, focusing on options type coercion."""
|
||||
|
||||
import json
|
||||
from types import SimpleNamespace
|
||||
|
||||
import pytest
|
||||
from langgraph.graph.message import add_messages
|
||||
|
||||
from deerflow.agents.middlewares.clarification_middleware import ClarificationMiddleware
|
||||
|
||||
@@ -118,3 +120,60 @@ class TestFormatClarificationMessage:
|
||||
assert "2. 2" in result
|
||||
assert "3. True" in result
|
||||
assert "4. None" in result
|
||||
|
||||
|
||||
class TestClarificationCommandIdempotency:
|
||||
"""Clarification tool-call retries should not duplicate messages in state."""
|
||||
|
||||
def test_repeated_tool_call_uses_stable_message_id(self, middleware):
|
||||
request = SimpleNamespace(
|
||||
tool_call={
|
||||
"name": "ask_clarification",
|
||||
"id": "call-clarify-1",
|
||||
"args": {
|
||||
"question": "Which environment should I use?",
|
||||
"clarification_type": "approach_choice",
|
||||
"options": ["dev", "prod"],
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
first = middleware.wrap_tool_call(request, lambda _req: pytest.fail("handler should not be called"))
|
||||
second = middleware.wrap_tool_call(request, lambda _req: pytest.fail("handler should not be called"))
|
||||
|
||||
first_message = first.update["messages"][0]
|
||||
second_message = second.update["messages"][0]
|
||||
|
||||
assert first_message.id == "clarification:call-clarify-1"
|
||||
assert second_message.id == first_message.id
|
||||
assert second_message.tool_call_id == first_message.tool_call_id
|
||||
|
||||
merged = add_messages(add_messages([], [first_message]), [second_message])
|
||||
|
||||
assert len(merged) == 1
|
||||
assert merged[0].id == "clarification:call-clarify-1"
|
||||
assert merged[0].content == first_message.content
|
||||
|
||||
def test_missing_tool_call_id_still_gets_stable_message_id(self, middleware):
|
||||
request = SimpleNamespace(
|
||||
tool_call={
|
||||
"name": "ask_clarification",
|
||||
"args": {
|
||||
"question": "Which environment should I use?",
|
||||
"clarification_type": "missing_info",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
first = middleware.wrap_tool_call(request, lambda _req: pytest.fail("handler should not be called"))
|
||||
second = middleware.wrap_tool_call(request, lambda _req: pytest.fail("handler should not be called"))
|
||||
|
||||
first_message = first.update["messages"][0]
|
||||
second_message = second.update["messages"][0]
|
||||
|
||||
assert first_message.id.startswith("clarification:")
|
||||
assert second_message.id == first_message.id
|
||||
|
||||
merged = add_messages(add_messages([], [first_message]), [second_message])
|
||||
|
||||
assert len(merged) == 1
|
||||
|
||||
@@ -0,0 +1,249 @@
|
||||
"""Tests for ClaudeChatModel._apply_prompt_caching.
|
||||
|
||||
Validates that the function never places more than 4 cache_control breakpoints
|
||||
(the hard limit enforced by the Anthropic API and AWS Bedrock) regardless of
|
||||
how many system blocks, message content blocks, or tool definitions are present.
|
||||
"""
|
||||
|
||||
from unittest import mock
|
||||
|
||||
import pytest
|
||||
|
||||
from deerflow.models.claude_provider import ClaudeChatModel
|
||||
|
||||
|
||||
def _make_model(prompt_cache_size: int = 3) -> ClaudeChatModel:
|
||||
"""Return a minimal ClaudeChatModel instance without network calls."""
|
||||
with mock.patch.object(ClaudeChatModel, "model_post_init"):
|
||||
m = ClaudeChatModel(
|
||||
model="claude-sonnet-4-6",
|
||||
anthropic_api_key="sk-ant-fake", # type: ignore[call-arg]
|
||||
prompt_cache_size=prompt_cache_size,
|
||||
)
|
||||
m._is_oauth = False
|
||||
m.enable_prompt_caching = True
|
||||
return m
|
||||
|
||||
|
||||
def _count_cache_control(payload: dict) -> int:
|
||||
"""Count the total number of cache_control markers in a payload."""
|
||||
count = 0
|
||||
|
||||
system = payload.get("system", [])
|
||||
if isinstance(system, list):
|
||||
for block in system:
|
||||
if isinstance(block, dict) and "cache_control" in block:
|
||||
count += 1
|
||||
|
||||
for msg in payload.get("messages", []):
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
content = msg.get("content", [])
|
||||
if isinstance(content, list):
|
||||
for block in content:
|
||||
if isinstance(block, dict) and "cache_control" in block:
|
||||
count += 1
|
||||
|
||||
for tool in payload.get("tools", []):
|
||||
if isinstance(tool, dict) and "cache_control" in tool:
|
||||
count += 1
|
||||
|
||||
return count
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def model() -> ClaudeChatModel:
|
||||
return _make_model()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Basic correctness
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_single_system_block_gets_cached(model):
|
||||
payload: dict = {"system": [{"type": "text", "text": "sys"}]}
|
||||
model._apply_prompt_caching(payload)
|
||||
assert payload["system"][0].get("cache_control") == {"type": "ephemeral"}
|
||||
|
||||
|
||||
def test_string_system_converted_and_cached(model):
|
||||
payload: dict = {"system": "you are helpful"}
|
||||
model._apply_prompt_caching(payload)
|
||||
assert isinstance(payload["system"], list)
|
||||
assert payload["system"][0].get("cache_control") == {"type": "ephemeral"}
|
||||
|
||||
|
||||
def test_last_tool_gets_cached_when_budget_allows(model):
|
||||
payload: dict = {
|
||||
"tools": [{"name": "t1"}, {"name": "t2"}],
|
||||
}
|
||||
model._apply_prompt_caching(payload)
|
||||
# With no system or messages the last tool should be cached.
|
||||
assert payload["tools"][-1].get("cache_control") == {"type": "ephemeral"}
|
||||
assert "cache_control" not in payload["tools"][0]
|
||||
|
||||
|
||||
def test_recent_messages_get_cached(model):
|
||||
"""The last prompt_cache_size messages' content blocks should be cached."""
|
||||
payload: dict = {
|
||||
"messages": [
|
||||
{"role": "user", "content": [{"type": "text", "text": "hello"}]},
|
||||
],
|
||||
}
|
||||
model._apply_prompt_caching(payload)
|
||||
assert payload["messages"][0]["content"][0].get("cache_control") == {"type": "ephemeral"}
|
||||
|
||||
|
||||
def test_string_message_content_converted_and_cached(model):
|
||||
payload: dict = {
|
||||
"messages": [
|
||||
{"role": "user", "content": "simple string"},
|
||||
],
|
||||
}
|
||||
model._apply_prompt_caching(payload)
|
||||
assert isinstance(payload["messages"][0]["content"], list)
|
||||
assert payload["messages"][0]["content"][0].get("cache_control") == {"type": "ephemeral"}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Budget enforcement (the core regression test for issue #2448)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_never_exceeds_4_breakpoints_with_large_system(model):
|
||||
"""Many system text blocks must not produce more than 4 breakpoints total."""
|
||||
payload: dict = {
|
||||
"system": [{"type": "text", "text": f"sys {i}"} for i in range(6)],
|
||||
"tools": [{"name": "t1"}],
|
||||
}
|
||||
model._apply_prompt_caching(payload)
|
||||
assert _count_cache_control(payload) <= 4
|
||||
|
||||
|
||||
def test_never_exceeds_4_breakpoints_multi_turn_with_multi_block_messages(model):
|
||||
"""Multi-turn conversation where each message has multiple content blocks."""
|
||||
# 1 system block + 3 messages × 2 blocks + 1 tool = 8 candidates → must cap at 4
|
||||
payload: dict = {
|
||||
"system": [{"type": "text", "text": "system prompt"}],
|
||||
"messages": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": "user text"},
|
||||
{"type": "tool_result", "tool_use_id": "x", "content": "result"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": [
|
||||
{"type": "text", "text": "assistant text"},
|
||||
{"type": "tool_use", "id": "y", "name": "bash", "input": {}},
|
||||
],
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": "follow up"},
|
||||
{"type": "text", "text": "second block"},
|
||||
],
|
||||
},
|
||||
],
|
||||
"tools": [{"name": "bash"}],
|
||||
}
|
||||
model._apply_prompt_caching(payload)
|
||||
total = _count_cache_control(payload)
|
||||
assert total <= 4, f"Expected ≤ 4 breakpoints, got {total}"
|
||||
|
||||
|
||||
def test_never_exceeds_4_breakpoints_many_messages(model):
|
||||
"""Large number of messages with multiple blocks per message."""
|
||||
messages = []
|
||||
for i in range(10):
|
||||
messages.append(
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": f"msg {i} block a"},
|
||||
{"type": "text", "text": f"msg {i} block b"},
|
||||
],
|
||||
}
|
||||
)
|
||||
payload: dict = {
|
||||
"system": [{"type": "text", "text": "sys 1"}, {"type": "text", "text": "sys 2"}],
|
||||
"messages": messages,
|
||||
"tools": [{"name": "tool_a"}, {"name": "tool_b"}],
|
||||
}
|
||||
model._apply_prompt_caching(payload)
|
||||
total = _count_cache_control(payload)
|
||||
assert total <= 4, f"Expected ≤ 4 breakpoints, got {total}"
|
||||
|
||||
|
||||
def test_exactly_4_breakpoints_when_4_or_more_candidates(model):
|
||||
"""When there are at least 4 candidates, exactly 4 breakpoints are placed."""
|
||||
payload: dict = {
|
||||
"system": [{"type": "text", "text": f"sys {i}"} for i in range(3)],
|
||||
"messages": [
|
||||
{"role": "user", "content": [{"type": "text", "text": "user"}]},
|
||||
{"role": "assistant", "content": [{"type": "text", "text": "asst"}]},
|
||||
{"role": "user", "content": [{"type": "text", "text": "follow"}]},
|
||||
],
|
||||
"tools": [{"name": "bash"}],
|
||||
}
|
||||
model._apply_prompt_caching(payload)
|
||||
total = _count_cache_control(payload)
|
||||
assert total == 4
|
||||
|
||||
|
||||
def test_breakpoints_placed_on_last_candidates(model):
|
||||
"""Breakpoints should be on the *last* candidates, not the first."""
|
||||
# 5 system blocks but budget = 4 → first system block should NOT be cached,
|
||||
# last 4 (indices 1-4) should be.
|
||||
payload: dict = {
|
||||
"system": [{"type": "text", "text": f"sys {i}"} for i in range(5)],
|
||||
}
|
||||
model._apply_prompt_caching(payload)
|
||||
# First block is NOT in the last-4 window
|
||||
assert "cache_control" not in payload["system"][0]
|
||||
# Last 4 blocks ARE cached
|
||||
for i in range(1, 5):
|
||||
assert payload["system"][i].get("cache_control") == {"type": "ephemeral"}, f"block {i} should be cached"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Edge cases
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_no_candidates_is_a_no_op(model):
|
||||
payload: dict = {}
|
||||
model._apply_prompt_caching(payload)
|
||||
assert _count_cache_control(payload) == 0
|
||||
|
||||
|
||||
def test_non_text_system_blocks_not_added_as_candidates(model):
|
||||
"""Image blocks in system should not receive cache_control."""
|
||||
payload: dict = {
|
||||
"system": [
|
||||
{"type": "image", "source": {"type": "base64", "media_type": "image/png", "data": "abc"}},
|
||||
{"type": "text", "text": "text block"},
|
||||
],
|
||||
}
|
||||
model._apply_prompt_caching(payload)
|
||||
assert "cache_control" not in payload["system"][0]
|
||||
assert payload["system"][1].get("cache_control") == {"type": "ephemeral"}
|
||||
|
||||
|
||||
def test_old_messages_outside_cache_window_not_cached(model):
|
||||
"""Messages older than prompt_cache_size should not be cached."""
|
||||
m = _make_model(prompt_cache_size=1)
|
||||
payload: dict = {
|
||||
"messages": [
|
||||
{"role": "user", "content": [{"type": "text", "text": "old message"}]},
|
||||
{"role": "user", "content": [{"type": "text", "text": "recent message"}]},
|
||||
],
|
||||
}
|
||||
m._apply_prompt_caching(payload)
|
||||
# Only the last message should be within the cache window
|
||||
assert "cache_control" not in payload["messages"][0]["content"][0]
|
||||
assert payload["messages"][1]["content"][0].get("cache_control") == {"type": "ephemeral"}
|
||||
@@ -0,0 +1,68 @@
|
||||
"""Regression tests for Gateway lifespan shutdown.
|
||||
|
||||
These tests guard the invariant that lifespan shutdown is *bounded*: a
|
||||
misbehaving channel whose ``stop()`` blocks forever must not keep the
|
||||
uvicorn worker alive. A hung worker is the precondition for the
|
||||
signal-reentrancy deadlock described in
|
||||
``app.gateway.app._SHUTDOWN_HOOK_TIMEOUT_SECONDS``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from contextlib import asynccontextmanager
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from fastapi import FastAPI
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _noop_langgraph_runtime(_app):
|
||||
yield
|
||||
|
||||
|
||||
async def _run_lifespan_with_hanging_stop() -> float:
|
||||
"""Drive the lifespan context with stop_channel_service hanging forever.
|
||||
|
||||
Returns the elapsed wall-clock seconds.
|
||||
"""
|
||||
from app.gateway.app import _SHUTDOWN_HOOK_TIMEOUT_SECONDS, lifespan
|
||||
|
||||
async def hang_forever() -> None:
|
||||
await asyncio.sleep(3600)
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
fake_service = MagicMock()
|
||||
fake_service.get_status = MagicMock(return_value={})
|
||||
|
||||
async def fake_start():
|
||||
return fake_service
|
||||
|
||||
with (
|
||||
patch("app.gateway.app.get_app_config"),
|
||||
patch("app.gateway.app.get_gateway_config", return_value=MagicMock(host="x", port=0)),
|
||||
patch("app.gateway.app.langgraph_runtime", _noop_langgraph_runtime),
|
||||
patch("app.channels.service.start_channel_service", side_effect=fake_start),
|
||||
patch("app.channels.service.stop_channel_service", side_effect=hang_forever),
|
||||
):
|
||||
loop = asyncio.get_event_loop()
|
||||
start = loop.time()
|
||||
async with lifespan(app):
|
||||
pass
|
||||
elapsed = loop.time() - start
|
||||
|
||||
assert _SHUTDOWN_HOOK_TIMEOUT_SECONDS < 30.0, "Timeout constant must stay modest"
|
||||
return elapsed
|
||||
|
||||
|
||||
def test_shutdown_is_bounded_when_channel_stop_hangs():
|
||||
"""Lifespan exit must complete near the configured timeout, not hang."""
|
||||
from app.gateway.app import _SHUTDOWN_HOOK_TIMEOUT_SECONDS
|
||||
|
||||
elapsed = asyncio.run(_run_lifespan_with_hanging_stop())
|
||||
|
||||
# Generous upper bound: timeout + 2s slack for scheduling overhead.
|
||||
assert elapsed < _SHUTDOWN_HOOK_TIMEOUT_SECONDS + 2.0, f"Lifespan shutdown took {elapsed:.2f}s; expected <= {_SHUTDOWN_HOOK_TIMEOUT_SECONDS + 2.0:.1f}s"
|
||||
# Lower bound: the wait_for should actually have waited.
|
||||
assert elapsed >= _SHUTDOWN_HOOK_TIMEOUT_SECONDS - 0.5, f"Lifespan exited too quickly ({elapsed:.2f}s); wait_for may not have been invoked."
|
||||
@@ -145,6 +145,21 @@ def test_build_run_config_explicit_agent_name_not_overwritten():
|
||||
assert config["configurable"]["agent_name"] == "explicit-agent"
|
||||
|
||||
|
||||
def test_build_run_config_context_custom_agent_injects_agent_name():
|
||||
"""Custom assistant_id must be forwarded as context['agent_name'] in context mode."""
|
||||
from app.gateway.services import build_run_config
|
||||
|
||||
config = build_run_config(
|
||||
"thread-1",
|
||||
{"context": {"model_name": "deepseek-v3"}},
|
||||
None,
|
||||
assistant_id="finalis",
|
||||
)
|
||||
|
||||
assert config["context"]["agent_name"] == "finalis"
|
||||
assert "configurable" not in config
|
||||
|
||||
|
||||
def test_resolve_agent_factory_returns_make_lead_agent():
|
||||
"""resolve_agent_factory always returns make_lead_agent regardless of assistant_id."""
|
||||
from app.gateway.services import resolve_agent_factory
|
||||
@@ -298,6 +313,36 @@ def test_build_run_config_with_context():
|
||||
assert config["recursion_limit"] == 100
|
||||
|
||||
|
||||
def test_build_run_config_null_context_becomes_empty_context():
|
||||
"""When caller sends context=null, treat it as an empty context object."""
|
||||
from app.gateway.services import build_run_config
|
||||
|
||||
config = build_run_config("thread-1", {"context": None}, None)
|
||||
|
||||
assert config["context"] == {}
|
||||
assert "configurable" not in config
|
||||
|
||||
|
||||
def test_build_run_config_rejects_non_mapping_context():
|
||||
"""When caller sends a non-object context, raise a clear error instead of a TypeError."""
|
||||
import pytest
|
||||
|
||||
from app.gateway.services import build_run_config
|
||||
|
||||
with pytest.raises(ValueError, match="context"):
|
||||
build_run_config("thread-1", {"context": "bad-context"}, None)
|
||||
|
||||
|
||||
def test_build_run_config_null_context_custom_agent_injects_agent_name():
|
||||
"""Custom assistant_id can still be injected when context=null starts context mode."""
|
||||
from app.gateway.services import build_run_config
|
||||
|
||||
config = build_run_config("thread-1", {"context": None}, None, assistant_id="finalis")
|
||||
|
||||
assert config["context"] == {"agent_name": "finalis"}
|
||||
assert "configurable" not in config
|
||||
|
||||
|
||||
def test_build_run_config_context_plus_configurable_warns(caplog):
|
||||
"""When caller sends both 'context' and 'configurable', prefer 'context' and log a warning."""
|
||||
import logging
|
||||
|
||||
@@ -80,6 +80,28 @@ async def test_crawl_network_error(jina_client, monkeypatch):
|
||||
assert "failed" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.anyio
|
||||
async def test_crawl_transient_failure_logs_without_traceback(jina_client, monkeypatch, caplog):
|
||||
"""Transient network failures must log at WARNING without a traceback and include the exception type."""
|
||||
|
||||
async def mock_post(self, url, **kwargs):
|
||||
raise httpx.ConnectTimeout("timed out")
|
||||
|
||||
monkeypatch.setattr(httpx.AsyncClient, "post", mock_post)
|
||||
|
||||
with caplog.at_level(logging.DEBUG, logger="deerflow.community.jina_ai.jina_client"):
|
||||
result = await jina_client.crawl("https://example.com")
|
||||
|
||||
jina_records = [r for r in caplog.records if r.name == "deerflow.community.jina_ai.jina_client"]
|
||||
assert len(jina_records) == 1, f"expected exactly one log record, got {len(jina_records)}"
|
||||
record = jina_records[0]
|
||||
assert record.levelno == logging.WARNING, f"expected WARNING, got {record.levelname}"
|
||||
assert record.exc_info is None, "transient failures must not attach a traceback"
|
||||
assert "ConnectTimeout" in record.getMessage()
|
||||
assert result.startswith("Error:")
|
||||
assert "ConnectTimeout" in result
|
||||
|
||||
|
||||
@pytest.mark.anyio
|
||||
async def test_crawl_passes_headers(jina_client, monkeypatch):
|
||||
"""Test that correct headers are sent."""
|
||||
|
||||
@@ -113,6 +113,54 @@ def test_make_lead_agent_disables_thinking_when_model_does_not_support_it(monkey
|
||||
assert result["model"] is not None
|
||||
|
||||
|
||||
def test_make_lead_agent_reads_runtime_options_from_context(monkeypatch):
|
||||
app_config = _make_app_config(
|
||||
[
|
||||
_make_model("default-model", supports_thinking=False),
|
||||
_make_model("context-model", supports_thinking=True),
|
||||
]
|
||||
)
|
||||
|
||||
import deerflow.tools as tools_module
|
||||
|
||||
get_available_tools = MagicMock(return_value=[])
|
||||
monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)
|
||||
monkeypatch.setattr(tools_module, "get_available_tools", get_available_tools)
|
||||
monkeypatch.setattr(lead_agent_module, "_build_middlewares", lambda config, model_name, agent_name=None: [])
|
||||
|
||||
captured: dict[str, object] = {}
|
||||
|
||||
def _fake_create_chat_model(*, name, thinking_enabled, reasoning_effort=None):
|
||||
captured["name"] = name
|
||||
captured["thinking_enabled"] = thinking_enabled
|
||||
captured["reasoning_effort"] = reasoning_effort
|
||||
return object()
|
||||
|
||||
monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
|
||||
monkeypatch.setattr(lead_agent_module, "create_agent", lambda **kwargs: kwargs)
|
||||
|
||||
result = lead_agent_module.make_lead_agent(
|
||||
{
|
||||
"context": {
|
||||
"model_name": "context-model",
|
||||
"thinking_enabled": False,
|
||||
"reasoning_effort": "high",
|
||||
"is_plan_mode": True,
|
||||
"subagent_enabled": True,
|
||||
"max_concurrent_subagents": 7,
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
assert captured == {
|
||||
"name": "context-model",
|
||||
"thinking_enabled": False,
|
||||
"reasoning_effort": "high",
|
||||
}
|
||||
get_available_tools.assert_called_once_with(model_name="context-model", groups=None, subagent_enabled=True)
|
||||
assert result["model"] is not None
|
||||
|
||||
|
||||
def test_make_lead_agent_rejects_invalid_bootstrap_agent_name(monkeypatch):
|
||||
app_config = _make_app_config([_make_model("safe-model", supports_thinking=False)])
|
||||
|
||||
@@ -207,3 +255,27 @@ def test_create_summarization_middleware_registers_memory_flush_hook_when_memory
|
||||
lead_agent_module._create_summarization_middleware()
|
||||
|
||||
assert captured["before_summarization"] == [lead_agent_module.memory_flush_hook]
|
||||
|
||||
|
||||
def test_create_summarization_middleware_passes_skill_read_tool_names(monkeypatch):
|
||||
app_config = _make_app_config([_make_model("default-model", supports_thinking=False)])
|
||||
monkeypatch.setattr(
|
||||
lead_agent_module,
|
||||
"get_summarization_config",
|
||||
lambda: SummarizationConfig(enabled=True, skill_file_read_tool_names=["read_file", "cat"]),
|
||||
)
|
||||
monkeypatch.setattr(lead_agent_module, "get_memory_config", lambda: MemoryConfig(enabled=False))
|
||||
monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config)
|
||||
monkeypatch.setattr(lead_agent_module, "create_chat_model", lambda **kwargs: object())
|
||||
|
||||
captured: dict[str, object] = {}
|
||||
|
||||
def _fake_middleware(**kwargs):
|
||||
captured.update(kwargs)
|
||||
return kwargs
|
||||
|
||||
monkeypatch.setattr(lead_agent_module, "DeerFlowSummarizationMiddleware", _fake_middleware)
|
||||
|
||||
lead_agent_module._create_summarization_middleware()
|
||||
|
||||
assert captured["skill_file_read_tool_names"] == ["read_file", "cat"]
|
||||
|
||||
@@ -297,6 +297,82 @@ def test_circuit_breaker_does_not_trip_on_non_retriable_errors(monkeypatch: pyte
|
||||
assert middleware._check_circuit() is False
|
||||
|
||||
|
||||
# ---------- ReadError / RemoteProtocolError retriable classification ----------
|
||||
|
||||
|
||||
class _ReadError(Exception):
|
||||
"""Local stand-in for httpx.ReadError — same class name, no httpx dependency."""
|
||||
|
||||
|
||||
class _RemoteProtocolError(Exception):
|
||||
"""Local stand-in for httpx.RemoteProtocolError — same class name, no httpx dependency."""
|
||||
|
||||
|
||||
_ReadError.__name__ = "ReadError"
|
||||
_RemoteProtocolError.__name__ = "RemoteProtocolError"
|
||||
|
||||
|
||||
def test_classify_error_read_error_is_retriable() -> None:
|
||||
middleware = _build_middleware()
|
||||
exc = _ReadError("Connection dropped mid-stream")
|
||||
exc.__class__.__name__ = "ReadError"
|
||||
retriable, reason = middleware._classify_error(exc)
|
||||
assert retriable is True
|
||||
assert reason == "transient"
|
||||
|
||||
|
||||
def test_classify_error_remote_protocol_error_is_retriable() -> None:
|
||||
middleware = _build_middleware()
|
||||
exc = _RemoteProtocolError("Server closed connection unexpectedly")
|
||||
exc.__class__.__name__ = "RemoteProtocolError"
|
||||
retriable, reason = middleware._classify_error(exc)
|
||||
assert retriable is True
|
||||
assert reason == "transient"
|
||||
|
||||
|
||||
def test_sync_read_error_triggers_retry_loop(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
middleware = _build_middleware(retry_max_attempts=3, retry_base_delay_ms=10, retry_cap_delay_ms=10)
|
||||
attempts = 0
|
||||
waits: list[float] = []
|
||||
monkeypatch.setattr("time.sleep", lambda d: waits.append(d))
|
||||
|
||||
def handler(_request) -> AIMessage:
|
||||
nonlocal attempts
|
||||
attempts += 1
|
||||
raise _ReadError("Connection dropped mid-stream")
|
||||
|
||||
result = middleware.wrap_model_call(SimpleNamespace(), handler)
|
||||
|
||||
assert isinstance(result, AIMessage)
|
||||
assert "temporarily unavailable" in result.content
|
||||
assert attempts == 3 # exhausted all retries
|
||||
assert len(waits) == 2 # slept between attempts 1→2 and 2→3
|
||||
|
||||
|
||||
@pytest.mark.anyio
|
||||
async def test_async_read_error_triggers_retry_loop(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
middleware = _build_middleware(retry_max_attempts=3, retry_base_delay_ms=10, retry_cap_delay_ms=10)
|
||||
attempts = 0
|
||||
waits: list[float] = []
|
||||
|
||||
async def fake_sleep(d: float) -> None:
|
||||
waits.append(d)
|
||||
|
||||
monkeypatch.setattr(asyncio, "sleep", fake_sleep)
|
||||
|
||||
async def handler(_request) -> AIMessage:
|
||||
nonlocal attempts
|
||||
attempts += 1
|
||||
raise _ReadError("Connection dropped mid-stream")
|
||||
|
||||
result = await middleware.awrap_model_call(SimpleNamespace(), handler)
|
||||
|
||||
assert isinstance(result, AIMessage)
|
||||
assert "temporarily unavailable" in result.content
|
||||
assert attempts == 3 # exhausted all retries
|
||||
assert len(waits) == 2 # slept between attempts 1→2 and 2→3
|
||||
|
||||
|
||||
@pytest.mark.anyio
|
||||
async def test_async_circuit_breaker_trips_and_recovers(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
"""Verify async version of circuit breaker correctly handles state transitions."""
|
||||
|
||||
@@ -255,7 +255,9 @@ class TestMultipleMounts:
|
||||
|
||||
sandbox.execute_command("cat /mnt/data/test.txt")
|
||||
# Verify the command received the resolved local path
|
||||
assert str(data_dir) in captured.get("command", "")
|
||||
command = captured.get("command", [])
|
||||
assert isinstance(command, list) and len(command) >= 3
|
||||
assert str(data_dir) in command[2]
|
||||
|
||||
def test_reverse_resolve_path_does_not_match_partial_prefix(self, tmp_path):
|
||||
foo_dir = tmp_path / "foo"
|
||||
|
||||
@@ -0,0 +1,274 @@
|
||||
"""Tests for custom MCP tool interceptors loaded via extensions_config.json."""
|
||||
|
||||
import asyncio
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
from deerflow.mcp.tools import get_mcp_tools
|
||||
|
||||
|
||||
def _make_patches(*, interceptor_paths=None):
|
||||
"""Set up mocks for get_mcp_tools() with optional custom interceptors.
|
||||
|
||||
Returns a dict of patch context managers.
|
||||
"""
|
||||
mock_client = MagicMock()
|
||||
mock_client.get_tools = AsyncMock(return_value=[])
|
||||
|
||||
extra = {}
|
||||
if interceptor_paths is not None:
|
||||
extra["mcpInterceptors"] = interceptor_paths
|
||||
|
||||
return {
|
||||
"client_cls": patch(
|
||||
"langchain_mcp_adapters.client.MultiServerMCPClient",
|
||||
return_value=mock_client,
|
||||
),
|
||||
"from_file": patch(
|
||||
"deerflow.config.extensions_config.ExtensionsConfig.from_file",
|
||||
return_value=MagicMock(
|
||||
model_extra=extra,
|
||||
get_enabled_mcp_servers=MagicMock(return_value={}),
|
||||
),
|
||||
),
|
||||
"build_servers": patch(
|
||||
"deerflow.mcp.tools.build_servers_config",
|
||||
return_value={"test-server": {}},
|
||||
),
|
||||
"oauth_headers": patch(
|
||||
"deerflow.mcp.tools.get_initial_oauth_headers",
|
||||
new_callable=AsyncMock,
|
||||
return_value={},
|
||||
),
|
||||
"oauth_interceptor": patch(
|
||||
"deerflow.mcp.tools.build_oauth_tool_interceptor",
|
||||
return_value=None,
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
def _get_interceptors(mock_cls):
|
||||
"""Extract the tool_interceptors list passed to MultiServerMCPClient."""
|
||||
kw = mock_cls.call_args
|
||||
return kw.kwargs.get("tool_interceptors") or kw[1].get("tool_interceptors", [])
|
||||
|
||||
|
||||
def test_custom_interceptor_loaded_and_appended():
|
||||
"""A valid interceptor builder path is resolved, called, and appended to tool_interceptors."""
|
||||
|
||||
async def fake_interceptor(request, handler):
|
||||
return await handler(request)
|
||||
|
||||
def fake_builder():
|
||||
return fake_interceptor
|
||||
|
||||
p = _make_patches(interceptor_paths=["my_package.auth:build_interceptor"])
|
||||
|
||||
with (
|
||||
p["client_cls"] as mock_cls,
|
||||
p["from_file"],
|
||||
p["build_servers"],
|
||||
p["oauth_headers"],
|
||||
p["oauth_interceptor"],
|
||||
patch("deerflow.mcp.tools.resolve_variable", return_value=fake_builder),
|
||||
):
|
||||
asyncio.run(get_mcp_tools())
|
||||
|
||||
interceptors = _get_interceptors(mock_cls)
|
||||
assert len(interceptors) == 1
|
||||
assert interceptors[0] is fake_interceptor
|
||||
|
||||
|
||||
def test_multiple_custom_interceptors():
|
||||
"""Multiple interceptor paths are all loaded in order."""
|
||||
|
||||
async def interceptor_a(request, handler):
|
||||
return await handler(request)
|
||||
|
||||
async def interceptor_b(request, handler):
|
||||
return await handler(request)
|
||||
|
||||
builders = {
|
||||
"pkg.a:build_a": lambda: interceptor_a,
|
||||
"pkg.b:build_b": lambda: interceptor_b,
|
||||
}
|
||||
|
||||
p = _make_patches(interceptor_paths=["pkg.a:build_a", "pkg.b:build_b"])
|
||||
|
||||
with (
|
||||
p["client_cls"] as mock_cls,
|
||||
p["from_file"],
|
||||
p["build_servers"],
|
||||
p["oauth_headers"],
|
||||
p["oauth_interceptor"],
|
||||
patch("deerflow.mcp.tools.resolve_variable", side_effect=lambda path: builders[path]),
|
||||
):
|
||||
asyncio.run(get_mcp_tools())
|
||||
|
||||
interceptors = _get_interceptors(mock_cls)
|
||||
assert len(interceptors) == 2
|
||||
assert interceptors[0] is interceptor_a
|
||||
assert interceptors[1] is interceptor_b
|
||||
|
||||
|
||||
def test_custom_interceptor_builder_returning_none_is_skipped():
|
||||
"""If a builder returns None, it is not appended to the interceptor list."""
|
||||
p = _make_patches(interceptor_paths=["pkg.noop:build_noop"])
|
||||
|
||||
with (
|
||||
p["client_cls"] as mock_cls,
|
||||
p["from_file"],
|
||||
p["build_servers"],
|
||||
p["oauth_headers"],
|
||||
p["oauth_interceptor"],
|
||||
patch("deerflow.mcp.tools.resolve_variable", return_value=lambda: None),
|
||||
):
|
||||
asyncio.run(get_mcp_tools())
|
||||
|
||||
assert len(_get_interceptors(mock_cls)) == 0
|
||||
|
||||
|
||||
def test_custom_interceptor_resolve_error_logs_warning_and_continues():
|
||||
"""A broken interceptor path logs a warning and does not block tool loading."""
|
||||
p = _make_patches(interceptor_paths=["broken.path:does_not_exist"])
|
||||
|
||||
with (
|
||||
p["client_cls"],
|
||||
p["from_file"],
|
||||
p["build_servers"],
|
||||
p["oauth_headers"],
|
||||
p["oauth_interceptor"],
|
||||
patch("deerflow.mcp.tools.resolve_variable", side_effect=ImportError("no such module")),
|
||||
patch("deerflow.mcp.tools.logger.warning") as mock_warn,
|
||||
):
|
||||
tools = asyncio.run(get_mcp_tools())
|
||||
|
||||
assert tools == []
|
||||
mock_warn.assert_called_once()
|
||||
assert "broken.path:does_not_exist" in mock_warn.call_args[0][0]
|
||||
|
||||
|
||||
def test_custom_interceptor_builder_exception_logs_warning_and_continues():
|
||||
"""If the builder function itself raises, the error is caught and logged."""
|
||||
|
||||
def exploding_builder():
|
||||
raise RuntimeError("builder exploded")
|
||||
|
||||
p = _make_patches(interceptor_paths=["pkg.bad:exploding_builder"])
|
||||
|
||||
with (
|
||||
p["client_cls"],
|
||||
p["from_file"],
|
||||
p["build_servers"],
|
||||
p["oauth_headers"],
|
||||
p["oauth_interceptor"],
|
||||
patch("deerflow.mcp.tools.resolve_variable", return_value=exploding_builder),
|
||||
patch("deerflow.mcp.tools.logger.warning") as mock_warn,
|
||||
):
|
||||
tools = asyncio.run(get_mcp_tools())
|
||||
|
||||
assert tools == []
|
||||
mock_warn.assert_called_once()
|
||||
assert "pkg.bad:exploding_builder" in mock_warn.call_args[0][0]
|
||||
|
||||
|
||||
def test_no_mcp_interceptors_field_is_safe():
|
||||
"""When mcpInterceptors is absent from config, no interceptors are added."""
|
||||
p = _make_patches(interceptor_paths=None)
|
||||
|
||||
with (
|
||||
p["client_cls"] as mock_cls,
|
||||
p["from_file"],
|
||||
p["build_servers"],
|
||||
p["oauth_headers"],
|
||||
p["oauth_interceptor"],
|
||||
):
|
||||
asyncio.run(get_mcp_tools())
|
||||
|
||||
assert len(_get_interceptors(mock_cls)) == 0
|
||||
|
||||
|
||||
def test_custom_interceptor_coexists_with_oauth_interceptor():
    """Custom interceptors are appended after the OAuth interceptor."""

    async def oauth_fn(request, handler):
        return await handler(request)

    async def custom_fn(request, handler):
        return await handler(request)

    patches = _make_patches(interceptor_paths=["pkg.custom:build_custom"])

    with (
        patches["client_cls"] as client_mock,
        patches["from_file"],
        patches["build_servers"],
        patches["oauth_headers"],
        # Replace the default oauth-interceptor patch so we control both entries.
        patch("deerflow.mcp.tools.build_oauth_tool_interceptor", return_value=oauth_fn),
        patch("deerflow.mcp.tools.resolve_variable", return_value=lambda: custom_fn),
    ):
        asyncio.run(get_mcp_tools())

    registered = _get_interceptors(client_mock)
    assert len(registered) == 2
    # Ordering matters: OAuth first, then the custom one.
    assert registered[0] is oauth_fn
    assert registered[1] is custom_fn
|
||||
|
||||
|
||||
def test_mcp_interceptors_single_string_is_normalized():
    """A single string value for mcpInterceptors is normalized to a list."""

    async def fake_interceptor(request, handler):
        return await handler(request)

    # Note: a bare string, not a one-element list.
    patches = _make_patches(interceptor_paths="pkg.single:build_it")

    with (
        patches["client_cls"] as client_mock,
        patches["from_file"],
        patches["build_servers"],
        patches["oauth_headers"],
        patches["oauth_interceptor"],
        patch("deerflow.mcp.tools.resolve_variable", return_value=lambda: fake_interceptor),
    ):
        asyncio.run(get_mcp_tools())

    assert len(_get_interceptors(client_mock)) == 1
|
||||
|
||||
|
||||
def test_mcp_interceptors_invalid_type_logs_warning():
    """A non-list, non-string value for mcpInterceptors logs a warning and is skipped."""
    patches = _make_patches(interceptor_paths=42)

    with (
        patches["client_cls"] as client_mock,
        patches["from_file"],
        patches["build_servers"],
        patches["oauth_headers"],
        patches["oauth_interceptor"],
        patch("deerflow.mcp.tools.logger.warning") as warn_mock,
    ):
        asyncio.run(get_mcp_tools())

    # Nothing registered, exactly one warning explaining the expected type.
    assert len(_get_interceptors(client_mock)) == 0
    warn_mock.assert_called_once()
    assert "must be a list" in warn_mock.call_args[0][0]
|
||||
|
||||
|
||||
def test_custom_interceptor_non_callable_return_logs_warning():
    """If a builder returns a non-callable value, it is skipped with a warning."""
    patches = _make_patches(interceptor_paths=["pkg.bad:returns_string"])

    with (
        patches["client_cls"] as client_mock,
        patches["from_file"],
        patches["build_servers"],
        patches["oauth_headers"],
        patches["oauth_interceptor"],
        # The builder resolves fine but yields a plain string.
        patch("deerflow.mcp.tools.resolve_variable", return_value=lambda: "not_a_callable"),
        patch("deerflow.mcp.tools.logger.warning") as warn_mock,
    ):
        asyncio.run(get_mcp_tools())

    assert len(_get_interceptors(client_mock)) == 0
    warn_mock.assert_called_once()
    assert "non-callable" in warn_mock.call_args[0][0]
|
||||
@@ -598,6 +598,7 @@ class TestUpdateMemoryStructuredResponse:
|
||||
|
||||
assert result is True
|
||||
model.ainvoke.assert_awaited_once()
|
||||
assert model.ainvoke.await_args.kwargs["config"] == {"run_name": "memory_agent"}
|
||||
|
||||
def test_correction_hint_injected_when_detected(self):
|
||||
updater = MemoryUpdater()
|
||||
|
||||
@@ -0,0 +1,397 @@
|
||||
"""
|
||||
Unit tests for MindIEChatModel adapter.
|
||||
"""
|
||||
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
|
||||
from langchain_core.outputs import ChatGeneration, ChatResult
|
||||
|
||||
# ── Import the module under test ──────────────────────────────────────────────
|
||||
from deerflow.models.mindie_provider import (
|
||||
MindIEChatModel,
|
||||
_fix_messages,
|
||||
_parse_xml_tool_call_to_dict,
|
||||
)
|
||||
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
# Helpers
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
def _make_chat_result(content: str, tool_calls=None) -> ChatResult:
    """Wrap *content* (and optional tool calls) in a single-generation ChatResult."""
    message = AIMessage(content=content)
    if tool_calls:
        message.tool_calls = tool_calls
    return ChatResult(generations=[ChatGeneration(message=message)])
|
||||
|
||||
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
# 1. _fix_messages
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
class TestFixMessages:
    """Behaviour of _fix_messages, which rewrites LangChain messages into the
    plain-text / XML shape the MindIE endpoint expects:

    * list-of-blocks content is flattened to a single string (text blocks only),
    * empty content becomes a single space (the backend rejects empty strings —
      presumably; confirm against the provider implementation),
    * AIMessage.tool_calls are serialised into <tool_call> XML in the content,
    * ToolMessage is converted to a HumanMessage wrapping <tool_response>,
    * SystemMessage passes through untouched.
    """

    # ── list content → str ────────────────────────────────────────────────────

    def test_list_content_extracted_to_str(self):
        """Adjacent text blocks are concatenated in order."""
        msg = HumanMessage(
            content=[
                {"type": "text", "text": "Hello"},
                {"type": "text", "text": " world"},
            ]
        )
        result = _fix_messages([msg])
        assert result[0].content == "Hello world"

    def test_list_content_ignores_non_text_blocks(self):
        """Non-text blocks (e.g. image_url) are dropped, not stringified."""
        msg = HumanMessage(
            content=[
                {"type": "image_url", "image_url": "http://x.com/img.png"},
                {"type": "text", "text": "caption"},
            ]
        )
        result = _fix_messages([msg])
        assert result[0].content == "caption"

    def test_empty_list_content_becomes_space(self):
        """An empty block list collapses to a single-space placeholder."""
        msg = HumanMessage(content=[])
        result = _fix_messages([msg])
        assert result[0].content == " "

    # ── plain str content ─────────────────────────────────────────────────────

    def test_plain_string_content_preserved(self):
        msg = HumanMessage(content="hi there")
        result = _fix_messages([msg])
        assert result[0].content == "hi there"

    def test_empty_string_content_becomes_space(self):
        """Empty string content is padded to " ", same as an empty list."""
        msg = HumanMessage(content="")
        result = _fix_messages([msg])
        assert result[0].content == " "

    # ── AIMessage with tool_calls → XML ───────────────────────────────────────

    def test_ai_message_with_tool_calls_serialised_to_xml(self):
        """Structured tool_calls are rendered as <tool_call>/<function=...> XML
        inside the content, and the structured list is cleared afterwards."""
        msg = AIMessage(
            content="Sure",
            tool_calls=[
                {
                    "name": "get_weather",
                    "args": {"city": "London"},
                    "id": "call_abc",
                }
            ],
        )
        result = _fix_messages([msg])
        out = result[0]
        assert isinstance(out, AIMessage)
        assert "<tool_call>" in out.content
        assert "<function=get_weather>" in out.content
        # Argument values are JSON-encoded, hence the quoted "London".
        assert '<parameter=city>"London"</parameter>' in out.content
        assert not getattr(out, "tool_calls", [])

    def test_ai_message_text_preserved_before_xml(self):
        """The assistant's free text stays in front of the serialised call."""
        msg = AIMessage(
            content="Here you go",
            tool_calls=[{"name": "search", "args": {"q": "pytest"}, "id": "x"}],
        )
        result = _fix_messages([msg])
        assert result[0].content.startswith("Here you go")

    def test_ai_message_multiple_tool_calls(self):
        """Each tool call gets its own <tool_call> block."""
        msg = AIMessage(
            content="",
            tool_calls=[
                {"name": "tool_a", "args": {"x": 1}, "id": "id1"},
                {"name": "tool_b", "args": {"y": 2}, "id": "id2"},
            ],
        )
        result = _fix_messages([msg])
        content = result[0].content
        assert content.count("<tool_call>") == 2
        assert "<function=tool_a>" in content
        assert "<function=tool_b>" in content

    # ── ToolMessage → HumanMessage ────────────────────────────────────────────

    def test_tool_message_becomes_human_message(self):
        """Tool results are replayed to the model as a human turn wrapped in
        <tool_response> (MindIE has no native tool-result role)."""
        msg = ToolMessage(content="42 degrees", tool_call_id="call_abc")
        result = _fix_messages([msg])
        out = result[0]
        assert isinstance(out, HumanMessage)
        assert "<tool_response>" in out.content
        assert "42 degrees" in out.content

    def test_tool_message_with_list_content(self):
        """List content on a ToolMessage is flattened before wrapping."""
        msg = ToolMessage(
            content=[{"type": "text", "text": "result"}],
            tool_call_id="call_xyz",
        )
        result = _fix_messages([msg])
        assert isinstance(result[0], HumanMessage)
        assert "result" in result[0].content

    # ── Mixed message list ────────────────────────────────────────────────────

    def test_mixed_message_types_ordering_preserved(self):
        """Conversion is 1:1 and order-preserving across message types."""
        msgs = [
            HumanMessage(content="q"),
            AIMessage(content="a"),
            ToolMessage(content="tool out", tool_call_id="c1"),
            HumanMessage(content="follow up"),
        ]
        result = _fix_messages(msgs)
        assert len(result) == 4
        assert isinstance(result[2], HumanMessage)
        assert result[3].content == "follow up"

    # ── SystemMessage pass-through ────────────────────────────────────────────

    def test_system_message_passed_through_unchanged(self):
        msg = SystemMessage(content="You are helpful.")
        result = _fix_messages([msg])
        assert result[0].content == "You are helpful."
|
||||
|
||||
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
# 2. _parse_xml_tool_call_to_dict
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
class TestParseXmlToolCalls:
    """Behaviour of _parse_xml_tool_call_to_dict: it returns a
    (clean_text, tool_calls) pair, stripping <tool_call> XML blocks out of the
    model's text and turning them into LangChain-style tool-call dicts with
    generated "call_..." ids. Parameter values are JSON-decoded where possible
    and left as raw strings otherwise.
    """

    def test_no_tool_call_returns_original(self):
        """Plain text passes through untouched with no calls extracted."""
        content = "Just a normal reply."
        clean, calls = _parse_xml_tool_call_to_dict(content)
        assert clean == content
        assert calls == []

    def test_single_tool_call_parsed(self):
        """Whitespace between XML tags is tolerated; id is auto-generated."""
        content = "<tool_call> <function=search> <parameter=query>pytest</parameter> </function> </tool_call>"
        clean, calls = _parse_xml_tool_call_to_dict(content)
        assert clean == ""
        assert len(calls) == 1
        assert calls[0]["name"] == "search"
        assert calls[0]["args"]["query"] == "pytest"
        assert calls[0]["id"].startswith("call_")

    def test_multiple_tool_calls_parsed(self):
        """Back-to-back blocks yield calls in document order."""
        content = "<tool_call><function=a><parameter=x>1</parameter></function></tool_call><tool_call><function=b><parameter=y>2</parameter></function></tool_call>"
        _, calls = _parse_xml_tool_call_to_dict(content)
        assert len(calls) == 2
        assert calls[0]["name"] == "a"
        assert calls[1]["name"] == "b"

    def test_text_before_tool_call_preserved(self):
        """Leading prose survives as the clean text (trailing newline stripped)."""
        content = "Here is the answer.\n<tool_call><function=f><parameter=k>v</parameter></function></tool_call>"
        clean, calls = _parse_xml_tool_call_to_dict(content)
        assert clean == "Here is the answer."
        assert len(calls) == 1

    # ── parameter-value JSON decoding ─────────────────────────────────────────

    def test_integer_param_deserialised(self):
        content = "<tool_call><function=f><parameter=n>42</parameter></function></tool_call>"
        _, calls = _parse_xml_tool_call_to_dict(content)
        assert calls[0]["args"]["n"] == 42

    def test_list_param_deserialised(self):
        content = '<tool_call><function=f><parameter=lst>["a","b"]</parameter></function></tool_call>'
        _, calls = _parse_xml_tool_call_to_dict(content)
        assert calls[0]["args"]["lst"] == ["a", "b"]

    def test_dict_param_deserialised(self):
        content = '<tool_call><function=f><parameter=d>{"k": 1}</parameter></function></tool_call>'
        _, calls = _parse_xml_tool_call_to_dict(content)
        assert calls[0]["args"]["d"] == {"k": 1}

    def test_bool_param_deserialised(self):
        # JSON "true", not Python "True".
        content = "<tool_call><function=f><parameter=flag>true</parameter></function></tool_call>"
        _, calls = _parse_xml_tool_call_to_dict(content)
        assert calls[0]["args"]["flag"] is True

    def test_malformed_param_stays_string(self):
        """Invalid JSON falls back to the raw string rather than raising."""
        content = "<tool_call><function=f><parameter=bad>{broken json</parameter></function></tool_call>"
        _, calls = _parse_xml_tool_call_to_dict(content)
        assert calls[0]["args"]["bad"] == "{broken json"

    def test_non_string_input_returned_as_is(self):
        """Non-string input (e.g. None) is returned unchanged with no calls."""
        result = _parse_xml_tool_call_to_dict(None)
        assert result == (None, [])

    def test_unique_ids_generated(self):
        """Parsing the same block twice must not reuse call ids."""
        block = "<tool_call><function=f><parameter=k>v</parameter></function></tool_call>"
        _, c1 = _parse_xml_tool_call_to_dict(block)
        _, c2 = _parse_xml_tool_call_to_dict(block)
        assert c1[0]["id"] != c2[0]["id"]
|
||||
|
||||
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
# 3. MindIEChatModel._patch_result_with_tools
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
class TestPatchResult:
    """Behaviour of MindIEChatModel._patch_result_with_tools: post-processes a
    ChatResult by un-escaping literal "\\n" sequences and lifting XML
    <tool_call> blocks out of the text into structured tool_calls.
    """

    def _model(self):
        """Build a MindIEChatModel without running its real __init__ (which
        would require endpoint configuration)."""
        with patch.object(MindIEChatModel, "__init__", return_value=None):
            m = MindIEChatModel.__new__(MindIEChatModel)
        return m

    def test_escaped_newlines_fixed(self):
        """Literal backslash-n in the payload becomes a real newline."""
        model = self._model()
        result = _make_chat_result("line1\\nline2")
        patched = model._patch_result_with_tools(result)
        assert patched.generations[0].message.content == "line1\nline2"

    def test_xml_tool_calls_extracted(self):
        """XML tool-call text is removed from content and surfaced as tool_calls."""
        model = self._model()
        content = "<tool_call><function=calc><parameter=expr>1+1</parameter></function></tool_call>"
        result = _make_chat_result(content)
        patched = model._patch_result_with_tools(result)
        msg = patched.generations[0].message
        assert msg.content == ""
        assert len(msg.tool_calls) == 1
        assert msg.tool_calls[0]["name"] == "calc"

    def test_patch_result_appends_to_existing_tool_calls(self):
        """Pre-existing structured tool_calls are kept; parsed ones are added."""
        model = self._model()
        existing = [{"name": "existing", "args": {}, "id": "e1"}]
        content = "<tool_call><function=new_tool><parameter=k>v</parameter></function></tool_call>"
        result = _make_chat_result(content, tool_calls=existing)
        patched = model._patch_result_with_tools(result)
        msg = patched.generations[0].message
        assert len(msg.tool_calls) == 2
        names = [tc["name"] for tc in msg.tool_calls]
        assert "existing" in names
        assert "new_tool" in names

    def test_no_tool_call_content_unchanged(self):
        model = self._model()
        result = _make_chat_result("plain reply")
        patched = model._patch_result_with_tools(result)
        assert patched.generations[0].message.content == "plain reply"

    def test_non_string_content_skipped(self):
        """List-shaped content is left alone — only smoke-check no crash here."""
        model = self._model()
        msg = AIMessage(content=[{"type": "text", "text": "hi"}])
        gen = ChatGeneration(message=msg)
        result = ChatResult(generations=[gen])
        patched = model._patch_result_with_tools(result)
        assert patched is not None
|
||||
|
||||
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
# 4. MindIEChatModel._generate (sync)
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
class TestGenerate:
    """MindIEChatModel._generate normalises messages and patches the result."""

    def test_generate_calls_fix_messages_and_patch(self):
        """Messages forwarded to the parent _generate must already be
        string-content only, and the parent's result is returned (patched)."""
        with (
            patch("deerflow.models.mindie_provider.ChatOpenAI._generate") as super_gen,
            patch.object(MindIEChatModel, "__init__", return_value=None),
        ):
            super_gen.return_value = _make_chat_result("hello")
            model = MindIEChatModel.__new__(MindIEChatModel)

            result = model._generate([HumanMessage(content="ping")])

            assert super_gen.called
            forwarded = super_gen.call_args[0][0]
            assert all(isinstance(m.content, str) for m in forwarded)
            assert result.generations[0].message.content == "hello"
|
||||
|
||||
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
# 5. MindIEChatModel._agenerate (async)
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
class TestAGenerate:
    """MindIEChatModel._agenerate patches the awaited parent result."""

    @pytest.mark.asyncio
    async def test_agenerate_patches_result(self):
        """Escaped newlines in the async result are fixed before returning."""
        with (
            patch("deerflow.models.mindie_provider.ChatOpenAI._agenerate", new_callable=AsyncMock) as super_agen,
            patch.object(MindIEChatModel, "__init__", return_value=None),
        ):
            super_agen.return_value = _make_chat_result("world\\nfoo")
            model = MindIEChatModel.__new__(MindIEChatModel)

            result = await model._agenerate([HumanMessage(content="hi")])

        assert result.generations[0].message.content == "world\nfoo"
|
||||
|
||||
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
# 6. MindIEChatModel._astream (async generator)
|
||||
# ═════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
|
||||
class TestAStream:
    """MindIEChatModel._astream has two paths: without tools it forwards the
    real upstream stream (fixing escaped newlines per chunk); with tools it
    falls back to a full _agenerate call and fake-streams the text in slices,
    emitting any tool_calls as a final chunk.
    """

    async def _collect(self, gen):
        """Drain an async generator into a list."""
        chunks = []
        async for chunk in gen:
            chunks.append(chunk)
        return chunks

    @pytest.mark.asyncio
    async def test_no_tools_uses_real_stream(self):
        """Without tools, upstream chunks are forwarded as-is (concatenation
        reconstructs the full text)."""
        from langchain_core.messages import AIMessageChunk
        from langchain_core.outputs import ChatGenerationChunk

        async def fake_stream(*args, **kwargs):
            for char in ["hel", "lo"]:
                yield ChatGenerationChunk(message=AIMessageChunk(content=char))

        with patch("deerflow.models.mindie_provider.ChatOpenAI._astream", side_effect=fake_stream), patch.object(MindIEChatModel, "__init__", return_value=None):
            model = MindIEChatModel.__new__(MindIEChatModel)
            chunks = await self._collect(model._astream([HumanMessage(content="hi")]))

        assert "".join(c.message.content for c in chunks) == "hello"

    @pytest.mark.asyncio
    async def test_no_tools_fixes_escaped_newlines_in_stream(self):
        """Escaped "\\n" is repaired per streamed chunk, not just per result."""
        from langchain_core.messages import AIMessageChunk
        from langchain_core.outputs import ChatGenerationChunk

        async def fake_stream(*args, **kwargs):
            yield ChatGenerationChunk(message=AIMessageChunk(content="a\\nb"))

        with patch("deerflow.models.mindie_provider.ChatOpenAI._astream", side_effect=fake_stream), patch.object(MindIEChatModel, "__init__", return_value=None):
            model = MindIEChatModel.__new__(MindIEChatModel)
            chunks = await self._collect(model._astream([HumanMessage(content="x")]))

        assert chunks[0].message.content == "a\nb"

    @pytest.mark.asyncio
    async def test_with_tools_fake_streams_text_in_chunks(self):
        """With tools bound, the text comes from _agenerate and is sliced into
        multiple chunks (50 chars must span more than one chunk)."""
        with patch.object(MindIEChatModel, "_agenerate", new_callable=AsyncMock) as mock_ag, patch.object(MindIEChatModel, "__init__", return_value=None):
            long_text = "A" * 50
            mock_ag.return_value = _make_chat_result(long_text)
            model = MindIEChatModel.__new__(MindIEChatModel)

            chunks = await self._collect(model._astream([HumanMessage(content="q")], tools=[{"type": "function", "function": {"name": "dummy"}}]))

        full = "".join(c.message.content for c in chunks)
        assert full == long_text
        assert len(chunks) > 1

    @pytest.mark.asyncio
    async def test_with_tools_emits_tool_call_chunk(self):
        """Tool calls from the generated result surface on a streamed chunk
        (the last tool-bearing chunk carries them)."""
        tool_calls = [{"name": "fn", "args": {}, "id": "c1"}]
        with patch.object(MindIEChatModel, "_agenerate", new_callable=AsyncMock) as mock_ag, patch.object(MindIEChatModel, "__init__", return_value=None):
            mock_ag.return_value = _make_chat_result("ok", tool_calls=tool_calls)
            model = MindIEChatModel.__new__(MindIEChatModel)

            chunks = await self._collect(model._astream([HumanMessage(content="q")], tools=[{"type": "function", "function": {"name": "fn"}}]))

        tool_chunks = [c for c in chunks if getattr(c.message, "tool_calls", [])]
        assert tool_chunks, "No chunk carried tool_calls"
        assert tool_chunks[-1].message.tool_calls[0]["name"] == "fn"

    @pytest.mark.asyncio
    async def test_with_tools_empty_text_still_emits_tool_chunk(self):
        """Even with no text at all, a tool-call chunk must still be emitted."""
        tool_calls = [{"name": "x", "args": {}, "id": "c2"}]
        with patch.object(MindIEChatModel, "_agenerate", new_callable=AsyncMock) as mock_ag, patch.object(MindIEChatModel, "__init__", return_value=None):
            mock_ag.return_value = _make_chat_result("", tool_calls=tool_calls)
            model = MindIEChatModel.__new__(MindIEChatModel)

            chunks = await self._collect(model._astream([HumanMessage(content="q")], tools=[{"type": "function", "function": {"name": "x"}}]))

        assert any(getattr(c.message, "tool_calls", []) for c in chunks)
|
||||
@@ -597,6 +597,99 @@ def test_openai_compatible_provider_passes_base_url(monkeypatch):
|
||||
assert captured.get("api_key") == "test-key"
|
||||
assert captured.get("temperature") == 1.0
|
||||
assert captured.get("max_tokens") == 4096
|
||||
assert captured.get("stream_usage") is True
|
||||
|
||||
|
||||
def test_openai_compatible_provider_respects_explicit_stream_usage(monkeypatch):
    """Explicit stream_usage should not be overwritten by the factory default."""
    model = ModelConfig(
        name="minimax-m2.5",
        display_name="MiniMax M2.5",
        description=None,
        use="langchain_openai:ChatOpenAI",
        model="MiniMax-M2.5",
        base_url="https://api.minimax.io/v1",
        api_key="test-key",
        stream_usage=False,  # explicit opt-out — must survive the factory
        supports_vision=True,
        supports_thinking=False,
    )
    _patch_factory(monkeypatch, _make_app_config([model]))

    seen_kwargs: dict = {}

    class RecordingModel(FakeChatModel):
        def __init__(self, **kwargs):
            seen_kwargs.update(kwargs)
            BaseChatModel.__init__(self, **kwargs)

    monkeypatch.setattr(factory_module, "resolve_class", lambda path, base: RecordingModel)

    factory_module.create_chat_model(name="minimax-m2.5")

    assert seen_kwargs.get("stream_usage") is False
|
||||
|
||||
|
||||
def test_openai_compatible_provider_enables_stream_usage_for_openai_api_base(monkeypatch):
    """openai_api_base should trigger stream_usage default for ChatOpenAI."""
    model = ModelConfig(
        name="openai-compatible",
        display_name="OpenAI-Compatible",
        description=None,
        use="langchain_openai:ChatOpenAI",
        model="example-model",
        # The alias field, not base_url — both must enable the default.
        openai_api_base="https://example.com/v1",
        api_key="test-key",
        supports_vision=False,
        supports_thinking=False,
    )
    _patch_factory(monkeypatch, _make_app_config([model]))

    seen_kwargs: dict = {}

    class RecordingModel(FakeChatModel):
        def __init__(self, **kwargs):
            seen_kwargs.update(kwargs)
            BaseChatModel.__init__(self, **kwargs)

    monkeypatch.setattr(factory_module, "resolve_class", lambda path, base: RecordingModel)

    factory_module.create_chat_model(name="openai-compatible")

    assert seen_kwargs.get("openai_api_base") == "https://example.com/v1"
    assert seen_kwargs.get("stream_usage") is True
|
||||
|
||||
|
||||
def test_non_openai_provider_does_not_receive_stream_usage_default(monkeypatch):
    """Non-OpenAI providers with base_url should not receive stream_usage by default."""
    model = ModelConfig(
        name="ollama-local",
        display_name="Ollama Local",
        description=None,
        use="langchain_ollama:ChatOllama",
        model="qwen2.5",
        base_url="http://127.0.0.1:11434",
        supports_vision=False,
        supports_thinking=False,
    )
    _patch_factory(monkeypatch, _make_app_config([model]))

    seen_kwargs: dict = {}

    class RecordingModel(FakeChatModel):
        def __init__(self, **kwargs):
            seen_kwargs.update(kwargs)
            BaseChatModel.__init__(self, **kwargs)

    monkeypatch.setattr(factory_module, "resolve_class", lambda path, base: RecordingModel)

    factory_module.create_chat_model(name="ollama-local")

    assert seen_kwargs.get("base_url") == "http://127.0.0.1:11434"
    # The kwarg must be entirely absent, not merely falsy.
    assert "stream_usage" not in seen_kwargs
|
||||
|
||||
|
||||
def test_openai_compatible_provider_multiple_models(monkeypatch):
|
||||
|
||||
@@ -0,0 +1,70 @@
|
||||
"""Tests for deerflow.utils.runtime.get_thread_id."""
|
||||
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import patch
|
||||
|
||||
from deerflow.utils.runtime import get_thread_id
|
||||
|
||||
|
||||
class TestGetThreadId:
    """Tests for get_thread_id() with various runtime shapes.

    Resolution cascade exercised here: runtime.context["thread_id"] →
    runtime.config["configurable"]["thread_id"] →
    langgraph.config.get_config()["configurable"]["thread_id"] → None.
    """

    def test_returns_none_when_runtime_is_none(self):
        assert get_thread_id(None) is None

    def test_returns_thread_id_from_context(self):
        runtime = SimpleNamespace(context={"thread_id": "t-1"}, config={})
        assert get_thread_id(runtime) == "t-1"

    def test_returns_none_from_empty_context(self):
        runtime = SimpleNamespace(context={}, config={})
        assert get_thread_id(runtime) is None

    def test_returns_none_from_none_context(self):
        runtime = SimpleNamespace(context=None, config={})
        assert get_thread_id(runtime) is None

    def test_falls_back_to_runtime_config(self):
        """Second level: config["configurable"] when context yields nothing."""
        runtime = SimpleNamespace(
            context=None,
            config={"configurable": {"thread_id": "t-from-config"}},
        )
        assert get_thread_id(runtime) == "t-from-config"

    def test_context_takes_precedence_over_config(self):
        runtime = SimpleNamespace(
            context={"thread_id": "t-from-context"},
            config={"configurable": {"thread_id": "t-from-config"}},
        )
        assert get_thread_id(runtime) == "t-from-context"

    def test_falls_back_to_get_config(self):
        """Third level: langgraph's ambient get_config()."""
        runtime = SimpleNamespace(context=None, config={})
        with patch("langgraph.config.get_config", return_value={"configurable": {"thread_id": "t-from-lg"}}):
            assert get_thread_id(runtime) == "t-from-lg"

    def test_returns_none_when_get_config_raises_runtime_error(self):
        """get_config raises RuntimeError outside a runnable context — that
        must be swallowed, not propagated."""
        runtime = SimpleNamespace(context=None, config={})
        with patch("langgraph.config.get_config", side_effect=RuntimeError):
            assert get_thread_id(runtime) is None

    def test_handles_object_without_context_or_config(self):
        runtime = SimpleNamespace()
        assert get_thread_id(runtime) is None

    def test_handles_context_not_dict(self):
        runtime = SimpleNamespace(context="not-a-dict", config={})
        assert get_thread_id(runtime) is None

    def test_config_without_configurable(self):
        runtime = SimpleNamespace(context=None, config={"other_key": "value"})
        assert get_thread_id(runtime) is None

    def test_empty_string_thread_id_treated_as_missing(self):
        """Falsy thread ids ("") do not count as present."""
        runtime = SimpleNamespace(context={"thread_id": ""}, config={})
        assert get_thread_id(runtime) is None

    def test_full_cascade_with_all_levels_failing(self):
        runtime = SimpleNamespace(context=None, config={})
        with patch("langgraph.config.get_config", return_value={"configurable": {}}):
            assert get_thread_id(runtime) is None
|
||||
@@ -5,6 +5,27 @@ import pytest
|
||||
from deerflow.skills.security_scanner import scan_skill_content
|
||||
|
||||
|
||||
@pytest.mark.anyio
async def test_scan_skill_content_passes_run_name_to_model(monkeypatch):
    """scan_skill_content must tag its moderation LLM call with run_name."""
    config = SimpleNamespace(skill_evolution=SimpleNamespace(moderation_model_name=None))
    allow_reply = SimpleNamespace(content='{"decision":"allow","reason":"ok"}')

    class RecordingModel:
        async def ainvoke(self, *args, **kwargs):
            # Capture the call so we can inspect the config kwarg afterwards.
            self.args = args
            self.kwargs = kwargs
            return allow_reply

    model = RecordingModel()
    monkeypatch.setattr("deerflow.skills.security_scanner.get_app_config", lambda: config)
    monkeypatch.setattr("deerflow.skills.security_scanner.create_chat_model", lambda **kwargs: model)

    result = await scan_skill_content("---\nname: demo-skill\ndescription: demo\n---\n", executable=False)

    assert result.decision == "allow"
    assert model.kwargs["config"] == {"run_name": "security_agent"}
|
||||
|
||||
|
||||
@pytest.mark.anyio
|
||||
async def test_scan_skill_content_blocks_when_model_unavailable(monkeypatch):
|
||||
config = SimpleNamespace(skill_evolution=SimpleNamespace(moderation_model_name=None))
|
||||
|
||||
@@ -1,16 +1,48 @@
|
||||
"""Tests for setup_agent tool — validates agent name security and data loss prevention."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from deerflow.tools.builtins.setup_agent_tool import setup_agent
|
||||
|
||||
# --- Helpers ---
|
||||
|
||||
|
||||
class _DummyRuntime(SimpleNamespace):
    """Typed namespace stand-in for a tool runtime (context + tool_call_id).

    NOTE(review): appears unreferenced in the visible tests — _make_runtime()
    builds runtimes from MagicMock instead. Consider removing if it is not
    used elsewhere in this file.
    """

    # Expected runtime attributes (annotations only; SimpleNamespace sets them).
    context: dict
    tool_call_id: str
|
||||
|
||||
|
||||
def _make_runtime(agent_name: str | None = "test-agent") -> MagicMock:
|
||||
runtime = MagicMock()
|
||||
runtime.context = {"agent_name": agent_name}
|
||||
runtime.tool_call_id = "call_1"
|
||||
return runtime
|
||||
|
||||
|
||||
def _make_paths_mock(tmp_path: Path):
|
||||
paths = MagicMock()
|
||||
paths.base_dir = tmp_path
|
||||
paths.agent_dir = lambda name: tmp_path / "agents" / name
|
||||
return paths
|
||||
|
||||
|
||||
def _call_setup_agent(tmp_path: Path, soul: str, description: str, agent_name: str = "test-agent"):
    """Call the underlying setup_agent function directly, bypassing langchain tool wrapper."""
    paths_patch = patch(
        "deerflow.tools.builtins.setup_agent_tool.get_paths",
        return_value=_make_paths_mock(tmp_path),
    )
    with paths_patch:
        return setup_agent.func(
            soul=soul,
            description=description,
            runtime=_make_runtime(agent_name),
        )
|
||||
|
||||
|
||||
# --- Agent name validation tests ---
|
||||
|
||||
|
||||
def test_setup_agent_rejects_invalid_agent_name_before_writing(tmp_path, monkeypatch):
|
||||
monkeypatch.setenv("DEER_FLOW_HOME", str(tmp_path))
|
||||
outside_dir = tmp_path.parent / "outside-target"
|
||||
@@ -38,3 +70,58 @@ def test_setup_agent_rejects_absolute_agent_name_before_writing(tmp_path, monkey
|
||||
assert "Invalid agent name" in messages[0].content
|
||||
assert not (tmp_path / "agents").exists()
|
||||
assert not (Path(absolute_agent) / "SOUL.md").exists()
|
||||
|
||||
|
||||
# --- Data loss prevention tests ---
|
||||
|
||||
|
||||
class TestSetupAgentNoDataLoss:
    """Ensure shutil.rmtree only removes directories created during the current call.

    Failure is injected at two different points (SOUL.md write vs config dump)
    to distinguish the pre-existing-directory case from the fresh-directory case.
    """

    def test_existing_agent_dir_preserved_on_failure(self, tmp_path: Path):
        """If the agent directory already exists and setup fails,
        the directory and its contents must NOT be deleted."""
        agent_dir = tmp_path / "agents" / "test-agent"
        agent_dir.mkdir(parents=True)
        old_soul = agent_dir / "SOUL.md"
        old_soul.write_text("original soul content")

        with patch("deerflow.tools.builtins.setup_agent_tool.get_paths", return_value=_make_paths_mock(tmp_path)):
            # Force soul_file.write_text to raise after directory already exists
            with patch.object(Path, "write_text", side_effect=OSError("disk full")):
                setup_agent.func(
                    soul="new soul",
                    description="desc",
                    runtime=_make_runtime(),
                )

        # Directory must still exist
        assert agent_dir.exists(), "Pre-existing agent directory was deleted on failure"
        # Original SOUL.md should still be on disk (not deleted by rmtree)
        assert old_soul.exists(), "Pre-existing SOUL.md was deleted on failure"

    def test_new_agent_dir_cleaned_up_on_failure(self, tmp_path: Path):
        """If the agent directory is newly created and setup fails,
        the directory should be cleaned up."""
        agent_dir = tmp_path / "agents" / "test-agent"
        assert not agent_dir.exists()

        with patch("deerflow.tools.builtins.setup_agent_tool.get_paths", return_value=_make_paths_mock(tmp_path)):
            # Fail later, at config serialisation, so the directory exists first.
            with patch("yaml.dump", side_effect=OSError("write error")):
                setup_agent.func(
                    soul="new soul",
                    description="desc",
                    runtime=_make_runtime(),
                )

        # Newly created directory should be cleaned up
        assert not agent_dir.exists(), "Newly created agent directory was not cleaned up on failure"

    def test_successful_setup_creates_files(self, tmp_path: Path):
        """Happy path: setup_agent creates config.yaml and SOUL.md."""
        _call_setup_agent(tmp_path, soul="# My Agent", description="A test agent")

        agent_dir = tmp_path / "agents" / "test-agent"
        assert agent_dir.exists()
        assert (agent_dir / "SOUL.md").read_text() == "# My Agent"
        assert (agent_dir / "config.yaml").exists()
|
||||
|
||||
@@ -0,0 +1,31 @@
|
||||
"""Validate every bundled SKILL.md under skills/public/.
|
||||
|
||||
Catches regressions like #2443 — a SKILL.md whose YAML front-matter fails to
|
||||
parse (e.g. an unquoted description containing a colon, which YAML interprets
|
||||
as a nested mapping). Each bundled skill is checked individually so the
|
||||
failure message identifies the exact file.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from deerflow.skills.validation import _validate_skill_frontmatter
|
||||
|
||||
SKILLS_PUBLIC_DIR = Path(__file__).resolve().parents[2] / "skills" / "public"
|
||||
BUNDLED_SKILL_DIRS = sorted(p.parent for p in SKILLS_PUBLIC_DIR.rglob("SKILL.md"))
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"skill_dir",
|
||||
BUNDLED_SKILL_DIRS,
|
||||
ids=lambda p: str(p.relative_to(SKILLS_PUBLIC_DIR)),
|
||||
)
|
||||
def test_bundled_skill_frontmatter_is_valid(skill_dir: Path) -> None:
|
||||
valid, msg, name = _validate_skill_frontmatter(skill_dir)
|
||||
assert valid, f"{skill_dir.relative_to(SKILLS_PUBLIC_DIR)}: {msg}"
|
||||
assert name, f"{skill_dir.relative_to(SKILLS_PUBLIC_DIR)}: no name extracted"
|
||||
|
||||
|
||||
def test_skills_public_dir_has_skills() -> None:
|
||||
assert BUNDLED_SKILL_DIRS, f"no SKILL.md found under {SKILLS_PUBLIC_DIR}"
|
||||
@@ -1,119 +1,131 @@
|
||||
"""Tests for skill file parser."""
|
||||
"""Tests for the SKILL.md parser regression introduced in issue #1803.
|
||||
|
||||
The previous hand-rolled YAML parser stored quoted string values with their
|
||||
surrounding quotes intact (e.g. ``name: "my-skill"`` → ``'"my-skill"'``).
|
||||
This caused a mismatch with ``_validate_skill_frontmatter`` (which uses
|
||||
``yaml.safe_load``) and broke skill lookup after installation.
|
||||
|
||||
The parser now uses ``yaml.safe_load`` consistently with ``validation.py``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from deerflow.skills.parser import parse_skill_file
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _write_skill(tmp_path: Path, content: str) -> Path:
|
||||
"""Write a SKILL.md file and return its path."""
|
||||
skill_file = tmp_path / "SKILL.md"
|
||||
skill_file.write_text(content, encoding="utf-8")
|
||||
|
||||
def _write_skill(tmp_path: Path, front_matter: str, body: str = "# My Skill\n") -> Path:
|
||||
"""Write a minimal SKILL.md and return the path."""
|
||||
skill_dir = tmp_path / "my-skill"
|
||||
skill_dir.mkdir()
|
||||
skill_file = skill_dir / "SKILL.md"
|
||||
skill_file.write_text(f"---\n{front_matter}\n---\n{body}", encoding="utf-8")
|
||||
return skill_file
|
||||
|
||||
|
||||
class TestParseSkillFile:
|
||||
def test_valid_skill_file(self, tmp_path):
|
||||
skill_file = _write_skill(
|
||||
tmp_path,
|
||||
"---\nname: my-skill\ndescription: A test skill\nlicense: MIT\n---\n\n# My Skill\n",
|
||||
)
|
||||
result = parse_skill_file(skill_file, "public")
|
||||
assert result is not None
|
||||
assert result.name == "my-skill"
|
||||
assert result.description == "A test skill"
|
||||
assert result.license == "MIT"
|
||||
assert result.category == "public"
|
||||
assert result.enabled is True
|
||||
assert result.skill_dir == tmp_path
|
||||
assert result.skill_file == skill_file
|
||||
# ---------------------------------------------------------------------------
|
||||
# Basic parsing
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def test_missing_name_returns_none(self, tmp_path):
|
||||
skill_file = _write_skill(
|
||||
tmp_path,
|
||||
"---\ndescription: A test skill\n---\n\nBody\n",
|
||||
)
|
||||
assert parse_skill_file(skill_file, "public") is None
|
||||
|
||||
def test_missing_description_returns_none(self, tmp_path):
|
||||
skill_file = _write_skill(
|
||||
tmp_path,
|
||||
"---\nname: my-skill\n---\n\nBody\n",
|
||||
)
|
||||
assert parse_skill_file(skill_file, "public") is None
|
||||
def test_parse_plain_name(tmp_path):
|
||||
"""Unquoted name is parsed correctly."""
|
||||
skill_file = _write_skill(tmp_path, "name: my-skill\ndescription: A test skill")
|
||||
skill = parse_skill_file(skill_file, category="custom")
|
||||
assert skill is not None
|
||||
assert skill.name == "my-skill"
|
||||
|
||||
def test_no_front_matter_returns_none(self, tmp_path):
|
||||
skill_file = _write_skill(tmp_path, "# Just a markdown file\n\nNo front matter here.\n")
|
||||
assert parse_skill_file(skill_file, "public") is None
|
||||
|
||||
def test_nonexistent_file_returns_none(self, tmp_path):
|
||||
skill_file = tmp_path / "SKILL.md"
|
||||
assert parse_skill_file(skill_file, "public") is None
|
||||
def test_parse_quoted_name_no_quotes_in_result(tmp_path):
|
||||
"""Quoted name (YAML string) must not include surrounding quotes in result.
|
||||
|
||||
def test_wrong_filename_returns_none(self, tmp_path):
|
||||
wrong_file = tmp_path / "README.md"
|
||||
wrong_file.write_text("---\nname: test\ndescription: test\n---\n", encoding="utf-8")
|
||||
assert parse_skill_file(wrong_file, "public") is None
|
||||
Regression: the old hand-rolled parser stored ``'"my-skill"'`` instead of
|
||||
``'my-skill'`` when the YAML value was wrapped in double-quotes.
|
||||
"""
|
||||
skill_file = _write_skill(tmp_path, 'name: "my-skill"\ndescription: A test skill')
|
||||
skill = parse_skill_file(skill_file, category="custom")
|
||||
assert skill is not None
|
||||
assert skill.name == "my-skill", f"Expected 'my-skill', got {skill.name!r}"
|
||||
|
||||
def test_optional_license_field(self, tmp_path):
|
||||
skill_file = _write_skill(
|
||||
tmp_path,
|
||||
"---\nname: my-skill\ndescription: A test skill\n---\n\nBody\n",
|
||||
)
|
||||
result = parse_skill_file(skill_file, "custom")
|
||||
assert result is not None
|
||||
assert result.license is None
|
||||
assert result.category == "custom"
|
||||
|
||||
def test_custom_relative_path(self, tmp_path):
|
||||
skill_file = _write_skill(
|
||||
tmp_path,
|
||||
"---\nname: nested-skill\ndescription: Nested\n---\n\nBody\n",
|
||||
)
|
||||
rel = Path("group/nested-skill")
|
||||
result = parse_skill_file(skill_file, "public", relative_path=rel)
|
||||
assert result is not None
|
||||
assert result.relative_path == rel
|
||||
def test_parse_single_quoted_name(tmp_path):
|
||||
"""Single-quoted YAML strings are also handled correctly."""
|
||||
skill_file = _write_skill(tmp_path, "name: 'my-skill'\ndescription: A test skill")
|
||||
skill = parse_skill_file(skill_file, category="custom")
|
||||
assert skill is not None
|
||||
assert skill.name == "my-skill"
|
||||
|
||||
def test_default_relative_path_is_parent_name(self, tmp_path):
|
||||
skill_file = _write_skill(
|
||||
tmp_path,
|
||||
"---\nname: my-skill\ndescription: Test\n---\n\nBody\n",
|
||||
)
|
||||
result = parse_skill_file(skill_file, "public")
|
||||
assert result is not None
|
||||
assert result.relative_path == Path(tmp_path.name)
|
||||
|
||||
def test_colons_in_description(self, tmp_path):
|
||||
skill_file = _write_skill(
|
||||
tmp_path,
|
||||
"---\nname: my-skill\ndescription: A skill: does things\n---\n\nBody\n",
|
||||
)
|
||||
result = parse_skill_file(skill_file, "public")
|
||||
assert result is not None
|
||||
assert result.description == "A skill: does things"
|
||||
def test_parse_description_returned(tmp_path):
|
||||
"""Description field is correctly extracted."""
|
||||
skill_file = _write_skill(tmp_path, "name: my-skill\ndescription: Does amazing things")
|
||||
skill = parse_skill_file(skill_file, category="custom")
|
||||
assert skill is not None
|
||||
assert skill.description == "Does amazing things"
|
||||
|
||||
def test_multiline_yaml_folded_description(self, tmp_path):
|
||||
skill_file = _write_skill(
|
||||
tmp_path,
|
||||
"---\nname: multiline-skill\ndescription: >\n This is a multiline\n description for a skill.\n\n It spans multiple lines.\nlicense: MIT\n---\n\nBody\n",
|
||||
)
|
||||
result = parse_skill_file(skill_file, "public")
|
||||
assert result is not None
|
||||
assert result.name == "multiline-skill"
|
||||
assert result.description == "This is a multiline description for a skill.\n\nIt spans multiple lines."
|
||||
assert result.license == "MIT"
|
||||
|
||||
def test_multiline_yaml_literal_description(self, tmp_path):
|
||||
skill_file = _write_skill(
|
||||
tmp_path,
|
||||
"---\nname: pipe-skill\ndescription: |\n First line.\n Second line.\n---\n\nBody\n",
|
||||
)
|
||||
result = parse_skill_file(skill_file, "public")
|
||||
assert result is not None
|
||||
assert result.name == "pipe-skill"
|
||||
assert result.description == "First line.\nSecond line."
|
||||
def test_parse_multiline_description(tmp_path):
|
||||
"""Multi-line YAML descriptions are collapsed correctly by yaml.safe_load."""
|
||||
front_matter = "name: my-skill\ndescription: >\n A folded\n description"
|
||||
skill_file = _write_skill(tmp_path, front_matter)
|
||||
skill = parse_skill_file(skill_file, category="custom")
|
||||
assert skill is not None
|
||||
assert "folded" in skill.description
|
||||
|
||||
def test_empty_front_matter_returns_none(self, tmp_path):
|
||||
skill_file = _write_skill(tmp_path, "---\n\n---\n\nBody\n")
|
||||
assert parse_skill_file(skill_file, "public") is None
|
||||
|
||||
def test_parse_license_field(tmp_path):
|
||||
"""Optional license field is captured when present."""
|
||||
skill_file = _write_skill(tmp_path, "name: my-skill\ndescription: Test\nlicense: MIT")
|
||||
skill = parse_skill_file(skill_file, category="custom")
|
||||
assert skill is not None
|
||||
assert skill.license == "MIT"
|
||||
|
||||
|
||||
def test_parse_missing_name_returns_none(tmp_path):
|
||||
"""Skills missing a name field are rejected."""
|
||||
skill_file = _write_skill(tmp_path, "description: A test skill")
|
||||
skill = parse_skill_file(skill_file, category="custom")
|
||||
assert skill is None
|
||||
|
||||
|
||||
def test_parse_missing_description_returns_none(tmp_path):
|
||||
"""Skills missing a description field are rejected."""
|
||||
skill_file = _write_skill(tmp_path, "name: my-skill")
|
||||
skill = parse_skill_file(skill_file, category="custom")
|
||||
assert skill is None
|
||||
|
||||
|
||||
def test_parse_no_front_matter_returns_none(tmp_path):
|
||||
"""Files without YAML front-matter delimiters return None."""
|
||||
skill_dir = tmp_path / "no-fm"
|
||||
skill_dir.mkdir()
|
||||
skill_file = skill_dir / "SKILL.md"
|
||||
skill_file.write_text("# No front matter here\n", encoding="utf-8")
|
||||
skill = parse_skill_file(skill_file, category="public")
|
||||
assert skill is None
|
||||
|
||||
|
||||
def test_parse_invalid_yaml_returns_none(tmp_path):
|
||||
"""Malformed YAML front-matter is handled gracefully (returns None)."""
|
||||
skill_file = _write_skill(tmp_path, "name: [unclosed")
|
||||
skill = parse_skill_file(skill_file, category="custom")
|
||||
assert skill is None
|
||||
|
||||
|
||||
def test_parse_category_stored(tmp_path):
|
||||
"""Category is propagated into the returned Skill object."""
|
||||
skill_file = _write_skill(tmp_path, "name: my-skill\ndescription: Test")
|
||||
skill = parse_skill_file(skill_file, category="public")
|
||||
assert skill is not None
|
||||
assert skill.category == "public"
|
||||
|
||||
|
||||
def test_parse_nonexistent_file_returns_none(tmp_path):
|
||||
"""Non-existent files are handled gracefully."""
|
||||
skill = parse_skill_file(tmp_path / "ghost" / "SKILL.md", category="custom")
|
||||
assert skill is None
|
||||
|
||||
@@ -25,7 +25,9 @@ def test_build_subagent_section_hides_bash_examples_when_unavailable(monkeypatch
|
||||
|
||||
section = prompt_module._build_subagent_section(3)
|
||||
|
||||
assert "Not available in the current sandbox configuration" in section
|
||||
# When bash is not available, it should not appear at all (aligned with Codex:
|
||||
# unavailable roles are omitted, not listed as disabled)
|
||||
assert "**bash**" not in section
|
||||
assert 'bash("npm test")' not in section
|
||||
assert 'read_file("/mnt/user-data/workspace/README.md")' in section
|
||||
assert "available tools (ls, read_file, web_search, etc.)" in section
|
||||
|
||||
@@ -0,0 +1,596 @@
|
||||
"""Tests for subagent per-agent skill configuration and custom subagent types.
|
||||
|
||||
Covers:
|
||||
- SubagentConfig.skills field
|
||||
- SubagentOverrideConfig.skills field
|
||||
- CustomSubagentConfig model validation
|
||||
- SubagentsAppConfig.custom_agents and get_skills_for()
|
||||
- Registry: custom agent lookup, skills override, merged available names
|
||||
- Skills filter passthrough in task_tool config assembly
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from deerflow.config.subagents_config import (
|
||||
CustomSubagentConfig,
|
||||
SubagentOverrideConfig,
|
||||
SubagentsAppConfig,
|
||||
get_subagents_app_config,
|
||||
load_subagents_config_from_dict,
|
||||
)
|
||||
from deerflow.subagents.config import SubagentConfig
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _reset_subagents_config(**kwargs) -> None:
|
||||
"""Reset global subagents config to a known state."""
|
||||
load_subagents_config_from_dict(kwargs)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# SubagentConfig.skills field
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSubagentConfigSkills:
|
||||
def test_default_skills_is_none(self):
|
||||
config = SubagentConfig(name="test", description="test", system_prompt="test")
|
||||
assert config.skills is None
|
||||
|
||||
def test_skills_whitelist(self):
|
||||
config = SubagentConfig(
|
||||
name="test",
|
||||
description="test",
|
||||
system_prompt="test",
|
||||
skills=["data-analysis", "visualization"],
|
||||
)
|
||||
assert config.skills == ["data-analysis", "visualization"]
|
||||
|
||||
def test_skills_empty_list_means_no_skills(self):
|
||||
config = SubagentConfig(
|
||||
name="test",
|
||||
description="test",
|
||||
system_prompt="test",
|
||||
skills=[],
|
||||
)
|
||||
assert config.skills == []
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# SubagentOverrideConfig.skills field
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSubagentOverrideConfigSkills:
|
||||
def test_default_skills_is_none(self):
|
||||
override = SubagentOverrideConfig()
|
||||
assert override.skills is None
|
||||
|
||||
def test_skills_whitelist(self):
|
||||
override = SubagentOverrideConfig(skills=["web-search", "data-analysis"])
|
||||
assert override.skills == ["web-search", "data-analysis"]
|
||||
|
||||
def test_skills_empty_list(self):
|
||||
override = SubagentOverrideConfig(skills=[])
|
||||
assert override.skills == []
|
||||
|
||||
def test_skills_coexists_with_other_fields(self):
|
||||
override = SubagentOverrideConfig(
|
||||
timeout_seconds=300,
|
||||
model="gpt-5",
|
||||
skills=["my-skill"],
|
||||
)
|
||||
assert override.timeout_seconds == 300
|
||||
assert override.model == "gpt-5"
|
||||
assert override.skills == ["my-skill"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CustomSubagentConfig model
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestCustomSubagentConfig:
|
||||
def test_minimal_valid(self):
|
||||
config = CustomSubagentConfig(
|
||||
description="A test agent",
|
||||
system_prompt="You are a test agent.",
|
||||
)
|
||||
assert config.description == "A test agent"
|
||||
assert config.system_prompt == "You are a test agent."
|
||||
assert config.tools is None
|
||||
assert config.disallowed_tools == ["task", "ask_clarification", "present_files"]
|
||||
assert config.skills is None
|
||||
assert config.model == "inherit"
|
||||
assert config.max_turns == 50
|
||||
assert config.timeout_seconds == 900
|
||||
|
||||
def test_full_configuration(self):
|
||||
config = CustomSubagentConfig(
|
||||
description="Data analysis specialist",
|
||||
system_prompt="You are a data analysis subagent.",
|
||||
tools=["bash", "read_file", "write_file"],
|
||||
disallowed_tools=["task"],
|
||||
skills=["data-analysis", "visualization"],
|
||||
model="qwen3:32b",
|
||||
max_turns=80,
|
||||
timeout_seconds=600,
|
||||
)
|
||||
assert config.tools == ["bash", "read_file", "write_file"]
|
||||
assert config.skills == ["data-analysis", "visualization"]
|
||||
assert config.model == "qwen3:32b"
|
||||
assert config.max_turns == 80
|
||||
assert config.timeout_seconds == 600
|
||||
|
||||
def test_skills_empty_list_no_skills(self):
|
||||
config = CustomSubagentConfig(
|
||||
description="test",
|
||||
system_prompt="test",
|
||||
skills=[],
|
||||
)
|
||||
assert config.skills == []
|
||||
|
||||
def test_rejects_zero_max_turns(self):
|
||||
with pytest.raises(ValueError):
|
||||
CustomSubagentConfig(
|
||||
description="test",
|
||||
system_prompt="test",
|
||||
max_turns=0,
|
||||
)
|
||||
|
||||
def test_rejects_zero_timeout(self):
|
||||
with pytest.raises(ValueError):
|
||||
CustomSubagentConfig(
|
||||
description="test",
|
||||
system_prompt="test",
|
||||
timeout_seconds=0,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# SubagentsAppConfig.custom_agents and get_skills_for()
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSubagentsAppConfigCustomAgents:
|
||||
def test_default_custom_agents_empty(self):
|
||||
config = SubagentsAppConfig()
|
||||
assert config.custom_agents == {}
|
||||
|
||||
def test_custom_agents_loaded(self):
|
||||
config = SubagentsAppConfig(
|
||||
custom_agents={
|
||||
"analysis": CustomSubagentConfig(
|
||||
description="Analysis agent",
|
||||
system_prompt="You analyze data.",
|
||||
skills=["data-analysis"],
|
||||
),
|
||||
}
|
||||
)
|
||||
assert "analysis" in config.custom_agents
|
||||
assert config.custom_agents["analysis"].skills == ["data-analysis"]
|
||||
|
||||
def test_multiple_custom_agents(self):
|
||||
config = SubagentsAppConfig(
|
||||
custom_agents={
|
||||
"analysis": CustomSubagentConfig(
|
||||
description="Analysis",
|
||||
system_prompt="analyze",
|
||||
skills=["data-analysis"],
|
||||
),
|
||||
"researcher": CustomSubagentConfig(
|
||||
description="Research",
|
||||
system_prompt="research",
|
||||
skills=["web-search"],
|
||||
),
|
||||
}
|
||||
)
|
||||
assert len(config.custom_agents) == 2
|
||||
|
||||
|
||||
class TestGetSkillsFor:
|
||||
def test_returns_none_when_no_override(self):
|
||||
config = SubagentsAppConfig()
|
||||
assert config.get_skills_for("general-purpose") is None
|
||||
assert config.get_skills_for("unknown") is None
|
||||
|
||||
def test_returns_skills_whitelist(self):
|
||||
config = SubagentsAppConfig(
|
||||
agents={
|
||||
"general-purpose": SubagentOverrideConfig(skills=["web-search", "coding"]),
|
||||
}
|
||||
)
|
||||
assert config.get_skills_for("general-purpose") == ["web-search", "coding"]
|
||||
|
||||
def test_returns_empty_list_for_no_skills(self):
|
||||
config = SubagentsAppConfig(
|
||||
agents={
|
||||
"bash": SubagentOverrideConfig(skills=[]),
|
||||
}
|
||||
)
|
||||
assert config.get_skills_for("bash") == []
|
||||
|
||||
def test_returns_none_for_unrelated_agent(self):
|
||||
config = SubagentsAppConfig(
|
||||
agents={
|
||||
"bash": SubagentOverrideConfig(skills=["web-search"]),
|
||||
}
|
||||
)
|
||||
assert config.get_skills_for("general-purpose") is None
|
||||
|
||||
def test_returns_none_when_skills_not_set(self):
|
||||
config = SubagentsAppConfig(
|
||||
agents={
|
||||
"bash": SubagentOverrideConfig(timeout_seconds=300),
|
||||
}
|
||||
)
|
||||
assert config.get_skills_for("bash") is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# load_subagents_config_from_dict with skills and custom_agents
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestLoadSubagentsConfigWithSkills:
|
||||
def teardown_method(self):
|
||||
_reset_subagents_config()
|
||||
|
||||
def test_load_with_skills_override(self):
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"timeout_seconds": 900,
|
||||
"agents": {
|
||||
"general-purpose": {"skills": ["web-search", "data-analysis"]},
|
||||
},
|
||||
}
|
||||
)
|
||||
cfg = get_subagents_app_config()
|
||||
assert cfg.get_skills_for("general-purpose") == ["web-search", "data-analysis"]
|
||||
|
||||
def test_load_with_empty_skills(self):
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"timeout_seconds": 900,
|
||||
"agents": {
|
||||
"bash": {"skills": []},
|
||||
},
|
||||
}
|
||||
)
|
||||
cfg = get_subagents_app_config()
|
||||
assert cfg.get_skills_for("bash") == []
|
||||
|
||||
def test_load_with_custom_agents(self):
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"timeout_seconds": 900,
|
||||
"custom_agents": {
|
||||
"analysis": {
|
||||
"description": "Data analysis specialist",
|
||||
"system_prompt": "You are a data analysis subagent.",
|
||||
"skills": ["data-analysis", "visualization"],
|
||||
"tools": ["bash", "read_file"],
|
||||
"max_turns": 80,
|
||||
"timeout_seconds": 600,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
cfg = get_subagents_app_config()
|
||||
assert "analysis" in cfg.custom_agents
|
||||
custom = cfg.custom_agents["analysis"]
|
||||
assert custom.skills == ["data-analysis", "visualization"]
|
||||
assert custom.tools == ["bash", "read_file"]
|
||||
assert custom.max_turns == 80
|
||||
assert custom.timeout_seconds == 600
|
||||
|
||||
def test_load_with_both_overrides_and_custom(self):
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"timeout_seconds": 900,
|
||||
"agents": {
|
||||
"general-purpose": {"skills": ["web-search"]},
|
||||
},
|
||||
"custom_agents": {
|
||||
"analysis": {
|
||||
"description": "Analysis",
|
||||
"system_prompt": "Analyze.",
|
||||
"skills": ["data-analysis"],
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
cfg = get_subagents_app_config()
|
||||
assert cfg.get_skills_for("general-purpose") == ["web-search"]
|
||||
assert cfg.custom_agents["analysis"].skills == ["data-analysis"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Registry: custom agent lookup
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestRegistryCustomAgentLookup:
|
||||
def teardown_method(self):
|
||||
_reset_subagents_config()
|
||||
|
||||
def test_custom_agent_found(self):
|
||||
from deerflow.subagents.registry import get_subagent_config
|
||||
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"custom_agents": {
|
||||
"analysis": {
|
||||
"description": "Data analysis specialist",
|
||||
"system_prompt": "You are a data analysis subagent.",
|
||||
"skills": ["data-analysis"],
|
||||
"tools": ["bash", "read_file"],
|
||||
"max_turns": 80,
|
||||
"timeout_seconds": 600,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
config = get_subagent_config("analysis")
|
||||
assert config is not None
|
||||
assert config.name == "analysis"
|
||||
assert config.skills == ["data-analysis"]
|
||||
assert config.tools == ["bash", "read_file"]
|
||||
assert config.max_turns == 80
|
||||
assert config.timeout_seconds == 600
|
||||
assert config.model == "inherit"
|
||||
|
||||
def test_custom_agent_not_found(self):
|
||||
from deerflow.subagents.registry import get_subagent_config
|
||||
|
||||
_reset_subagents_config()
|
||||
assert get_subagent_config("nonexistent") is None
|
||||
|
||||
def test_builtin_takes_priority_over_custom(self):
|
||||
"""If a custom agent has the same name as a builtin, builtin wins."""
|
||||
from deerflow.subagents.builtins import BUILTIN_SUBAGENTS
|
||||
from deerflow.subagents.registry import get_subagent_config
|
||||
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"custom_agents": {
|
||||
"general-purpose": {
|
||||
"description": "Custom override attempt",
|
||||
"system_prompt": "Should not be used",
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
config = get_subagent_config("general-purpose")
|
||||
# Should get the builtin description, not the custom one
|
||||
assert config.description == BUILTIN_SUBAGENTS["general-purpose"].description
|
||||
|
||||
def test_custom_agent_with_override(self):
|
||||
"""Per-agent overrides also apply to custom agents."""
|
||||
from deerflow.subagents.registry import get_subagent_config
|
||||
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"custom_agents": {
|
||||
"analysis": {
|
||||
"description": "Analysis",
|
||||
"system_prompt": "Analyze.",
|
||||
"timeout_seconds": 600,
|
||||
},
|
||||
},
|
||||
"agents": {
|
||||
"analysis": {"timeout_seconds": 300, "skills": ["overridden-skill"]},
|
||||
},
|
||||
}
|
||||
)
|
||||
config = get_subagent_config("analysis")
|
||||
assert config is not None
|
||||
assert config.timeout_seconds == 300 # Override applied
|
||||
assert config.skills == ["overridden-skill"] # Override applied
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Registry: skills override on builtin agents
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestRegistrySkillsOverride:
|
||||
def teardown_method(self):
|
||||
_reset_subagents_config()
|
||||
|
||||
def test_skills_override_applied_to_builtin(self):
|
||||
from deerflow.subagents.registry import get_subagent_config
|
||||
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"agents": {
|
||||
"general-purpose": {"skills": ["web-search", "data-analysis"]},
|
||||
},
|
||||
}
|
||||
)
|
||||
config = get_subagent_config("general-purpose")
|
||||
assert config.skills == ["web-search", "data-analysis"]
|
||||
|
||||
def test_empty_skills_override(self):
|
||||
from deerflow.subagents.registry import get_subagent_config
|
||||
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"agents": {
|
||||
"bash": {"skills": []},
|
||||
},
|
||||
}
|
||||
)
|
||||
config = get_subagent_config("bash")
|
||||
assert config.skills == []
|
||||
|
||||
def test_no_skills_override_keeps_default(self):
|
||||
from deerflow.subagents.registry import get_subagent_config
|
||||
|
||||
_reset_subagents_config()
|
||||
config = get_subagent_config("general-purpose")
|
||||
assert config.skills is None # Default: inherit all
|
||||
|
||||
def test_skills_override_does_not_mutate_builtin(self):
|
||||
from deerflow.subagents.builtins import BUILTIN_SUBAGENTS
|
||||
from deerflow.subagents.registry import get_subagent_config
|
||||
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"agents": {
|
||||
"general-purpose": {"skills": ["web-search"]},
|
||||
},
|
||||
}
|
||||
)
|
||||
_ = get_subagent_config("general-purpose")
|
||||
assert BUILTIN_SUBAGENTS["general-purpose"].skills is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Registry: get_available_subagent_names merges custom types
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestRegistryAvailableNames:
|
||||
def teardown_method(self):
|
||||
_reset_subagents_config()
|
||||
|
||||
def test_includes_builtin_names(self):
|
||||
from deerflow.subagents.registry import get_subagent_names
|
||||
|
||||
_reset_subagents_config()
|
||||
names = get_subagent_names()
|
||||
assert "general-purpose" in names
|
||||
assert "bash" in names
|
||||
|
||||
def test_includes_custom_names(self):
|
||||
from deerflow.subagents.registry import get_subagent_names
|
||||
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"custom_agents": {
|
||||
"analysis": {
|
||||
"description": "Analysis",
|
||||
"system_prompt": "Analyze.",
|
||||
},
|
||||
"researcher": {
|
||||
"description": "Research",
|
||||
"system_prompt": "Research.",
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
names = get_subagent_names()
|
||||
assert "general-purpose" in names
|
||||
assert "bash" in names
|
||||
assert "analysis" in names
|
||||
assert "researcher" in names
|
||||
|
||||
def test_no_duplicates_when_custom_name_matches_builtin(self):
|
||||
from deerflow.subagents.registry import get_subagent_names
|
||||
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"custom_agents": {
|
||||
"general-purpose": {
|
||||
"description": "Duplicate name",
|
||||
"system_prompt": "test",
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
names = get_subagent_names()
|
||||
assert names.count("general-purpose") == 1
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Registry: list_subagents includes custom agents
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestRegistryListSubagentsWithCustom:
|
||||
def teardown_method(self):
|
||||
_reset_subagents_config()
|
||||
|
||||
def test_list_includes_custom_agents(self):
|
||||
from deerflow.subagents.registry import list_subagents
|
||||
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"custom_agents": {
|
||||
"analysis": {
|
||||
"description": "Analysis",
|
||||
"system_prompt": "Analyze.",
|
||||
"skills": ["data-analysis"],
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
configs = list_subagents()
|
||||
names = {c.name for c in configs}
|
||||
assert "general-purpose" in names
|
||||
assert "bash" in names
|
||||
assert "analysis" in names
|
||||
|
||||
def test_list_custom_agent_has_correct_skills(self):
|
||||
from deerflow.subagents.registry import list_subagents
|
||||
|
||||
load_subagents_config_from_dict(
|
||||
{
|
||||
"custom_agents": {
|
||||
"analysis": {
|
||||
"description": "Analysis",
|
||||
"system_prompt": "Analyze.",
|
||||
"skills": ["data-analysis", "visualization"],
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
by_name = {c.name: c for c in list_subagents()}
|
||||
assert by_name["analysis"].skills == ["data-analysis", "visualization"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Skills filter passthrough: verify config.skills is used in task_tool assembly
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSkillsFilterPassthrough:
|
||||
"""Test that SubagentConfig.skills is correctly passed to get_skills_prompt_section."""
|
||||
|
||||
def test_none_skills_passes_none_to_prompt(self):
|
||||
"""When config.skills is None, available_skills=None should be passed (inherit all)."""
|
||||
config = SubagentConfig(
|
||||
name="test",
|
||||
description="test",
|
||||
system_prompt="test",
|
||||
skills=None,
|
||||
)
|
||||
# Verify: set(None) would raise, so the code must check for None first
|
||||
available = set(config.skills) if config.skills is not None else None
|
||||
assert available is None
|
||||
|
||||
def test_empty_skills_passes_empty_set(self):
|
||||
"""When config.skills is [], available_skills=set() should be passed (no skills)."""
|
||||
config = SubagentConfig(
|
||||
name="test",
|
||||
description="test",
|
||||
system_prompt="test",
|
||||
skills=[],
|
||||
)
|
||||
available = set(config.skills) if config.skills is not None else None
|
||||
assert available == set()
|
||||
|
||||
def test_skills_whitelist_passes_correct_set(self):
|
||||
"""When config.skills has values, those should be passed as available_skills."""
|
||||
config = SubagentConfig(
|
||||
name="test",
|
||||
description="test",
|
||||
system_prompt="test",
|
||||
skills=["data-analysis", "web-search"],
|
||||
)
|
||||
available = set(config.skills) if config.skills is not None else None
|
||||
assert available == {"data-analysis", "web-search"}
|
||||
@@ -49,6 +49,8 @@ def test_generate_suggestions_parses_and_limits(monkeypatch):
|
||||
result = asyncio.run(suggestions.generate_suggestions("t1", req))
|
||||
|
||||
assert result.suggestions == ["Q1", "Q2", "Q3"]
|
||||
fake_model.ainvoke.assert_awaited_once()
|
||||
assert fake_model.ainvoke.await_args.kwargs["config"] == {"run_name": "suggest_agent"}
|
||||
|
||||
|
||||
def test_generate_suggestions_parses_list_block_content(monkeypatch):
|
||||
@@ -67,6 +69,8 @@ def test_generate_suggestions_parses_list_block_content(monkeypatch):
|
||||
result = asyncio.run(suggestions.generate_suggestions("t1", req))
|
||||
|
||||
assert result.suggestions == ["Q1", "Q2"]
|
||||
fake_model.ainvoke.assert_awaited_once()
|
||||
assert fake_model.ainvoke.await_args.kwargs["config"] == {"run_name": "suggest_agent"}
|
||||
|
||||
|
||||
def test_generate_suggestions_parses_output_text_block_content(monkeypatch):
|
||||
@@ -85,6 +89,8 @@ def test_generate_suggestions_parses_output_text_block_content(monkeypatch):
|
||||
result = asyncio.run(suggestions.generate_suggestions("t1", req))
|
||||
|
||||
assert result.suggestions == ["Q1", "Q2"]
|
||||
fake_model.ainvoke.assert_awaited_once()
|
||||
assert fake_model.ainvoke.await_args.kwargs["config"] == {"run_name": "suggest_agent"}
|
||||
|
||||
|
||||
def test_generate_suggestions_returns_empty_on_model_error(monkeypatch):
|
||||
|
||||
@@ -4,7 +4,7 @@ from types import SimpleNamespace
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
from langchain_core.messages import AIMessage, HumanMessage, RemoveMessage
|
||||
from langchain_core.messages import AIMessage, HumanMessage, RemoveMessage, ToolMessage
|
||||
|
||||
from deerflow.agents.memory.summarization_hook import memory_flush_hook
|
||||
from deerflow.agents.middlewares.summarization_middleware import DeerFlowSummarizationMiddleware, SummarizationEvent
|
||||
@@ -29,7 +29,16 @@ def _runtime(thread_id: str | None = "thread-1", agent_name: str | None = None)
|
||||
return SimpleNamespace(context=context)
|
||||
|
||||
|
||||
def _middleware(*, before_summarization=None, trigger=("messages", 4), keep=("messages", 2)) -> DeerFlowSummarizationMiddleware:
|
||||
def _middleware(
|
||||
*,
|
||||
before_summarization=None,
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
skill_file_read_tool_names=None,
|
||||
preserve_recent_skill_count: int = 0,
|
||||
preserve_recent_skill_tokens: int = 0,
|
||||
preserve_recent_skill_tokens_per_skill: int = 0,
|
||||
) -> DeerFlowSummarizationMiddleware:
|
||||
model = MagicMock()
|
||||
model.invoke.return_value = SimpleNamespace(text="compressed summary")
|
||||
return DeerFlowSummarizationMiddleware(
|
||||
@@ -38,9 +47,34 @@ def _middleware(*, before_summarization=None, trigger=("messages", 4), keep=("me
|
||||
keep=keep,
|
||||
token_counter=len,
|
||||
before_summarization=before_summarization,
|
||||
skill_file_read_tool_names=skill_file_read_tool_names,
|
||||
preserve_recent_skill_count=preserve_recent_skill_count,
|
||||
preserve_recent_skill_tokens=preserve_recent_skill_tokens,
|
||||
preserve_recent_skill_tokens_per_skill=preserve_recent_skill_tokens_per_skill,
|
||||
)
|
||||
|
||||
|
||||
def _skill_read_call(tool_id: str, skill: str) -> dict:
|
||||
return {
|
||||
"name": "read_file",
|
||||
"id": tool_id,
|
||||
"args": {"path": f"/mnt/skills/public/{skill}/SKILL.md"},
|
||||
}
|
||||
|
||||
|
||||
def _skill_conversation() -> list:
|
||||
return [
|
||||
HumanMessage(content="u1"),
|
||||
AIMessage(content="", tool_calls=[_skill_read_call("t1", "alpha")]),
|
||||
ToolMessage(content="alpha skill body", tool_call_id="t1"),
|
||||
HumanMessage(content="u2"),
|
||||
AIMessage(content="", tool_calls=[_skill_read_call("t2", "beta")]),
|
||||
ToolMessage(content="beta skill body", tool_call_id="t2"),
|
||||
HumanMessage(content="u3"),
|
||||
AIMessage(content="final"),
|
||||
]
|
||||
|
||||
|
||||
def test_before_summarization_hook_receives_messages_before_compression() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(before_summarization=[captured.append])
|
||||
@@ -167,6 +201,295 @@ def test_memory_flush_hook_enqueues_filtered_messages_and_flushes(monkeypatch: p
|
||||
assert add_kwargs["reinforcement_detected"] is False
|
||||
|
||||
|
||||
def test_skill_rescue_keeps_recent_skill_reads_out_of_summary() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(
|
||||
before_summarization=[captured.append],
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
preserve_recent_skill_count=5,
|
||||
preserve_recent_skill_tokens=10_000,
|
||||
preserve_recent_skill_tokens_per_skill=10_000,
|
||||
)
|
||||
|
||||
result = middleware.before_model({"messages": _skill_conversation()}, _runtime())
|
||||
|
||||
assert len(captured) == 1
|
||||
summarized_ids = {id(m) for m in captured[0].messages_to_summarize}
|
||||
preserved = captured[0].preserved_messages
|
||||
|
||||
# Both skill-read bundles should be rescued into preserved_messages,
|
||||
# tool_call ↔ tool_result pairs stay intact.
|
||||
assert any(isinstance(m, ToolMessage) and m.content == "alpha skill body" for m in preserved)
|
||||
assert any(isinstance(m, ToolMessage) and m.content == "beta skill body" for m in preserved)
|
||||
for m in preserved:
|
||||
if isinstance(m, ToolMessage) and m.content in {"alpha skill body", "beta skill body"}:
|
||||
assert id(m) not in summarized_ids
|
||||
|
||||
# Preserved output order: rescued bundles first, then the tail kept by parent cutoff.
|
||||
contents = [getattr(m, "content", None) for m in preserved]
|
||||
assert contents[-2:] == ["u3", "final"]
|
||||
|
||||
# The final emitted state should start with RemoveMessage + summary, then preserved messages.
|
||||
emitted = result["messages"]
|
||||
assert isinstance(emitted[0], RemoveMessage)
|
||||
assert emitted[1].content.startswith("Here is a summary")
|
||||
assert list(emitted[-2:]) == list(preserved[-2:])
|
||||
|
||||
|
||||
def test_skill_rescue_respects_count_budget() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(
|
||||
before_summarization=[captured.append],
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
preserve_recent_skill_count=1,
|
||||
preserve_recent_skill_tokens=10_000,
|
||||
preserve_recent_skill_tokens_per_skill=10_000,
|
||||
)
|
||||
|
||||
middleware.before_model({"messages": _skill_conversation()}, _runtime())
|
||||
|
||||
preserved = captured[0].preserved_messages
|
||||
summarized = captured[0].messages_to_summarize
|
||||
# Newest skill (beta) rescued; older skill (alpha) falls into summary.
|
||||
assert any(isinstance(m, ToolMessage) and m.content == "beta skill body" for m in preserved)
|
||||
assert not any(isinstance(m, ToolMessage) and m.content == "alpha skill body" for m in preserved)
|
||||
assert any(isinstance(m, ToolMessage) and m.content == "alpha skill body" for m in summarized)
|
||||
|
||||
|
||||
def test_skill_rescue_uses_injected_skills_container_path() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(
|
||||
before_summarization=[captured.append],
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
preserve_recent_skill_count=5,
|
||||
preserve_recent_skill_tokens=10_000,
|
||||
preserve_recent_skill_tokens_per_skill=10_000,
|
||||
)
|
||||
middleware._skills_container_path = "/custom/skills"
|
||||
messages = [
|
||||
HumanMessage(content="u1"),
|
||||
AIMessage(content="", tool_calls=[{"name": "read_file", "id": "t1", "args": {"path": "/custom/skills/demo/SKILL.md"}}]),
|
||||
ToolMessage(content="demo skill body", tool_call_id="t1"),
|
||||
HumanMessage(content="u2"),
|
||||
AIMessage(content="final"),
|
||||
]
|
||||
|
||||
middleware.before_model({"messages": messages}, _runtime())
|
||||
|
||||
preserved = captured[0].preserved_messages
|
||||
assert any(isinstance(m, ToolMessage) and m.content == "demo skill body" for m in preserved)
|
||||
|
||||
|
||||
def test_skill_rescue_uses_configured_skill_read_tool_names() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(
|
||||
before_summarization=[captured.append],
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
skill_file_read_tool_names=["custom_read"],
|
||||
preserve_recent_skill_count=5,
|
||||
preserve_recent_skill_tokens=10_000,
|
||||
preserve_recent_skill_tokens_per_skill=10_000,
|
||||
)
|
||||
middleware._skills_container_path = "/custom/skills"
|
||||
messages = [
|
||||
HumanMessage(content="u1"),
|
||||
AIMessage(content="", tool_calls=[{"name": "custom_read", "id": "t1", "args": {"path": "/custom/skills/demo/SKILL.md"}}]),
|
||||
ToolMessage(content="demo skill body", tool_call_id="t1"),
|
||||
HumanMessage(content="u2"),
|
||||
AIMessage(content="final"),
|
||||
]
|
||||
|
||||
middleware.before_model({"messages": messages}, _runtime())
|
||||
|
||||
preserved = captured[0].preserved_messages
|
||||
assert any(isinstance(m, ToolMessage) and m.content == "demo skill body" for m in preserved)
|
||||
|
||||
|
||||
def test_skill_rescue_respects_per_skill_token_cap() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(
|
||||
before_summarization=[captured.append],
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
preserve_recent_skill_count=5,
|
||||
preserve_recent_skill_tokens=10_000,
|
||||
# token_counter=len counts one token per message; per-skill cap of 0 rejects every bundle.
|
||||
preserve_recent_skill_tokens_per_skill=0,
|
||||
)
|
||||
|
||||
middleware.before_model({"messages": _skill_conversation()}, _runtime())
|
||||
|
||||
preserved = captured[0].preserved_messages
|
||||
assert not any(isinstance(m, ToolMessage) and m.content in {"alpha skill body", "beta skill body"} for m in preserved)
|
||||
|
||||
|
||||
def test_skill_rescue_disabled_when_count_zero() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(
|
||||
before_summarization=[captured.append],
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
preserve_recent_skill_count=0,
|
||||
preserve_recent_skill_tokens=10_000,
|
||||
preserve_recent_skill_tokens_per_skill=10_000,
|
||||
)
|
||||
|
||||
middleware.before_model({"messages": _skill_conversation()}, _runtime())
|
||||
|
||||
preserved = captured[0].preserved_messages
|
||||
assert not any(isinstance(m, ToolMessage) for m in preserved)
|
||||
|
||||
|
||||
def test_skill_rescue_ignores_non_skill_tool_reads() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(
|
||||
before_summarization=[captured.append],
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
preserve_recent_skill_count=5,
|
||||
preserve_recent_skill_tokens=10_000,
|
||||
preserve_recent_skill_tokens_per_skill=10_000,
|
||||
)
|
||||
|
||||
messages = [
|
||||
HumanMessage(content="u1"),
|
||||
AIMessage(
|
||||
content="",
|
||||
tool_calls=[{"name": "read_file", "id": "t1", "args": {"path": "/mnt/user-data/workspace/notes.md"}}],
|
||||
),
|
||||
ToolMessage(content="user notes", tool_call_id="t1"),
|
||||
HumanMessage(content="u2"),
|
||||
AIMessage(content="done"),
|
||||
]
|
||||
|
||||
middleware.before_model({"messages": messages}, _runtime())
|
||||
|
||||
preserved = captured[0].preserved_messages
|
||||
assert not any(isinstance(m, ToolMessage) and m.content == "user notes" for m in preserved)
|
||||
|
||||
|
||||
def test_skill_rescue_does_not_preserve_non_skill_outputs_from_mixed_tool_calls() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(
|
||||
before_summarization=[captured.append],
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
preserve_recent_skill_count=5,
|
||||
preserve_recent_skill_tokens=10_000,
|
||||
preserve_recent_skill_tokens_per_skill=10_000,
|
||||
)
|
||||
|
||||
messages = [
|
||||
HumanMessage(content="u1"),
|
||||
AIMessage(
|
||||
content="",
|
||||
tool_calls=[
|
||||
_skill_read_call("skill-1", "alpha"),
|
||||
{"name": "read_file", "id": "file-1", "args": {"path": "/mnt/user-data/workspace/notes.md"}},
|
||||
],
|
||||
),
|
||||
ToolMessage(content="alpha skill body", tool_call_id="skill-1"),
|
||||
ToolMessage(content="user notes", tool_call_id="file-1"),
|
||||
HumanMessage(content="u2"),
|
||||
AIMessage(content="done"),
|
||||
]
|
||||
|
||||
middleware.before_model({"messages": messages}, _runtime())
|
||||
|
||||
preserved = captured[0].preserved_messages
|
||||
summarized = captured[0].messages_to_summarize
|
||||
|
||||
preserved_ai = next(m for m in preserved if isinstance(m, AIMessage) and m.tool_calls)
|
||||
summarized_ai = next(m for m in summarized if isinstance(m, AIMessage) and m.tool_calls)
|
||||
|
||||
assert [tc["id"] for tc in preserved_ai.tool_calls] == ["skill-1"]
|
||||
assert [tc["id"] for tc in summarized_ai.tool_calls] == ["file-1"]
|
||||
assert any(isinstance(m, ToolMessage) and m.content == "alpha skill body" for m in preserved)
|
||||
assert not any(isinstance(m, ToolMessage) and m.content == "user notes" for m in preserved)
|
||||
assert any(isinstance(m, ToolMessage) and m.content == "user notes" for m in summarized)
|
||||
|
||||
|
||||
def test_skill_rescue_clears_content_on_rescued_ai_clone() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(
|
||||
before_summarization=[captured.append],
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
preserve_recent_skill_count=5,
|
||||
preserve_recent_skill_tokens=10_000,
|
||||
preserve_recent_skill_tokens_per_skill=10_000,
|
||||
)
|
||||
|
||||
messages = [
|
||||
HumanMessage(content="u1"),
|
||||
AIMessage(
|
||||
content="reading skill and notes",
|
||||
tool_calls=[
|
||||
_skill_read_call("skill-1", "alpha"),
|
||||
{"name": "read_file", "id": "file-1", "args": {"path": "/mnt/user-data/workspace/notes.md"}},
|
||||
],
|
||||
),
|
||||
ToolMessage(content="alpha skill body", tool_call_id="skill-1"),
|
||||
ToolMessage(content="user notes", tool_call_id="file-1"),
|
||||
HumanMessage(content="u2"),
|
||||
AIMessage(content="done"),
|
||||
]
|
||||
|
||||
middleware.before_model({"messages": messages}, _runtime())
|
||||
|
||||
preserved = captured[0].preserved_messages
|
||||
summarized = captured[0].messages_to_summarize
|
||||
|
||||
preserved_ai = next(m for m in preserved if isinstance(m, AIMessage) and m.tool_calls)
|
||||
summarized_ai = next(m for m in summarized if isinstance(m, AIMessage) and m.tool_calls)
|
||||
|
||||
assert preserved_ai.content == ""
|
||||
assert summarized_ai.content == "reading skill and notes"
|
||||
|
||||
|
||||
def test_skill_rescue_only_preserves_skill_calls_with_matched_tool_results() -> None:
|
||||
captured: list[SummarizationEvent] = []
|
||||
middleware = _middleware(
|
||||
before_summarization=[captured.append],
|
||||
trigger=("messages", 4),
|
||||
keep=("messages", 2),
|
||||
preserve_recent_skill_count=5,
|
||||
preserve_recent_skill_tokens=10_000,
|
||||
preserve_recent_skill_tokens_per_skill=10_000,
|
||||
)
|
||||
|
||||
messages = [
|
||||
HumanMessage(content="u1"),
|
||||
AIMessage(
|
||||
content="",
|
||||
tool_calls=[
|
||||
_skill_read_call("skill-1", "alpha"),
|
||||
_skill_read_call("skill-2", "beta"),
|
||||
],
|
||||
),
|
||||
ToolMessage(content="alpha skill body", tool_call_id="skill-1"),
|
||||
HumanMessage(content="u2"),
|
||||
AIMessage(content="done"),
|
||||
]
|
||||
|
||||
middleware.before_model({"messages": messages}, _runtime())
|
||||
|
||||
preserved = captured[0].preserved_messages
|
||||
summarized = captured[0].messages_to_summarize
|
||||
|
||||
preserved_ai = next(m for m in preserved if isinstance(m, AIMessage) and m.tool_calls)
|
||||
summarized_ai = next(m for m in summarized if isinstance(m, AIMessage) and m.tool_calls)
|
||||
|
||||
assert [tc["id"] for tc in preserved_ai.tool_calls] == ["skill-1"]
|
||||
assert [tc["id"] for tc in summarized_ai.tool_calls] == ["skill-2"]
|
||||
assert any(isinstance(m, ToolMessage) and m.content == "alpha skill body" for m in preserved)
|
||||
assert not any(isinstance(m, ToolMessage) and getattr(m, "tool_call_id", None) == "skill-2" for m in preserved)
|
||||
|
||||
|
||||
def test_memory_flush_hook_preserves_agent_scoped_memory(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
queue = MagicMock()
|
||||
monkeypatch.setattr("deerflow.agents.memory.summarization_hook.get_memory_config", lambda: MemoryConfig(enabled=True))
|
||||
|
||||
@@ -143,7 +143,7 @@ def test_task_tool_emits_running_and_completed_events(monkeypatch):
|
||||
monkeypatch.setattr(task_tool_module, "SubagentStatus", FakeSubagentStatus)
|
||||
monkeypatch.setattr(task_tool_module, "SubagentExecutor", DummyExecutor)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "Skills Appendix")
|
||||
|
||||
monkeypatch.setattr(task_tool_module, "get_background_task_result", lambda _: next(responses))
|
||||
monkeypatch.setattr(task_tool_module, "get_stream_writer", lambda: events.append)
|
||||
monkeypatch.setattr(task_tool_module.asyncio, "sleep", _no_sleep)
|
||||
@@ -165,7 +165,9 @@ def test_task_tool_emits_running_and_completed_events(monkeypatch):
|
||||
assert captured["executor_kwargs"]["thread_id"] == "thread-1"
|
||||
assert captured["executor_kwargs"]["parent_model"] == "ark-model"
|
||||
assert captured["executor_kwargs"]["config"].max_turns == 7
|
||||
assert "Skills Appendix" in captured["executor_kwargs"]["config"].system_prompt
|
||||
# Skills are no longer appended to system_prompt; they are loaded per-session
|
||||
# by SubagentExecutor and injected as conversation items (Codex pattern).
|
||||
assert captured["executor_kwargs"]["config"].system_prompt == "Base system prompt"
|
||||
|
||||
get_available_tools.assert_called_once_with(model_name="ark-model", groups=None, subagent_enabled=False)
|
||||
|
||||
@@ -199,7 +201,6 @@ def test_task_tool_propagates_tool_groups_to_subagent(monkeypatch):
|
||||
monkeypatch.setattr(task_tool_module, "SubagentStatus", FakeSubagentStatus)
|
||||
monkeypatch.setattr(task_tool_module, "SubagentExecutor", DummyExecutor)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -222,6 +223,90 @@ def test_task_tool_propagates_tool_groups_to_subagent(monkeypatch):
|
||||
get_available_tools.assert_called_once_with(model_name="ark-model", groups=parent_tool_groups, subagent_enabled=False)
|
||||
|
||||
|
||||
def test_task_tool_inherits_parent_skill_allowlist_for_default_subagent(monkeypatch):
|
||||
config = _make_subagent_config()
|
||||
runtime = _make_runtime()
|
||||
runtime.config["metadata"]["available_skills"] = ["safe-skill"]
|
||||
events = []
|
||||
captured = {}
|
||||
|
||||
class DummyExecutor:
|
||||
def __init__(self, **kwargs):
|
||||
captured["config"] = kwargs["config"]
|
||||
|
||||
def execute_async(self, prompt, task_id=None):
|
||||
return task_id or "generated-task-id"
|
||||
|
||||
monkeypatch.setattr(task_tool_module, "SubagentStatus", FakeSubagentStatus)
|
||||
monkeypatch.setattr(task_tool_module, "SubagentExecutor", DummyExecutor)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
lambda _: _make_result(FakeSubagentStatus.COMPLETED, result="done"),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_stream_writer", lambda: events.append)
|
||||
monkeypatch.setattr(task_tool_module.asyncio, "sleep", _no_sleep)
|
||||
monkeypatch.setattr("deerflow.tools.get_available_tools", MagicMock(return_value=[]))
|
||||
|
||||
output = _run_task_tool(
|
||||
runtime=runtime,
|
||||
description="执行任务",
|
||||
prompt="use skills",
|
||||
subagent_type="general-purpose",
|
||||
tool_call_id="tc-skills",
|
||||
)
|
||||
|
||||
assert output == "Task Succeeded. Result: done"
|
||||
assert captured["config"].skills == ["safe-skill"]
|
||||
|
||||
|
||||
def test_task_tool_intersects_parent_and_subagent_skill_allowlists(monkeypatch):
|
||||
config = _make_subagent_config()
|
||||
config = SubagentConfig(
|
||||
name=config.name,
|
||||
description=config.description,
|
||||
system_prompt=config.system_prompt,
|
||||
max_turns=config.max_turns,
|
||||
timeout_seconds=config.timeout_seconds,
|
||||
skills=["safe-skill", "other-skill"],
|
||||
)
|
||||
runtime = _make_runtime()
|
||||
runtime.config["metadata"]["available_skills"] = ["safe-skill"]
|
||||
events = []
|
||||
captured = {}
|
||||
|
||||
class DummyExecutor:
|
||||
def __init__(self, **kwargs):
|
||||
captured["config"] = kwargs["config"]
|
||||
|
||||
def execute_async(self, prompt, task_id=None):
|
||||
return task_id or "generated-task-id"
|
||||
|
||||
monkeypatch.setattr(task_tool_module, "SubagentStatus", FakeSubagentStatus)
|
||||
monkeypatch.setattr(task_tool_module, "SubagentExecutor", DummyExecutor)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
lambda _: _make_result(FakeSubagentStatus.COMPLETED, result="done"),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_stream_writer", lambda: events.append)
|
||||
monkeypatch.setattr(task_tool_module.asyncio, "sleep", _no_sleep)
|
||||
monkeypatch.setattr("deerflow.tools.get_available_tools", MagicMock(return_value=[]))
|
||||
|
||||
output = _run_task_tool(
|
||||
runtime=runtime,
|
||||
description="执行任务",
|
||||
prompt="use skills",
|
||||
subagent_type="general-purpose",
|
||||
tool_call_id="tc-skills-intersection",
|
||||
)
|
||||
|
||||
assert output == "Task Succeeded. Result: done"
|
||||
assert captured["config"].skills == ["safe-skill"]
|
||||
|
||||
|
||||
def test_task_tool_no_tool_groups_passes_none(monkeypatch):
|
||||
"""Verify that when metadata has no tool_groups, groups=None is passed (backward compat)."""
|
||||
config = _make_subagent_config()
|
||||
@@ -240,7 +325,6 @@ def test_task_tool_no_tool_groups_passes_none(monkeypatch):
|
||||
monkeypatch.setattr(task_tool_module, "SubagentStatus", FakeSubagentStatus)
|
||||
monkeypatch.setattr(task_tool_module, "SubagentExecutor", DummyExecutor)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -279,7 +363,6 @@ def test_task_tool_runtime_none_passes_groups_none(monkeypatch):
|
||||
monkeypatch.setattr(task_tool_module, "SubagentStatus", FakeSubagentStatus)
|
||||
monkeypatch.setattr(task_tool_module, "SubagentExecutor", DummyExecutor)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -311,7 +394,7 @@ def test_task_tool_runtime_none_passes_groups_none(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -345,7 +428,7 @@ def test_task_tool_returns_timed_out_message(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -381,7 +464,7 @@ def test_task_tool_polling_safety_timeout(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -417,7 +500,7 @@ def test_cleanup_called_on_completed(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -457,7 +540,7 @@ def test_cleanup_called_on_failed(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -497,7 +580,7 @@ def test_cleanup_called_on_timed_out(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -544,7 +627,7 @@ def test_cleanup_not_called_on_polling_safety_timeout(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -597,7 +680,7 @@ def test_cleanup_scheduled_on_cancellation(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(task_tool_module, "get_background_task_result", get_result)
|
||||
monkeypatch.setattr(task_tool_module, "get_stream_writer", lambda: events.append)
|
||||
monkeypatch.setattr(task_tool_module.asyncio, "sleep", cancel_on_first_sleep)
|
||||
@@ -648,7 +731,7 @@ def test_cancelled_cleanup_stops_after_timeout(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -703,7 +786,7 @@ def test_cancellation_calls_request_cancel(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(
|
||||
task_tool_module,
|
||||
"get_background_task_result",
|
||||
@@ -761,7 +844,7 @@ def test_task_tool_returns_cancelled_message(monkeypatch):
|
||||
type("DummyExecutor", (), {"__init__": lambda self, **kwargs: None, "execute_async": lambda self, prompt, task_id=None: task_id}),
|
||||
)
|
||||
monkeypatch.setattr(task_tool_module, "get_subagent_config", lambda _: config)
|
||||
monkeypatch.setattr(task_tool_module, "get_skills_prompt_section", lambda: "")
|
||||
|
||||
monkeypatch.setattr(task_tool_module, "get_background_task_result", lambda _: next(responses))
|
||||
monkeypatch.setattr(task_tool_module, "get_stream_writer", lambda: events.append)
|
||||
monkeypatch.setattr(task_tool_module.asyncio, "sleep", _no_sleep)
|
||||
|
||||
@@ -23,7 +23,7 @@ class TestThreadDataMiddleware:
|
||||
middleware = ThreadDataMiddleware(base_dir=str(tmp_path), lazy_init=True)
|
||||
runtime = Runtime(context=None)
|
||||
monkeypatch.setattr(
|
||||
"deerflow.agents.middlewares.thread_data_middleware.get_config",
|
||||
"langgraph.config.get_config",
|
||||
lambda: {"configurable": {"thread_id": "thread-from-config"}},
|
||||
)
|
||||
|
||||
@@ -37,7 +37,7 @@ class TestThreadDataMiddleware:
|
||||
middleware = ThreadDataMiddleware(base_dir=str(tmp_path), lazy_init=True)
|
||||
runtime = Runtime(context={})
|
||||
monkeypatch.setattr(
|
||||
"deerflow.agents.middlewares.thread_data_middleware.get_config",
|
||||
"langgraph.config.get_config",
|
||||
lambda: {"configurable": {"thread_id": "thread-from-config"}},
|
||||
)
|
||||
|
||||
@@ -50,7 +50,7 @@ class TestThreadDataMiddleware:
|
||||
def test_before_agent_raises_clear_error_when_thread_id_missing_everywhere(self, tmp_path, monkeypatch):
|
||||
middleware = ThreadDataMiddleware(base_dir=str(tmp_path), lazy_init=True)
|
||||
monkeypatch.setattr(
|
||||
"deerflow.agents.middlewares.thread_data_middleware.get_config",
|
||||
"langgraph.config.get_config",
|
||||
lambda: {"configurable": {}},
|
||||
)
|
||||
|
||||
|
||||
@@ -93,6 +93,7 @@ class TestTitleMiddlewareCoreLogic:
|
||||
assert title == "短标题"
|
||||
title_middleware_module.create_chat_model.assert_called_once_with(thinking_enabled=False)
|
||||
model.ainvoke.assert_awaited_once()
|
||||
assert model.ainvoke.await_args.kwargs["config"] == {"run_name": "title_agent"}
|
||||
|
||||
def test_generate_title_normalizes_structured_message_content(self, monkeypatch):
|
||||
_set_test_title_config(max_chars=20)
|
||||
|
||||
@@ -0,0 +1,106 @@
|
||||
"""Tests for tool name deduplication in get_available_tools() (issue #1803).
|
||||
|
||||
Duplicate tool registrations previously passed through silently and could
|
||||
produce mangled function-name schemas that caused 100% tool call failures.
|
||||
``get_available_tools()`` now deduplicates by name, config-loaded tools taking
|
||||
priority, and logs a warning for every skipped duplicate.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from langchain_core.tools import BaseTool, tool
|
||||
|
||||
from deerflow.tools.tools import get_available_tools
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fixture tools
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@tool
|
||||
def _tool_alpha(x: str) -> str:
|
||||
"""Alpha tool."""
|
||||
return x
|
||||
|
||||
|
||||
@tool
|
||||
def _tool_alpha_dup(x: str) -> str:
|
||||
"""Duplicate of alpha — same name, different object."""
|
||||
return x
|
||||
|
||||
|
||||
# Rename duplicate to share the same .name as _tool_alpha
|
||||
_tool_alpha_dup.name = _tool_alpha.name # type: ignore[attr-defined]
|
||||
|
||||
|
||||
@tool
|
||||
def _tool_beta(x: str) -> str:
|
||||
"""Beta tool."""
|
||||
return x
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Deduplication behaviour
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _make_minimal_config(tools):
|
||||
"""Return an AppConfig-like mock with the given tools list."""
|
||||
config = MagicMock()
|
||||
config.tools = tools
|
||||
config.models = []
|
||||
config.tool_search.enabled = False
|
||||
config.sandbox = MagicMock()
|
||||
return config
|
||||
|
||||
|
||||
@patch("deerflow.tools.tools.get_app_config")
|
||||
@patch("deerflow.tools.tools.is_host_bash_allowed", return_value=True)
|
||||
@patch("deerflow.tools.tools.reset_deferred_registry")
|
||||
def test_no_duplicates_returned(mock_reset, mock_bash, mock_cfg):
|
||||
"""get_available_tools() never returns two tools with the same name."""
|
||||
mock_cfg.return_value = _make_minimal_config([])
|
||||
|
||||
# Patch the builtin tools so we control exactly what comes back.
|
||||
with patch("deerflow.tools.tools.BUILTIN_TOOLS", [_tool_alpha, _tool_alpha_dup, _tool_beta]):
|
||||
result = get_available_tools(include_mcp=False)
|
||||
|
||||
names = [t.name for t in result]
|
||||
assert len(names) == len(set(names)), f"Duplicate names detected: {names}"
|
||||
|
||||
|
||||
@patch("deerflow.tools.tools.get_app_config")
|
||||
@patch("deerflow.tools.tools.is_host_bash_allowed", return_value=True)
|
||||
@patch("deerflow.tools.tools.reset_deferred_registry")
|
||||
def test_first_occurrence_wins(mock_reset, mock_bash, mock_cfg):
|
||||
"""When duplicates exist, the first occurrence is kept."""
|
||||
mock_cfg.return_value = _make_minimal_config([])
|
||||
|
||||
sentinel_alpha = MagicMock(spec=BaseTool, name="_sentinel")
|
||||
sentinel_alpha.name = _tool_alpha.name # same name
|
||||
sentinel_alpha_dup = MagicMock(spec=BaseTool, name="_sentinel_dup")
|
||||
sentinel_alpha_dup.name = _tool_alpha.name # same name — should be dropped
|
||||
|
||||
with patch("deerflow.tools.tools.BUILTIN_TOOLS", [sentinel_alpha, sentinel_alpha_dup, _tool_beta]):
|
||||
result = get_available_tools(include_mcp=False)
|
||||
|
||||
returned_alpha = next(t for t in result if t.name == _tool_alpha.name)
|
||||
assert returned_alpha is sentinel_alpha
|
||||
|
||||
|
||||
@patch("deerflow.tools.tools.get_app_config")
|
||||
@patch("deerflow.tools.tools.is_host_bash_allowed", return_value=True)
|
||||
@patch("deerflow.tools.tools.reset_deferred_registry")
|
||||
def test_duplicate_triggers_warning(mock_reset, mock_bash, mock_cfg, caplog):
|
||||
"""A warning is logged for every skipped duplicate."""
|
||||
import logging
|
||||
|
||||
mock_cfg.return_value = _make_minimal_config([])
|
||||
|
||||
with patch("deerflow.tools.tools.BUILTIN_TOOLS", [_tool_alpha, _tool_alpha_dup]):
|
||||
with caplog.at_level(logging.WARNING, logger="deerflow.tools.tools"):
|
||||
get_available_tools(include_mcp=False)
|
||||
|
||||
assert any("Duplicate tool name" in r.message for r in caplog.records), "Expected a duplicate-tool warning in log output"
|
||||
@@ -2,8 +2,10 @@
|
||||
|
||||
import json
|
||||
import sys
|
||||
from types import SimpleNamespace
|
||||
|
||||
import pytest
|
||||
from langchain_core.messages import ToolMessage
|
||||
from langchain_core.tools import tool as langchain_tool
|
||||
|
||||
from deerflow.config.tool_search_config import ToolSearchConfig, load_tool_search_config_from_dict
|
||||
@@ -83,6 +85,16 @@ class TestDeferredToolRegistry:
|
||||
assert "github_create_issue" in names
|
||||
assert "slack_send_message" in names
|
||||
|
||||
def test_deferred_names(self, registry):
|
||||
names = registry.deferred_names
|
||||
assert "github_create_issue" in names
|
||||
assert "slack_send_message" in names
|
||||
assert len(names) == 6
|
||||
|
||||
def test_contains(self, registry):
|
||||
assert registry.contains("github_create_issue") is True
|
||||
assert registry.contains("not_registered") is False
|
||||
|
||||
def test_search_select_single(self, registry):
|
||||
results = registry.search("select:github_create_issue")
|
||||
assert len(results) == 1
|
||||
@@ -509,3 +521,89 @@ class TestToolSearchPromotion:
|
||||
assert "slack_send_message" not in remaining
|
||||
assert "slack_list_channels" not in remaining
|
||||
assert len(registry) == 4
|
||||
|
||||
|
||||
class TestDeferredToolExecutionGate:
|
||||
def test_unpromoted_deferred_tool_call_is_blocked(self, registry):
|
||||
from deerflow.agents.middlewares.deferred_tool_filter_middleware import DeferredToolFilterMiddleware
|
||||
|
||||
set_deferred_registry(registry)
|
||||
middleware = DeferredToolFilterMiddleware()
|
||||
request = SimpleNamespace(tool_call={"name": "github_create_issue", "id": "call-1"})
|
||||
called = False
|
||||
|
||||
def handler(_request):
|
||||
nonlocal called
|
||||
called = True
|
||||
return ToolMessage(content="executed", tool_call_id="call-1", name="github_create_issue")
|
||||
|
||||
result = middleware.wrap_tool_call(request, handler)
|
||||
|
||||
assert called is False
|
||||
assert isinstance(result, ToolMessage)
|
||||
assert result.status == "error"
|
||||
assert result.tool_call_id == "call-1"
|
||||
assert "tool_search" in result.content
|
||||
assert "github_create_issue" in result.content
|
||||
|
||||
def test_promoted_deferred_tool_call_is_allowed(self, registry):
|
||||
from deerflow.agents.middlewares.deferred_tool_filter_middleware import DeferredToolFilterMiddleware
|
||||
|
||||
registry.promote({"github_create_issue"})
|
||||
set_deferred_registry(registry)
|
||||
middleware = DeferredToolFilterMiddleware()
|
||||
request = SimpleNamespace(tool_call={"name": "github_create_issue", "id": "call-1"})
|
||||
called = False
|
||||
|
||||
def handler(_request):
|
||||
nonlocal called
|
||||
called = True
|
||||
return ToolMessage(content="executed", tool_call_id="call-1", name="github_create_issue")
|
||||
|
||||
result = middleware.wrap_tool_call(request, handler)
|
||||
|
||||
assert called is True
|
||||
assert isinstance(result, ToolMessage)
|
||||
assert result.content == "executed"
|
||||
|
||||
def test_non_deferred_tool_call_is_allowed(self, registry):
|
||||
from deerflow.agents.middlewares.deferred_tool_filter_middleware import DeferredToolFilterMiddleware
|
||||
|
||||
set_deferred_registry(registry)
|
||||
middleware = DeferredToolFilterMiddleware()
|
||||
request = SimpleNamespace(tool_call={"name": "local_tool", "id": "call-1"})
|
||||
called = False
|
||||
|
||||
def handler(_request):
|
||||
nonlocal called
|
||||
called = True
|
||||
return ToolMessage(content="executed", tool_call_id="call-1", name="local_tool")
|
||||
|
||||
result = middleware.wrap_tool_call(request, handler)
|
||||
|
||||
assert called is True
|
||||
assert isinstance(result, ToolMessage)
|
||||
assert result.content == "executed"
|
||||
|
||||
@pytest.mark.anyio
|
||||
async def test_unpromoted_deferred_tool_call_is_blocked_async(self, registry):
|
||||
from deerflow.agents.middlewares.deferred_tool_filter_middleware import DeferredToolFilterMiddleware
|
||||
|
||||
set_deferred_registry(registry)
|
||||
middleware = DeferredToolFilterMiddleware()
|
||||
request = SimpleNamespace(tool_call={"name": "github_create_issue", "id": "call-1"})
|
||||
called = False
|
||||
|
||||
async def handler(_request):
|
||||
nonlocal called
|
||||
called = True
|
||||
return ToolMessage(content="executed", tool_call_id="call-1", name="github_create_issue")
|
||||
|
||||
result = await middleware.awrap_tool_call(request, handler)
|
||||
|
||||
assert called is False
|
||||
assert isinstance(result, ToolMessage)
|
||||
assert result.status == "error"
|
||||
assert result.tool_call_id == "call-1"
|
||||
assert "tool_search" in result.content
|
||||
assert "github_create_issue" in result.content
|
||||
|
||||
Generated
+115
-77
@@ -686,7 +686,9 @@ dependencies = [
|
||||
|
||||
[package.dev-dependencies]
|
||||
dev = [
|
||||
{ name = "prompt-toolkit" },
|
||||
{ name = "pytest" },
|
||||
{ name = "pytest-asyncio" },
|
||||
{ name = "ruff" },
|
||||
]
|
||||
|
||||
@@ -708,7 +710,9 @@ requires-dist = [
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
dev = [
|
||||
{ name = "prompt-toolkit", specifier = ">=3.0.0" },
|
||||
{ name = "pytest", specifier = ">=9.0.3" },
|
||||
{ name = "pytest-asyncio", specifier = ">=1.3.0" },
|
||||
{ name = "ruff", specifier = ">=0.14.11" },
|
||||
]
|
||||
|
||||
@@ -1856,82 +1860,82 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "lxml"
|
||||
version = "6.0.2"
|
||||
version = "6.1.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/aa/88/262177de60548e5a2bfc46ad28232c9e9cbde697bd94132aeb80364675cb/lxml-6.0.2.tar.gz", hash = "sha256:cd79f3367bd74b317dda655dc8fcfa304d9eb6e4fb06b7168c5cf27f96e0cd62", size = 4073426, upload-time = "2025-09-22T04:04:59.287Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/28/30/9abc9e34c657c33834eaf6cd02124c61bdf5944d802aa48e69be8da3585d/lxml-6.1.0.tar.gz", hash = "sha256:bfd57d8008c4965709a919c3e9a98f76c2c7cb319086b3d26858250620023b13", size = 4197006, upload-time = "2026-04-18T04:32:51.613Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/c8/8ff2bc6b920c84355146cd1ab7d181bc543b89241cfb1ebee824a7c81457/lxml-6.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a59f5448ba2ceccd06995c95ea59a7674a10de0810f2ce90c9006f3cbc044456", size = 8661887, upload-time = "2025-09-22T04:01:17.265Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/37/6f/9aae1008083bb501ef63284220ce81638332f9ccbfa53765b2b7502203cf/lxml-6.0.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e8113639f3296706fbac34a30813929e29247718e88173ad849f57ca59754924", size = 4667818, upload-time = "2025-09-22T04:01:19.688Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f1/ca/31fb37f99f37f1536c133476674c10b577e409c0a624384147653e38baf2/lxml-6.0.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a8bef9b9825fa8bc816a6e641bb67219489229ebc648be422af695f6e7a4fa7f", size = 4950807, upload-time = "2025-09-22T04:01:21.487Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/da/87/f6cb9442e4bada8aab5ae7e1046264f62fdbeaa6e3f6211b93f4c0dd97f1/lxml-6.0.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:65ea18d710fd14e0186c2f973dc60bb52039a275f82d3c44a0e42b43440ea534", size = 5109179, upload-time = "2025-09-22T04:01:23.32Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/20/a7760713e65888db79bbae4f6146a6ae5c04e4a204a3c48896c408cd6ed2/lxml-6.0.2-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c371aa98126a0d4c739ca93ceffa0fd7a5d732e3ac66a46e74339acd4d334564", size = 5023044, upload-time = "2025-09-22T04:01:25.118Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a2/b0/7e64e0460fcb36471899f75831509098f3fd7cd02a3833ac517433cb4f8f/lxml-6.0.2-cp312-cp312-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:700efd30c0fa1a3581d80a748157397559396090a51d306ea59a70020223d16f", size = 5359685, upload-time = "2025-09-22T04:01:27.398Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b9/e1/e5df362e9ca4e2f48ed6411bd4b3a0ae737cc842e96877f5bf9428055ab4/lxml-6.0.2-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c33e66d44fe60e72397b487ee92e01da0d09ba2d66df8eae42d77b6d06e5eba0", size = 5654127, upload-time = "2025-09-22T04:01:29.629Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c6/d1/232b3309a02d60f11e71857778bfcd4acbdb86c07db8260caf7d008b08f8/lxml-6.0.2-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:90a345bbeaf9d0587a3aaffb7006aa39ccb6ff0e96a57286c0cb2fd1520ea192", size = 5253958, upload-time = "2025-09-22T04:01:31.535Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/35/35/d955a070994725c4f7d80583a96cab9c107c57a125b20bb5f708fe941011/lxml-6.0.2-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:064fdadaf7a21af3ed1dcaa106b854077fbeada827c18f72aec9346847cd65d0", size = 4711541, upload-time = "2025-09-22T04:01:33.801Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/be/667d17363b38a78c4bd63cfd4b4632029fd68d2c2dc81f25ce9eb5224dd5/lxml-6.0.2-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fbc74f42c3525ac4ffa4b89cbdd00057b6196bcefe8bce794abd42d33a018092", size = 5267426, upload-time = "2025-09-22T04:01:35.639Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ea/47/62c70aa4a1c26569bc958c9ca86af2bb4e1f614e8c04fb2989833874f7ae/lxml-6.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6ddff43f702905a4e32bc24f3f2e2edfe0f8fde3277d481bffb709a4cced7a1f", size = 5064917, upload-time = "2025-09-22T04:01:37.448Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/55/6ceddaca353ebd0f1908ef712c597f8570cc9c58130dbb89903198e441fd/lxml-6.0.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6da5185951d72e6f5352166e3da7b0dc27aa70bd1090b0eb3f7f7212b53f1bb8", size = 4788795, upload-time = "2025-09-22T04:01:39.165Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cf/e8/fd63e15da5e3fd4c2146f8bbb3c14e94ab850589beab88e547b2dbce22e1/lxml-6.0.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:57a86e1ebb4020a38d295c04fc79603c7899e0df71588043eb218722dabc087f", size = 5676759, upload-time = "2025-09-22T04:01:41.506Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/76/47/b3ec58dc5c374697f5ba37412cd2728f427d056315d124dd4b61da381877/lxml-6.0.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:2047d8234fe735ab77802ce5f2297e410ff40f5238aec569ad7c8e163d7b19a6", size = 5255666, upload-time = "2025-09-22T04:01:43.363Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/93/03ba725df4c3d72afd9596eef4a37a837ce8e4806010569bedfcd2cb68fd/lxml-6.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6f91fd2b2ea15a6800c8e24418c0775a1694eefc011392da73bc6cef2623b322", size = 5277989, upload-time = "2025-09-22T04:01:45.215Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c6/80/c06de80bfce881d0ad738576f243911fccf992687ae09fd80b734712b39c/lxml-6.0.2-cp312-cp312-win32.whl", hash = "sha256:3ae2ce7d6fedfb3414a2b6c5e20b249c4c607f72cb8d2bb7cc9c6ec7c6f4e849", size = 3611456, upload-time = "2025-09-22T04:01:48.243Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f7/d7/0cdfb6c3e30893463fb3d1e52bc5f5f99684a03c29a0b6b605cfae879cd5/lxml-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:72c87e5ee4e58a8354fb9c7c84cbf95a1c8236c127a5d1b7683f04bed8361e1f", size = 4011793, upload-time = "2025-09-22T04:01:50.042Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ea/7b/93c73c67db235931527301ed3785f849c78991e2e34f3fd9a6663ffda4c5/lxml-6.0.2-cp312-cp312-win_arm64.whl", hash = "sha256:61cb10eeb95570153e0c0e554f58df92ecf5109f75eacad4a95baa709e26c3d6", size = 3672836, upload-time = "2025-09-22T04:01:52.145Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/fd/4e8f0540608977aea078bf6d79f128e0e2c2bba8af1acf775c30baa70460/lxml-6.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9b33d21594afab46f37ae58dfadd06636f154923c4e8a4d754b0127554eb2e77", size = 8648494, upload-time = "2025-09-22T04:01:54.242Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/f4/2a94a3d3dfd6c6b433501b8d470a1960a20ecce93245cf2db1706adf6c19/lxml-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c8963287d7a4c5c9a432ff487c52e9c5618667179c18a204bdedb27310f022f", size = 4661146, upload-time = "2025-09-22T04:01:56.282Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/2e/4efa677fa6b322013035d38016f6ae859d06cac67437ca7dc708a6af7028/lxml-6.0.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1941354d92699fb5ffe6ed7b32f9649e43c2feb4b97205f75866f7d21aa91452", size = 4946932, upload-time = "2025-09-22T04:01:58.989Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/0f/526e78a6d38d109fdbaa5049c62e1d32fdd70c75fb61c4eadf3045d3d124/lxml-6.0.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bb2f6ca0ae2d983ded09357b84af659c954722bbf04dea98030064996d156048", size = 5100060, upload-time = "2025-09-22T04:02:00.812Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/81/76/99de58d81fa702cc0ea7edae4f4640416c2062813a00ff24bd70ac1d9c9b/lxml-6.0.2-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb2a12d704f180a902d7fa778c6d71f36ceb7b0d317f34cdc76a5d05aa1dd1df", size = 5019000, upload-time = "2025-09-22T04:02:02.671Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b5/35/9e57d25482bc9a9882cb0037fdb9cc18f4b79d85df94fa9d2a89562f1d25/lxml-6.0.2-cp313-cp313-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:6ec0e3f745021bfed19c456647f0298d60a24c9ff86d9d051f52b509663feeb1", size = 5348496, upload-time = "2025-09-22T04:02:04.904Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/8e/cb99bd0b83ccc3e8f0f528e9aa1f7a9965dfec08c617070c5db8d63a87ce/lxml-6.0.2-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:846ae9a12d54e368933b9759052d6206a9e8b250291109c48e350c1f1f49d916", size = 5643779, upload-time = "2025-09-22T04:02:06.689Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d0/34/9e591954939276bb679b73773836c6684c22e56d05980e31d52a9a8deb18/lxml-6.0.2-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ef9266d2aa545d7374938fb5c484531ef5a2ec7f2d573e62f8ce722c735685fd", size = 5244072, upload-time = "2025-09-22T04:02:08.587Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8d/27/b29ff065f9aaca443ee377aff699714fcbffb371b4fce5ac4ca759e436d5/lxml-6.0.2-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:4077b7c79f31755df33b795dc12119cb557a0106bfdab0d2c2d97bd3cf3dffa6", size = 4718675, upload-time = "2025-09-22T04:02:10.783Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/9f/f756f9c2cd27caa1a6ef8c32ae47aadea697f5c2c6d07b0dae133c244fbe/lxml-6.0.2-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a7c5d5e5f1081955358533be077166ee97ed2571d6a66bdba6ec2f609a715d1a", size = 5255171, upload-time = "2025-09-22T04:02:12.631Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/61/46/bb85ea42d2cb1bd8395484fd72f38e3389611aa496ac7772da9205bbda0e/lxml-6.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8f8d0cbd0674ee89863a523e6994ac25fd5be9c8486acfc3e5ccea679bad2679", size = 5057175, upload-time = "2025-09-22T04:02:14.718Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/95/0c/443fc476dcc8e41577f0af70458c50fe299a97bb6b7505bb1ae09aa7f9ac/lxml-6.0.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2cbcbf6d6e924c28f04a43f3b6f6e272312a090f269eff68a2982e13e5d57659", size = 4785688, upload-time = "2025-09-22T04:02:16.957Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/78/6ef0b359d45bb9697bc5a626e1992fa5d27aa3f8004b137b2314793b50a0/lxml-6.0.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dfb874cfa53340009af6bdd7e54ebc0d21012a60a4e65d927c2e477112e63484", size = 5660655, upload-time = "2025-09-22T04:02:18.815Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/ea/e1d33808f386bc1339d08c0dcada6e4712d4ed8e93fcad5f057070b7988a/lxml-6.0.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:fb8dae0b6b8b7f9e96c26fdd8121522ce5de9bb5538010870bd538683d30e9a2", size = 5247695, upload-time = "2025-09-22T04:02:20.593Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4f/47/eba75dfd8183673725255247a603b4ad606f4ae657b60c6c145b381697da/lxml-6.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:358d9adae670b63e95bc59747c72f4dc97c9ec58881d4627fe0120da0f90d314", size = 5269841, upload-time = "2025-09-22T04:02:22.489Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/76/04/5c5e2b8577bc936e219becb2e98cdb1aca14a4921a12995b9d0c523502ae/lxml-6.0.2-cp313-cp313-win32.whl", hash = "sha256:e8cd2415f372e7e5a789d743d133ae474290a90b9023197fd78f32e2dc6873e2", size = 3610700, upload-time = "2025-09-22T04:02:24.465Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fe/0a/4643ccc6bb8b143e9f9640aa54e38255f9d3b45feb2cbe7ae2ca47e8782e/lxml-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:b30d46379644fbfc3ab81f8f82ae4de55179414651f110a1514f0b1f8f6cb2d7", size = 4010347, upload-time = "2025-09-22T04:02:26.286Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/ef/dcf1d29c3f530577f61e5fe2f1bd72929acf779953668a8a47a479ae6f26/lxml-6.0.2-cp313-cp313-win_arm64.whl", hash = "sha256:13dcecc9946dca97b11b7c40d29fba63b55ab4170d3c0cf8c0c164343b9bfdcf", size = 3671248, upload-time = "2025-09-22T04:02:27.918Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/15/d4a377b385ab693ce97b472fe0c77c2b16ec79590e688b3ccc71fba19884/lxml-6.0.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:b0c732aa23de8f8aec23f4b580d1e52905ef468afb4abeafd3fec77042abb6fe", size = 8659801, upload-time = "2025-09-22T04:02:30.113Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/e8/c128e37589463668794d503afaeb003987373c5f94d667124ffd8078bbd9/lxml-6.0.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4468e3b83e10e0317a89a33d28f7aeba1caa4d1a6fd457d115dd4ffe90c5931d", size = 4659403, upload-time = "2025-09-22T04:02:32.119Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/ce/74903904339decdf7da7847bb5741fc98a5451b42fc419a86c0c13d26fe2/lxml-6.0.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:abd44571493973bad4598a3be7e1d807ed45aa2adaf7ab92ab7c62609569b17d", size = 4966974, upload-time = "2025-09-22T04:02:34.155Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1f/d3/131dec79ce61c5567fecf82515bd9bc36395df42501b50f7f7f3bd065df0/lxml-6.0.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:370cd78d5855cfbffd57c422851f7d3864e6ae72d0da615fca4dad8c45d375a5", size = 5102953, upload-time = "2025-09-22T04:02:36.054Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3a/ea/a43ba9bb750d4ffdd885f2cd333572f5bb900cd2408b67fdda07e85978a0/lxml-6.0.2-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:901e3b4219fa04ef766885fb40fa516a71662a4c61b80c94d25336b4934b71c0", size = 5055054, upload-time = "2025-09-22T04:02:38.154Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/23/6885b451636ae286c34628f70a7ed1fcc759f8d9ad382d132e1c8d3d9bfd/lxml-6.0.2-cp314-cp314-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:a4bf42d2e4cf52c28cc1812d62426b9503cdb0c87a6de81442626aa7d69707ba", size = 5352421, upload-time = "2025-09-22T04:02:40.413Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/5b/fc2ddfc94ddbe3eebb8e9af6e3fd65e2feba4967f6a4e9683875c394c2d8/lxml-6.0.2-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2c7fdaa4d7c3d886a42534adec7cfac73860b89b4e5298752f60aa5984641a0", size = 5673684, upload-time = "2025-09-22T04:02:42.288Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/9c/47293c58cc91769130fbf85531280e8cc7868f7fbb6d92f4670071b9cb3e/lxml-6.0.2-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:98a5e1660dc7de2200b00d53fa00bcd3c35a3608c305d45a7bbcaf29fa16e83d", size = 5252463, upload-time = "2025-09-22T04:02:44.165Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9b/da/ba6eceb830c762b48e711ded880d7e3e89fc6c7323e587c36540b6b23c6b/lxml-6.0.2-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:dc051506c30b609238d79eda75ee9cab3e520570ec8219844a72a46020901e37", size = 4698437, upload-time = "2025-09-22T04:02:46.524Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/24/7be3f82cb7990b89118d944b619e53c656c97dc89c28cfb143fdb7cd6f4d/lxml-6.0.2-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8799481bbdd212470d17513a54d568f44416db01250f49449647b5ab5b5dccb9", size = 5269890, upload-time = "2025-09-22T04:02:48.812Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/bd/dcfb9ea1e16c665efd7538fc5d5c34071276ce9220e234217682e7d2c4a5/lxml-6.0.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9261bb77c2dab42f3ecd9103951aeca2c40277701eb7e912c545c1b16e0e4917", size = 5097185, upload-time = "2025-09-22T04:02:50.746Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/21/04/a60b0ff9314736316f28316b694bccbbabe100f8483ad83852d77fc7468e/lxml-6.0.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:65ac4a01aba353cfa6d5725b95d7aed6356ddc0a3cd734de00124d285b04b64f", size = 4745895, upload-time = "2025-09-22T04:02:52.968Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/bd/7d54bd1846e5a310d9c715921c5faa71cf5c0853372adf78aee70c8d7aa2/lxml-6.0.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b22a07cbb82fea98f8a2fd814f3d1811ff9ed76d0fc6abc84eb21527596e7cc8", size = 5695246, upload-time = "2025-09-22T04:02:54.798Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/32/5643d6ab947bc371da21323acb2a6e603cedbe71cb4c99c8254289ab6f4e/lxml-6.0.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:d759cdd7f3e055d6bc8d9bec3ad905227b2e4c785dc16c372eb5b5e83123f48a", size = 5260797, upload-time = "2025-09-22T04:02:57.058Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/33/da/34c1ec4cff1eea7d0b4cd44af8411806ed943141804ac9c5d565302afb78/lxml-6.0.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:945da35a48d193d27c188037a05fec5492937f66fb1958c24fc761fb9d40d43c", size = 5277404, upload-time = "2025-09-22T04:02:58.966Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/82/57/4eca3e31e54dc89e2c3507e1cd411074a17565fa5ffc437c4ae0a00d439e/lxml-6.0.2-cp314-cp314-win32.whl", hash = "sha256:be3aaa60da67e6153eb15715cc2e19091af5dc75faef8b8a585aea372507384b", size = 3670072, upload-time = "2025-09-22T04:03:38.05Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e3/e0/c96cf13eccd20c9421ba910304dae0f619724dcf1702864fd59dd386404d/lxml-6.0.2-cp314-cp314-win_amd64.whl", hash = "sha256:fa25afbadead523f7001caf0c2382afd272c315a033a7b06336da2637d92d6ed", size = 4080617, upload-time = "2025-09-22T04:03:39.835Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/5d/b3f03e22b3d38d6f188ef044900a9b29b2fe0aebb94625ce9fe244011d34/lxml-6.0.2-cp314-cp314-win_arm64.whl", hash = "sha256:063eccf89df5b24e361b123e257e437f9e9878f425ee9aae3144c77faf6da6d8", size = 3754930, upload-time = "2025-09-22T04:03:41.565Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5e/5c/42c2c4c03554580708fc738d13414801f340c04c3eff90d8d2d227145275/lxml-6.0.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:6162a86d86893d63084faaf4ff937b3daea233e3682fb4474db07395794fa80d", size = 8910380, upload-time = "2025-09-22T04:03:01.645Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bf/4f/12df843e3e10d18d468a7557058f8d3733e8b6e12401f30b1ef29360740f/lxml-6.0.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:414aaa94e974e23a3e92e7ca5b97d10c0cf37b6481f50911032c69eeb3991bba", size = 4775632, upload-time = "2025-09-22T04:03:03.814Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/0c/9dc31e6c2d0d418483cbcb469d1f5a582a1cd00a1f4081953d44051f3c50/lxml-6.0.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:48461bd21625458dd01e14e2c38dd0aea69addc3c4f960c30d9f59d7f93be601", size = 4975171, upload-time = "2025-09-22T04:03:05.651Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/2b/9b870c6ca24c841bdd887504808f0417aa9d8d564114689266f19ddf29c8/lxml-6.0.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:25fcc59afc57d527cfc78a58f40ab4c9b8fd096a9a3f964d2781ffb6eb33f4ed", size = 5110109, upload-time = "2025-09-22T04:03:07.452Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bf/0c/4f5f2a4dd319a178912751564471355d9019e220c20d7db3fb8307ed8582/lxml-6.0.2-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5179c60288204e6ddde3f774a93350177e08876eaf3ab78aa3a3649d43eb7d37", size = 5041061, upload-time = "2025-09-22T04:03:09.297Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/12/64/554eed290365267671fe001a20d72d14f468ae4e6acef1e179b039436967/lxml-6.0.2-cp314-cp314t-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:967aab75434de148ec80597b75062d8123cadf2943fb4281f385141e18b21338", size = 5306233, upload-time = "2025-09-22T04:03:11.651Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7a/31/1d748aa275e71802ad9722df32a7a35034246b42c0ecdd8235412c3396ef/lxml-6.0.2-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d100fcc8930d697c6561156c6810ab4a508fb264c8b6779e6e61e2ed5e7558f9", size = 5604739, upload-time = "2025-09-22T04:03:13.592Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/41/2c11916bcac09ed561adccacceaedd2bf0e0b25b297ea92aab99fd03d0fa/lxml-6.0.2-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ca59e7e13e5981175b8b3e4ab84d7da57993eeff53c07764dcebda0d0e64ecd", size = 5225119, upload-time = "2025-09-22T04:03:15.408Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/99/05/4e5c2873d8f17aa018e6afde417c80cc5d0c33be4854cce3ef5670c49367/lxml-6.0.2-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:957448ac63a42e2e49531b9d6c0fa449a1970dbc32467aaad46f11545be9af1d", size = 4633665, upload-time = "2025-09-22T04:03:17.262Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/c9/dcc2da1bebd6275cdc723b515f93edf548b82f36a5458cca3578bc899332/lxml-6.0.2-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b7fc49c37f1786284b12af63152fe1d0990722497e2d5817acfe7a877522f9a9", size = 5234997, upload-time = "2025-09-22T04:03:19.14Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9c/e2/5172e4e7468afca64a37b81dba152fc5d90e30f9c83c7c3213d6a02a5ce4/lxml-6.0.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e19e0643cc936a22e837f79d01a550678da8377d7d801a14487c10c34ee49c7e", size = 5090957, upload-time = "2025-09-22T04:03:21.436Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/b3/15461fd3e5cd4ddcb7938b87fc20b14ab113b92312fc97afe65cd7c85de1/lxml-6.0.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:1db01e5cf14345628e0cbe71067204db658e2fb8e51e7f33631f5f4735fefd8d", size = 4764372, upload-time = "2025-09-22T04:03:23.27Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/33/f310b987c8bf9e61c4dd8e8035c416bd3230098f5e3cfa69fc4232de7059/lxml-6.0.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:875c6b5ab39ad5291588aed6925fac99d0097af0dd62f33c7b43736043d4a2ec", size = 5634653, upload-time = "2025-09-22T04:03:25.767Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/70/ff/51c80e75e0bc9382158133bdcf4e339b5886c6ee2418b5199b3f1a61ed6d/lxml-6.0.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:cdcbed9ad19da81c480dfd6dd161886db6096083c9938ead313d94b30aadf272", size = 5233795, upload-time = "2025-09-22T04:03:27.62Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/56/4d/4856e897df0d588789dd844dbed9d91782c4ef0b327f96ce53c807e13128/lxml-6.0.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:80dadc234ebc532e09be1975ff538d154a7fa61ea5031c03d25178855544728f", size = 5257023, upload-time = "2025-09-22T04:03:30.056Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/85/86766dfebfa87bea0ab78e9ff7a4b4b45225df4b4d3b8cc3c03c5cd68464/lxml-6.0.2-cp314-cp314t-win32.whl", hash = "sha256:da08e7bb297b04e893d91087df19638dc7a6bb858a954b0cc2b9f5053c922312", size = 3911420, upload-time = "2025-09-22T04:03:32.198Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fe/1a/b248b355834c8e32614650b8008c69ffeb0ceb149c793961dd8c0b991bb3/lxml-6.0.2-cp314-cp314t-win_amd64.whl", hash = "sha256:252a22982dca42f6155125ac76d3432e548a7625d56f5a273ee78a5057216eca", size = 4406837, upload-time = "2025-09-22T04:03:34.027Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/92/aa/df863bcc39c5e0946263454aba394de8a9084dbaff8ad143846b0d844739/lxml-6.0.2-cp314-cp314t-win_arm64.whl", hash = "sha256:bb4c1847b303835d89d785a18801a883436cdfd5dc3d62947f9c49e24f0f5a2c", size = 3822205, upload-time = "2025-09-22T04:03:36.249Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d2/d4/9326838b59dc36dfae42eec9656b97520f9997eee1de47b8316aaeed169c/lxml-6.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d2f17a16cd8751e8eb233a7e41aecdf8e511712e00088bf9be455f604cd0d28d", size = 8570663, upload-time = "2026-04-18T04:27:48.253Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d8/a4/053745ce1f8303ccbb788b86c0db3a91b973675cefc42566a188637b7c40/lxml-6.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f0cea5b1d3e6e77d71bd2b9972eb2446221a69dc52bb0b9c3c6f6e5700592d93", size = 4624024, upload-time = "2026-04-18T04:27:52.594Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/90/97/a517944b20f8fd0932ad2109482bee4e29fe721416387a363306667941f6/lxml-6.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc46da94826188ed45cb53bd8e3fc076ae22675aea2087843d4735627f867c6d", size = 4930895, upload-time = "2026-04-18T04:32:56.29Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/94/7c/e08a970727d556caa040a44773c7b7e3ad0f0d73dedc863543e9a8b931f2/lxml-6.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9147d8e386ec3b82c3b15d88927f734f565b0aaadef7def562b853adca45784a", size = 5093820, upload-time = "2026-04-18T04:32:58.94Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/88/ee/2a5c2aa2c32016a226ca25d3e1056a8102ea6e1fe308bf50213586635400/lxml-6.1.0-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5715e0e28736a070f3f34a7ccc09e2fdcba0e3060abbcf61a1a5718ff6d6b105", size = 5005790, upload-time = "2026-04-18T04:33:01.272Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e3/38/a0db9be8f38ad6043ab9429487c128dd1d30f07956ef43040402f8da49e8/lxml-6.1.0-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4937460dc5df0cdd2f06a86c285c28afda06aefa3af949f9477d3e8df430c485", size = 5630827, upload-time = "2026-04-18T04:33:04.036Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/ba/3c13d3fc24b7cacf675f808a3a1baabf43a30d0cd24c98f94548e9aa58eb/lxml-6.1.0-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bc783ee3147e60a25aa0445ea82b3e8aabb83b240f2b95d32cb75587ff781814", size = 5240445, upload-time = "2026-04-18T04:33:06.87Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/55/ba/eeef4ccba09b2212fe239f46c1692a98db1878e0872ae320756488878a94/lxml-6.1.0-cp312-cp312-manylinux_2_28_i686.whl", hash = "sha256:40d9189f80075f2e1f88db21ef815a2b17b28adf8e50aaf5c789bfe737027f32", size = 5350121, upload-time = "2026-04-18T04:33:09.365Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7e/01/1da87c7b587c38d0cbe77a01aae3b9c1c49ed47d76918ef3db8fc151b1ca/lxml-6.1.0-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:05b9b8787e35bec69e68daf4952b2e6dfcfb0db7ecf1a06f8cdfbbac4eb71aad", size = 4694949, upload-time = "2026-04-18T04:33:11.628Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a1/88/7db0fe66d5aaf128443ee1623dec3db1576f3e4c17751ec0ef5866468590/lxml-6.1.0-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0f0f08beb0182e3e9a86fae124b3c47a7b41b7b69b225e1377db983802404e54", size = 5243901, upload-time = "2026-04-18T04:33:13.95Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/a8/1346726af7d1f6fca1f11223ba34001462b0a3660416986d37641708d57c/lxml-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73becf6d8c81d4c76b1014dbd3584cb26d904492dcf73ca85dc8bff08dcd6d2d", size = 5048054, upload-time = "2026-04-18T04:33:16.965Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2e/b7/85057012f035d1a0c87e02f8c723ca3c3e6e0728bcf4cb62080b21b1c1e3/lxml-6.1.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1ae225f66e5938f4fa29d37e009a3bb3b13032ac57eb4eb42afa44f6e4054e69", size = 4777324, upload-time = "2026-04-18T04:33:19.832Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/75/6c/ad2f94a91073ef570f33718040e8e160d5fb93331cf1ab3ca1323f939e2d/lxml-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:690022c7fae793b0489aa68a658822cea83e0d5933781811cabbf5ea3bcfe73d", size = 5645702, upload-time = "2026-04-18T04:33:22.436Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3b/89/0bb6c0bd549c19004c60eea9dc554dd78fd647b72314ef25d460e0d208c6/lxml-6.1.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:63aeafc26aac0be8aff14af7871249e87ea1319be92090bfd632ec68e03b16a5", size = 5232901, upload-time = "2026-04-18T04:33:26.21Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a1/d9/d609a11fb567da9399f525193e2b49847b5a409cdebe737f06a8b7126bdc/lxml-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:264c605ab9c0e4aa1a679636f4582c4d3313700009fac3ec9c3412ed0d8f3e1d", size = 5261333, upload-time = "2026-04-18T04:33:28.984Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/3a/ac3f99ec8ac93089e7dd556f279e0d14c24de0a74a507e143a2e4b496e7c/lxml-6.1.0-cp312-cp312-win32.whl", hash = "sha256:56971379bc5ee8037c5a0f09fa88f66cdb7d37c3e38af3e45cf539f41131ac1f", size = 3596289, upload-time = "2026-04-18T04:27:42.819Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/a7/0a915557538593cb1bbeedcd40e13c7a261822c26fecbbdb71dad0c2f540/lxml-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:bba078de0031c219e5dd06cf3e6bf8fb8e6e64a77819b358f53bb132e3e03366", size = 3997059, upload-time = "2026-04-18T04:27:46.764Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/92/96/a5dc078cf0126fbfbc35611d77ecd5da80054b5893e28fb213a5613b9e1d/lxml-6.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:c3592631e652afa34999a088f98ba7dfc7d6aff0d535c410bea77a71743f3819", size = 3659552, upload-time = "2026-04-18T04:27:51.133Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/08/03/69347590f1cf4a6d5a4944bb6099e6d37f334784f16062234e1f892fdb1d/lxml-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a0092f2b107b69601adf562a57c956fbb596e05e3e6651cabd3054113b007e45", size = 8559689, upload-time = "2026-04-18T04:31:57.785Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3f/58/25e00bb40b185c974cfe156c110474d9a8a8390d5f7c92a4e328189bb60e/lxml-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fc7140d7a7386e6b545d41b7358f4d02b656d4053f5fa6859f92f4b9c2572c4d", size = 4617892, upload-time = "2026-04-18T04:32:01.78Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/54/92ad98a94ac318dc4f97aaac22ff8d1b94212b2ae8af5b6e9b354bf825f7/lxml-6.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:419c58fc92cc3a2c3fa5f78c63dbf5da70c1fa9c1b25f25727ecee89a96c7de2", size = 4923489, upload-time = "2026-04-18T04:33:31.401Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/15/3b/a20aecfab42bdf4f9b390590d345857ad3ffd7c51988d1c89c53a0c73faf/lxml-6.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:37fabd1452852636cf38ecdcc9dd5ca4bba7a35d6c53fa09725deeb894a87491", size = 5082162, upload-time = "2026-04-18T04:33:34.262Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/26/2cdb3d281ac1bd175603e290cbe4bad6eff127c0f8de90bafd6f8548f0fd/lxml-6.1.0-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2853c8b2170cc6cd54a6b4d50d2c1a8a7aeca201f23804b4898525c7a152cfc", size = 4993247, upload-time = "2026-04-18T04:33:36.674Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/05/d735aef963740022a08185c84821f689fc903acb3d50326e6b1e9886cc22/lxml-6.1.0-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8e369cbd690e788c8d15e56222d91a09c6a417f49cbc543040cba0fe2e25a79e", size = 5613042, upload-time = "2026-04-18T04:33:39.205Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/b8/ead7c10efff731738c72e59ed6eb5791854879fbed7ae98781a12006263a/lxml-6.1.0-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e69aa6805905807186eb00e66c6d97a935c928275182eb02ee40ba00da9623b2", size = 5228304, upload-time = "2026-04-18T04:33:41.647Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6b/10/e9842d2ec322ea65f0a7270aa0315a53abed06058b88ef1b027f620e7a5f/lxml-6.1.0-cp313-cp313-manylinux_2_28_i686.whl", hash = "sha256:4bd1bdb8a9e0e2dd229de19b5f8aebac80e916921b4b2c6ef8a52bc131d0c1f9", size = 5341578, upload-time = "2026-04-18T04:33:44.596Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/54/40d9403d7c2775fa7301d3ddd3464689bfe9ba71acc17dfff777071b4fdc/lxml-6.1.0-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:cbd7b79cdcb4986ad78a2662625882747f09db5e4cd7b2ae178a88c9c51b3dfe", size = 4700209, upload-time = "2026-04-18T04:33:47.552Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/85/b2/bbdcc2cf45dfc7dfffef4fd97e5c47b15919b6a365247d95d6f684ef5e82/lxml-6.1.0-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:43e4d297f11080ec9d64a4b1ad7ac02b4484c9f0e2179d9c4ef78e886e747b88", size = 5232365, upload-time = "2026-04-18T04:33:50.249Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/5a/b06875665e53aaba7127611a7bed3b7b9658e20b22bc2dd217a0b7ab0091/lxml-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cc16682cc987a3da00aa56a3aa3075b08edb10d9b1e476938cfdbee8f3b67181", size = 5043654, upload-time = "2026-04-18T04:33:52.71Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e9/9c/e71a069d09641c1a7abeb30e693f828c7c90a41cbe3d650b2d734d876f85/lxml-6.1.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:d6d8efe71429635f0559579092bb5e60560d7b9115ee38c4adbea35632e7fa24", size = 4769326, upload-time = "2026-04-18T04:33:55.244Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cc/06/7a9cd84b3d4ed79adf35f874750abb697dec0b4a81a836037b36e47c091a/lxml-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e39ab3a28af7784e206d8606ec0e4bcad0190f63a492bca95e94e5a4aef7f6e", size = 5635879, upload-time = "2026-04-18T04:33:58.509Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cc/f0/9d57916befc1e54c451712c7ee48e9e74e80ae4d03bdce49914e0aee42cd/lxml-6.1.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:9eb667bf50856c4a58145f8ca2d5e5be160191e79eb9e30855a476191b3c3495", size = 5224048, upload-time = "2026-04-18T04:34:00.943Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/99/75/90c4eefda0c08c92221fe0753db2d6699a4c628f76ff4465ec20dea84cc1/lxml-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7f4a77d6f7edf9230cee3e1f7f6764722a41604ee5681844f18db9a81ea0ec33", size = 5250241, upload-time = "2026-04-18T04:34:03.365Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5e/73/16596f7e4e38fa33084b9ccbccc22a15f82a290a055126f2c1541236d2ff/lxml-6.1.0-cp313-cp313-win32.whl", hash = "sha256:28902146ffbe5222df411c5d19e5352490122e14447e98cd118907ee3fd6ee62", size = 3596938, upload-time = "2026-04-18T04:31:56.206Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8e/63/981401c5680c1eb30893f00a19641ac80db5d1e7086c62cb4b13ed813038/lxml-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:4a1503c56e4e2b38dc76f2f2da7bae69670c0f1933e27cfa34b2fa5876410b16", size = 3995728, upload-time = "2026-04-18T04:31:58.763Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/e8/c358a38ac3e541d16a1b527e4e9cb78c0419b0506a070ace11777e5e8404/lxml-6.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:e0af85773850417d994d019741239b901b22c6680206f46a34766926e466141d", size = 3658372, upload-time = "2026-04-18T04:32:03.629Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/45/cee4cf203ef0bab5c52afc118da61d6b460c928f2893d40023cfa27e0b80/lxml-6.1.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:ab863fd37458fed6456525f297d21239d987800c46e67da5ef04fc6b3dd93ac8", size = 8576713, upload-time = "2026-04-18T04:32:06.831Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/a7/eda05babeb7e046839204eaf254cd4d7c9130ce2bbf0d9e90ea41af5654d/lxml-6.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:6fd8b1df8254ff4fd93fd31da1fc15770bde23ac045be9bb1f87425702f61cc9", size = 4623874, upload-time = "2026-04-18T04:32:10.755Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/e9/db5846de9b436b91890a62f29d80cd849ea17948a49bf532d5278ee69a9e/lxml-6.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:47024feaae386a92a146af0d2aeed65229bf6fff738e6a11dda6b0015fb8fd03", size = 4949535, upload-time = "2026-04-18T04:34:06.657Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/ba/0d3593373dcae1d68f40dc3c41a5a92f2544e68115eb2f62319a4c2a6500/lxml-6.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3f00972f84450204cd5d93a5395965e348956aaceaadec693a22ec743f8ae3eb", size = 5086881, upload-time = "2026-04-18T04:34:09.556Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/43/76/759a7484539ad1af0d125a9afe9c3fb5f82a8779fd1f5f56319d9e4ea2fd/lxml-6.1.0-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97faa0860e13b05b15a51fb4986421ef7a30f0b3334061c416e0981e9450ca4c", size = 5031305, upload-time = "2026-04-18T04:34:12.336Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dc/b9/c1f0daf981a11e47636126901fd4ab82429e18c57aeb0fc3ad2940b42d8b/lxml-6.1.0-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:972a6451204798675407beaad97b868d0c733d9a74dafefc63120b81b8c2de28", size = 5647522, upload-time = "2026-04-18T04:34:14.89Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/e6/1f533dcd205275363d9ba3511bcec52fa2df86abf8abe6a5f2c599f0dc31/lxml-6.1.0-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fe022f20bc4569ec66b63b3fb275a3d628d9d32da6326b2982584104db6d3086", size = 5239310, upload-time = "2026-04-18T04:34:17.652Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c3/8c/4175fb709c78a6e315ed814ed33be3defd8b8721067e70419a6cf6f971da/lxml-6.1.0-cp314-cp314-manylinux_2_28_i686.whl", hash = "sha256:75c4c7c619a744f972f4451bf5adf6d0fb00992a1ffc9fd78e13b0bc817cc99f", size = 5350799, upload-time = "2026-04-18T04:34:20.529Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/77/6ffdebc5994975f0dde4acb59761902bd9d9bb84422b9a0bd239a7da9ca8/lxml-6.1.0-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:3648f20d25102a22b6061c688beb3a805099ea4beb0a01ce62975d926944d292", size = 4697693, upload-time = "2026-04-18T04:34:23.541Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f8/f1/565f36bd5c73294602d48e04d23f81ff4c8736be6ba5e1d1ec670ac9be80/lxml-6.1.0-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:77b9f99b17cbf14026d1e618035077060fc7195dd940d025149f3e2e830fbfcb", size = 5250708, upload-time = "2026-04-18T04:34:26.001Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/11/a68ab9dd18c5c499404deb4005f4bc4e0e88e5b72cd755ad96efec81d18d/lxml-6.1.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:32662519149fd7a9db354175aa5e417d83485a8039b8aaa62f873ceee7ea4cad", size = 5084737, upload-time = "2026-04-18T04:34:28.32Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/78/e8f41e2c74f4af564e6a0348aea69fb6daaefa64bc071ef469823d22cc18/lxml-6.1.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:73d658216fc173cf2c939e90e07b941c5e12736b0bf6a99e7af95459cfe8eabb", size = 4737817, upload-time = "2026-04-18T04:34:30.784Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/06/2d/aa4e117aa2ce2f3b35d9ff246be74a2f8e853baba5d2a92c64744474603a/lxml-6.1.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ac4db068889f8772a4a698c5980ec302771bb545e10c4b095d4c8be26749616f", size = 5670753, upload-time = "2026-04-18T04:34:33.675Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/08/f5/dd745d50c0409031dbfcc4881740542a01e54d6f0110bd420fa7782110b8/lxml-6.1.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:45e9dfbd1b661eb64ba0d4dbe762bd210c42d86dd1e5bd2bdf89d634231beb43", size = 5238071, upload-time = "2026-04-18T04:34:36.12Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3e/74/ad424f36d0340a904665867dab310a3f1f4c96ff4039698de83b77f44c1f/lxml-6.1.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:89e8d73d09ac696a5ba42ec69787913d53284f12092f651506779314f10ba585", size = 5264319, upload-time = "2026-04-18T04:34:39.035Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/36/a15d8b3514ec889bfd6aa3609107fcb6c9189f8dc347f1c0b81eded8d87c/lxml-6.1.0-cp314-cp314-win32.whl", hash = "sha256:ebe33f4ec1b2de38ceb225a1749a2965855bffeef435ba93cd2d5d540783bf2f", size = 3657139, upload-time = "2026-04-18T04:32:20.006Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/a4/263ebb0710851a3c6c937180a9a86df1206fdfe53cc43005aa2237fd7736/lxml-6.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:398443df51c538bd578529aa7e5f7afc6c292644174b47961f3bf87fe5741120", size = 4064195, upload-time = "2026-04-18T04:32:23.876Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/80/68/2000f29d323b6c286de077ad20b429fc52272e44eae6d295467043e56012/lxml-6.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:8c8984e1d8c4b3949e419158fda14d921ff703a9ed8a47236c6eb7a2b6cb4946", size = 3741870, upload-time = "2026-04-18T04:32:27.922Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/30/e9/21383c7c8d43799f0da90224c0d7c921870d476ec9b3e01e1b2c0b8237c5/lxml-6.1.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:1081dd10bc6fa437db2500e13993abf7cc30716d0a2f40e65abb935f02ec559c", size = 8827548, upload-time = "2026-04-18T04:32:15.094Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/01/c6bc11cd587030dd4f719f65c5657960649fe3e19196c844c75bf32cd0d6/lxml-6.1.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:dabecc48db5f42ba348d1f5d5afdc54c6c4cc758e676926c7cd327045749517d", size = 4735866, upload-time = "2026-04-18T04:32:18.924Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/01/757132fff5f4acf25463b5298f1a46099f3a94480b806547b29ce5e385de/lxml-6.1.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e3dd5fe19c9e0ac818a9c7f132a5e43c1339ec1cbbfecb1a938bd3a47875b7c9", size = 4969476, upload-time = "2026-04-18T04:34:41.889Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/fb/1bc8b9d27ed64be7c8903db6c89e74dc8c2cd9ec630a7462e4654316dc5b/lxml-6.1.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9e7b0a4ca6dcc007a4cef00a761bba2dea959de4bd2df98f926b33c92ca5dfb9", size = 5103719, upload-time = "2026-04-18T04:34:44.797Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/e7/5bf82fa28133536a54601aae633b14988e89ed61d4c1eb6b899b023233aa/lxml-6.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d27bbe326c6b539c64b42638b18bc6003a8d88f76213a97ac9ed4f885efeab7", size = 5027890, upload-time = "2026-04-18T04:34:47.634Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2d/20/e048db5d4b4ea0366648aa595f26bb764b2670903fc585b87436d0a5032c/lxml-6.1.0-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4e425db0c5445ef0ad56b0eec54f89b88b2d884656e536a90b2f52aecb4ca86", size = 5596008, upload-time = "2026-04-18T04:34:51.503Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/c2/d10807bc8da4824b39e5bd01b5d05c077b6fd01bd91584167edf6b269d22/lxml-6.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4b89b098105b8599dc57adac95d1813409ac476d3c948a498775d3d0c6124bfb", size = 5224451, upload-time = "2026-04-18T04:34:54.263Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3c/15/2ebea45bea427e7f0057e9ce7b2d62c5aba20c6b001cca89ed0aadb3ad41/lxml-6.1.0-cp314-cp314t-manylinux_2_28_i686.whl", hash = "sha256:c4a699432846df86cc3de502ee85f445ebad748a1c6021d445f3e514d2cd4b1c", size = 5312135, upload-time = "2026-04-18T04:34:56.818Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/e2/87eeae151b0be2a308d49a7ec444ff3eb192b14251e62addb29d0bf3778f/lxml-6.1.0-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:30e7b2ed63b6c8e97cca8af048589a788ab5c9c905f36d9cf1c2bb549f450d2f", size = 4639126, upload-time = "2026-04-18T04:34:59.704Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a3/51/8a3f6a20902ad604dd746ec7b4000311b240d389dac5e9d95adefd349e0c/lxml-6.1.0-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:022981127642fe19866d2907d76241bb07ed21749601f727d5d5dd1ce5d1b773", size = 5232579, upload-time = "2026-04-18T04:35:02.658Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6d/d2/650d619bdbe048d2c3f2c31edb00e35670a5e2d65b4fe3b61bce37b19121/lxml-6.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:23cad0cc86046d4222f7f418910e46b89971c5a45d3c8abfad0f64b7b05e4a9b", size = 5084206, upload-time = "2026-04-18T04:35:05.175Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/8a/672ca1a3cbeabd1f511ca275a916c0514b747f4b85bdaae103b8fa92f307/lxml-6.1.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:21c3302068f50d1e8728c67c87ba92aa87043abee517aa2576cca1855326b405", size = 4758906, upload-time = "2026-04-18T04:35:08.098Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/be/f1/ef4b691da85c916cb2feb1eec7414f678162798ac85e042fa164419ac05c/lxml-6.1.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:be10838781cb3be19251e276910cd508fe127e27c3242e50521521a0f3781690", size = 5620553, upload-time = "2026-04-18T04:35:11.23Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/17/94e81def74107809755ac2782fdad4404420f1c92ca83433d117a6d5acf0/lxml-6.1.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:2173a7bffe97667bbf0767f8a99e587740a8c56fdf3befac4b09cb29a80276fd", size = 5229458, upload-time = "2026-04-18T04:35:14.254Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/21/55/c4be91b0f830a871fc1b0d730943d56013b683d4671d5198260e2eae722b/lxml-6.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c6854e9cf99c84beb004eecd7d3a3868ef1109bf2b1df92d7bc11e96a36c2180", size = 5247861, upload-time = "2026-04-18T04:35:17.006Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c2/ca/77123e4d77df3cb1e968ade7b1f808f5d3a5c1c96b18a33895397de292c1/lxml-6.1.0-cp314-cp314t-win32.whl", hash = "sha256:00750d63ef0031a05331b9223463b1c7c02b9004cef2346a5b2877f0f9494dd2", size = 3897377, upload-time = "2026-04-18T04:32:07.656Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/64/ce/3554833989d074267c063209bae8b09815e5656456a2d332b947806b05ff/lxml-6.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:80410c3a7e3c617af04de17caa9f9f20adaa817093293d69eae7d7d0522836f5", size = 4392701, upload-time = "2026-04-18T04:32:12.113Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/a0/9b916c68c0e57752c07f8f64b30138d9d4059dbeb27b90274dedbea128ff/lxml-6.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:26dd9f57ee3bd41e7d35b4c98a2ffd89ed11591649f421f0ec19f67d50ec67ac", size = 3817120, upload-time = "2026-04-18T04:32:15.803Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2707,6 +2711,18 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/dd/f0183ed0145e58cf9d286c1b2c14f63ccee987a4ff79ac85acc31b5d86bd/primp-0.15.0-cp38-abi3-win_amd64.whl", hash = "sha256:aeb6bd20b06dfc92cfe4436939c18de88a58c640752cf7f30d9e4ae893cdec32", size = 3149967, upload-time = "2025-04-17T11:41:07.067Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "prompt-toolkit"
|
||||
version = "3.0.52"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "wcwidth" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "propcache"
|
||||
version = "0.4.1"
|
||||
@@ -3113,6 +3129,19 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d4/24/a372aaf5c9b7208e7112038812994107bc65a84cd00e0354a88c2c77a617/pytest-9.0.3-py3-none-any.whl", hash = "sha256:2c5efc453d45394fdd706ade797c0a81091eccd1d6e4bccfcd476e2b8e0ab5d9", size = 375249, upload-time = "2026-04-07T17:16:16.13Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pytest-asyncio"
|
||||
version = "1.3.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pytest" },
|
||||
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "python-dateutil"
|
||||
version = "2.9.0.post0"
|
||||
@@ -3127,11 +3156,11 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "python-dotenv"
|
||||
version = "1.2.1"
|
||||
version = "1.2.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/82/ed/0301aeeac3e5353ef3d94b6ec08bbcabd04a72018415dcb29e588514bba8/python_dotenv-1.2.2.tar.gz", hash = "sha256:2c371a91fbd7ba082c2c1dc1f8bf89ca22564a087c2c287cd9b662adde799cf3", size = 50135, upload-time = "2026-03-01T16:00:26.196Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/d7/1959b9648791274998a9c3526f6d0ec8fd2233e4d4acce81bbae76b44b2a/python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a", size = 22101, upload-time = "2026-03-01T16:00:25.09Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3960,6 +3989,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e3/bd/fa9bb053192491b3867ba07d2343d9f2252e00811567d30ae8d0f78136fe/watchfiles-1.1.1-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:a916a2932da8f8ab582f242c065f5c81bed3462849ca79ee357dd9551b0e9b01", size = 622112, upload-time = "2025-10-14T15:05:50.941Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wcwidth"
|
||||
version = "0.6.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/35/a2/8e3becb46433538a38726c948d3399905a4c7cabd0df578ede5dc51f0ec2/wcwidth-0.6.0.tar.gz", hash = "sha256:cdc4e4262d6ef9a1a57e018384cbeb1208d8abbc64176027e2c2455c81313159", size = 159684, upload-time = "2026-02-06T19:19:40.919Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/68/5a/199c59e0a824a3db2b89c5d2dade7ab5f9624dbf6448dc291b46d5ec94d3/wcwidth-0.6.0-py3-none-any.whl", hash = "sha256:1a3a1e510b553315f8e146c54764f4fb6264ffad731b3d78088cdb1478ffbdad", size = 94189, upload-time = "2026-02-06T19:19:39.646Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "webencodings"
|
||||
version = "0.5.1"
|
||||
|
||||
+63
-3
@@ -12,7 +12,7 @@
|
||||
# ============================================================================
|
||||
# Bump this number when the config schema changes.
|
||||
# Run `make config-upgrade` to merge new fields into your local config.yaml.
|
||||
config_version: 7
|
||||
config_version: 8
|
||||
|
||||
# ============================================================================
|
||||
# Logging
|
||||
@@ -326,6 +326,27 @@ models:
|
||||
# chat_template_kwargs:
|
||||
# enable_thinking: true
|
||||
|
||||
|
||||
# Example: Qwen3-Coder deployed on MindIE Engine
|
||||
# - name: Qwen3_Coder_480B_MindIE
|
||||
# display_name: Qwen3-Coder-480B (MindIE)
|
||||
# use: deerflow.models.mindie_provider:MindIEChatModel
|
||||
# model: Qwen3-Coder-480B-A35B-Instruct-Client
|
||||
# base_url: http://localhost:8989/v1
|
||||
# api_key: $OPENAI_API_KEY
|
||||
# temperature: 0
|
||||
# max_retries: 1
|
||||
# supports_thinking: false
|
||||
# supports_vision: false
|
||||
# supports_reasoning_effort: false
|
||||
# # --- Advanced Network Settings ---
|
||||
# # Due to MindIE's streaming limitations with tool calling, the provider
|
||||
# # uses mock-streaming (awaiting full generation). Extended timeouts are required.
|
||||
# read_timeout: 900.0 # 15 minutes to prevent drops during long document generation
|
||||
# connect_timeout: 30.0
|
||||
# write_timeout: 60.0
|
||||
# pool_timeout: 30.0
|
||||
|
||||
# ============================================================================
|
||||
# Tool Groups Configuration
|
||||
# ============================================================================
|
||||
@@ -577,15 +598,41 @@ sandbox:
|
||||
# # Optional global max-turn override for all subagents
|
||||
# # max_turns: 120
|
||||
#
|
||||
# # Optional per-agent overrides
|
||||
# # Optional per-agent overrides (applies to both built-in and custom agents)
|
||||
# agents:
|
||||
# general-purpose:
|
||||
# timeout_seconds: 1800 # 30 minutes for complex multi-step tasks
|
||||
# max_turns: 160
|
||||
# # model: qwen3:32b # Use a specific model (default: inherit from lead agent)
|
||||
# # skills: # Skill whitelist (default: inherit all enabled skills)
|
||||
# # - web-search
|
||||
# # - data-analysis
|
||||
# bash:
|
||||
# timeout_seconds: 300 # 5 minutes for quick command execution
|
||||
# max_turns: 80
|
||||
# # skills: [] # No skills for bash agent
|
||||
#
|
||||
# # Custom subagent types: define specialized agents with their own prompts,
|
||||
# # tools, skills, and model configuration. Custom agents are available via
|
||||
# # the `task` tool alongside built-in types (general-purpose, bash).
|
||||
# # custom_agents:
|
||||
# # analysis:
|
||||
# # description: "Data analysis specialist for processing datasets and generating insights"
|
||||
# # system_prompt: |
|
||||
# # You are a data analysis subagent. Focus on:
|
||||
# # - Processing and analyzing datasets
|
||||
# # - Generating visualizations
|
||||
# # - Providing statistical insights
|
||||
# # tools: # Tool whitelist (null = inherit all)
|
||||
# # - bash
|
||||
# # - read_file
|
||||
# # - write_file
|
||||
# # skills: # Skill whitelist (null = inherit all, [] = none)
|
||||
# # - data-analysis
|
||||
# # - visualization
|
||||
# # model: inherit # 'inherit' uses parent's model
|
||||
# # max_turns: 80
|
||||
# # timeout_seconds: 600
|
||||
#
|
||||
# # Model override: by default, subagents inherit the lead agent's model.
|
||||
# # Set `model` to use a different model (e.g., a local Ollama model for cost savings).
|
||||
@@ -700,6 +747,19 @@ summarization:
|
||||
# The prompt should guide the model to extract important context
|
||||
summary_prompt: null
|
||||
|
||||
# Recently-loaded skill files are excluded from summarization so the agent
|
||||
# does not lose skill instructions after a compression pass. Claude Code uses
|
||||
# a similar strategy (keep the most recent ~5 skills, ~25k total tokens, with
|
||||
# a ~5k cap per skill). Set preserve_recent_skill_count to 0 to disable.
|
||||
preserve_recent_skill_count: 5
|
||||
preserve_recent_skill_tokens: 25000
|
||||
preserve_recent_skill_tokens_per_skill: 5000
|
||||
skill_file_read_tool_names:
|
||||
- read_file
|
||||
- read
|
||||
- view
|
||||
- cat
|
||||
|
||||
# ============================================================================
|
||||
# Memory Configuration
|
||||
# ============================================================================
|
||||
@@ -807,7 +867,7 @@ checkpointer:
|
||||
# enabled: false
|
||||
# bot_token: $SLACK_BOT_TOKEN # xoxb-...
|
||||
# app_token: $SLACK_APP_TOKEN # xapp-... (Socket Mode)
|
||||
# allowed_users: [] # empty = allow all
|
||||
# allowed_users: [] # empty = allow all; can also be a single Slack user ID string, e.g. U123456, but list form is recommended
|
||||
#
|
||||
# telegram:
|
||||
# enabled: false
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
{
|
||||
"mcpInterceptors": [
|
||||
"my_package.mcp.auth:build_auth_interceptor"
|
||||
],
|
||||
"mcpServers": {
|
||||
"filesystem": {
|
||||
"enabled": false,
|
||||
|
||||
@@ -89,7 +89,7 @@
|
||||
"tokenlens": "^1.3.1",
|
||||
"unist-util-visit": "^5.0.0",
|
||||
"use-stick-to-bottom": "^1.1.1",
|
||||
"uuid": "^13.0.0",
|
||||
"uuid": "^14.0.0",
|
||||
"zod": "^3.24.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
Generated
+129
-182
@@ -219,8 +219,8 @@ importers:
|
||||
specifier: ^1.1.1
|
||||
version: 1.1.3(react@19.2.4)
|
||||
uuid:
|
||||
specifier: ^13.0.0
|
||||
version: 13.0.0
|
||||
specifier: ^14.0.0
|
||||
version: 14.0.0
|
||||
zod:
|
||||
specifier: ^3.24.2
|
||||
version: 3.25.76
|
||||
@@ -744,105 +744,89 @@ packages:
|
||||
resolution: {integrity: sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-libvips-linux-arm@1.2.4':
|
||||
resolution: {integrity: sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-libvips-linux-ppc64@1.2.4':
|
||||
resolution: {integrity: sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-libvips-linux-riscv64@1.2.4':
|
||||
resolution: {integrity: sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-libvips-linux-s390x@1.2.4':
|
||||
resolution: {integrity: sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==}
|
||||
cpu: [s390x]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-libvips-linux-x64@1.2.4':
|
||||
resolution: {integrity: sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-libvips-linuxmusl-arm64@1.2.4':
|
||||
resolution: {integrity: sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@img/sharp-libvips-linuxmusl-x64@1.2.4':
|
||||
resolution: {integrity: sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@img/sharp-linux-arm64@0.34.5':
|
||||
resolution: {integrity: sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-linux-arm@0.34.5':
|
||||
resolution: {integrity: sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-linux-ppc64@0.34.5':
|
||||
resolution: {integrity: sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-linux-riscv64@0.34.5':
|
||||
resolution: {integrity: sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-linux-s390x@0.34.5':
|
||||
resolution: {integrity: sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [s390x]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-linux-x64@0.34.5':
|
||||
resolution: {integrity: sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@img/sharp-linuxmusl-arm64@0.34.5':
|
||||
resolution: {integrity: sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@img/sharp-linuxmusl-x64@0.34.5':
|
||||
resolution: {integrity: sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==}
|
||||
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@img/sharp-wasm32@0.34.5':
|
||||
resolution: {integrity: sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==}
|
||||
@@ -1002,42 +986,36 @@ packages:
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@napi-rs/simple-git-linux-arm64-musl@0.1.22':
|
||||
resolution: {integrity: sha512-MOs7fPyJiU/wqOpKzAOmOpxJ/TZfP4JwmvPad/cXTOWYwwyppMlXFRms3i98EU3HOazI/wMU2Ksfda3+TBluWA==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@napi-rs/simple-git-linux-ppc64-gnu@0.1.22':
|
||||
resolution: {integrity: sha512-L59dR30VBShRUIZ5/cQHU25upNgKS0AMQ7537J6LCIUEFwwXrKORZKJ8ceR+s3Sr/4jempWVvMdjEpFDE4HYww==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@napi-rs/simple-git-linux-s390x-gnu@0.1.22':
|
||||
resolution: {integrity: sha512-4FHkPlCSIZUGC6HiADffbe6NVoTBMd65pIwcd40IDbtFKOgFMBA+pWRqKiQ21FERGH16Zed7XHJJoY3jpOqtmQ==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [s390x]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@napi-rs/simple-git-linux-x64-gnu@0.1.22':
|
||||
resolution: {integrity: sha512-Ei1tM5Ho/dwknF3pOzqkNW9Iv8oFzRxE8uOhrITcdlpxRxVrBVptUF6/0WPdvd7R9747D/q61QG/AVyWsWLFKw==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@napi-rs/simple-git-linux-x64-musl@0.1.22':
|
||||
resolution: {integrity: sha512-zRYxg7it0p3rLyEJYoCoL2PQJNgArVLyNavHW03TFUAYkYi5bxQ/UFNVpgxMaXohr5yu7qCBqeo9j4DWeysalg==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@napi-rs/simple-git-win32-arm64-msvc@0.1.22':
|
||||
resolution: {integrity: sha512-XGFR1fj+Y9cWACcovV2Ey/R2xQOZKs8t+7KHPerYdJ4PtjVzGznI4c2EBHXtdOIYvkw7tL5rZ7FN1HJKdD5Quw==}
|
||||
@@ -1087,28 +1065,24 @@ packages:
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@next/swc-linux-arm64-musl@16.1.7':
|
||||
resolution: {integrity: sha512-uufcze7LYv0FQg9GnNeZ3/whYfo+1Q3HnQpm16o6Uyi0OVzLlk2ZWoY7j07KADZFY8qwDbsmFnMQP3p3+Ftprw==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@next/swc-linux-x64-gnu@16.1.7':
|
||||
resolution: {integrity: sha512-KWVf2gxYvHtvuT+c4MBOGxuse5TD7DsMFYSxVxRBnOzok/xryNeQSjXgxSv9QpIVlaGzEn/pIuI6Koosx8CGWA==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@next/swc-linux-x64-musl@16.1.7':
|
||||
resolution: {integrity: sha512-HguhaGwsGr1YAGs68uRKc4aGWxLET+NevJskOcCAwXbwj0fYX0RgZW2gsOCzr9S11CSQPIkxmoSbuVaBp4Z3dA==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@next/swc-win32-arm64-msvc@16.1.7':
|
||||
resolution: {integrity: sha512-S0n3KrDJokKTeFyM/vGGGR8+pCmXYrjNTk2ZozOL1C/JFdfUIL9O1ATaJOl5r2POe56iRChbsszrjMAdWSv7kQ==}
|
||||
@@ -1744,28 +1718,24 @@ packages:
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@resvg/resvg-js-linux-arm64-musl@2.6.2':
|
||||
resolution: {integrity: sha512-3h3dLPWNgSsD4lQBJPb4f+kvdOSJHa5PjTYVsWHxLUzH4IFTJUAnmuWpw4KqyQ3NA5QCyhw4TWgxk3jRkQxEKg==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@resvg/resvg-js-linux-x64-gnu@2.6.2':
|
||||
resolution: {integrity: sha512-IVUe+ckIerA7xMZ50duAZzwf1U7khQe2E0QpUxu5MBJNao5RqC0zwV/Zm965vw6D3gGFUl7j4m+oJjubBVoftw==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@resvg/resvg-js-linux-x64-musl@2.6.2':
|
||||
resolution: {integrity: sha512-UOf83vqTzoYQO9SZ0fPl2ZIFtNIz/Rr/y+7X8XRX1ZnBYsQ/tTb+cj9TE+KHOdmlTFBxhYzVkP2lRByCzqi4jQ==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@resvg/resvg-js-win32-arm64-msvc@2.6.2':
|
||||
resolution: {integrity: sha512-7C/RSgCa+7vqZ7qAbItfiaAWhyRSoD4l4BQAbVDqRRsRgY+S+hgS3in0Rxr7IorKUpGE69X48q6/nOAuTJQxeQ==}
|
||||
@@ -1793,141 +1763,128 @@ packages:
|
||||
resolution: {integrity: sha512-FqALmHI8D4o6lk/LRWDnhw95z5eO+eAa6ORjVg09YRR7BkcM6oPHU9uyC0gtQG5vpFLvgpeU4+zEAz2H8APHNw==}
|
||||
engines: {node: '>= 10'}
|
||||
|
||||
'@rollup/rollup-android-arm-eabi@4.60.1':
|
||||
resolution: {integrity: sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA==}
|
||||
'@rollup/rollup-android-arm-eabi@4.60.2':
|
||||
resolution: {integrity: sha512-dnlp69efPPg6Uaw2dVqzWRfAWRnYVb1XJ8CyyhIbZeaq4CA5/mLeZ1IEt9QqQxmbdvagjLIm2ZL8BxXv5lH4Yw==}
|
||||
cpu: [arm]
|
||||
os: [android]
|
||||
|
||||
'@rollup/rollup-android-arm64@4.60.1':
|
||||
resolution: {integrity: sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA==}
|
||||
'@rollup/rollup-android-arm64@4.60.2':
|
||||
resolution: {integrity: sha512-OqZTwDRDchGRHHm/hwLOL7uVPB9aUvI0am/eQuWMNyFHf5PSEQmyEeYYheA0EPPKUO/l0uigCp+iaTjoLjVoHg==}
|
||||
cpu: [arm64]
|
||||
os: [android]
|
||||
|
||||
'@rollup/rollup-darwin-arm64@4.60.1':
|
||||
resolution: {integrity: sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw==}
|
||||
'@rollup/rollup-darwin-arm64@4.60.2':
|
||||
resolution: {integrity: sha512-UwRE7CGpvSVEQS8gUMBe1uADWjNnVgP3Iusyda1nSRwNDCsRjnGc7w6El6WLQsXmZTbLZx9cecegumcitNfpmA==}
|
||||
cpu: [arm64]
|
||||
os: [darwin]
|
||||
|
||||
'@rollup/rollup-darwin-x64@4.60.1':
|
||||
resolution: {integrity: sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew==}
|
||||
'@rollup/rollup-darwin-x64@4.60.2':
|
||||
resolution: {integrity: sha512-gjEtURKLCC5VXm1I+2i1u9OhxFsKAQJKTVB8WvDAHF+oZlq0GTVFOlTlO1q3AlCTE/DF32c16ESvfgqR7343/g==}
|
||||
cpu: [x64]
|
||||
os: [darwin]
|
||||
|
||||
'@rollup/rollup-freebsd-arm64@4.60.1':
|
||||
resolution: {integrity: sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w==}
|
||||
'@rollup/rollup-freebsd-arm64@4.60.2':
|
||||
resolution: {integrity: sha512-Bcl6CYDeAgE70cqZaMojOi/eK63h5Me97ZqAQoh77VPjMysA/4ORQBRGo3rRy45x4MzVlU9uZxs8Uwy7ZaKnBw==}
|
||||
cpu: [arm64]
|
||||
os: [freebsd]
|
||||
|
||||
'@rollup/rollup-freebsd-x64@4.60.1':
|
||||
resolution: {integrity: sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g==}
|
||||
'@rollup/rollup-freebsd-x64@4.60.2':
|
||||
resolution: {integrity: sha512-LU+TPda3mAE2QB0/Hp5VyeKJivpC6+tlOXd1VMoXV/YFMvk/MNk5iXeBfB4MQGRWyOYVJ01625vjkr0Az98OJQ==}
|
||||
cpu: [x64]
|
||||
os: [freebsd]
|
||||
|
||||
'@rollup/rollup-linux-arm-gnueabihf@4.60.1':
|
||||
resolution: {integrity: sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g==}
|
||||
'@rollup/rollup-linux-arm-gnueabihf@4.60.2':
|
||||
resolution: {integrity: sha512-2QxQrM+KQ7DAW4o22j+XZ6RKdxjLD7BOWTP0Bv0tmjdyhXSsr2Ul1oJDQqh9Zf5qOwTuTc7Ek83mOFaKnodPjg==}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@rollup/rollup-linux-arm-musleabihf@4.60.1':
|
||||
resolution: {integrity: sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg==}
|
||||
'@rollup/rollup-linux-arm-musleabihf@4.60.2':
|
||||
resolution: {integrity: sha512-TbziEu2DVsTEOPif2mKWkMeDMLoYjx95oESa9fkQQK7r/Orta0gnkcDpzwufEcAO2BLBsD7mZkXGFqEdMRRwfw==}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@rollup/rollup-linux-arm64-gnu@4.60.1':
|
||||
resolution: {integrity: sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ==}
|
||||
'@rollup/rollup-linux-arm64-gnu@4.60.2':
|
||||
resolution: {integrity: sha512-bO/rVDiDUuM2YfuCUwZ1t1cP+/yqjqz+Xf2VtkdppefuOFS2OSeAfgafaHNkFn0t02hEyXngZkxtGqXcXwO8Rg==}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@rollup/rollup-linux-arm64-musl@4.60.1':
|
||||
resolution: {integrity: sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA==}
|
||||
'@rollup/rollup-linux-arm64-musl@4.60.2':
|
||||
resolution: {integrity: sha512-hr26p7e93Rl0Za+JwW7EAnwAvKkehh12BU1Llm9Ykiibg4uIr2rbpxG9WCf56GuvidlTG9KiiQT/TXT1yAWxTA==}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@rollup/rollup-linux-loong64-gnu@4.60.1':
|
||||
resolution: {integrity: sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ==}
|
||||
'@rollup/rollup-linux-loong64-gnu@4.60.2':
|
||||
resolution: {integrity: sha512-pOjB/uSIyDt+ow3k/RcLvUAOGpysT2phDn7TTUB3n75SlIgZzM6NKAqlErPhoFU+npgY3/n+2HYIQVbF70P9/A==}
|
||||
cpu: [loong64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@rollup/rollup-linux-loong64-musl@4.60.1':
|
||||
resolution: {integrity: sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw==}
|
||||
'@rollup/rollup-linux-loong64-musl@4.60.2':
|
||||
resolution: {integrity: sha512-2/w+q8jszv9Ww1c+6uJT3OwqhdmGP2/4T17cu8WuwyUuuaCDDJ2ojdyYwZzCxx0GcsZBhzi3HmH+J5pZNXnd+Q==}
|
||||
cpu: [loong64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@rollup/rollup-linux-ppc64-gnu@4.60.1':
|
||||
resolution: {integrity: sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw==}
|
||||
'@rollup/rollup-linux-ppc64-gnu@4.60.2':
|
||||
resolution: {integrity: sha512-11+aL5vKheYgczxtPVVRhdptAM2H7fcDR5Gw4/bTcteuZBlH4oP9f5s9zYO9aGZvoGeBpqXI/9TZZihZ609wKw==}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@rollup/rollup-linux-ppc64-musl@4.60.1':
|
||||
resolution: {integrity: sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg==}
|
||||
'@rollup/rollup-linux-ppc64-musl@4.60.2':
|
||||
resolution: {integrity: sha512-i16fokAGK46IVZuV8LIIwMdtqhin9hfYkCh8pf8iC3QU3LpwL+1FSFGej+O7l3E/AoknL6Dclh2oTdnRMpTzFQ==}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@rollup/rollup-linux-riscv64-gnu@4.60.1':
|
||||
resolution: {integrity: sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg==}
|
||||
'@rollup/rollup-linux-riscv64-gnu@4.60.2':
|
||||
resolution: {integrity: sha512-49FkKS6RGQoriDSK/6E2GkAsAuU5kETFCh7pG4yD/ylj9rKhTmO3elsnmBvRD4PgJPds5W2PkhC82aVwmUcJ7A==}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@rollup/rollup-linux-riscv64-musl@4.60.1':
|
||||
resolution: {integrity: sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg==}
|
||||
'@rollup/rollup-linux-riscv64-musl@4.60.2':
|
||||
resolution: {integrity: sha512-mjYNkHPfGpUR00DuM1ZZIgs64Hpf4bWcz9Z41+4Q+pgDx73UwWdAYyf6EG/lRFldmdHHzgrYyge5akFUW0D3mQ==}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@rollup/rollup-linux-s390x-gnu@4.60.1':
|
||||
resolution: {integrity: sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ==}
|
||||
'@rollup/rollup-linux-s390x-gnu@4.60.2':
|
||||
resolution: {integrity: sha512-ALyvJz965BQk8E9Al/JDKKDLH2kfKFLTGMlgkAbbYtZuJt9LU8DW3ZoDMCtQpXAltZxwBHevXz5u+gf0yA0YoA==}
|
||||
cpu: [s390x]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@rollup/rollup-linux-x64-gnu@4.60.1':
|
||||
resolution: {integrity: sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg==}
|
||||
'@rollup/rollup-linux-x64-gnu@4.60.2':
|
||||
resolution: {integrity: sha512-UQjrkIdWrKI626Du8lCQ6MJp/6V1LAo2bOK9OTu4mSn8GGXIkPXk/Vsp4bLHCd9Z9Iz2OTEaokUE90VweJgIYQ==}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@rollup/rollup-linux-x64-musl@4.60.1':
|
||||
resolution: {integrity: sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w==}
|
||||
'@rollup/rollup-linux-x64-musl@4.60.2':
|
||||
resolution: {integrity: sha512-bTsRGj6VlSdn/XD4CGyzMnzaBs9bsRxy79eTqTCBsA8TMIEky7qg48aPkvJvFe1HyzQ5oMZdg7AnVlWQSKLTnw==}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@rollup/rollup-openbsd-x64@4.60.1':
|
||||
resolution: {integrity: sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw==}
|
||||
'@rollup/rollup-openbsd-x64@4.60.2':
|
||||
resolution: {integrity: sha512-6d4Z3534xitaA1FcMWP7mQPq5zGwBmGbhphh2DwaA1aNIXUu3KTOfwrWpbwI4/Gr0uANo7NTtaykFyO2hPuFLg==}
|
||||
cpu: [x64]
|
||||
os: [openbsd]
|
||||
|
||||
'@rollup/rollup-openharmony-arm64@4.60.1':
|
||||
resolution: {integrity: sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA==}
|
||||
'@rollup/rollup-openharmony-arm64@4.60.2':
|
||||
resolution: {integrity: sha512-NetAg5iO2uN7eB8zE5qrZ3CSil+7IJt4WDFLcC75Ymywq1VZVD6qJ6EvNLjZ3rEm6gB7XW5JdT60c6MN35Z85Q==}
|
||||
cpu: [arm64]
|
||||
os: [openharmony]
|
||||
|
||||
'@rollup/rollup-win32-arm64-msvc@4.60.1':
|
||||
resolution: {integrity: sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g==}
|
||||
'@rollup/rollup-win32-arm64-msvc@4.60.2':
|
||||
resolution: {integrity: sha512-NCYhOotpgWZ5kdxCZsv6Iudx0wX8980Q/oW4pNFNihpBKsDbEA1zpkfxJGC0yugsUuyDZ7gL37dbzwhR0VI7pQ==}
|
||||
cpu: [arm64]
|
||||
os: [win32]
|
||||
|
||||
'@rollup/rollup-win32-ia32-msvc@4.60.1':
|
||||
resolution: {integrity: sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg==}
|
||||
'@rollup/rollup-win32-ia32-msvc@4.60.2':
|
||||
resolution: {integrity: sha512-RXsaOqXxfoUBQoOgvmmijVxJnW2IGB0eoMO7F8FAjaj0UTywUO/luSqimWBJn04WNgUkeNhh7fs7pESXajWmkg==}
|
||||
cpu: [ia32]
|
||||
os: [win32]
|
||||
|
||||
'@rollup/rollup-win32-x64-gnu@4.60.1':
|
||||
resolution: {integrity: sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg==}
|
||||
'@rollup/rollup-win32-x64-gnu@4.60.2':
|
||||
resolution: {integrity: sha512-qdAzEULD+/hzObedtmV6iBpdL5TIbKVztGiK7O3/KYSf+HIzU257+MX1EXJcyIiDbMAqmbwaufcYPvyRryeZtA==}
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
'@rollup/rollup-win32-x64-msvc@4.60.1':
|
||||
resolution: {integrity: sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ==}
|
||||
'@rollup/rollup-win32-x64-msvc@4.60.2':
|
||||
resolution: {integrity: sha512-Nd/SgG27WoA9e+/TdK74KnHz852TLa94ovOYySo/yMPuTmpckK/jIF2jSwS3g7ELSKXK13/cVdmg1Z/DaCWKxA==}
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
@@ -2053,28 +2010,24 @@ packages:
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@tailwindcss/oxide-linux-arm64-musl@4.1.18':
|
||||
resolution: {integrity: sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@tailwindcss/oxide-linux-x64-gnu@4.1.18':
|
||||
resolution: {integrity: sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@tailwindcss/oxide-linux-x64-musl@4.1.18':
|
||||
resolution: {integrity: sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@tailwindcss/oxide-wasm32-wasi@4.1.18':
|
||||
resolution: {integrity: sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA==}
|
||||
@@ -2474,49 +2427,41 @@ packages:
|
||||
resolution: {integrity: sha512-34gw7PjDGB9JgePJEmhEqBhWvCiiWCuXsL9hYphDF7crW7UgI05gyBAi6MF58uGcMOiOqSJ2ybEeCvHcq0BCmQ==}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@unrs/resolver-binding-linux-arm64-musl@1.11.1':
|
||||
resolution: {integrity: sha512-RyMIx6Uf53hhOtJDIamSbTskA99sPHS96wxVE/bJtePJJtpdKGXO1wY90oRdXuYOGOTuqjT8ACccMc4K6QmT3w==}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@unrs/resolver-binding-linux-ppc64-gnu@1.11.1':
|
||||
resolution: {integrity: sha512-D8Vae74A4/a+mZH0FbOkFJL9DSK2R6TFPC9M+jCWYia/q2einCubX10pecpDiTmkJVUH+y8K3BZClycD8nCShA==}
|
||||
cpu: [ppc64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@unrs/resolver-binding-linux-riscv64-gnu@1.11.1':
|
||||
resolution: {integrity: sha512-frxL4OrzOWVVsOc96+V3aqTIQl1O2TjgExV4EKgRY09AJ9leZpEg8Ak9phadbuX0BA4k8U5qtvMSQQGGmaJqcQ==}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@unrs/resolver-binding-linux-riscv64-musl@1.11.1':
|
||||
resolution: {integrity: sha512-mJ5vuDaIZ+l/acv01sHoXfpnyrNKOk/3aDoEdLO/Xtn9HuZlDD6jKxHlkN8ZhWyLJsRBxfv9GYM2utQ1SChKew==}
|
||||
cpu: [riscv64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@unrs/resolver-binding-linux-s390x-gnu@1.11.1':
|
||||
resolution: {integrity: sha512-kELo8ebBVtb9sA7rMe1Cph4QHreByhaZ2QEADd9NzIQsYNQpt9UkM9iqr2lhGr5afh885d/cB5QeTXSbZHTYPg==}
|
||||
cpu: [s390x]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@unrs/resolver-binding-linux-x64-gnu@1.11.1':
|
||||
resolution: {integrity: sha512-C3ZAHugKgovV5YvAMsxhq0gtXuwESUKc5MhEtjBpLoHPLYM+iuwSj3lflFwK3DPm68660rZ7G8BMcwSro7hD5w==}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
'@unrs/resolver-binding-linux-x64-musl@1.11.1':
|
||||
resolution: {integrity: sha512-rV0YSoyhK2nZ4vEswT/QwqzqQXw5I6CjoaYMOX0TqBlWhojUf8P94mvI7nuJTeaCkkds3QE4+zS8Ko+GdXuZtA==}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
'@unrs/resolver-binding-wasm32-wasi@1.11.1':
|
||||
resolution: {integrity: sha512-5u4RkfxJm+Ng7IWgkzi3qrFOvLvQYnPBmjmZQ8+szTK/b31fQCnleNl1GgEt7nIsZRIf5PLhPwT0WM+q45x/UQ==}
|
||||
@@ -3813,8 +3758,8 @@ packages:
|
||||
resolution: {integrity: sha512-Ox1pJVrDCyGHMG9CFg1tmrRUMRPRsAWYc/PinY0XzJU4K7y7vjNoLKIQ7BR5UJMCxNN8EM1MNDmHWA/B3aZUuw==}
|
||||
engines: {node: '>=6'}
|
||||
|
||||
hookable@6.1.0:
|
||||
resolution: {integrity: sha512-ZoKZSJgu8voGK2geJS+6YtYjvIzu9AOM/KZXsBxr83uhLL++e9pEv/dlgwgy3dvHg06kTz6JOh1hk3C8Ceiymw==}
|
||||
hookable@6.1.1:
|
||||
resolution: {integrity: sha512-U9LYDy1CwhMCnprUfeAZWZGByVbhd54hwepegYTK7Pi5NvqEj63ifz5z+xukznehT7i6NIZRu89Ay1AZmRsLEQ==}
|
||||
|
||||
html-url-attributes@3.0.1:
|
||||
resolution: {integrity: sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==}
|
||||
@@ -4181,28 +4126,24 @@ packages:
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
lightningcss-linux-arm64-musl@1.30.2:
|
||||
resolution: {integrity: sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
lightningcss-linux-x64-gnu@1.30.2:
|
||||
resolution: {integrity: sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [glibc]
|
||||
|
||||
lightningcss-linux-x64-musl@1.30.2:
|
||||
resolution: {integrity: sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
libc: [musl]
|
||||
|
||||
lightningcss-win32-arm64-msvc@1.30.2:
|
||||
resolution: {integrity: sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==}
|
||||
@@ -4243,8 +4184,8 @@ packages:
|
||||
resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==}
|
||||
hasBin: true
|
||||
|
||||
lru-cache@11.3.3:
|
||||
resolution: {integrity: sha512-JvNw9Y81y33E+BEYPr0U7omo+U9AySnsMsEiXgwT6yqd31VQWTLNQqmT4ou5eqPFUrTfIDFta2wKhB1hyohtAQ==}
|
||||
lru-cache@11.3.5:
|
||||
resolution: {integrity: sha512-NxVFwLAnrd9i7KUBxC4DrUhmgjzOs+1Qm50D3oF1/oL+r1NpZ4gA7xvG0/zJ8evR7zIKn4vLf7qTNduWFtCrRw==}
|
||||
engines: {node: 20 || >=22}
|
||||
|
||||
lucide-react@0.542.0:
|
||||
@@ -4834,12 +4775,12 @@ packages:
|
||||
resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==}
|
||||
engines: {node: ^10 || ^12 || >=14}
|
||||
|
||||
postcss@8.5.6:
|
||||
resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==}
|
||||
postcss@8.5.10:
|
||||
resolution: {integrity: sha512-pMMHxBOZKFU6HgAZ4eyGnwXF/EvPGGqUr0MnZ5+99485wwW41kW91A4LOGxSHhgugZmSChL5AlElNdwlNgcnLQ==}
|
||||
engines: {node: ^10 || ^12 || >=14}
|
||||
|
||||
postcss@8.5.9:
|
||||
resolution: {integrity: sha512-7a70Nsot+EMX9fFU3064K/kdHWZqGVY+BADLyXc8Dfv+mTLLVl6JzJpPaCZ2kQL9gIJvKXSLMHhqdRRjwQeFtw==}
|
||||
postcss@8.5.6:
|
||||
resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==}
|
||||
engines: {node: ^10 || ^12 || >=14}
|
||||
|
||||
prelude-ls@1.2.1:
|
||||
@@ -5125,8 +5066,8 @@ packages:
|
||||
robust-predicates@3.0.2:
|
||||
resolution: {integrity: sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==}
|
||||
|
||||
rollup@4.60.1:
|
||||
resolution: {integrity: sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w==}
|
||||
rollup@4.60.2:
|
||||
resolution: {integrity: sha512-J9qZyW++QK/09NyN/zeO0dG/1GdGfyp9lV8ajHnRVLfo/uFsbji5mHnDgn/qYdUHyCkM2N+8VyspgZclfAh0eQ==}
|
||||
engines: {node: '>=18.0.0', npm: '>=8.0.0'}
|
||||
hasBin: true
|
||||
|
||||
@@ -5685,6 +5626,10 @@ packages:
|
||||
resolution: {integrity: sha512-XQegIaBTVUjSHliKqcnFqYypAd4S+WCYt5NIeRs6w/UAry7z8Y9j5ZwRRL4kzq9U3sD6v+85er9FvkEaBpji2w==}
|
||||
hasBin: true
|
||||
|
||||
uuid@14.0.0:
|
||||
resolution: {integrity: sha512-Qo+uWgilfSmAhXCMav1uYFynlQO7fMFiMVZsQqZRMIXp0O7rR7qjkj+cPvBHLgBqi960QCoo/PH2/6ZtVqKvrg==}
|
||||
hasBin: true
|
||||
|
||||
vfile-location@5.0.3:
|
||||
resolution: {integrity: sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==}
|
||||
|
||||
@@ -7478,79 +7423,79 @@ snapshots:
|
||||
|
||||
'@resvg/resvg-wasm@2.6.2': {}
|
||||
|
||||
'@rollup/rollup-android-arm-eabi@4.60.1':
|
||||
'@rollup/rollup-android-arm-eabi@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-android-arm64@4.60.1':
|
||||
'@rollup/rollup-android-arm64@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-darwin-arm64@4.60.1':
|
||||
'@rollup/rollup-darwin-arm64@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-darwin-x64@4.60.1':
|
||||
'@rollup/rollup-darwin-x64@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-freebsd-arm64@4.60.1':
|
||||
'@rollup/rollup-freebsd-arm64@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-freebsd-x64@4.60.1':
|
||||
'@rollup/rollup-freebsd-x64@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-arm-gnueabihf@4.60.1':
|
||||
'@rollup/rollup-linux-arm-gnueabihf@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-arm-musleabihf@4.60.1':
|
||||
'@rollup/rollup-linux-arm-musleabihf@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-arm64-gnu@4.60.1':
|
||||
'@rollup/rollup-linux-arm64-gnu@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-arm64-musl@4.60.1':
|
||||
'@rollup/rollup-linux-arm64-musl@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-loong64-gnu@4.60.1':
|
||||
'@rollup/rollup-linux-loong64-gnu@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-loong64-musl@4.60.1':
|
||||
'@rollup/rollup-linux-loong64-musl@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-ppc64-gnu@4.60.1':
|
||||
'@rollup/rollup-linux-ppc64-gnu@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-ppc64-musl@4.60.1':
|
||||
'@rollup/rollup-linux-ppc64-musl@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-riscv64-gnu@4.60.1':
|
||||
'@rollup/rollup-linux-riscv64-gnu@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-riscv64-musl@4.60.1':
|
||||
'@rollup/rollup-linux-riscv64-musl@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-s390x-gnu@4.60.1':
|
||||
'@rollup/rollup-linux-s390x-gnu@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-x64-gnu@4.60.1':
|
||||
'@rollup/rollup-linux-x64-gnu@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-linux-x64-musl@4.60.1':
|
||||
'@rollup/rollup-linux-x64-musl@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-openbsd-x64@4.60.1':
|
||||
'@rollup/rollup-openbsd-x64@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-openharmony-arm64@4.60.1':
|
||||
'@rollup/rollup-openharmony-arm64@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-win32-arm64-msvc@4.60.1':
|
||||
'@rollup/rollup-win32-arm64-msvc@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-win32-ia32-msvc@4.60.1':
|
||||
'@rollup/rollup-win32-ia32-msvc@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-win32-x64-gnu@4.60.1':
|
||||
'@rollup/rollup-win32-x64-gnu@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rollup/rollup-win32-x64-msvc@4.60.1':
|
||||
'@rollup/rollup-win32-x64-msvc@4.60.2':
|
||||
optional: true
|
||||
|
||||
'@rtsao/scc@1.1.0': {}
|
||||
@@ -8093,7 +8038,7 @@ snapshots:
|
||||
|
||||
'@unhead/vue@2.1.4(vue@3.5.28(typescript@5.9.3))':
|
||||
dependencies:
|
||||
hookable: 6.1.0
|
||||
hookable: 6.1.1
|
||||
unhead: 2.1.4
|
||||
vue: 3.5.28(typescript@5.9.3)
|
||||
|
||||
@@ -8244,7 +8189,7 @@ snapshots:
|
||||
'@vue/shared': 3.5.28
|
||||
estree-walker: 2.0.2
|
||||
magic-string: 0.30.21
|
||||
postcss: 8.5.9
|
||||
postcss: 8.5.10
|
||||
source-map-js: 1.2.1
|
||||
|
||||
'@vue/compiler-ssr@3.5.28':
|
||||
@@ -9789,7 +9734,7 @@ snapshots:
|
||||
|
||||
hex-rgb@4.3.0: {}
|
||||
|
||||
hookable@6.1.0: {}
|
||||
hookable@6.1.1: {}
|
||||
|
||||
html-url-attributes@3.0.1: {}
|
||||
|
||||
@@ -10158,7 +10103,7 @@ snapshots:
|
||||
dependencies:
|
||||
js-tokens: 4.0.0
|
||||
|
||||
lru-cache@11.3.3: {}
|
||||
lru-cache@11.3.5: {}
|
||||
|
||||
lucide-react@0.542.0(react@19.2.4):
|
||||
dependencies:
|
||||
@@ -11152,13 +11097,13 @@ snapshots:
|
||||
picocolors: 1.1.1
|
||||
source-map-js: 1.2.1
|
||||
|
||||
postcss@8.5.6:
|
||||
postcss@8.5.10:
|
||||
dependencies:
|
||||
nanoid: 3.3.11
|
||||
picocolors: 1.1.1
|
||||
source-map-js: 1.2.1
|
||||
|
||||
postcss@8.5.9:
|
||||
postcss@8.5.6:
|
||||
dependencies:
|
||||
nanoid: 3.3.11
|
||||
picocolors: 1.1.1
|
||||
@@ -11493,35 +11438,35 @@ snapshots:
|
||||
|
||||
robust-predicates@3.0.2: {}
|
||||
|
||||
rollup@4.60.1:
|
||||
rollup@4.60.2:
|
||||
dependencies:
|
||||
'@types/estree': 1.0.8
|
||||
optionalDependencies:
|
||||
'@rollup/rollup-android-arm-eabi': 4.60.1
|
||||
'@rollup/rollup-android-arm64': 4.60.1
|
||||
'@rollup/rollup-darwin-arm64': 4.60.1
|
||||
'@rollup/rollup-darwin-x64': 4.60.1
|
||||
'@rollup/rollup-freebsd-arm64': 4.60.1
|
||||
'@rollup/rollup-freebsd-x64': 4.60.1
|
||||
'@rollup/rollup-linux-arm-gnueabihf': 4.60.1
|
||||
'@rollup/rollup-linux-arm-musleabihf': 4.60.1
|
||||
'@rollup/rollup-linux-arm64-gnu': 4.60.1
|
||||
'@rollup/rollup-linux-arm64-musl': 4.60.1
|
||||
'@rollup/rollup-linux-loong64-gnu': 4.60.1
|
||||
'@rollup/rollup-linux-loong64-musl': 4.60.1
|
||||
'@rollup/rollup-linux-ppc64-gnu': 4.60.1
|
||||
'@rollup/rollup-linux-ppc64-musl': 4.60.1
|
||||
'@rollup/rollup-linux-riscv64-gnu': 4.60.1
|
||||
'@rollup/rollup-linux-riscv64-musl': 4.60.1
|
||||
'@rollup/rollup-linux-s390x-gnu': 4.60.1
|
||||
'@rollup/rollup-linux-x64-gnu': 4.60.1
|
||||
'@rollup/rollup-linux-x64-musl': 4.60.1
|
||||
'@rollup/rollup-openbsd-x64': 4.60.1
|
||||
'@rollup/rollup-openharmony-arm64': 4.60.1
|
||||
'@rollup/rollup-win32-arm64-msvc': 4.60.1
|
||||
'@rollup/rollup-win32-ia32-msvc': 4.60.1
|
||||
'@rollup/rollup-win32-x64-gnu': 4.60.1
|
||||
'@rollup/rollup-win32-x64-msvc': 4.60.1
|
||||
'@rollup/rollup-android-arm-eabi': 4.60.2
|
||||
'@rollup/rollup-android-arm64': 4.60.2
|
||||
'@rollup/rollup-darwin-arm64': 4.60.2
|
||||
'@rollup/rollup-darwin-x64': 4.60.2
|
||||
'@rollup/rollup-freebsd-arm64': 4.60.2
|
||||
'@rollup/rollup-freebsd-x64': 4.60.2
|
||||
'@rollup/rollup-linux-arm-gnueabihf': 4.60.2
|
||||
'@rollup/rollup-linux-arm-musleabihf': 4.60.2
|
||||
'@rollup/rollup-linux-arm64-gnu': 4.60.2
|
||||
'@rollup/rollup-linux-arm64-musl': 4.60.2
|
||||
'@rollup/rollup-linux-loong64-gnu': 4.60.2
|
||||
'@rollup/rollup-linux-loong64-musl': 4.60.2
|
||||
'@rollup/rollup-linux-ppc64-gnu': 4.60.2
|
||||
'@rollup/rollup-linux-ppc64-musl': 4.60.2
|
||||
'@rollup/rollup-linux-riscv64-gnu': 4.60.2
|
||||
'@rollup/rollup-linux-riscv64-musl': 4.60.2
|
||||
'@rollup/rollup-linux-s390x-gnu': 4.60.2
|
||||
'@rollup/rollup-linux-x64-gnu': 4.60.2
|
||||
'@rollup/rollup-linux-x64-musl': 4.60.2
|
||||
'@rollup/rollup-openbsd-x64': 4.60.2
|
||||
'@rollup/rollup-openharmony-arm64': 4.60.2
|
||||
'@rollup/rollup-win32-arm64-msvc': 4.60.2
|
||||
'@rollup/rollup-win32-ia32-msvc': 4.60.2
|
||||
'@rollup/rollup-win32-x64-gnu': 4.60.2
|
||||
'@rollup/rollup-win32-x64-msvc': 4.60.2
|
||||
fsevents: 2.3.3
|
||||
|
||||
rou3@0.7.12: {}
|
||||
@@ -12011,7 +11956,7 @@ snapshots:
|
||||
|
||||
unhead@2.1.4:
|
||||
dependencies:
|
||||
hookable: 6.1.0
|
||||
hookable: 6.1.1
|
||||
|
||||
unicode-trie@2.0.0:
|
||||
dependencies:
|
||||
@@ -12119,7 +12064,7 @@ snapshots:
|
||||
chokidar: 5.0.0
|
||||
destr: 2.0.5
|
||||
h3: 1.15.11
|
||||
lru-cache: 11.3.3
|
||||
lru-cache: 11.3.5
|
||||
node-fetch-native: 1.6.7
|
||||
ofetch: 1.5.1
|
||||
ufo: 1.6.3
|
||||
@@ -12174,6 +12119,8 @@ snapshots:
|
||||
|
||||
uuid@13.0.0: {}
|
||||
|
||||
uuid@14.0.0: {}
|
||||
|
||||
vfile-location@5.0.3:
|
||||
dependencies:
|
||||
'@types/unist': 3.0.3
|
||||
@@ -12194,8 +12141,8 @@ snapshots:
|
||||
esbuild: 0.27.7
|
||||
fdir: 6.5.0(picomatch@4.0.4)
|
||||
picomatch: 4.0.4
|
||||
postcss: 8.5.9
|
||||
rollup: 4.60.1
|
||||
postcss: 8.5.10
|
||||
rollup: 4.60.2
|
||||
tinyglobby: 0.2.16
|
||||
optionalDependencies:
|
||||
'@types/node': 20.19.33
|
||||
|
||||
@@ -11,6 +11,7 @@ import { BrainIcon, ChevronDownIcon } from "lucide-react";
|
||||
import type { ComponentProps, ReactNode } from "react";
|
||||
import { createContext, memo, useContext, useEffect, useState } from "react";
|
||||
import { Streamdown } from "streamdown";
|
||||
import { reasoningPlugins } from "@/core/streamdown/plugins";
|
||||
import { Shimmer } from "./shimmer";
|
||||
|
||||
type ReasoningContextValue = {
|
||||
@@ -122,9 +123,9 @@ const defaultGetThinkingMessage = (isStreaming: boolean, duration?: number) => {
|
||||
return <Shimmer duration={1}>Thinking...</Shimmer>;
|
||||
}
|
||||
if (duration === undefined) {
|
||||
return <p>Thought for a few seconds</p>;
|
||||
return <span>Thought for a few seconds</span>;
|
||||
}
|
||||
return <p>Thought for {duration} seconds</p>;
|
||||
return <span>Thought for {duration} seconds</span>;
|
||||
};
|
||||
|
||||
export const ReasoningTrigger = memo(
|
||||
@@ -177,7 +178,7 @@ export const ReasoningContent = memo(
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
<Streamdown {...props}>{children}</Streamdown>
|
||||
<Streamdown {...reasoningPlugins}>{children}</Streamdown>
|
||||
</CollapsibleContent>
|
||||
),
|
||||
);
|
||||
|
||||
@@ -9,7 +9,6 @@ import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogDescription,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
} from "@/components/ui/dialog";
|
||||
|
||||
@@ -44,14 +43,12 @@ function CommandDialog({
|
||||
}) {
|
||||
return (
|
||||
<Dialog {...props}>
|
||||
<DialogHeader className="sr-only">
|
||||
<DialogTitle>{title}</DialogTitle>
|
||||
<DialogDescription>{description}</DialogDescription>
|
||||
</DialogHeader>
|
||||
<DialogContent
|
||||
className={cn("overflow-hidden p-0", className)}
|
||||
showCloseButton={showCloseButton}
|
||||
>
|
||||
<DialogTitle className="sr-only">{title}</DialogTitle>
|
||||
<DialogDescription className="sr-only">{description}</DialogDescription>
|
||||
<Command className="[&_[cmdk-group-heading]]:text-muted-foreground **:data-[slot=command-input-wrapper]:h-12 [&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:font-medium [&_[cmdk-group]]:px-2 [&_[cmdk-group]:not([hidden])_~[cmdk-group]]:pt-0 [&_[cmdk-input-wrapper]_svg]:h-5 [&_[cmdk-input-wrapper]_svg]:w-5 [&_[cmdk-input]]:h-12 [&_[cmdk-item]]:px-2 [&_[cmdk-item]]:py-3 [&_[cmdk-item]_svg]:h-5 [&_[cmdk-item]_svg]:w-5">
|
||||
{children}
|
||||
</Command>
|
||||
|
||||
@@ -79,14 +79,27 @@ export function AgentCard({ agent }: AgentCardProps) {
|
||||
)}
|
||||
</CardHeader>
|
||||
|
||||
{agent.tool_groups && agent.tool_groups.length > 0 && (
|
||||
{(agent.tool_groups?.length ?? agent.skills?.length ?? 0) > 0 && (
|
||||
<CardContent className="pt-0 pb-3">
|
||||
<div className="flex flex-wrap gap-1">
|
||||
{agent.tool_groups.map((group) => (
|
||||
<Badge key={group} variant="outline" className="text-xs">
|
||||
{agent.tool_groups?.map((group) => (
|
||||
<Badge
|
||||
key={`tg:${group}`}
|
||||
variant="outline"
|
||||
className="text-xs"
|
||||
>
|
||||
{group}
|
||||
</Badge>
|
||||
))}
|
||||
{agent.skills?.map((skill) => (
|
||||
<Badge
|
||||
key={`sk:${skill}`}
|
||||
variant="secondary"
|
||||
className="text-xs"
|
||||
>
|
||||
{skill}
|
||||
</Badge>
|
||||
))}
|
||||
</div>
|
||||
</CardContent>
|
||||
)}
|
||||
|
||||
@@ -555,8 +555,8 @@ export function MemorySettingsPage() {
|
||||
</div>
|
||||
) : null}
|
||||
|
||||
<div className="flex flex-col gap-3 xl:flex-row xl:items-center xl:justify-between">
|
||||
<div className="flex flex-1 flex-col gap-3 sm:flex-row sm:items-center">
|
||||
<div className="flex min-w-0 flex-col gap-3 xl:flex-row xl:items-center xl:justify-between">
|
||||
<div className="flex min-w-0 flex-1 flex-col gap-3 sm:flex-row sm:items-center">
|
||||
<Input
|
||||
value={query}
|
||||
onChange={(event) => setQuery(event.target.value)}
|
||||
@@ -579,7 +579,7 @@ export function MemorySettingsPage() {
|
||||
</ToggleGroup>
|
||||
</div>
|
||||
|
||||
<div className="flex flex-wrap gap-2">
|
||||
<div className="flex min-w-0 flex-wrap gap-2 xl:justify-end">
|
||||
<input
|
||||
ref={fileInputRef}
|
||||
type="file"
|
||||
@@ -624,12 +624,12 @@ export function MemorySettingsPage() {
|
||||
) : null}
|
||||
|
||||
{shouldRenderSummariesBlock ? (
|
||||
<div className="rounded-lg border p-4">
|
||||
<div className="min-w-0 rounded-lg border p-4">
|
||||
<div className="text-muted-foreground mb-4 text-sm">
|
||||
{summaryReadOnly}
|
||||
</div>
|
||||
<Streamdown
|
||||
className="size-full [&>*:first-child]:mt-0 [&>*:last-child]:mb-0"
|
||||
className="size-full min-w-0 [overflow-wrap:anywhere] [&>*:first-child]:mt-0 [&>*:last-child]:mb-0"
|
||||
{...streamdownPlugins}
|
||||
>
|
||||
{summariesToMarkdown(memory, filteredSectionGroups, t)}
|
||||
@@ -638,7 +638,7 @@ export function MemorySettingsPage() {
|
||||
) : null}
|
||||
|
||||
{shouldRenderFactsBlock ? (
|
||||
<div className="rounded-lg border p-4">
|
||||
<div className="min-w-0 rounded-lg border p-4">
|
||||
<div className="mb-4">
|
||||
<h3 className="text-base font-medium">
|
||||
{t.settings.memory.markdown.facts}
|
||||
@@ -661,7 +661,7 @@ export function MemorySettingsPage() {
|
||||
key={fact.id}
|
||||
className="flex flex-col gap-3 rounded-md border p-3 sm:flex-row sm:items-start sm:justify-between"
|
||||
>
|
||||
<div className="min-w-0 space-y-2">
|
||||
<div className="min-w-0 space-y-2 [overflow-wrap:anywhere]">
|
||||
<div className="flex flex-wrap gap-x-4 gap-y-1 text-sm">
|
||||
<span>
|
||||
<span className="text-muted-foreground">
|
||||
@@ -697,7 +697,7 @@ export function MemorySettingsPage() {
|
||||
)}
|
||||
</span>
|
||||
</div>
|
||||
<p className="text-sm break-words">
|
||||
<p className="text-sm [overflow-wrap:anywhere]">
|
||||
{fact.content}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
@@ -97,7 +97,7 @@ export function SettingsDialog(props: SettingsDialogProps) {
|
||||
{t.settings.description}
|
||||
</p>
|
||||
</DialogHeader>
|
||||
<div className="grid min-h-0 flex-1 gap-4 md:grid-cols-[220px_1fr]">
|
||||
<div className="grid min-h-0 flex-1 gap-4 md:grid-cols-[220px_minmax(0,1fr)]">
|
||||
<nav className="bg-sidebar min-h-0 overflow-y-auto rounded-lg border p-2">
|
||||
<ul className="space-y-1 pr-1">
|
||||
{sections.map(({ id, label, icon: Icon }) => {
|
||||
@@ -122,8 +122,8 @@ export function SettingsDialog(props: SettingsDialogProps) {
|
||||
})}
|
||||
</ul>
|
||||
</nav>
|
||||
<ScrollArea className="h-full min-h-0 rounded-lg border">
|
||||
<div className="space-y-8 p-6">
|
||||
<ScrollArea className="h-full min-h-0 min-w-0 rounded-lg border">
|
||||
<div className="min-w-0 space-y-8 p-6">
|
||||
{activeSection === "appearance" && <AppearanceSettingsPage />}
|
||||
{activeSection === "memory" && <MemorySettingsPage />}
|
||||
{activeSection === "tools" && <ToolSettingsPage />}
|
||||
|
||||
@@ -3,6 +3,7 @@ export interface Agent {
|
||||
description: string;
|
||||
model: string | null;
|
||||
tool_groups: string[] | null;
|
||||
skills: string[] | null;
|
||||
soul?: string | null;
|
||||
}
|
||||
|
||||
@@ -11,6 +12,7 @@ export interface CreateAgentRequest {
|
||||
description?: string;
|
||||
model?: string | null;
|
||||
tool_groups?: string[] | null;
|
||||
skills?: string[] | null;
|
||||
soul?: string;
|
||||
}
|
||||
|
||||
@@ -18,5 +20,6 @@ export interface UpdateAgentRequest {
|
||||
description?: string | null;
|
||||
model?: string | null;
|
||||
tool_groups?: string[] | null;
|
||||
skills?: string[] | null;
|
||||
soul?: string | null;
|
||||
}
|
||||
|
||||
@@ -28,6 +28,15 @@ export const streamdownPluginsWithWordAnimation = {
|
||||
] as StreamdownProps["rehypePlugins"],
|
||||
};
|
||||
|
||||
// Plugins for reasoning/thinking content — derived from streamdownPlugins but without rehypeRaw,
|
||||
// to prevent LLM-hallucinated HTML tags (e.g. <simd>) from being rendered as DOM elements.
|
||||
export const reasoningPlugins = {
|
||||
remarkPlugins: streamdownPlugins.remarkPlugins,
|
||||
rehypePlugins: streamdownPlugins.rehypePlugins?.filter(
|
||||
(p) => p !== rehypeRaw,
|
||||
) as StreamdownProps["rehypePlugins"],
|
||||
};
|
||||
|
||||
// Plugins for human messages - no autolink to prevent URL bleeding into adjacent text
|
||||
export const humanMessagePlugins = {
|
||||
remarkPlugins: [
|
||||
|
||||
@@ -0,0 +1,28 @@
|
||||
import { createElement } from "react";
|
||||
import { renderToStaticMarkup } from "react-dom/server";
|
||||
import { expect, test, vi } from "vitest";
|
||||
|
||||
vi.mock("streamdown", () => ({
|
||||
Streamdown: ({ children }: { children: string }) =>
|
||||
createElement("div", null, children),
|
||||
}));
|
||||
|
||||
import {
|
||||
Reasoning,
|
||||
ReasoningContent,
|
||||
ReasoningTrigger,
|
||||
} from "@/components/ai-elements/reasoning";
|
||||
|
||||
test("ReasoningTrigger default message uses phrasing content", () => {
|
||||
const html = renderToStaticMarkup(
|
||||
createElement(
|
||||
Reasoning,
|
||||
{ isStreaming: false, defaultOpen: false },
|
||||
createElement(ReasoningTrigger, null),
|
||||
createElement(ReasoningContent, null, "test"),
|
||||
),
|
||||
);
|
||||
|
||||
expect(html).toContain("Thought for a few seconds");
|
||||
expect(html).not.toMatch(/<button\b[^>]*>[\s\S]*?<p\b/i);
|
||||
});
|
||||
@@ -0,0 +1,13 @@
|
||||
import rehypeRaw from "rehype-raw";
|
||||
import { expect, test } from "vitest";
|
||||
|
||||
import { reasoningPlugins, streamdownPlugins } from "@/core/streamdown/plugins";
|
||||
|
||||
test("streamdownPlugins includes rehypeRaw", () => {
|
||||
expect(streamdownPlugins.rehypePlugins).toContain(rehypeRaw);
|
||||
});
|
||||
|
||||
test("reasoningPlugins does not include rehypeRaw", () => {
|
||||
const flat = reasoningPlugins.rehypePlugins?.flat();
|
||||
expect(flat).not.toContain(rehypeRaw);
|
||||
});
|
||||
+1
-1
@@ -279,7 +279,7 @@ if ! $GATEWAY_MODE; then
|
||||
LANGGRAPH_ALLOW_BLOCKING_FLAG="--allow-blocking"
|
||||
fi
|
||||
run_service "LangGraph" \
|
||||
"cd backend && NO_COLOR=1 CLICOLOR=0 CLICOLOR_FORCE=0 PY_COLORS=0 TERM=dumb uv run langgraph dev --no-browser $LANGGRAPH_ALLOW_BLOCKING_FLAG --n-jobs-per-worker $LANGGRAPH_JOBS_PER_WORKER --server-log-level $LANGGRAPH_LOG_LEVEL $LANGGRAPH_EXTRA_FLAGS 2>&1 | perl -pe 's/\e\[[0-9;]*[[:alpha:]]//g' > ../logs/langgraph.log" \
|
||||
"cd backend && NO_COLOR=1 CLICOLOR=0 CLICOLOR_FORCE=0 PY_COLORS=0 TERM=dumb uv run langgraph dev --no-browser $LANGGRAPH_ALLOW_BLOCKING_FLAG --n-jobs-per-worker $LANGGRAPH_JOBS_PER_WORKER --server-log-level $LANGGRAPH_LOG_LEVEL $LANGGRAPH_EXTRA_FLAGS 2>&1 | LC_ALL=C LC_CTYPE=C LANG=C perl -pe 's/\e\[[0-9;]*[[:alpha:]]//g' > ../logs/langgraph.log" \
|
||||
2024 60
|
||||
else
|
||||
echo "⏩ Skipping LangGraph (Gateway mode — runtime embedded in Gateway)"
|
||||
|
||||
@@ -1,6 +1,12 @@
|
||||
---
|
||||
name: bootstrap
|
||||
description: Generate a personalized SOUL.md through a warm, adaptive onboarding conversation. Trigger when the user wants to create, set up, or initialize their AI partner's identity — e.g., "create my SOUL.md", "bootstrap my agent", "set up my AI partner", "define who you are", "let's do onboarding", "personalize this AI", "make you mine", or when a SOUL.md is missing. Also trigger for updates: "update my SOUL.md", "change my AI's personality", "tweak the soul".
|
||||
description: >-
|
||||
Generate a personalized SOUL.md through a warm, adaptive onboarding conversation.
|
||||
Trigger when the user wants to create, set up, or initialize their AI partner's
|
||||
identity — e.g., "create my SOUL.md", "bootstrap my agent", "set up my AI
|
||||
partner", "define who you are", "let's do onboarding", "personalize this AI",
|
||||
"make you mine", or when a SOUL.md is missing. Also trigger for updates:
|
||||
"update my SOUL.md", "change my AI's personality", "tweak the soul".
|
||||
---
|
||||
|
||||
# Bootstrap Soul
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: chart-visualization
|
||||
description: This skill should be used when the user wants to visualize data. It intelligently selects the most suitable chart type from 26 available options, extracts parameters based on detailed specifications, and generates a chart image using a JavaScript script.
|
||||
dependency:
|
||||
compatibility:
|
||||
nodejs: ">=18.0.0"
|
||||
---
|
||||
|
||||
|
||||
@@ -215,14 +215,14 @@ def action_inspect(con: duckdb.DuckDBPyConnection, table_map: dict[str, str]) ->
|
||||
non_null_sql = f'SELECT {", ".join(non_null_parts)} FROM "{table_name}"'
|
||||
try:
|
||||
non_null_counts = con.execute(non_null_sql).fetchone()
|
||||
output_parts.append(f"\nNon-null counts:")
|
||||
output_parts.append("\nNon-null counts:")
|
||||
for i, c in enumerate(col_names):
|
||||
output_parts.append(f" {c}: {non_null_counts[i]} / {row_count}")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Sample data (first 5 rows)
|
||||
output_parts.append(f"\nSample data (first 5 rows):")
|
||||
output_parts.append("\nSample data (first 5 rows):")
|
||||
try:
|
||||
sample = con.execute(f'SELECT * FROM "{table_name}" LIMIT 5').fetchdf()
|
||||
output_parts.append(sample.to_string(index=False))
|
||||
@@ -465,7 +465,7 @@ def action_summary(
|
||||
LIMIT 5
|
||||
""").fetchall()
|
||||
if top_vals:
|
||||
output_parts.append(f" top values:")
|
||||
output_parts.append(" top values:")
|
||||
for val, freq in top_vals:
|
||||
pct = (freq / row_count * 100) if row_count > 0 else 0
|
||||
output_parts.append(f" {val}: {freq} ({pct:.1f}%)")
|
||||
|
||||
@@ -447,8 +447,8 @@ def main() -> None:
|
||||
port = server.server_address[1]
|
||||
|
||||
url = f"http://localhost:{port}"
|
||||
print(f"\n Eval Viewer")
|
||||
print(f" ─────────────────────────────────")
|
||||
print("\n Eval Viewer")
|
||||
print(" ─────────────────────────────────")
|
||||
print(f" URL: {url}")
|
||||
print(f" Workspace: {workspace}")
|
||||
print(f" Feedback: {feedback_path}")
|
||||
@@ -456,7 +456,7 @@ def main() -> None:
|
||||
print(f" Previous: {args.previous_workspace} ({len(previous)} runs)")
|
||||
if benchmark_path:
|
||||
print(f" Benchmark: {benchmark_path}")
|
||||
print(f"\n Press Ctrl+C to stop.\n")
|
||||
print("\n Press Ctrl+C to stop.\n")
|
||||
|
||||
webbrowser.open(url)
|
||||
|
||||
|
||||
@@ -389,7 +389,7 @@ def main():
|
||||
configs = [k for k in run_summary if k != "delta"]
|
||||
delta = run_summary.get("delta", {})
|
||||
|
||||
print(f"\nSummary:")
|
||||
print("\nSummary:")
|
||||
for config in configs:
|
||||
pr = run_summary[config]["pass_rate"]["mean"]
|
||||
label = config.replace("_", " ").title()
|
||||
|
||||
@@ -4,7 +4,6 @@ Quick validation script for skills - minimal version
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user