refactor(config): eliminate global mutable state, wire DeerFlowContext into runtime

- Freeze all config models (AppConfig + 15 sub-configs) with frozen=True
- Purify from_file() — remove 9 load_*_from_dict() side-effect calls
- Replace mtime/reload/push/pop machinery with single ContextVar + init_app_config()
- Delete 10 sub-module globals and their getters/setters/loaders
- Migrate 50+ consumers from get_*_config() to AppConfig.current().xxx

- Expand DeerFlowContext: app_config + thread_id + agent_name (frozen dataclass)
- Wire into Gateway runtime (worker.py) and DeerFlowClient via context= parameter
- Remove sandbox_id from runtime.context — flows through ThreadState.sandbox only
- Middleware/tools access runtime.context directly via Runtime[DeerFlowContext] generic
- resolve_context() retained at server entry points for LangGraph Server fallback
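
The new lifecycle, as it appears across the diff, boils down to a frozen Pydantic model plus one ContextVar. A minimal sketch follows, assuming the classmethod names visible in the changed call sites (`AppConfig.from_file`, `AppConfig.init`, `AppConfig.current`); the fields and YAML handling here are illustrative, not the real schema:

```python
from contextvars import ContextVar

import yaml
from pydantic import BaseModel, ConfigDict

# Single process-wide holder for the active config (illustrative name).
_app_config_var: ContextVar["AppConfig | None"] = ContextVar("app_config", default=None)


class AppConfig(BaseModel):
    """Illustrative subset of the frozen application config."""

    model_config = ConfigDict(frozen=True, extra="allow")

    config_version: int = 0
    # sub-configs (models, memory, extensions, title, ...) omitted in this sketch

    @classmethod
    def from_file(cls, config_path: str | None = None) -> "AppConfig":
        # Pure: parse the YAML and return a new instance; no sub-module globals touched.
        with open(config_path or "config.yaml", encoding="utf-8") as f:
            return cls.model_validate(yaml.safe_load(f) or {})

    @classmethod
    def init(cls, config: "AppConfig") -> None:
        # Set once at process startup; call again with a fresh from_file() to swap configs.
        _app_config_var.set(config)

    @classmethod
    def current(cls) -> "AppConfig":
        config = _app_config_var.get()
        if config is None:
            raise LookupError("AppConfig.init() has not been called")
        return config
```

Consumers read sub-configs as attributes (`AppConfig.current().memory`, `.extensions`, `.title`, ...); the Gateway routers that rewrite `extensions_config.json` refresh the process-wide view with `AppConfig.init(AppConfig.from_file())`.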
greatmengqi
2026-04-13 23:49:31 +08:00
parent c4d273a68a
commit edf345cd72
111 changed files with 4848 additions and 4079 deletions
+3 -1
@@ -179,7 +179,9 @@ Setup: Copy `config.example.yaml` to `config.yaml` in the **project root** direc
**Config Versioning**: `config.example.yaml` has a `config_version` field. On startup, `AppConfig.from_file()` compares user version vs example version and emits a warning if outdated. Missing `config_version` = version 0. Run `make config-upgrade` to auto-merge missing fields. When changing the config schema, bump `config_version` in `config.example.yaml`. **Config Versioning**: `config.example.yaml` has a `config_version` field. On startup, `AppConfig.from_file()` compares user version vs example version and emits a warning if outdated. Missing `config_version` = version 0. Run `make config-upgrade` to auto-merge missing fields. When changing the config schema, bump `config_version` in `config.example.yaml`.
**Config Caching**: `get_app_config()` caches the parsed config, but automatically reloads it when the resolved config path changes or the file's mtime increases. This keeps Gateway and LangGraph reads aligned with `config.yaml` edits without requiring a manual process restart. **Config Lifecycle**: All config models are `frozen=True` (immutable after construction). `AppConfig.from_file()` is a pure function — no side effects on sub-module globals. `get_app_config()` is backed by a single `ContextVar`, set once via `init_app_config()` at process startup. To update config at runtime (e.g., Gateway API updates MCP/Skills), construct a new `AppConfig.from_file()` and call `init_app_config()` again. No mtime detection, no auto-reload.
**DeerFlowContext**: Per-invocation typed context for the agent execution path, injected via LangGraph `Runtime[DeerFlowContext]`. Holds `app_config: AppConfig`, `thread_id: str`, `agent_name: str | None`. Gateway runtime and `DeerFlowClient` construct full `DeerFlowContext` at invoke time; LangGraph Server path uses a fallback via `resolve_context()`. Middleware and tools access the typed context directly via `runtime.context` (`Runtime[DeerFlowContext]`); `resolve_context(runtime)` returns a typed `DeerFlowContext` regardless of entry point for code that must also run under LangGraph Server. Mutable runtime state (`sandbox_id`) flows through `ThreadState.sandbox`, not context.
Configuration priority: Configuration priority:
1. Explicit `config_path` argument 1. Explicit `config_path` argument
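
The `DeerFlowContext` paragraph above corresponds to roughly the following shape — a sketch inferred from the fields and access patterns in this diff (`runtime.context.thread_id`, `runtime.context.app_config.memory`); the example hook is simplified and not the actual middleware code:

```python
from dataclasses import dataclass

from langgraph.runtime import Runtime

from deerflow.config.app_config import AppConfig


@dataclass(frozen=True)
class DeerFlowContext:
    """Per-invocation typed context injected via Runtime[DeerFlowContext]."""

    app_config: AppConfig
    thread_id: str
    agent_name: str | None = None


def after_agent_example(runtime: Runtime[DeerFlowContext]) -> str | None:
    # Middleware and tools read the typed context straight off the runtime.
    if not runtime.context.app_config.memory.enabled:
        return None
    return runtime.context.thread_id  # no get_config() fallback needed on this path
```

On the invoke side, the Gateway worker and `DeerFlowClient` construct the context up front, mirroring the client change in this commit: `context = DeerFlowContext(app_config=self._app_config, thread_id=thread_id, agent_name=self._agent_name)`, passed to the agent via the `context=` parameter.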
+2 -2
@@ -67,9 +67,9 @@ class ChannelService:
@classmethod @classmethod
def from_app_config(cls) -> ChannelService: def from_app_config(cls) -> ChannelService:
"""Create a ChannelService from the application config.""" """Create a ChannelService from the application config."""
from deerflow.config.app_config import get_app_config from deerflow.config.app_config import AppConfig
config = get_app_config() config = AppConfig.current()
channels_config = {} channels_config = {}
# extra fields are allowed by AppConfig (extra="allow") # extra fields are allowed by AppConfig (extra="allow")
extra = config.model_extra or {} extra = config.model_extra or {}
+2 -2
@@ -21,7 +21,7 @@ from app.gateway.routers import (
threads, threads,
uploads, uploads,
) )
from deerflow.config.app_config import get_app_config from deerflow.config.app_config import AppConfig
# Configure logging # Configure logging
logging.basicConfig( logging.basicConfig(
@@ -39,7 +39,7 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
# Load config and check necessary environment variables at startup # Load config and check necessary environment variables at startup
try: try:
get_app_config() AppConfig.current()
logger.info("Configuration loaded successfully") logger.info("Configuration loaded successfully")
except Exception as e: except Exception as e:
error_msg = f"Failed to load configuration during gateway startup: {e}" error_msg = f"Failed to load configuration during gateway startup: {e}"
+9 -7
@@ -6,7 +6,8 @@ from typing import Literal
from fastapi import APIRouter, HTTPException from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from deerflow.config.extensions_config import ExtensionsConfig, get_extensions_config, reload_extensions_config from deerflow.config.app_config import AppConfig
from deerflow.config.extensions_config import ExtensionsConfig
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api", tags=["mcp"]) router = APIRouter(prefix="/api", tags=["mcp"])
@@ -90,9 +91,9 @@ async def get_mcp_configuration() -> McpConfigResponse:
} }
``` ```
""" """
config = get_extensions_config() ext = AppConfig.current().extensions
return McpConfigResponse(mcp_servers={name: McpServerConfigResponse(**server.model_dump()) for name, server in config.mcp_servers.items()}) return McpConfigResponse(mcp_servers={name: McpServerConfigResponse(**server.model_dump()) for name, server in ext.mcp_servers.items()})
@router.put( @router.put(
@@ -143,12 +144,12 @@ async def update_mcp_configuration(request: McpConfigUpdateRequest) -> McpConfig
logger.info(f"No existing extensions config found. Creating new config at: {config_path}") logger.info(f"No existing extensions config found. Creating new config at: {config_path}")
# Load current config to preserve skills configuration # Load current config to preserve skills configuration
current_config = get_extensions_config() current_ext = AppConfig.current().extensions
# Convert request to dict format for JSON serialization # Convert request to dict format for JSON serialization
config_data = { config_data = {
"mcpServers": {name: server.model_dump() for name, server in request.mcp_servers.items()}, "mcpServers": {name: server.model_dump() for name, server in request.mcp_servers.items()},
"skills": {name: {"enabled": skill.enabled} for name, skill in current_config.skills.items()}, "skills": {name: {"enabled": skill.enabled} for name, skill in current_ext.skills.items()},
} }
# Write the configuration to file # Write the configuration to file
@@ -161,8 +162,9 @@ async def update_mcp_configuration(request: McpConfigUpdateRequest) -> McpConfig
# will detect config file changes via mtime and reinitialize MCP tools automatically # will detect config file changes via mtime and reinitialize MCP tools automatically
# Reload the configuration and update the global cache # Reload the configuration and update the global cache
reloaded_config = reload_extensions_config() AppConfig.init(AppConfig.from_file())
return McpConfigResponse(mcp_servers={name: McpServerConfigResponse(**server.model_dump()) for name, server in reloaded_config.mcp_servers.items()}) reloaded_ext = AppConfig.current().extensions
return McpConfigResponse(mcp_servers={name: McpServerConfigResponse(**server.model_dump()) for name, server in reloaded_ext.mcp_servers.items()})
except Exception as e: except Exception as e:
logger.error(f"Failed to update MCP configuration: {e}", exc_info=True) logger.error(f"Failed to update MCP configuration: {e}", exc_info=True)
+3 -3
@@ -12,7 +12,7 @@ from deerflow.agents.memory.updater import (
reload_memory_data, reload_memory_data,
update_memory_fact, update_memory_fact,
) )
from deerflow.config.memory_config import get_memory_config from deerflow.config.app_config import AppConfig
router = APIRouter(prefix="/api", tags=["memory"]) router = APIRouter(prefix="/api", tags=["memory"])
@@ -311,7 +311,7 @@ async def get_memory_config_endpoint() -> MemoryConfigResponse:
} }
``` ```
""" """
config = get_memory_config() config = AppConfig.current().memory
return MemoryConfigResponse( return MemoryConfigResponse(
enabled=config.enabled, enabled=config.enabled,
storage_path=config.storage_path, storage_path=config.storage_path,
@@ -336,7 +336,7 @@ async def get_memory_status() -> MemoryStatusResponse:
Returns: Returns:
Combined memory configuration and current data. Combined memory configuration and current data.
""" """
config = get_memory_config() config = AppConfig.current().memory
memory_data = get_memory_data() memory_data = get_memory_data()
return MemoryStatusResponse( return MemoryStatusResponse(
+3 -3
@@ -1,7 +1,7 @@
from fastapi import APIRouter, HTTPException from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
router = APIRouter(prefix="/api", tags=["models"]) router = APIRouter(prefix="/api", tags=["models"])
@@ -58,7 +58,7 @@ async def list_models() -> ModelsListResponse:
} }
``` ```
""" """
config = get_app_config() config = AppConfig.current()
models = [ models = [
ModelResponse( ModelResponse(
name=model.name, name=model.name,
@@ -101,7 +101,7 @@ async def get_model(model_name: str) -> ModelResponse:
} }
``` ```
""" """
config = get_app_config() config = AppConfig.current()
model = config.get_model_config(model_name) model = config.get_model_config(model_name)
if model is None: if model is None:
raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found") raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")
+7 -6
@@ -8,7 +8,8 @@ from pydantic import BaseModel, Field
from app.gateway.path_utils import resolve_thread_virtual_path from app.gateway.path_utils import resolve_thread_virtual_path
from deerflow.agents.lead_agent.prompt import refresh_skills_system_prompt_cache_async from deerflow.agents.lead_agent.prompt import refresh_skills_system_prompt_cache_async
from deerflow.config.extensions_config import ExtensionsConfig, SkillStateConfig, get_extensions_config, reload_extensions_config from deerflow.config.app_config import AppConfig
from deerflow.config.extensions_config import ExtensionsConfig, SkillStateConfig
from deerflow.skills import Skill, load_skills from deerflow.skills import Skill, load_skills
from deerflow.skills.installer import SkillAlreadyExistsError, install_skill_from_archive from deerflow.skills.installer import SkillAlreadyExistsError, install_skill_from_archive
from deerflow.skills.manager import ( from deerflow.skills.manager import (
@@ -325,19 +326,19 @@ async def update_skill(skill_name: str, request: SkillUpdateRequest) -> SkillRes
config_path = Path.cwd().parent / "extensions_config.json" config_path = Path.cwd().parent / "extensions_config.json"
logger.info(f"No existing extensions config found. Creating new config at: {config_path}") logger.info(f"No existing extensions config found. Creating new config at: {config_path}")
extensions_config = get_extensions_config() ext = AppConfig.current().extensions
extensions_config.skills[skill_name] = SkillStateConfig(enabled=request.enabled) ext.skills[skill_name] = SkillStateConfig(enabled=request.enabled)
config_data = { config_data = {
"mcpServers": {name: server.model_dump() for name, server in extensions_config.mcp_servers.items()}, "mcpServers": {name: server.model_dump() for name, server in ext.mcp_servers.items()},
"skills": {name: {"enabled": skill_config.enabled} for name, skill_config in extensions_config.skills.items()}, "skills": {name: {"enabled": skill_config.enabled} for name, skill_config in ext.skills.items()},
} }
with open(config_path, "w", encoding="utf-8") as f: with open(config_path, "w", encoding="utf-8") as f:
json.dump(config_data, f, indent=2) json.dump(config_data, f, indent=2)
logger.info(f"Skills configuration updated and saved to: {config_path}") logger.info(f"Skills configuration updated and saved to: {config_path}")
reload_extensions_config() AppConfig.init(AppConfig.from_file())
await refresh_skills_system_prompt_cache_async() await refresh_skills_system_prompt_cache_async()
skills = load_skills(enabled_only=False) skills = load_skills(enabled_only=False)
@@ -29,7 +29,7 @@ from deerflow.agents.checkpointer.provider import (
POSTGRES_INSTALL, POSTGRES_INSTALL,
SQLITE_INSTALL, SQLITE_INSTALL,
) )
from deerflow.config.app_config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.runtime.store._sqlite_utils import ensure_sqlite_parent_dir, resolve_sqlite_conn_str from deerflow.runtime.store._sqlite_utils import ensure_sqlite_parent_dir, resolve_sqlite_conn_str
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -94,7 +94,7 @@ async def make_checkpointer() -> AsyncIterator[Checkpointer]:
Yields an ``InMemorySaver`` when no checkpointer is configured in *config.yaml*. Yields an ``InMemorySaver`` when no checkpointer is configured in *config.yaml*.
""" """
config = get_app_config() config = AppConfig.current()
if config.checkpointer is None: if config.checkpointer is None:
from langgraph.checkpoint.memory import InMemorySaver from langgraph.checkpoint.memory import InMemorySaver
@@ -25,7 +25,7 @@ from collections.abc import Iterator
from langgraph.types import Checkpointer from langgraph.types import Checkpointer
from deerflow.config.app_config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.config.checkpointer_config import CheckpointerConfig from deerflow.config.checkpointer_config import CheckpointerConfig
from deerflow.runtime.store._sqlite_utils import resolve_sqlite_conn_str from deerflow.runtime.store._sqlite_utils import resolve_sqlite_conn_str
@@ -113,25 +113,10 @@ def get_checkpointer() -> Checkpointer:
if _checkpointer is not None: if _checkpointer is not None:
return _checkpointer return _checkpointer
# Ensure app config is loaded before checking checkpointer config try:
# This prevents returning InMemorySaver when config.yaml actually has a checkpointer section config = AppConfig.current().checkpointer
# but hasn't been loaded yet except (LookupError, FileNotFoundError):
from deerflow.config.app_config import _app_config config = None
from deerflow.config.checkpointer_config import get_checkpointer_config
config = get_checkpointer_config()
if config is None and _app_config is None:
# Only load app config lazily when neither the app config nor an explicit
# checkpointer config has been initialized yet. This keeps tests that
# intentionally set the global checkpointer config isolated from any
# ambient config.yaml on disk.
try:
get_app_config()
except FileNotFoundError:
# In test environments without config.yaml, this is expected.
pass
config = get_checkpointer_config()
if config is None: if config is None:
from langgraph.checkpoint.memory import InMemorySaver from langgraph.checkpoint.memory import InMemorySaver
@@ -180,7 +165,7 @@ def checkpointer_context() -> Iterator[Checkpointer]:
Yields an ``InMemorySaver`` when no checkpointer is configured in *config.yaml*. Yields an ``InMemorySaver`` when no checkpointer is configured in *config.yaml*.
""" """
config = get_app_config() config = AppConfig.current()
if config.checkpointer is None: if config.checkpointer is None:
from langgraph.checkpoint.memory import InMemorySaver from langgraph.checkpoint.memory import InMemorySaver
@@ -3,6 +3,7 @@ import logging
from langchain.agents import create_agent from langchain.agents import create_agent
from langchain.agents.middleware import AgentMiddleware, SummarizationMiddleware from langchain.agents.middleware import AgentMiddleware, SummarizationMiddleware
from langchain_core.runnables import RunnableConfig from langchain_core.runnables import RunnableConfig
from langgraph.graph.state import CompiledStateGraph
from deerflow.agents.lead_agent.prompt import apply_prompt_template from deerflow.agents.lead_agent.prompt import apply_prompt_template
from deerflow.agents.middlewares.clarification_middleware import ClarificationMiddleware from deerflow.agents.middlewares.clarification_middleware import ClarificationMiddleware
@@ -16,8 +17,8 @@ from deerflow.agents.middlewares.tool_error_handling_middleware import build_lea
from deerflow.agents.middlewares.view_image_middleware import ViewImageMiddleware from deerflow.agents.middlewares.view_image_middleware import ViewImageMiddleware
from deerflow.agents.thread_state import ThreadState from deerflow.agents.thread_state import ThreadState
from deerflow.config.agents_config import load_agent_config from deerflow.config.agents_config import load_agent_config
from deerflow.config.app_config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.config.summarization_config import get_summarization_config from deerflow.config.deer_flow_context import DeerFlowContext
from deerflow.models import create_chat_model from deerflow.models import create_chat_model
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -25,7 +26,7 @@ logger = logging.getLogger(__name__)
def _resolve_model_name(requested_model_name: str | None = None) -> str: def _resolve_model_name(requested_model_name: str | None = None) -> str:
"""Resolve a runtime model name safely, falling back to default if invalid. Returns None if no models are configured.""" """Resolve a runtime model name safely, falling back to default if invalid. Returns None if no models are configured."""
app_config = get_app_config() app_config = AppConfig.current()
default_model_name = app_config.models[0].name if app_config.models else None default_model_name = app_config.models[0].name if app_config.models else None
if default_model_name is None: if default_model_name is None:
raise ValueError("No chat models are configured. Please configure at least one model in config.yaml.") raise ValueError("No chat models are configured. Please configure at least one model in config.yaml.")
@@ -40,7 +41,7 @@ def _resolve_model_name(requested_model_name: str | None = None) -> str:
def _create_summarization_middleware() -> SummarizationMiddleware | None: def _create_summarization_middleware() -> SummarizationMiddleware | None:
"""Create and configure the summarization middleware from config.""" """Create and configure the summarization middleware from config."""
config = get_summarization_config() config = AppConfig.current().summarization
if not config.enabled: if not config.enabled:
return None return None
@@ -230,7 +231,7 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
middlewares.append(todo_list_middleware) middlewares.append(todo_list_middleware)
# Add TokenUsageMiddleware when token_usage tracking is enabled # Add TokenUsageMiddleware when token_usage tracking is enabled
if get_app_config().token_usage.enabled: if AppConfig.current().token_usage.enabled:
middlewares.append(TokenUsageMiddleware()) middlewares.append(TokenUsageMiddleware())
# Add TitleMiddleware # Add TitleMiddleware
@@ -241,7 +242,7 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
# Add ViewImageMiddleware only if the current model supports vision. # Add ViewImageMiddleware only if the current model supports vision.
# Use the resolved runtime model_name from make_lead_agent to avoid stale config values. # Use the resolved runtime model_name from make_lead_agent to avoid stale config values.
app_config = get_app_config() app_config = AppConfig.current()
model_config = app_config.get_model_config(model_name) if model_name else None model_config = app_config.get_model_config(model_name) if model_name else None
if model_config is not None and model_config.supports_vision: if model_config is not None and model_config.supports_vision:
middlewares.append(ViewImageMiddleware()) middlewares.append(ViewImageMiddleware())
@@ -270,7 +271,7 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
return middlewares return middlewares
def make_lead_agent(config: RunnableConfig): def make_lead_agent(config: RunnableConfig) -> CompiledStateGraph:
# Lazy import to avoid circular dependency # Lazy import to avoid circular dependency
from deerflow.tools import get_available_tools from deerflow.tools import get_available_tools
from deerflow.tools.builtins import setup_agent from deerflow.tools.builtins import setup_agent
@@ -293,7 +294,7 @@ def make_lead_agent(config: RunnableConfig):
# Final model name resolution: request → agent config → global default, with fallback for unknown names # Final model name resolution: request → agent config → global default, with fallback for unknown names
model_name = _resolve_model_name(requested_model_name or agent_model_name) model_name = _resolve_model_name(requested_model_name or agent_model_name)
app_config = get_app_config() app_config = AppConfig.current()
model_config = app_config.get_model_config(model_name) model_config = app_config.get_model_config(model_name)
if model_config is None: if model_config is None:
@@ -336,6 +337,7 @@ def make_lead_agent(config: RunnableConfig):
middleware=_build_middlewares(config, model_name=model_name), middleware=_build_middlewares(config, model_name=model_name),
system_prompt=apply_prompt_template(subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents, available_skills=set(["bootstrap"])), system_prompt=apply_prompt_template(subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents, available_skills=set(["bootstrap"])),
state_schema=ThreadState, state_schema=ThreadState,
context_schema=DeerFlowContext,
) )
# Default lead agent (unchanged behavior) # Default lead agent (unchanged behavior)
@@ -347,4 +349,5 @@ def make_lead_agent(config: RunnableConfig):
subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents, agent_name=agent_name, available_skills=set(agent_config.skills) if agent_config and agent_config.skills is not None else None subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents, agent_name=agent_name, available_skills=set(agent_config.skills) if agent_config and agent_config.skills is not None else None
), ),
state_schema=ThreadState, state_schema=ThreadState,
context_schema=DeerFlowContext,
) )
@@ -5,6 +5,7 @@ from datetime import datetime
from functools import lru_cache from functools import lru_cache
from deerflow.config.agents_config import load_agent_soul from deerflow.config.agents_config import load_agent_soul
from deerflow.config.app_config import AppConfig
from deerflow.skills import load_skills from deerflow.skills import load_skills
from deerflow.skills.types import Skill from deerflow.skills.types import Skill
from deerflow.subagents import get_available_subagent_names from deerflow.subagents import get_available_subagent_names
@@ -518,9 +519,8 @@ def _get_memory_context(agent_name: str | None = None) -> str:
""" """
try: try:
from deerflow.agents.memory import format_memory_for_injection, get_memory_data from deerflow.agents.memory import format_memory_for_injection, get_memory_data
from deerflow.config.memory_config import get_memory_config
config = get_memory_config() config = AppConfig.current().memory
if not config.enabled or not config.injection_enabled: if not config.enabled or not config.injection_enabled:
return "" return ""
@@ -576,9 +576,7 @@ def get_skills_prompt_section(available_skills: set[str] | None = None) -> str:
skills = _get_enabled_skills() skills = _get_enabled_skills()
try: try:
from deerflow.config import get_app_config config = AppConfig.current()
config = get_app_config()
container_base_path = config.skills.container_path container_base_path = config.skills.container_path
skill_evolution_enabled = config.skill_evolution.enabled skill_evolution_enabled = config.skill_evolution.enabled
except Exception: except Exception:
@@ -617,9 +615,7 @@ def get_deferred_tools_prompt_section() -> str:
from deerflow.tools.builtins.tool_search import get_deferred_registry from deerflow.tools.builtins.tool_search import get_deferred_registry
try: try:
from deerflow.config import get_app_config if not AppConfig.current().tool_search.enabled:
if not get_app_config().tool_search.enabled:
return "" return ""
except Exception: except Exception:
return "" return ""
@@ -635,9 +631,7 @@ def get_deferred_tools_prompt_section() -> str:
def _build_acp_section() -> str: def _build_acp_section() -> str:
"""Build the ACP agent prompt section, only if ACP agents are configured.""" """Build the ACP agent prompt section, only if ACP agents are configured."""
try: try:
from deerflow.config.acp_config import get_acp_agents agents = AppConfig.current().acp_agents
agents = get_acp_agents()
if not agents: if not agents:
return "" return ""
except Exception: except Exception:
@@ -655,9 +649,7 @@ def _build_acp_section() -> str:
def _build_custom_mounts_section() -> str: def _build_custom_mounts_section() -> str:
"""Build a prompt section for explicitly configured sandbox mounts.""" """Build a prompt section for explicitly configured sandbox mounts."""
try: try:
from deerflow.config import get_app_config mounts = AppConfig.current().sandbox.mounts or []
mounts = get_app_config().sandbox.mounts or []
except Exception: except Exception:
logger.exception("Failed to load configured sandbox mounts for the lead-agent prompt") logger.exception("Failed to load configured sandbox mounts for the lead-agent prompt")
return "" return ""
@@ -7,7 +7,7 @@ from dataclasses import dataclass, field
from datetime import UTC, datetime from datetime import UTC, datetime
from typing import Any from typing import Any
from deerflow.config.memory_config import get_memory_config from deerflow.config.app_config import AppConfig
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -56,7 +56,7 @@ class MemoryUpdateQueue:
correction_detected: Whether recent turns include an explicit correction signal. correction_detected: Whether recent turns include an explicit correction signal.
reinforcement_detected: Whether recent turns include a positive reinforcement signal. reinforcement_detected: Whether recent turns include a positive reinforcement signal.
""" """
config = get_memory_config() config = AppConfig.current().memory
if not config.enabled: if not config.enabled:
return return
@@ -87,7 +87,7 @@ class MemoryUpdateQueue:
def _reset_timer(self) -> None: def _reset_timer(self) -> None:
"""Reset the debounce timer.""" """Reset the debounce timer."""
config = get_memory_config() config = AppConfig.current().memory
# Cancel existing timer if any # Cancel existing timer if any
if self._timer is not None: if self._timer is not None:
@@ -9,7 +9,7 @@ from pathlib import Path
from typing import Any from typing import Any
from deerflow.config.agents_config import AGENT_NAME_PATTERN from deerflow.config.agents_config import AGENT_NAME_PATTERN
from deerflow.config.memory_config import get_memory_config from deerflow.config.app_config import AppConfig
from deerflow.config.paths import get_paths from deerflow.config.paths import get_paths
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -84,7 +84,7 @@ class FileMemoryStorage(MemoryStorage):
self._validate_agent_name(agent_name) self._validate_agent_name(agent_name)
return get_paths().agent_memory_file(agent_name) return get_paths().agent_memory_file(agent_name)
config = get_memory_config() config = AppConfig.current().memory
if config.storage_path: if config.storage_path:
p = Path(config.storage_path) p = Path(config.storage_path)
return p if p.is_absolute() else get_paths().base_dir / p return p if p.is_absolute() else get_paths().base_dir / p
@@ -177,7 +177,7 @@ def get_memory_storage() -> MemoryStorage:
if _storage_instance is not None: if _storage_instance is not None:
return _storage_instance return _storage_instance
config = get_memory_config() config = AppConfig.current().memory
storage_class_path = config.storage_class storage_class_path = config.storage_class
try: try:
@@ -16,7 +16,7 @@ from deerflow.agents.memory.storage import (
get_memory_storage, get_memory_storage,
utc_now_iso_z, utc_now_iso_z,
) )
from deerflow.config.memory_config import get_memory_config from deerflow.config.app_config import AppConfig
from deerflow.models import create_chat_model from deerflow.models import create_chat_model
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -265,7 +265,7 @@ class MemoryUpdater:
def _get_model(self): def _get_model(self):
"""Get the model for memory updates.""" """Get the model for memory updates."""
config = get_memory_config() config = AppConfig.current().memory
model_name = self._model_name or config.model_name model_name = self._model_name or config.model_name
return create_chat_model(name=model_name, thinking_enabled=False) return create_chat_model(name=model_name, thinking_enabled=False)
@@ -289,7 +289,7 @@ class MemoryUpdater:
Returns: Returns:
True if update was successful, False otherwise. True if update was successful, False otherwise.
""" """
config = get_memory_config() config = AppConfig.current().memory
if not config.enabled: if not config.enabled:
return False return False
@@ -378,7 +378,7 @@ class MemoryUpdater:
Returns: Returns:
Updated memory data. Updated memory data.
""" """
config = get_memory_config() config = AppConfig.current().memory
now = utc_now_iso_z() now = utc_now_iso_z()
# Update user sections # Update user sections
@@ -24,6 +24,8 @@ from langchain.agents.middleware import AgentMiddleware
from langchain_core.messages import HumanMessage from langchain_core.messages import HumanMessage
from langgraph.runtime import Runtime from langgraph.runtime import Runtime
from deerflow.config.deer_flow_context import DeerFlowContext
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Defaults — can be overridden via constructor # Defaults — can be overridden via constructor
@@ -180,12 +182,9 @@ class LoopDetectionMiddleware(AgentMiddleware[AgentState]):
self._tool_freq: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int)) self._tool_freq: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
self._tool_freq_warned: dict[str, set[str]] = defaultdict(set) self._tool_freq_warned: dict[str, set[str]] = defaultdict(set)
def _get_thread_id(self, runtime: Runtime) -> str: def _get_thread_id(self, runtime: Runtime[DeerFlowContext]) -> str:
"""Extract thread_id from runtime context for per-thread tracking.""" """Extract thread_id from runtime context for per-thread tracking."""
thread_id = runtime.context.get("thread_id") if runtime.context else None return runtime.context.thread_id or "default"
if thread_id:
return thread_id
return "default"
def _evict_if_needed(self) -> None: def _evict_if_needed(self) -> None:
"""Evict least recently used threads if over the limit. """Evict least recently used threads if over the limit.
@@ -350,11 +349,11 @@ class LoopDetectionMiddleware(AgentMiddleware[AgentState]):
return None return None
@override @override
def after_model(self, state: AgentState, runtime: Runtime) -> dict | None: def after_model(self, state: AgentState, runtime: Runtime[DeerFlowContext]) -> dict | None:
return self._apply(state, runtime) return self._apply(state, runtime)
@override @override
async def aafter_model(self, state: AgentState, runtime: Runtime) -> dict | None: async def aafter_model(self, state: AgentState, runtime: Runtime[DeerFlowContext]) -> dict | None:
return self._apply(state, runtime) return self._apply(state, runtime)
def reset(self, thread_id: str | None = None) -> None: def reset(self, thread_id: str | None = None) -> None:
@@ -6,11 +6,10 @@ from typing import Any, override
from langchain.agents import AgentState from langchain.agents import AgentState
from langchain.agents.middleware import AgentMiddleware from langchain.agents.middleware import AgentMiddleware
from langgraph.config import get_config
from langgraph.runtime import Runtime from langgraph.runtime import Runtime
from deerflow.agents.memory.queue import get_memory_queue from deerflow.agents.memory.queue import get_memory_queue
from deerflow.config.memory_config import get_memory_config from deerflow.config.deer_flow_context import DeerFlowContext
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -193,7 +192,7 @@ class MemoryMiddleware(AgentMiddleware[MemoryMiddlewareState]):
self._agent_name = agent_name self._agent_name = agent_name
@override @override
def after_agent(self, state: MemoryMiddlewareState, runtime: Runtime) -> dict | None: def after_agent(self, state: MemoryMiddlewareState, runtime: Runtime[DeerFlowContext]) -> dict | None:
"""Queue conversation for memory update after agent completes. """Queue conversation for memory update after agent completes.
Args: Args:
@@ -203,15 +202,11 @@ class MemoryMiddleware(AgentMiddleware[MemoryMiddlewareState]):
Returns: Returns:
None (no state changes needed from this middleware). None (no state changes needed from this middleware).
""" """
config = get_memory_config() memory_config = runtime.context.app_config.memory
if not config.enabled: if not memory_config.enabled:
return None return None
# Get thread ID from runtime context first, then fall back to LangGraph's configurable metadata thread_id = runtime.context.thread_id
thread_id = runtime.context.get("thread_id") if runtime.context else None
if thread_id is None:
config_data = get_config()
thread_id = config_data.get("configurable", {}).get("thread_id")
if not thread_id: if not thread_id:
logger.debug("No thread_id in context, skipping memory update") logger.debug("No thread_id in context, skipping memory update")
return None return None
@@ -3,10 +3,10 @@ from typing import NotRequired, override
from langchain.agents import AgentState from langchain.agents import AgentState
from langchain.agents.middleware import AgentMiddleware from langchain.agents.middleware import AgentMiddleware
from langgraph.config import get_config
from langgraph.runtime import Runtime from langgraph.runtime import Runtime
from deerflow.agents.thread_state import ThreadDataState from deerflow.agents.thread_state import ThreadDataState
from deerflow.config.deer_flow_context import DeerFlowContext
from deerflow.config.paths import Paths, get_paths from deerflow.config.paths import Paths, get_paths
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -74,14 +74,10 @@ class ThreadDataMiddleware(AgentMiddleware[ThreadDataMiddlewareState]):
return self._get_thread_paths(thread_id) return self._get_thread_paths(thread_id)
@override @override
def before_agent(self, state: ThreadDataMiddlewareState, runtime: Runtime) -> dict | None: def before_agent(self, state: ThreadDataMiddlewareState, runtime: Runtime[DeerFlowContext]) -> dict | None:
context = runtime.context or {} thread_id = runtime.context.thread_id
thread_id = context.get("thread_id")
if thread_id is None:
config = get_config()
thread_id = config.get("configurable", {}).get("thread_id")
if thread_id is None: if not thread_id:
raise ValueError("Thread ID is required in runtime context or config.configurable") raise ValueError("Thread ID is required in runtime context or config.configurable")
if self._lazy_init: if self._lazy_init:
@@ -7,7 +7,7 @@ from langchain.agents import AgentState
from langchain.agents.middleware import AgentMiddleware from langchain.agents.middleware import AgentMiddleware
from langgraph.runtime import Runtime from langgraph.runtime import Runtime
from deerflow.config.title_config import get_title_config from deerflow.config.app_config import AppConfig
from deerflow.models import create_chat_model from deerflow.models import create_chat_model
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -45,7 +45,7 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
def _should_generate_title(self, state: TitleMiddlewareState) -> bool: def _should_generate_title(self, state: TitleMiddlewareState) -> bool:
"""Check if we should generate a title for this thread.""" """Check if we should generate a title for this thread."""
config = get_title_config() config = AppConfig.current().title
if not config.enabled: if not config.enabled:
return False return False
@@ -70,7 +70,7 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
Returns (prompt_string, user_msg) so callers can use user_msg as fallback. Returns (prompt_string, user_msg) so callers can use user_msg as fallback.
""" """
config = get_title_config() config = AppConfig.current().title
messages = state.get("messages", []) messages = state.get("messages", [])
user_msg_content = next((m.content for m in messages if m.type == "human"), "") user_msg_content = next((m.content for m in messages if m.type == "human"), "")
@@ -88,13 +88,13 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
def _parse_title(self, content: object) -> str: def _parse_title(self, content: object) -> str:
"""Normalize model output into a clean title string.""" """Normalize model output into a clean title string."""
config = get_title_config() config = AppConfig.current().title
title_content = self._normalize_content(content) title_content = self._normalize_content(content)
title = title_content.strip().strip('"').strip("'") title = title_content.strip().strip('"').strip("'")
return title[: config.max_chars] if len(title) > config.max_chars else title return title[: config.max_chars] if len(title) > config.max_chars else title
def _fallback_title(self, user_msg: str) -> str: def _fallback_title(self, user_msg: str) -> str:
config = get_title_config() config = AppConfig.current().title
fallback_chars = min(config.max_chars, 50) fallback_chars = min(config.max_chars, 50)
if len(user_msg) > fallback_chars: if len(user_msg) > fallback_chars:
return user_msg[:fallback_chars].rstrip() + "..." return user_msg[:fallback_chars].rstrip() + "..."
@@ -113,7 +113,7 @@ class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
if not self._should_generate_title(state): if not self._should_generate_title(state):
return None return None
config = get_title_config() config = AppConfig.current().title
prompt, user_msg = self._build_title_prompt(state) prompt, user_msg = self._build_title_prompt(state)
try: try:
@@ -94,9 +94,9 @@ def _build_runtime_middlewares(
middlewares.append(LLMErrorHandlingMiddleware()) middlewares.append(LLMErrorHandlingMiddleware())
# Guardrail middleware (if configured) # Guardrail middleware (if configured)
from deerflow.config.guardrails_config import get_guardrails_config from deerflow.config.app_config import AppConfig
guardrails_config = get_guardrails_config() guardrails_config = AppConfig.current().guardrails
if guardrails_config.enabled and guardrails_config.provider: if guardrails_config.enabled and guardrails_config.provider:
import inspect import inspect
@@ -9,6 +9,7 @@ from langchain.agents.middleware import AgentMiddleware
from langchain_core.messages import HumanMessage from langchain_core.messages import HumanMessage
from langgraph.runtime import Runtime from langgraph.runtime import Runtime
from deerflow.config.deer_flow_context import DeerFlowContext
from deerflow.config.paths import Paths, get_paths from deerflow.config.paths import Paths, get_paths
from deerflow.utils.file_conversion import extract_outline from deerflow.utils.file_conversion import extract_outline
@@ -184,7 +185,7 @@ class UploadsMiddleware(AgentMiddleware[UploadsMiddlewareState]):
return files if files else None return files if files else None
@override @override
def before_agent(self, state: UploadsMiddlewareState, runtime: Runtime) -> dict | None: def before_agent(self, state: UploadsMiddlewareState, runtime: Runtime[DeerFlowContext]) -> dict | None:
"""Inject uploaded files information before agent execution. """Inject uploaded files information before agent execution.
New files come from the current message's additional_kwargs.files. New files come from the current message's additional_kwargs.files.
@@ -213,14 +214,7 @@ class UploadsMiddleware(AgentMiddleware[UploadsMiddlewareState]):
return None return None
# Resolve uploads directory for existence checks # Resolve uploads directory for existence checks
thread_id = (runtime.context or {}).get("thread_id") thread_id = runtime.context.thread_id
if thread_id is None:
try:
from langgraph.config import get_config
thread_id = get_config().get("configurable", {}).get("thread_id")
except RuntimeError:
pass # get_config() raises outside a runnable context (e.g. unit tests)
uploads_dir = self._paths.sandbox_uploads_dir(thread_id) if thread_id else None uploads_dir = self._paths.sandbox_uploads_dir(thread_id) if thread_id else None
# Get newly uploaded files from the current message's additional_kwargs.files # Get newly uploaded files from the current message's additional_kwargs.files
+18 -20
@@ -36,8 +36,9 @@ from deerflow.agents.lead_agent.agent import _build_middlewares
from deerflow.agents.lead_agent.prompt import apply_prompt_template from deerflow.agents.lead_agent.prompt import apply_prompt_template
from deerflow.agents.thread_state import ThreadState from deerflow.agents.thread_state import ThreadState
from deerflow.config.agents_config import AGENT_NAME_PATTERN from deerflow.config.agents_config import AGENT_NAME_PATTERN
from deerflow.config.app_config import get_app_config, reload_app_config from deerflow.config.app_config import AppConfig
from deerflow.config.extensions_config import ExtensionsConfig, SkillStateConfig, get_extensions_config, reload_extensions_config from deerflow.config.deer_flow_context import DeerFlowContext
from deerflow.config.extensions_config import ExtensionsConfig, SkillStateConfig
from deerflow.config.paths import get_paths from deerflow.config.paths import get_paths
from deerflow.models import create_chat_model from deerflow.models import create_chat_model
from deerflow.skills.installer import install_skill_from_archive from deerflow.skills.installer import install_skill_from_archive
@@ -141,8 +142,8 @@ class DeerFlowClient:
middlewares: Optional list of custom middlewares to inject into the agent. middlewares: Optional list of custom middlewares to inject into the agent.
""" """
if config_path is not None: if config_path is not None:
reload_app_config(config_path) AppConfig.init(AppConfig.from_file(config_path))
self._app_config = get_app_config() self._app_config = AppConfig.current()
if agent_name is not None and not AGENT_NAME_PATTERN.match(agent_name): if agent_name is not None and not AGENT_NAME_PATTERN.match(agent_name):
raise ValueError(f"Invalid agent name '{agent_name}'. Must match pattern: {AGENT_NAME_PATTERN.pattern}") raise ValueError(f"Invalid agent name '{agent_name}'. Must match pattern: {AGENT_NAME_PATTERN.pattern}")
@@ -551,9 +552,7 @@ class DeerFlowClient:
self._ensure_agent(config) self._ensure_agent(config)
state: dict[str, Any] = {"messages": [HumanMessage(content=message)]} state: dict[str, Any] = {"messages": [HumanMessage(content=message)]}
context = {"thread_id": thread_id} context = DeerFlowContext(app_config=self._app_config, thread_id=thread_id, agent_name=self._agent_name)
if self._agent_name:
context["agent_name"] = self._agent_name
seen_ids: set[str] = set() seen_ids: set[str] = set()
# Cross-mode handoff: ids already streamed via LangGraph ``messages`` # Cross-mode handoff: ids already streamed via LangGraph ``messages``
@@ -816,8 +815,8 @@ class DeerFlowClient:
Dict with "mcp_servers" key mapping server name to config, Dict with "mcp_servers" key mapping server name to config,
matching the Gateway API ``McpConfigResponse`` schema. matching the Gateway API ``McpConfigResponse`` schema.
""" """
config = get_extensions_config() ext = AppConfig.current().extensions
return {"mcp_servers": {name: server.model_dump() for name, server in config.mcp_servers.items()}} return {"mcp_servers": {name: server.model_dump() for name, server in ext.mcp_servers.items()}}
def update_mcp_config(self, mcp_servers: dict[str, dict]) -> dict: def update_mcp_config(self, mcp_servers: dict[str, dict]) -> dict:
"""Update MCP server configurations. """Update MCP server configurations.
@@ -839,18 +838,19 @@ class DeerFlowClient:
if config_path is None: if config_path is None:
raise FileNotFoundError("Cannot locate extensions_config.json. Set DEER_FLOW_EXTENSIONS_CONFIG_PATH or ensure it exists in the project root.") raise FileNotFoundError("Cannot locate extensions_config.json. Set DEER_FLOW_EXTENSIONS_CONFIG_PATH or ensure it exists in the project root.")
current_config = get_extensions_config() current_ext = AppConfig.current().extensions
config_data = { config_data = {
"mcpServers": mcp_servers, "mcpServers": mcp_servers,
"skills": {name: {"enabled": skill.enabled} for name, skill in current_config.skills.items()}, "skills": {name: {"enabled": skill.enabled} for name, skill in current_ext.skills.items()},
} }
self._atomic_write_json(config_path, config_data) self._atomic_write_json(config_path, config_data)
self._agent = None self._agent = None
self._agent_config_key = None self._agent_config_key = None
reloaded = reload_extensions_config() AppConfig.init(AppConfig.from_file())
reloaded = AppConfig.current().extensions
return {"mcp_servers": {name: server.model_dump() for name, server in reloaded.mcp_servers.items()}} return {"mcp_servers": {name: server.model_dump() for name, server in reloaded.mcp_servers.items()}}
# ------------------------------------------------------------------ # ------------------------------------------------------------------
@@ -904,19 +904,19 @@ class DeerFlowClient:
if config_path is None: if config_path is None:
raise FileNotFoundError("Cannot locate extensions_config.json. Set DEER_FLOW_EXTENSIONS_CONFIG_PATH or ensure it exists in the project root.") raise FileNotFoundError("Cannot locate extensions_config.json. Set DEER_FLOW_EXTENSIONS_CONFIG_PATH or ensure it exists in the project root.")
extensions_config = get_extensions_config() ext = AppConfig.current().extensions
extensions_config.skills[name] = SkillStateConfig(enabled=enabled) ext.skills[name] = SkillStateConfig(enabled=enabled)
config_data = { config_data = {
"mcpServers": {n: s.model_dump() for n, s in extensions_config.mcp_servers.items()}, "mcpServers": {n: s.model_dump() for n, s in ext.mcp_servers.items()},
"skills": {n: {"enabled": sc.enabled} for n, sc in extensions_config.skills.items()}, "skills": {n: {"enabled": sc.enabled} for n, sc in ext.skills.items()},
} }
self._atomic_write_json(config_path, config_data) self._atomic_write_json(config_path, config_data)
self._agent = None self._agent = None
self._agent_config_key = None self._agent_config_key = None
reload_extensions_config() AppConfig.init(AppConfig.from_file())
updated = next((s for s in load_skills(enabled_only=False) if s.name == name), None) updated = next((s for s in load_skills(enabled_only=False) if s.name == name), None)
if updated is None: if updated is None:
@@ -999,9 +999,7 @@ class DeerFlowClient:
Returns: Returns:
Memory config dict. Memory config dict.
""" """
from deerflow.config.memory_config import get_memory_config config = AppConfig.current().memory
config = get_memory_config()
return { return {
"enabled": config.enabled, "enabled": config.enabled,
"storage_path": config.storage_path, "storage_path": config.storage_path,
@@ -25,7 +25,7 @@ except ImportError: # pragma: no cover - Windows fallback
fcntl = None # type: ignore[assignment] fcntl = None # type: ignore[assignment]
import msvcrt import msvcrt
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.config.paths import VIRTUAL_PATH_PREFIX, get_paths from deerflow.config.paths import VIRTUAL_PATH_PREFIX, get_paths
from deerflow.sandbox.sandbox import Sandbox from deerflow.sandbox.sandbox import Sandbox
from deerflow.sandbox.sandbox_provider import SandboxProvider from deerflow.sandbox.sandbox_provider import SandboxProvider
@@ -148,7 +148,7 @@ class AioSandboxProvider(SandboxProvider):
def _load_config(self) -> dict: def _load_config(self) -> dict:
"""Load sandbox configuration from app config.""" """Load sandbox configuration from app config."""
config = get_app_config() config = AppConfig.current()
sandbox_config = config.sandbox sandbox_config = config.sandbox
idle_timeout = getattr(sandbox_config, "idle_timeout", None) idle_timeout = getattr(sandbox_config, "idle_timeout", None)
@@ -279,7 +279,7 @@ class AioSandboxProvider(SandboxProvider):
so the host Docker daemon can resolve the path. so the host Docker daemon can resolve the path.
""" """
try: try:
config = get_app_config() config = AppConfig.current()
skills_path = config.skills.get_skills_path() skills_path = config.skills.get_skills_path()
container_path = config.skills.container_path container_path = config.skills.container_path
@@ -7,7 +7,7 @@ import logging
from langchain.tools import tool from langchain.tools import tool
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -63,7 +63,7 @@ def web_search_tool(
query: Search keywords describing what you want to find. Be specific for better results. query: Search keywords describing what you want to find. Be specific for better results.
max_results: Maximum number of results to return. Default is 5. max_results: Maximum number of results to return. Default is 5.
""" """
config = get_app_config().get_tool_config("web_search") config = AppConfig.current().get_tool_config("web_search")
# Override max_results from config if set # Override max_results from config if set
if config is not None and "max_results" in config.model_extra: if config is not None and "max_results" in config.model_extra:
@@ -3,11 +3,11 @@ import json
from exa_py import Exa from exa_py import Exa
from langchain.tools import tool from langchain.tools import tool
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
def _get_exa_client(tool_name: str = "web_search") -> Exa: def _get_exa_client(tool_name: str = "web_search") -> Exa:
config = get_app_config().get_tool_config(tool_name) config = AppConfig.current().get_tool_config(tool_name)
api_key = None api_key = None
if config is not None and "api_key" in config.model_extra: if config is not None and "api_key" in config.model_extra:
api_key = config.model_extra.get("api_key") api_key = config.model_extra.get("api_key")
@@ -22,7 +22,7 @@ def web_search_tool(query: str) -> str:
query: The query to search for. query: The query to search for.
""" """
try: try:
config = get_app_config().get_tool_config("web_search") config = AppConfig.current().get_tool_config("web_search")
max_results = 5 max_results = 5
search_type = "auto" search_type = "auto"
contents_max_characters = 1000 contents_max_characters = 1000
@@ -3,11 +3,11 @@ import json
from firecrawl import FirecrawlApp from firecrawl import FirecrawlApp
from langchain.tools import tool from langchain.tools import tool
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
def _get_firecrawl_client(tool_name: str = "web_search") -> FirecrawlApp: def _get_firecrawl_client(tool_name: str = "web_search") -> FirecrawlApp:
config = get_app_config().get_tool_config(tool_name) config = AppConfig.current().get_tool_config(tool_name)
api_key = None api_key = None
if config is not None and "api_key" in config.model_extra: if config is not None and "api_key" in config.model_extra:
api_key = config.model_extra.get("api_key") api_key = config.model_extra.get("api_key")
@@ -22,7 +22,7 @@ def web_search_tool(query: str) -> str:
query: The query to search for. query: The query to search for.
""" """
try: try:
config = get_app_config().get_tool_config("web_search") config = AppConfig.current().get_tool_config("web_search")
max_results = 5 max_results = 5
if config is not None: if config is not None:
max_results = config.model_extra.get("max_results", max_results) max_results = config.model_extra.get("max_results", max_results)
@@ -7,7 +7,7 @@ import logging
from langchain.tools import tool from langchain.tools import tool
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -99,7 +99,7 @@ def image_search_tool(
type_image: Image type filter. Options: "photo", "clipart", "gif", "transparent", "line". Use "photo" for realistic references. type_image: Image type filter. Options: "photo", "clipart", "gif", "transparent", "line". Use "photo" for realistic references.
layout: Layout filter. Options: "Square", "Tall", "Wide". Choose based on your generation needs. layout: Layout filter. Options: "Square", "Tall", "Wide". Choose based on your generation needs.
""" """
config = get_app_config().get_tool_config("image_search") config = AppConfig.current().get_tool_config("image_search")
# Override max_results from config if set # Override max_results from config if set
if config is not None and "max_results" in config.model_extra: if config is not None and "max_results" in config.model_extra:
@@ -1,6 +1,6 @@
from langchain.tools import tool from langchain.tools import tool
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.utils.readability import ReadabilityExtractor from deerflow.utils.readability import ReadabilityExtractor
from .infoquest_client import InfoQuestClient from .infoquest_client import InfoQuestClient
@@ -9,12 +9,12 @@ readability_extractor = ReadabilityExtractor()
def _get_infoquest_client() -> InfoQuestClient: def _get_infoquest_client() -> InfoQuestClient:
search_config = get_app_config().get_tool_config("web_search") search_config = AppConfig.current().get_tool_config("web_search")
search_time_range = -1 search_time_range = -1
if search_config is not None and "search_time_range" in search_config.model_extra: if search_config is not None and "search_time_range" in search_config.model_extra:
search_time_range = search_config.model_extra.get("search_time_range") search_time_range = search_config.model_extra.get("search_time_range")
fetch_config = get_app_config().get_tool_config("web_fetch") fetch_config = AppConfig.current().get_tool_config("web_fetch")
fetch_time = -1 fetch_time = -1
if fetch_config is not None and "fetch_time" in fetch_config.model_extra: if fetch_config is not None and "fetch_time" in fetch_config.model_extra:
fetch_time = fetch_config.model_extra.get("fetch_time") fetch_time = fetch_config.model_extra.get("fetch_time")
@@ -25,7 +25,7 @@ def _get_infoquest_client() -> InfoQuestClient:
if fetch_config is not None and "navigation_timeout" in fetch_config.model_extra: if fetch_config is not None and "navigation_timeout" in fetch_config.model_extra:
navigation_timeout = fetch_config.model_extra.get("navigation_timeout") navigation_timeout = fetch_config.model_extra.get("navigation_timeout")
image_search_config = get_app_config().get_tool_config("image_search") image_search_config = AppConfig.current().get_tool_config("image_search")
image_search_time_range = -1 image_search_time_range = -1
if image_search_config is not None and "image_search_time_range" in image_search_config.model_extra: if image_search_config is not None and "image_search_time_range" in image_search_config.model_extra:
image_search_time_range = image_search_config.model_extra.get("image_search_time_range") image_search_time_range = image_search_config.model_extra.get("image_search_time_range")
@@ -1,7 +1,7 @@
from langchain.tools import tool from langchain.tools import tool
from deerflow.community.jina_ai.jina_client import JinaClient from deerflow.community.jina_ai.jina_client import JinaClient
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.utils.readability import ReadabilityExtractor from deerflow.utils.readability import ReadabilityExtractor
readability_extractor = ReadabilityExtractor() readability_extractor = ReadabilityExtractor()
@@ -20,7 +20,7 @@ async def web_fetch_tool(url: str) -> str:
""" """
jina_client = JinaClient() jina_client = JinaClient()
timeout = 10 timeout = 10
config = get_app_config().get_tool_config("web_fetch") config = AppConfig.current().get_tool_config("web_fetch")
if config is not None and "timeout" in config.model_extra: if config is not None and "timeout" in config.model_extra:
timeout = config.model_extra.get("timeout") timeout = config.model_extra.get("timeout")
html_content = await jina_client.crawl(url, return_format="html", timeout=timeout) html_content = await jina_client.crawl(url, return_format="html", timeout=timeout)
@@ -3,11 +3,11 @@ import json
from langchain.tools import tool from langchain.tools import tool
from tavily import TavilyClient from tavily import TavilyClient
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
def _get_tavily_client() -> TavilyClient: def _get_tavily_client() -> TavilyClient:
config = get_app_config().get_tool_config("web_search") config = AppConfig.current().get_tool_config("web_search")
api_key = None api_key = None
if config is not None and "api_key" in config.model_extra: if config is not None and "api_key" in config.model_extra:
api_key = config.model_extra.get("api_key") api_key = config.model_extra.get("api_key")
@@ -21,7 +21,7 @@ def web_search_tool(query: str) -> str:
Args: Args:
query: The query to search for. query: The query to search for.
""" """
config = get_app_config().get_tool_config("web_search") config = AppConfig.current().get_tool_config("web_search")
max_results = 5 max_results = 5
if config is not None and "max_results" in config.model_extra: if config is not None and "max_results" in config.model_extra:
max_results = config.model_extra.get("max_results") max_results = config.model_extra.get("max_results")
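All of the tool hunks above share one pattern for optional settings: look up the `ToolConfig` via `AppConfig.current().get_tool_config(name)`, then fall back to a default when the key is missing from `model_extra`. A minimal sketch of that pattern, assuming the deerflow package is importable; the "web_search" name and the `max_results` default mirror the hunks above, everything else is illustrative:

```python
from deerflow.config.app_config import AppConfig

DEFAULT_MAX_RESULTS = 5

def resolve_max_results() -> int:
    # Tool sections use extra="allow", so unrecognized keys land in model_extra.
    config = AppConfig.current().get_tool_config("web_search")
    if config is not None and "max_results" in config.model_extra:
        return int(config.model_extra["max_results"])
    return DEFAULT_MAX_RESULTS
```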
@@ -1,6 +1,6 @@
from .app_config import get_app_config from .app_config import AppConfig
from .extensions_config import ExtensionsConfig, get_extensions_config from .extensions_config import ExtensionsConfig
from .memory_config import MemoryConfig, get_memory_config from .memory_config import MemoryConfig
from .paths import Paths, get_paths from .paths import Paths, get_paths
from .skill_evolution_config import SkillEvolutionConfig from .skill_evolution_config import SkillEvolutionConfig
from .skills_config import SkillsConfig from .skills_config import SkillsConfig
@@ -13,18 +13,16 @@ from .tracing_config import (
) )
__all__ = [ __all__ = [
"get_app_config", "AppConfig",
"SkillEvolutionConfig",
"Paths",
"get_paths",
"SkillsConfig",
"ExtensionsConfig", "ExtensionsConfig",
"get_extensions_config",
"MemoryConfig", "MemoryConfig",
"get_memory_config", "Paths",
"get_tracing_config", "SkillEvolutionConfig",
"get_explicitly_enabled_tracing_providers", "SkillsConfig",
"get_enabled_tracing_providers", "get_enabled_tracing_providers",
"get_explicitly_enabled_tracing_providers",
"get_paths",
"get_tracing_config",
"is_tracing_enabled", "is_tracing_enabled",
"validate_enabled_tracing_providers", "validate_enabled_tracing_providers",
] ]
@@ -1,16 +1,13 @@
"""ACP (Agent Client Protocol) agent configuration loaded from config.yaml.""" """ACP (Agent Client Protocol) agent configuration loaded from config.yaml."""
import logging from pydantic import BaseModel, ConfigDict, Field
from collections.abc import Mapping
from pydantic import BaseModel, Field
logger = logging.getLogger(__name__)
class ACPAgentConfig(BaseModel): class ACPAgentConfig(BaseModel):
"""Configuration for a single ACP-compatible agent.""" """Configuration for a single ACP-compatible agent."""
model_config = ConfigDict(frozen=True)
command: str = Field(description="Command to launch the ACP agent subprocess") command: str = Field(description="Command to launch the ACP agent subprocess")
args: list[str] = Field(default_factory=list, description="Additional command arguments") args: list[str] = Field(default_factory=list, description="Additional command arguments")
env: dict[str, str] = Field(default_factory=dict, description="Environment variables to inject into the agent subprocess. Values starting with $ are resolved from host environment variables.") env: dict[str, str] = Field(default_factory=dict, description="Environment variables to inject into the agent subprocess. Values starting with $ are resolved from host environment variables.")
@@ -24,28 +21,3 @@ class ACPAgentConfig(BaseModel):
"are denied — the agent must be configured to operate without requesting permissions." "are denied — the agent must be configured to operate without requesting permissions."
), ),
) )
_acp_agents: dict[str, ACPAgentConfig] = {}
def get_acp_agents() -> dict[str, ACPAgentConfig]:
"""Get the currently configured ACP agents.
Returns:
Mapping of agent name -> ACPAgentConfig. Empty dict if no ACP agents are configured.
"""
return _acp_agents
def load_acp_config_from_dict(config_dict: Mapping[str, Mapping[str, object]] | None) -> None:
"""Load ACP agent configuration from a dictionary (typically from config.yaml).
Args:
config_dict: Mapping of agent name -> config fields.
"""
global _acp_agents
if config_dict is None:
config_dict = {}
_acp_agents = {name: ACPAgentConfig(**cfg) for name, cfg in config_dict.items()}
logger.info("ACP config loaded: %d agent(s): %s", len(_acp_agents), list(_acp_agents.keys()))
@@ -5,7 +5,7 @@ import re
from typing import Any from typing import Any
import yaml import yaml
from pydantic import BaseModel from pydantic import BaseModel, ConfigDict
from deerflow.config.paths import get_paths from deerflow.config.paths import get_paths
@@ -18,6 +18,8 @@ AGENT_NAME_PATTERN = re.compile(r"^[A-Za-z0-9-]+$")
class AgentConfig(BaseModel): class AgentConfig(BaseModel):
"""Configuration for a custom agent.""" """Configuration for a custom agent."""
model_config = ConfigDict(frozen=True)
name: str name: str
description: str = "" description: str = ""
model: str | None = None model: str | None = None
@@ -1,29 +1,31 @@
from __future__ import annotations
import logging import logging
import os import os
from contextvars import ContextVar from contextvars import ContextVar
from pathlib import Path from pathlib import Path
from typing import Any, Self from typing import Any, ClassVar, Self
import yaml import yaml
from dotenv import load_dotenv from dotenv import load_dotenv
from pydantic import BaseModel, ConfigDict, Field from pydantic import BaseModel, ConfigDict, Field
from deerflow.config.acp_config import load_acp_config_from_dict from deerflow.config.acp_config import ACPAgentConfig
from deerflow.config.checkpointer_config import CheckpointerConfig, load_checkpointer_config_from_dict from deerflow.config.checkpointer_config import CheckpointerConfig
from deerflow.config.extensions_config import ExtensionsConfig from deerflow.config.extensions_config import ExtensionsConfig
from deerflow.config.guardrails_config import GuardrailsConfig, load_guardrails_config_from_dict from deerflow.config.guardrails_config import GuardrailsConfig
from deerflow.config.memory_config import MemoryConfig, load_memory_config_from_dict from deerflow.config.memory_config import MemoryConfig
from deerflow.config.model_config import ModelConfig from deerflow.config.model_config import ModelConfig
from deerflow.config.sandbox_config import SandboxConfig from deerflow.config.sandbox_config import SandboxConfig
from deerflow.config.skill_evolution_config import SkillEvolutionConfig from deerflow.config.skill_evolution_config import SkillEvolutionConfig
from deerflow.config.skills_config import SkillsConfig from deerflow.config.skills_config import SkillsConfig
from deerflow.config.stream_bridge_config import StreamBridgeConfig, load_stream_bridge_config_from_dict from deerflow.config.stream_bridge_config import StreamBridgeConfig
from deerflow.config.subagents_config import SubagentsAppConfig, load_subagents_config_from_dict from deerflow.config.subagents_config import SubagentsAppConfig
from deerflow.config.summarization_config import SummarizationConfig, load_summarization_config_from_dict from deerflow.config.summarization_config import SummarizationConfig
from deerflow.config.title_config import TitleConfig, load_title_config_from_dict from deerflow.config.title_config import TitleConfig
from deerflow.config.token_usage_config import TokenUsageConfig from deerflow.config.token_usage_config import TokenUsageConfig
from deerflow.config.tool_config import ToolConfig, ToolGroupConfig from deerflow.config.tool_config import ToolConfig, ToolGroupConfig
from deerflow.config.tool_search_config import ToolSearchConfig, load_tool_search_config_from_dict from deerflow.config.tool_search_config import ToolSearchConfig
load_dotenv() load_dotenv()
@@ -55,9 +57,10 @@ class AppConfig(BaseModel):
memory: MemoryConfig = Field(default_factory=MemoryConfig, description="Memory subsystem configuration") memory: MemoryConfig = Field(default_factory=MemoryConfig, description="Memory subsystem configuration")
subagents: SubagentsAppConfig = Field(default_factory=SubagentsAppConfig, description="Subagent runtime configuration") subagents: SubagentsAppConfig = Field(default_factory=SubagentsAppConfig, description="Subagent runtime configuration")
guardrails: GuardrailsConfig = Field(default_factory=GuardrailsConfig, description="Guardrail middleware configuration") guardrails: GuardrailsConfig = Field(default_factory=GuardrailsConfig, description="Guardrail middleware configuration")
model_config = ConfigDict(extra="allow", frozen=False) model_config = ConfigDict(extra="allow", frozen=True)
checkpointer: CheckpointerConfig | None = Field(default=None, description="Checkpointer configuration") checkpointer: CheckpointerConfig | None = Field(default=None, description="Checkpointer configuration")
stream_bridge: StreamBridgeConfig | None = Field(default=None, description="Stream bridge configuration") stream_bridge: StreamBridgeConfig | None = Field(default=None, description="Stream bridge configuration")
acp_agents: dict[str, ACPAgentConfig] = Field(default_factory=dict, description="ACP agent configurations keyed by agent name")
@classmethod @classmethod
def resolve_config_path(cls, config_path: str | None = None) -> Path: def resolve_config_path(cls, config_path: str | None = None) -> Path:
@@ -105,41 +108,6 @@ class AppConfig(BaseModel):
config_data = cls.resolve_env_variables(config_data) config_data = cls.resolve_env_variables(config_data)
# Load title config if present
if "title" in config_data:
load_title_config_from_dict(config_data["title"])
# Load summarization config if present
if "summarization" in config_data:
load_summarization_config_from_dict(config_data["summarization"])
# Load memory config if present
if "memory" in config_data:
load_memory_config_from_dict(config_data["memory"])
# Load subagents config if present
if "subagents" in config_data:
load_subagents_config_from_dict(config_data["subagents"])
# Load tool_search config if present
if "tool_search" in config_data:
load_tool_search_config_from_dict(config_data["tool_search"])
# Load guardrails config if present
if "guardrails" in config_data:
load_guardrails_config_from_dict(config_data["guardrails"])
# Load checkpointer config if present
if "checkpointer" in config_data:
load_checkpointer_config_from_dict(config_data["checkpointer"])
# Load stream bridge config if present
if "stream_bridge" in config_data:
load_stream_bridge_config_from_dict(config_data["stream_bridge"])
# Always refresh ACP agent config so removed entries do not linger across reloads.
load_acp_config_from_dict(config_data.get("acp_agents", {}))
# Load extensions config separately (it's in a different file) # Load extensions config separately (it's in a different file)
extensions_config = ExtensionsConfig.from_file() extensions_config = ExtensionsConfig.from_file()
config_data["extensions"] = extensions_config.model_dump() config_data["extensions"] = extensions_config.model_dump()
@@ -250,130 +218,26 @@ class AppConfig(BaseModel):
""" """
return next((group for group in self.tool_groups if group.name == name), None) return next((group for group in self.tool_groups if group.name == name), None)
# -- Lifecycle (class-level singleton via ContextVar) --
_app_config: AppConfig | None = None _current: ClassVar[ContextVar[AppConfig]] = ContextVar("deerflow_app_config")
_app_config_path: Path | None = None
_app_config_mtime: float | None = None
_app_config_is_custom = False
_current_app_config: ContextVar[AppConfig | None] = ContextVar("deerflow_current_app_config", default=None)
_current_app_config_stack: ContextVar[tuple[AppConfig | None, ...]] = ContextVar("deerflow_current_app_config_stack", default=())
@classmethod
def init(cls, config: AppConfig) -> None:
"""Set the AppConfig for the current context. Call once at process startup."""
cls._current.set(config)
def _get_config_mtime(config_path: Path) -> float | None: @classmethod
"""Get the modification time of a config file if it exists.""" def current(cls) -> AppConfig:
try: """Get the current AppConfig.
return config_path.stat().st_mtime
except OSError:
return None
Auto-initializes from config file on first access for backward compatibility.
def _load_and_cache_app_config(config_path: str | None = None) -> AppConfig: Prefer calling AppConfig.init() explicitly at process startup.
"""Load config from disk and refresh cache metadata.""" """
global _app_config, _app_config_path, _app_config_mtime, _app_config_is_custom try:
return cls._current.get()
resolved_path = AppConfig.resolve_config_path(config_path) except LookupError:
_app_config = AppConfig.from_file(str(resolved_path)) logger.debug("AppConfig not initialized, auto-loading from file")
_app_config_path = resolved_path config = cls.from_file()
_app_config_mtime = _get_config_mtime(resolved_path) cls._current.set(config)
_app_config_is_custom = False return config
return _app_config
def get_app_config() -> AppConfig:
"""Get the DeerFlow config instance.
Returns a cached singleton instance and automatically reloads it when the
underlying config file path or modification time changes. Use
`reload_app_config()` to force a reload, or `reset_app_config()` to clear
the cache.
"""
global _app_config, _app_config_path, _app_config_mtime
runtime_override = _current_app_config.get()
if runtime_override is not None:
return runtime_override
if _app_config is not None and _app_config_is_custom:
return _app_config
resolved_path = AppConfig.resolve_config_path()
current_mtime = _get_config_mtime(resolved_path)
should_reload = _app_config is None or _app_config_path != resolved_path or _app_config_mtime != current_mtime
if should_reload:
if _app_config_path == resolved_path and _app_config_mtime is not None and current_mtime is not None and _app_config_mtime != current_mtime:
logger.info(
"Config file has been modified (mtime: %s -> %s), reloading AppConfig",
_app_config_mtime,
current_mtime,
)
_load_and_cache_app_config(str(resolved_path))
return _app_config
def reload_app_config(config_path: str | None = None) -> AppConfig:
"""Reload the config from file and update the cached instance.
This is useful when the config file has been modified and you want
to pick up the changes without restarting the application.
Args:
config_path: Optional path to config file. If not provided,
uses the default resolution strategy.
Returns:
The newly loaded AppConfig instance.
"""
return _load_and_cache_app_config(config_path)
def reset_app_config() -> None:
"""Reset the cached config instance.
This clears the singleton cache, causing the next call to
`get_app_config()` to reload from file. Useful for testing
or when switching between different configurations.
"""
global _app_config, _app_config_path, _app_config_mtime, _app_config_is_custom
_app_config = None
_app_config_path = None
_app_config_mtime = None
_app_config_is_custom = False
def set_app_config(config: AppConfig) -> None:
"""Set a custom config instance.
This allows injecting a custom or mock config for testing purposes.
Args:
config: The AppConfig instance to use.
"""
global _app_config, _app_config_path, _app_config_mtime, _app_config_is_custom
_app_config = config
_app_config_path = None
_app_config_mtime = None
_app_config_is_custom = True
def peek_current_app_config() -> AppConfig | None:
"""Return the runtime-scoped AppConfig override, if one is active."""
return _current_app_config.get()
def push_current_app_config(config: AppConfig) -> None:
"""Push a runtime-scoped AppConfig override for the current execution context."""
stack = _current_app_config_stack.get()
_current_app_config_stack.set(stack + (_current_app_config.get(),))
_current_app_config.set(config)
def pop_current_app_config() -> None:
"""Pop the latest runtime-scoped AppConfig override for the current execution context."""
stack = _current_app_config_stack.get()
if not stack:
_current_app_config.set(None)
return
previous = stack[-1]
_current_app_config_stack.set(stack[:-1])
_current_app_config.set(previous)
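Taken together, the `init()`/`current()` pair replaces the cache, mtime tracking, and push/pop stack deleted above. A minimal startup sketch under the new lifecycle; the config path is illustrative, the method names come from this diff:

```python
from deerflow.config.app_config import AppConfig

# Process startup (Gateway worker, CLI, tests): construct once, then publish.
AppConfig.init(AppConfig.from_file("config.yaml"))

# Anywhere later in the process: read the published instance.
cfg = AppConfig.current()
print(cfg.models[0].name)

# Runtime updates (e.g. Gateway API edits MCP/Skills): build a fresh instance
# and call init() again; there is no mtime detection and no auto-reload.
AppConfig.init(AppConfig.from_file("config.yaml"))
```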
@@ -2,7 +2,7 @@
from typing import Literal from typing import Literal
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
CheckpointerType = Literal["memory", "sqlite", "postgres"] CheckpointerType = Literal["memory", "sqlite", "postgres"]
@@ -10,6 +10,8 @@ CheckpointerType = Literal["memory", "sqlite", "postgres"]
class CheckpointerConfig(BaseModel): class CheckpointerConfig(BaseModel):
"""Configuration for LangGraph state persistence checkpointer.""" """Configuration for LangGraph state persistence checkpointer."""
model_config = ConfigDict(frozen=True)
type: CheckpointerType = Field( type: CheckpointerType = Field(
description="Checkpointer backend type. " description="Checkpointer backend type. "
"'memory' is in-process only (lost on restart). " "'memory' is in-process only (lost on restart). "
@@ -23,24 +25,3 @@ class CheckpointerConfig(BaseModel):
"For sqlite, use a file path like '.deer-flow/checkpoints.db' or ':memory:' for in-memory. " "For sqlite, use a file path like '.deer-flow/checkpoints.db' or ':memory:' for in-memory. "
"For postgres, use a DSN like 'postgresql://user:pass@localhost:5432/db'.", "For postgres, use a DSN like 'postgresql://user:pass@localhost:5432/db'.",
) )
# Global configuration instance — None means no checkpointer is configured.
_checkpointer_config: CheckpointerConfig | None = None
def get_checkpointer_config() -> CheckpointerConfig | None:
"""Get the current checkpointer configuration, or None if not configured."""
return _checkpointer_config
def set_checkpointer_config(config: CheckpointerConfig | None) -> None:
"""Set the checkpointer configuration."""
global _checkpointer_config
_checkpointer_config = config
def load_checkpointer_config_from_dict(config_dict: dict) -> None:
"""Load checkpointer configuration from a dictionary."""
global _checkpointer_config
_checkpointer_config = CheckpointerConfig(**config_dict)
@@ -0,0 +1,59 @@
"""Per-invocation context for DeerFlow agent execution.
Injected via LangGraph Runtime. Middleware and tools access this
via Runtime[DeerFlowContext] parameters or through resolve_context().
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
@dataclass(frozen=True)
class DeerFlowContext:
"""Typed, immutable, per-invocation context injected via LangGraph Runtime.
Fields are all known at run start and never change during execution.
Mutable runtime state (e.g. sandbox_id) flows through ThreadState, not here.
"""
app_config: Any # AppConfig — typed as Any to avoid circular import at module level
thread_id: str
agent_name: str | None = None
def resolve_context(runtime: Any) -> DeerFlowContext:
"""Extract or construct DeerFlowContext from runtime.
Gateway/Client paths: runtime.context is already a DeerFlowContext; return it directly.
LangGraph Server / legacy dict path: construct from dict context or configurable fallback.
"""
ctx = getattr(runtime, "context", None)
if isinstance(ctx, DeerFlowContext):
return ctx
from deerflow.config.app_config import AppConfig
# Try dict context first (legacy path, tests), then configurable
if isinstance(ctx, dict):
return DeerFlowContext(
app_config=AppConfig.current(),
thread_id=ctx.get("thread_id", ""),
agent_name=ctx.get("agent_name"),
)
# No context at all — fall back to LangGraph configurable
try:
from langgraph.config import get_config
cfg = get_config().get("configurable", {})
except RuntimeError:
# Outside runnable context (e.g. unit tests)
cfg = {}
return DeerFlowContext(
app_config=AppConfig.current(),
thread_id=cfg.get("thread_id", ""),
agent_name=cfg.get("agent_name"),
)
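A sketch of how a consumer uses this context regardless of entry point; the helper function is illustrative, but `resolve_context(runtime)` and the context fields come from the module above:

```python
from deerflow.config.deer_flow_context import DeerFlowContext, resolve_context

def describe_invocation(runtime) -> str:
    # Works for the Gateway/Client path (typed context), the legacy dict path,
    # and bare LangGraph Server runs (configurable fallback).
    ctx: DeerFlowContext = resolve_context(runtime)
    agent = ctx.agent_name or "default"
    return f"thread={ctx.thread_id} agent={agent}"
```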
@@ -11,6 +11,8 @@ from pydantic import BaseModel, ConfigDict, Field
class McpOAuthConfig(BaseModel): class McpOAuthConfig(BaseModel):
"""OAuth configuration for an MCP server (HTTP/SSE transports).""" """OAuth configuration for an MCP server (HTTP/SSE transports)."""
model_config = ConfigDict(extra="allow", frozen=True)
enabled: bool = Field(default=True, description="Whether OAuth token injection is enabled") enabled: bool = Field(default=True, description="Whether OAuth token injection is enabled")
token_url: str = Field(description="OAuth token endpoint URL") token_url: str = Field(description="OAuth token endpoint URL")
grant_type: Literal["client_credentials", "refresh_token"] = Field( grant_type: Literal["client_credentials", "refresh_token"] = Field(
@@ -28,12 +30,13 @@ class McpOAuthConfig(BaseModel):
default_token_type: str = Field(default="Bearer", description="Default token type when missing in token response") default_token_type: str = Field(default="Bearer", description="Default token type when missing in token response")
refresh_skew_seconds: int = Field(default=60, description="Refresh token this many seconds before expiry") refresh_skew_seconds: int = Field(default=60, description="Refresh token this many seconds before expiry")
extra_token_params: dict[str, str] = Field(default_factory=dict, description="Additional form params sent to token endpoint") extra_token_params: dict[str, str] = Field(default_factory=dict, description="Additional form params sent to token endpoint")
model_config = ConfigDict(extra="allow")
class McpServerConfig(BaseModel): class McpServerConfig(BaseModel):
"""Configuration for a single MCP server.""" """Configuration for a single MCP server."""
model_config = ConfigDict(extra="allow", frozen=True)
enabled: bool = Field(default=True, description="Whether this MCP server is enabled") enabled: bool = Field(default=True, description="Whether this MCP server is enabled")
type: str = Field(default="stdio", description="Transport type: 'stdio', 'sse', or 'http'") type: str = Field(default="stdio", description="Transport type: 'stdio', 'sse', or 'http'")
command: str | None = Field(default=None, description="Command to execute to start the MCP server (for stdio type)") command: str | None = Field(default=None, description="Command to execute to start the MCP server (for stdio type)")
@@ -43,12 +46,13 @@ class McpServerConfig(BaseModel):
headers: dict[str, str] = Field(default_factory=dict, description="HTTP headers to send (for sse or http type)") headers: dict[str, str] = Field(default_factory=dict, description="HTTP headers to send (for sse or http type)")
oauth: McpOAuthConfig | None = Field(default=None, description="OAuth configuration (for sse or http type)") oauth: McpOAuthConfig | None = Field(default=None, description="OAuth configuration (for sse or http type)")
description: str = Field(default="", description="Human-readable description of what this MCP server provides") description: str = Field(default="", description="Human-readable description of what this MCP server provides")
model_config = ConfigDict(extra="allow")
class SkillStateConfig(BaseModel): class SkillStateConfig(BaseModel):
"""Configuration for a single skill's state.""" """Configuration for a single skill's state."""
model_config = ConfigDict(frozen=True)
enabled: bool = Field(default=True, description="Whether this skill is enabled") enabled: bool = Field(default=True, description="Whether this skill is enabled")
@@ -64,7 +68,7 @@ class ExtensionsConfig(BaseModel):
default_factory=dict, default_factory=dict,
description="Map of skill name to state configuration", description="Map of skill name to state configuration",
) )
model_config = ConfigDict(extra="allow", populate_by_name=True) model_config = ConfigDict(extra="allow", frozen=True, populate_by_name=True)
@classmethod @classmethod
def resolve_config_path(cls, config_path: str | None = None) -> Path | None: def resolve_config_path(cls, config_path: str | None = None) -> Path | None:
@@ -195,62 +199,3 @@ class ExtensionsConfig(BaseModel):
# Default to enable for public & custom skill # Default to enable for public & custom skill
return skill_category in ("public", "custom") return skill_category in ("public", "custom")
return skill_config.enabled return skill_config.enabled
_extensions_config: ExtensionsConfig | None = None
def get_extensions_config() -> ExtensionsConfig:
"""Get the extensions config instance.
Returns a cached singleton instance. Use `reload_extensions_config()` to reload
from file, or `reset_extensions_config()` to clear the cache.
Returns:
The cached ExtensionsConfig instance.
"""
global _extensions_config
if _extensions_config is None:
_extensions_config = ExtensionsConfig.from_file()
return _extensions_config
def reload_extensions_config(config_path: str | None = None) -> ExtensionsConfig:
"""Reload the extensions config from file and update the cached instance.
This is useful when the config file has been modified and you want
to pick up the changes without restarting the application.
Args:
config_path: Optional path to extensions config file. If not provided,
uses the default resolution strategy.
Returns:
The newly loaded ExtensionsConfig instance.
"""
global _extensions_config
_extensions_config = ExtensionsConfig.from_file(config_path)
return _extensions_config
def reset_extensions_config() -> None:
"""Reset the cached extensions config instance.
This clears the singleton cache, causing the next call to
`get_extensions_config()` to reload from file. Useful for testing
or when switching between different configurations.
"""
global _extensions_config
_extensions_config = None
def set_extensions_config(config: ExtensionsConfig) -> None:
"""Set a custom extensions config instance.
This allows injecting a custom or mock config for testing purposes.
Args:
config: The ExtensionsConfig instance to use.
"""
global _extensions_config
_extensions_config = config
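The removed `get_extensions_config()` singleton is replaced by the `extensions` field that `AppConfig.from_file()` populates from the separate extensions file. A hedged sketch of the new access path, assuming the usual pydantic `model_copy` semantics for test injection:

```python
from deerflow.config.app_config import AppConfig

# Production path: extensions travel inside the frozen AppConfig,
# populated by ExtensionsConfig.from_file() during AppConfig.from_file().
extensions = AppConfig.current().extensions

# Test path: instead of set_extensions_config(), publish a whole AppConfig
# that carries the desired extensions (model_copy returns a new instance).
AppConfig.init(AppConfig.current().model_copy(update={"extensions": extensions}))
```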
@@ -1,11 +1,13 @@
"""Configuration for pre-tool-call authorization.""" """Configuration for pre-tool-call authorization."""
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
class GuardrailProviderConfig(BaseModel): class GuardrailProviderConfig(BaseModel):
"""Configuration for a guardrail provider.""" """Configuration for a guardrail provider."""
model_config = ConfigDict(frozen=True)
use: str = Field(description="Class path (e.g. 'deerflow.guardrails.builtin:AllowlistProvider')") use: str = Field(description="Class path (e.g. 'deerflow.guardrails.builtin:AllowlistProvider')")
config: dict = Field(default_factory=dict, description="Provider-specific settings passed as kwargs") config: dict = Field(default_factory=dict, description="Provider-specific settings passed as kwargs")
@@ -18,31 +20,9 @@ class GuardrailsConfig(BaseModel):
agent's passport reference, and returns an allow/deny decision. agent's passport reference, and returns an allow/deny decision.
""" """
model_config = ConfigDict(frozen=True)
enabled: bool = Field(default=False, description="Enable guardrail middleware") enabled: bool = Field(default=False, description="Enable guardrail middleware")
fail_closed: bool = Field(default=True, description="Block tool calls if provider errors") fail_closed: bool = Field(default=True, description="Block tool calls if provider errors")
passport: str | None = Field(default=None, description="OAP passport path or hosted agent ID") passport: str | None = Field(default=None, description="OAP passport path or hosted agent ID")
provider: GuardrailProviderConfig | None = Field(default=None, description="Guardrail provider configuration") provider: GuardrailProviderConfig | None = Field(default=None, description="Guardrail provider configuration")
_guardrails_config: GuardrailsConfig | None = None
def get_guardrails_config() -> GuardrailsConfig:
"""Get the guardrails config, returning defaults if not loaded."""
global _guardrails_config
if _guardrails_config is None:
_guardrails_config = GuardrailsConfig()
return _guardrails_config
def load_guardrails_config_from_dict(data: dict) -> GuardrailsConfig:
"""Load guardrails config from a dict (called during AppConfig loading)."""
global _guardrails_config
_guardrails_config = GuardrailsConfig.model_validate(data)
return _guardrails_config
def reset_guardrails_config() -> None:
"""Reset the cached config instance. Used in tests to prevent singleton leaks."""
global _guardrails_config
_guardrails_config = None
@@ -1,11 +1,13 @@
"""Configuration for memory mechanism.""" """Configuration for memory mechanism."""
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
class MemoryConfig(BaseModel): class MemoryConfig(BaseModel):
"""Configuration for global memory mechanism.""" """Configuration for global memory mechanism."""
model_config = ConfigDict(frozen=True)
enabled: bool = Field( enabled: bool = Field(
default=True, default=True,
description="Whether to enable memory mechanism", description="Whether to enable memory mechanism",
@@ -59,24 +61,3 @@ class MemoryConfig(BaseModel):
le=8000, le=8000,
description="Maximum tokens to use for memory injection", description="Maximum tokens to use for memory injection",
) )
# Global configuration instance
_memory_config: MemoryConfig = MemoryConfig()
def get_memory_config() -> MemoryConfig:
"""Get the current memory configuration."""
return _memory_config
def set_memory_config(config: MemoryConfig) -> None:
"""Set the memory configuration."""
global _memory_config
_memory_config = config
def load_memory_config_from_dict(config_dict: dict) -> None:
"""Load memory configuration from a dictionary."""
global _memory_config
_memory_config = MemoryConfig(**config_dict)
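`frozen=True` makes each config model immutable after construction, so code that previously relied on `set_memory_config()`-style mutation now fails loudly. A small sketch of the behavior, assuming pydantic v2 semantics (attribute assignment on a frozen model raises a ValidationError):

```python
from pydantic import ValidationError

from deerflow.config.memory_config import MemoryConfig

config = MemoryConfig(enabled=False)

try:
    config.enabled = True  # frozen=True: attribute assignment is rejected
except ValidationError:
    print("MemoryConfig is immutable after construction")

# The supported way to "change" config is to build a new instance.
updated = config.model_copy(update={"enabled": True})
```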
@@ -12,7 +12,7 @@ class ModelConfig(BaseModel):
description="Class path of the model provider(e.g. langchain_openai.ChatOpenAI)", description="Class path of the model provider(e.g. langchain_openai.ChatOpenAI)",
) )
model: str = Field(..., description="Model name") model: str = Field(..., description="Model name")
model_config = ConfigDict(extra="allow") model_config = ConfigDict(extra="allow", frozen=True)
use_responses_api: bool | None = Field( use_responses_api: bool | None = Field(
default=None, default=None,
description="Whether to route OpenAI ChatOpenAI calls through the /v1/responses API", description="Whether to route OpenAI ChatOpenAI calls through the /v1/responses API",
@@ -4,6 +4,8 @@ from pydantic import BaseModel, ConfigDict, Field
class VolumeMountConfig(BaseModel): class VolumeMountConfig(BaseModel):
"""Configuration for a volume mount.""" """Configuration for a volume mount."""
model_config = ConfigDict(frozen=True)
host_path: str = Field(..., description="Path on the host machine") host_path: str = Field(..., description="Path on the host machine")
container_path: str = Field(..., description="Path inside the container") container_path: str = Field(..., description="Path inside the container")
read_only: bool = Field(default=False, description="Whether the mount is read-only") read_only: bool = Field(default=False, description="Whether the mount is read-only")
@@ -80,4 +82,4 @@ class SandboxConfig(BaseModel):
description="Maximum characters to keep from ls tool output. Output exceeding this limit is head-truncated. Set to 0 to disable truncation.", description="Maximum characters to keep from ls tool output. Output exceeding this limit is head-truncated. Set to 0 to disable truncation.",
) )
model_config = ConfigDict(extra="allow") model_config = ConfigDict(extra="allow", frozen=True)
@@ -1,9 +1,11 @@
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
class SkillEvolutionConfig(BaseModel): class SkillEvolutionConfig(BaseModel):
"""Configuration for agent-managed skill evolution.""" """Configuration for agent-managed skill evolution."""
model_config = ConfigDict(frozen=True)
enabled: bool = Field( enabled: bool = Field(
default=False, default=False,
description="Whether the agent can create and modify skills under skills/custom.", description="Whether the agent can create and modify skills under skills/custom.",
@@ -1,6 +1,6 @@
from pathlib import Path from pathlib import Path
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
def _default_repo_root() -> Path: def _default_repo_root() -> Path:
@@ -11,6 +11,8 @@ def _default_repo_root() -> Path:
class SkillsConfig(BaseModel): class SkillsConfig(BaseModel):
"""Configuration for skills system""" """Configuration for skills system"""
model_config = ConfigDict(frozen=True)
path: str | None = Field( path: str | None = Field(
default=None, default=None,
description="Path to skills directory. If not specified, defaults to ../skills relative to backend directory", description="Path to skills directory. If not specified, defaults to ../skills relative to backend directory",
@@ -2,7 +2,7 @@
from typing import Literal from typing import Literal
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
StreamBridgeType = Literal["memory", "redis"] StreamBridgeType = Literal["memory", "redis"]
@@ -10,6 +10,8 @@ StreamBridgeType = Literal["memory", "redis"]
class StreamBridgeConfig(BaseModel): class StreamBridgeConfig(BaseModel):
"""Configuration for the stream bridge that connects agent workers to SSE endpoints.""" """Configuration for the stream bridge that connects agent workers to SSE endpoints."""
model_config = ConfigDict(frozen=True)
type: StreamBridgeType = Field( type: StreamBridgeType = Field(
default="memory", default="memory",
description="Stream bridge backend type. 'memory' uses in-process asyncio.Queue (single-process only). 'redis' uses Redis Streams (planned for Phase 2, not yet implemented).", description="Stream bridge backend type. 'memory' uses in-process asyncio.Queue (single-process only). 'redis' uses Redis Streams (planned for Phase 2, not yet implemented).",
@@ -22,25 +24,3 @@ class StreamBridgeConfig(BaseModel):
default=256, default=256,
description="Maximum number of events buffered per run in the memory bridge.", description="Maximum number of events buffered per run in the memory bridge.",
) )
# Global configuration instance — None means no stream bridge is configured
# (falls back to memory with defaults).
_stream_bridge_config: StreamBridgeConfig | None = None
def get_stream_bridge_config() -> StreamBridgeConfig | None:
"""Get the current stream bridge configuration, or None if not configured."""
return _stream_bridge_config
def set_stream_bridge_config(config: StreamBridgeConfig | None) -> None:
"""Set the stream bridge configuration."""
global _stream_bridge_config
_stream_bridge_config = config
def load_stream_bridge_config_from_dict(config_dict: dict) -> None:
"""Load stream bridge configuration from a dictionary."""
global _stream_bridge_config
_stream_bridge_config = StreamBridgeConfig(**config_dict)
@@ -1,15 +1,13 @@
"""Configuration for the subagent system loaded from config.yaml.""" """Configuration for the subagent system loaded from config.yaml."""
import logging from pydantic import BaseModel, ConfigDict, Field
from pydantic import BaseModel, Field
logger = logging.getLogger(__name__)
class SubagentOverrideConfig(BaseModel): class SubagentOverrideConfig(BaseModel):
"""Per-agent configuration overrides.""" """Per-agent configuration overrides."""
model_config = ConfigDict(frozen=True)
timeout_seconds: int | None = Field( timeout_seconds: int | None = Field(
default=None, default=None,
ge=1, ge=1,
@@ -25,6 +23,8 @@ class SubagentOverrideConfig(BaseModel):
class SubagentsAppConfig(BaseModel): class SubagentsAppConfig(BaseModel):
"""Configuration for the subagent system.""" """Configuration for the subagent system."""
model_config = ConfigDict(frozen=True)
timeout_seconds: int = Field( timeout_seconds: int = Field(
default=900, default=900,
ge=1, ge=1,
@@ -62,41 +62,3 @@ class SubagentsAppConfig(BaseModel):
if self.max_turns is not None: if self.max_turns is not None:
return self.max_turns return self.max_turns
return builtin_default return builtin_default
_subagents_config: SubagentsAppConfig = SubagentsAppConfig()
def get_subagents_app_config() -> SubagentsAppConfig:
"""Get the current subagents configuration."""
return _subagents_config
def load_subagents_config_from_dict(config_dict: dict) -> None:
"""Load subagents configuration from a dictionary."""
global _subagents_config
_subagents_config = SubagentsAppConfig(**config_dict)
overrides_summary = {}
for name, override in _subagents_config.agents.items():
parts = []
if override.timeout_seconds is not None:
parts.append(f"timeout={override.timeout_seconds}s")
if override.max_turns is not None:
parts.append(f"max_turns={override.max_turns}")
if parts:
overrides_summary[name] = ", ".join(parts)
if overrides_summary:
logger.info(
"Subagents config loaded: default timeout=%ss, default max_turns=%s, per-agent overrides=%s",
_subagents_config.timeout_seconds,
_subagents_config.max_turns,
overrides_summary,
)
else:
logger.info(
"Subagents config loaded: default timeout=%ss, default max_turns=%s, no per-agent overrides",
_subagents_config.timeout_seconds,
_subagents_config.max_turns,
)
@@ -2,7 +2,7 @@
from typing import Literal from typing import Literal
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
ContextSizeType = Literal["fraction", "tokens", "messages"] ContextSizeType = Literal["fraction", "tokens", "messages"]
@@ -10,6 +10,8 @@ ContextSizeType = Literal["fraction", "tokens", "messages"]
class ContextSize(BaseModel): class ContextSize(BaseModel):
"""Context size specification for trigger or keep parameters.""" """Context size specification for trigger or keep parameters."""
model_config = ConfigDict(frozen=True)
type: ContextSizeType = Field(description="Type of context size specification") type: ContextSizeType = Field(description="Type of context size specification")
value: int | float = Field(description="Value for the context size specification") value: int | float = Field(description="Value for the context size specification")
@@ -21,6 +23,8 @@ class ContextSize(BaseModel):
class SummarizationConfig(BaseModel): class SummarizationConfig(BaseModel):
"""Configuration for automatic conversation summarization.""" """Configuration for automatic conversation summarization."""
model_config = ConfigDict(frozen=True)
enabled: bool = Field( enabled: bool = Field(
default=False, default=False,
description="Whether to enable automatic conversation summarization", description="Whether to enable automatic conversation summarization",
@@ -51,24 +55,3 @@ class SummarizationConfig(BaseModel):
default=None, default=None,
description="Custom prompt template for generating summaries. If not provided, uses the default LangChain prompt.", description="Custom prompt template for generating summaries. If not provided, uses the default LangChain prompt.",
) )
# Global configuration instance
_summarization_config: SummarizationConfig = SummarizationConfig()
def get_summarization_config() -> SummarizationConfig:
"""Get the current summarization configuration."""
return _summarization_config
def set_summarization_config(config: SummarizationConfig) -> None:
"""Set the summarization configuration."""
global _summarization_config
_summarization_config = config
def load_summarization_config_from_dict(config_dict: dict) -> None:
"""Load summarization configuration from a dictionary."""
global _summarization_config
_summarization_config = SummarizationConfig(**config_dict)
@@ -1,11 +1,13 @@
"""Configuration for automatic thread title generation.""" """Configuration for automatic thread title generation."""
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
class TitleConfig(BaseModel): class TitleConfig(BaseModel):
"""Configuration for automatic thread title generation.""" """Configuration for automatic thread title generation."""
model_config = ConfigDict(frozen=True)
enabled: bool = Field( enabled: bool = Field(
default=True, default=True,
description="Whether to enable automatic title generation", description="Whether to enable automatic title generation",
@@ -30,24 +32,3 @@ class TitleConfig(BaseModel):
default=("Generate a concise title (max {max_words} words) for this conversation.\nUser: {user_msg}\nAssistant: {assistant_msg}\n\nReturn ONLY the title, no quotes, no explanation."), default=("Generate a concise title (max {max_words} words) for this conversation.\nUser: {user_msg}\nAssistant: {assistant_msg}\n\nReturn ONLY the title, no quotes, no explanation."),
description="Prompt template for title generation", description="Prompt template for title generation",
) )
# Global configuration instance
_title_config: TitleConfig = TitleConfig()
def get_title_config() -> TitleConfig:
"""Get the current title configuration."""
return _title_config
def set_title_config(config: TitleConfig) -> None:
"""Set the title configuration."""
global _title_config
_title_config = config
def load_title_config_from_dict(config_dict: dict) -> None:
"""Load title configuration from a dictionary."""
global _title_config
_title_config = TitleConfig(**config_dict)
@@ -1,7 +1,9 @@
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
class TokenUsageConfig(BaseModel): class TokenUsageConfig(BaseModel):
"""Configuration for token usage tracking.""" """Configuration for token usage tracking."""
model_config = ConfigDict(frozen=True)
enabled: bool = Field(default=False, description="Enable token usage tracking middleware") enabled: bool = Field(default=False, description="Enable token usage tracking middleware")
@@ -5,7 +5,7 @@ class ToolGroupConfig(BaseModel):
"""Config section for a tool group""" """Config section for a tool group"""
name: str = Field(..., description="Unique name for the tool group") name: str = Field(..., description="Unique name for the tool group")
model_config = ConfigDict(extra="allow") model_config = ConfigDict(extra="allow", frozen=True)
class ToolConfig(BaseModel): class ToolConfig(BaseModel):
@@ -17,4 +17,4 @@ class ToolConfig(BaseModel):
..., ...,
description="Variable name of the tool provider(e.g. deerflow.sandbox.tools:bash_tool)", description="Variable name of the tool provider(e.g. deerflow.sandbox.tools:bash_tool)",
) )
model_config = ConfigDict(extra="allow") model_config = ConfigDict(extra="allow", frozen=True)
@@ -1,6 +1,6 @@
"""Configuration for deferred tool loading via tool_search.""" """Configuration for deferred tool loading via tool_search."""
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
class ToolSearchConfig(BaseModel): class ToolSearchConfig(BaseModel):
@@ -11,25 +11,9 @@ class ToolSearchConfig(BaseModel):
via the tool_search tool at runtime. via the tool_search tool at runtime.
""" """
model_config = ConfigDict(frozen=True)
enabled: bool = Field( enabled: bool = Field(
default=False, default=False,
description="Defer tools and enable tool_search", description="Defer tools and enable tool_search",
) )
_tool_search_config: ToolSearchConfig | None = None
def get_tool_search_config() -> ToolSearchConfig:
"""Get the tool search config, loading from AppConfig if needed."""
global _tool_search_config
if _tool_search_config is None:
_tool_search_config = ToolSearchConfig()
return _tool_search_config
def load_tool_search_config_from_dict(data: dict) -> ToolSearchConfig:
"""Load tool search config from a dict (called during AppConfig loading)."""
global _tool_search_config
_tool_search_config = ToolSearchConfig.model_validate(data)
return _tool_search_config
@@ -1,7 +1,7 @@
import os import os
import threading import threading
from pydantic import BaseModel, Field from pydantic import BaseModel, ConfigDict, Field
_config_lock = threading.Lock() _config_lock = threading.Lock()
@@ -9,6 +9,8 @@ _config_lock = threading.Lock()
class LangSmithTracingConfig(BaseModel): class LangSmithTracingConfig(BaseModel):
"""Configuration for LangSmith tracing.""" """Configuration for LangSmith tracing."""
model_config = ConfigDict(frozen=True)
enabled: bool = Field(...) enabled: bool = Field(...)
api_key: str | None = Field(...) api_key: str | None = Field(...)
project: str = Field(...) project: str = Field(...)
@@ -26,6 +28,8 @@ class LangSmithTracingConfig(BaseModel):
class LangfuseTracingConfig(BaseModel): class LangfuseTracingConfig(BaseModel):
"""Configuration for Langfuse tracing.""" """Configuration for Langfuse tracing."""
model_config = ConfigDict(frozen=True)
enabled: bool = Field(...) enabled: bool = Field(...)
public_key: str | None = Field(...) public_key: str | None = Field(...)
secret_key: str | None = Field(...) secret_key: str | None = Field(...)
@@ -50,6 +54,8 @@ class LangfuseTracingConfig(BaseModel):
class TracingConfig(BaseModel): class TracingConfig(BaseModel):
"""Tracing configuration for supported providers.""" """Tracing configuration for supported providers."""
model_config = ConfigDict(frozen=True)
langsmith: LangSmithTracingConfig = Field(...) langsmith: LangSmithTracingConfig = Field(...)
langfuse: LangfuseTracingConfig = Field(...) langfuse: LangfuseTracingConfig = Field(...)
@@ -2,7 +2,7 @@ import logging
from langchain.chat_models import BaseChatModel from langchain.chat_models import BaseChatModel
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.reflection import resolve_class from deerflow.reflection import resolve_class
from deerflow.tracing import build_tracing_callbacks from deerflow.tracing import build_tracing_callbacks
@@ -39,7 +39,7 @@ def create_chat_model(name: str | None = None, thinking_enabled: bool = False, *
Returns: Returns:
A chat model instance. A chat model instance.
""" """
config = get_app_config() config = AppConfig.current()
if name is None: if name is None:
name = config.models[0].name name = config.models[0].name
model_config = config.get_model_config(name) model_config = config.get_model_config(name)
@@ -21,6 +21,8 @@ import inspect
import logging import logging
from typing import Any, Literal from typing import Any, Literal
from deerflow.config.app_config import AppConfig
from deerflow.config.deer_flow_context import DeerFlowContext
from deerflow.runtime.serialization import serialize from deerflow.runtime.serialization import serialize
from deerflow.runtime.stream_bridge import StreamBridge from deerflow.runtime.stream_bridge import StreamBridge
@@ -98,17 +100,14 @@ async def run_agent(
# 3. Build the agent # 3. Build the agent
from langchain_core.runnables import RunnableConfig from langchain_core.runnables import RunnableConfig
from langgraph.runtime import Runtime
# Inject runtime context so middlewares can access thread_id # Construct typed context for the agent run.
# (langgraph-cli does this automatically; we must do it manually) # LangGraph's astream(context=...) injects this into Runtime.context
runtime = Runtime(context={"thread_id": thread_id}, store=store) # so middleware/tools can access it via resolve_context().
# If the caller already set a ``context`` key (LangGraph >= 0.6.0 deer_flow_context = DeerFlowContext(
# prefers it over ``configurable`` for thread-level data), make app_config=AppConfig.current(),
# sure ``thread_id`` is available there too. thread_id=thread_id,
if "context" in config and isinstance(config["context"], dict): )
config["context"].setdefault("thread_id", thread_id)
config.setdefault("configurable", {})["__pregel_runtime"] = runtime
runnable_config = RunnableConfig(**config) runnable_config = RunnableConfig(**config)
agent = agent_factory(config=runnable_config) agent = agent_factory(config=runnable_config)
@@ -155,7 +154,7 @@ async def run_agent(
if len(lg_modes) == 1 and not stream_subgraphs: if len(lg_modes) == 1 and not stream_subgraphs:
# Single mode, no subgraphs: astream yields raw chunks # Single mode, no subgraphs: astream yields raw chunks
single_mode = lg_modes[0] single_mode = lg_modes[0]
async for chunk in agent.astream(graph_input, config=runnable_config, stream_mode=single_mode): async for chunk in agent.astream(graph_input, config=runnable_config, context=deer_flow_context, stream_mode=single_mode):
if record.abort_event.is_set(): if record.abort_event.is_set():
logger.info("Run %s abort requested — stopping", run_id) logger.info("Run %s abort requested — stopping", run_id)
break break
@@ -166,6 +165,7 @@ async def run_agent(
async for item in agent.astream( async for item in agent.astream(
graph_input, graph_input,
config=runnable_config, config=runnable_config,
context=deer_flow_context,
stream_mode=lg_modes, stream_mode=lg_modes,
subgraphs=stream_subgraphs, subgraphs=stream_subgraphs,
): ):
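Outside the Gateway worker, DeerFlowClient-style callers inject the same typed context when invoking a compiled graph. A hedged sketch of that call shape; `agent` stands for any compiled LangGraph agent, and only the `context=` keyword and the DeerFlowContext fields are taken from this diff:

```python
from deerflow.config.app_config import AppConfig
from deerflow.config.deer_flow_context import DeerFlowContext

async def invoke_with_context(agent, graph_input: dict, thread_id: str):
    context = DeerFlowContext(
        app_config=AppConfig.current(),
        thread_id=thread_id,
        agent_name=None,  # optional; set when targeting a named custom agent
    )
    config = {"configurable": {"thread_id": thread_id}}
    # LangGraph >= 0.6 forwards context= into Runtime.context for middleware/tools.
    async for chunk in agent.astream(graph_input, config=config, context=context, stream_mode="updates"):
        yield chunk
```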
@@ -23,7 +23,7 @@ from collections.abc import AsyncIterator
from langgraph.store.base import BaseStore from langgraph.store.base import BaseStore
from deerflow.config.app_config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.runtime.store.provider import POSTGRES_CONN_REQUIRED, POSTGRES_STORE_INSTALL, SQLITE_STORE_INSTALL, ensure_sqlite_parent_dir, resolve_sqlite_conn_str from deerflow.runtime.store.provider import POSTGRES_CONN_REQUIRED, POSTGRES_STORE_INSTALL, SQLITE_STORE_INSTALL, ensure_sqlite_parent_dir, resolve_sqlite_conn_str
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -100,7 +100,7 @@ async def make_store() -> AsyncIterator[BaseStore]:
Yields an :class:`~langgraph.store.memory.InMemoryStore` when no Yields an :class:`~langgraph.store.memory.InMemoryStore` when no
``checkpointer`` section is configured (emits a WARNING in that case). ``checkpointer`` section is configured (emits a WARNING in that case).
""" """
config = get_app_config() config = AppConfig.current()
if config.checkpointer is None: if config.checkpointer is None:
from langgraph.store.memory import InMemoryStore from langgraph.store.memory import InMemoryStore
@@ -26,7 +26,7 @@ from collections.abc import Iterator
from langgraph.store.base import BaseStore from langgraph.store.base import BaseStore
from deerflow.config.app_config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.runtime.store._sqlite_utils import ensure_sqlite_parent_dir, resolve_sqlite_conn_str from deerflow.runtime.store._sqlite_utils import ensure_sqlite_parent_dir, resolve_sqlite_conn_str
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -115,19 +115,10 @@ def get_store() -> BaseStore:
if _store is not None: if _store is not None:
return _store return _store
# Lazily load app config, mirroring the checkpointer singleton pattern so try:
# that tests that set the global checkpointer config explicitly remain isolated. config = AppConfig.current().checkpointer
from deerflow.config.app_config import _app_config except (LookupError, FileNotFoundError):
from deerflow.config.checkpointer_config import get_checkpointer_config config = None
config = get_checkpointer_config()
if config is None and _app_config is None:
try:
get_app_config()
except FileNotFoundError:
pass
config = get_checkpointer_config()
if config is None: if config is None:
from langgraph.store.memory import InMemoryStore from langgraph.store.memory import InMemoryStore
@@ -176,7 +167,7 @@ def store_context() -> Iterator[BaseStore]:
Yields an :class:`~langgraph.store.memory.InMemoryStore` when no Yields an :class:`~langgraph.store.memory.InMemoryStore` when no
checkpointer is configured in *config.yaml*. checkpointer is configured in *config.yaml*.
""" """
config = get_app_config() config = AppConfig.current()
if config.checkpointer is None: if config.checkpointer is None:
from langgraph.store.memory import InMemoryStore from langgraph.store.memory import InMemoryStore
@@ -17,7 +17,7 @@ import contextlib
import logging import logging
from collections.abc import AsyncIterator from collections.abc import AsyncIterator
from deerflow.config.stream_bridge_config import get_stream_bridge_config from deerflow.config.app_config import AppConfig
from .base import StreamBridge from .base import StreamBridge
@@ -32,7 +32,7 @@ async def make_stream_bridge(config=None) -> AsyncIterator[StreamBridge]:
provided and nothing is set globally. provided and nothing is set globally.
""" """
if config is None: if config is None:
config = get_stream_bridge_config() config = AppConfig.current().stream_bridge
if config is None or config.type == "memory": if config is None or config.type == "memory":
from deerflow.runtime.stream_bridge.memory import MemoryStreamBridge from deerflow.runtime.stream_bridge.memory import MemoryStreamBridge
@@ -29,9 +29,9 @@ class LocalSandboxProvider(SandboxProvider):
# Map skills container path to local skills directory # Map skills container path to local skills directory
try: try:
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
config = get_app_config() config = AppConfig.current()
skills_path = config.skills.get_skills_path() skills_path = config.skills.get_skills_path()
container_path = config.skills.container_path container_path = config.skills.container_path
@@ -6,6 +6,7 @@ from langchain.agents.middleware import AgentMiddleware
from langgraph.runtime import Runtime from langgraph.runtime import Runtime
from deerflow.agents.thread_state import SandboxState, ThreadDataState from deerflow.agents.thread_state import SandboxState, ThreadDataState
from deerflow.config.deer_flow_context import DeerFlowContext
from deerflow.sandbox import get_sandbox_provider from deerflow.sandbox import get_sandbox_provider
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -49,15 +50,15 @@ class SandboxMiddleware(AgentMiddleware[SandboxMiddlewareState]):
return sandbox_id return sandbox_id
@override @override
def before_agent(self, state: SandboxMiddlewareState, runtime: Runtime) -> dict | None: def before_agent(self, state: SandboxMiddlewareState, runtime: Runtime[DeerFlowContext]) -> dict | None:
# Skip acquisition if lazy_init is enabled # Skip acquisition if lazy_init is enabled
if self._lazy_init: if self._lazy_init:
return super().before_agent(state, runtime) return super().before_agent(state, runtime)
# Eager initialization (original behavior) # Eager initialization (original behavior)
if "sandbox" not in state or state["sandbox"] is None: if "sandbox" not in state or state["sandbox"] is None:
thread_id = (runtime.context or {}).get("thread_id") thread_id = runtime.context.thread_id
if thread_id is None: if not thread_id:
return super().before_agent(state, runtime) return super().before_agent(state, runtime)
sandbox_id = self._acquire_sandbox(thread_id) sandbox_id = self._acquire_sandbox(thread_id)
logger.info(f"Assigned sandbox {sandbox_id} to thread {thread_id}") logger.info(f"Assigned sandbox {sandbox_id} to thread {thread_id}")
@@ -65,7 +66,7 @@ class SandboxMiddleware(AgentMiddleware[SandboxMiddlewareState]):
return super().before_agent(state, runtime) return super().before_agent(state, runtime)
@override @override
def after_agent(self, state: SandboxMiddlewareState, runtime: Runtime) -> dict | None: def after_agent(self, state: SandboxMiddlewareState, runtime: Runtime[DeerFlowContext]) -> dict | None:
sandbox = state.get("sandbox") sandbox = state.get("sandbox")
if sandbox is not None: if sandbox is not None:
sandbox_id = sandbox["sandbox_id"] sandbox_id = sandbox["sandbox_id"]
@@ -73,11 +74,5 @@ class SandboxMiddleware(AgentMiddleware[SandboxMiddlewareState]):
get_sandbox_provider().release(sandbox_id) get_sandbox_provider().release(sandbox_id)
return None return None
if (runtime.context or {}).get("sandbox_id") is not None:
sandbox_id = runtime.context.get("sandbox_id")
logger.info(f"Releasing sandbox {sandbox_id} from context")
get_sandbox_provider().release(sandbox_id)
return None
# No sandbox to release # No sandbox to release
return super().after_agent(state, runtime) return super().after_agent(state, runtime)
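Other middleware follow the same typing pattern as SandboxMiddleware: parameterize `Runtime` with `DeerFlowContext` and read fields off `runtime.context` directly. A minimal sketch; the middleware class itself is illustrative and not part of this diff:

```python
import logging

from langchain.agents.middleware import AgentMiddleware
from langgraph.runtime import Runtime

from deerflow.config.deer_flow_context import DeerFlowContext

logger = logging.getLogger(__name__)


class ThreadLoggingMiddleware(AgentMiddleware):
    """Illustrative middleware that logs which thread each run belongs to."""

    def before_agent(self, state: dict, runtime: Runtime[DeerFlowContext]) -> dict | None:
        # runtime.context is the typed DeerFlowContext injected at invoke time.
        logger.info("run starting for thread %s (agent=%s)", runtime.context.thread_id, runtime.context.agent_name)
        return None
```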
@@ -1,6 +1,6 @@
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.reflection import resolve_class from deerflow.reflection import resolve_class
from deerflow.sandbox.sandbox import Sandbox from deerflow.sandbox.sandbox import Sandbox
@@ -50,7 +50,7 @@ def get_sandbox_provider(**kwargs) -> SandboxProvider:
""" """
global _default_sandbox_provider global _default_sandbox_provider
if _default_sandbox_provider is None: if _default_sandbox_provider is None:
config = get_app_config() config = AppConfig.current()
cls = resolve_class(config.sandbox.use, SandboxProvider) cls = resolve_class(config.sandbox.use, SandboxProvider)
_default_sandbox_provider = cls(**kwargs) _default_sandbox_provider = cls(**kwargs)
return _default_sandbox_provider return _default_sandbox_provider
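Note: every consumer in this diff replaces `get_app_config()` with `AppConfig.current()`, and the tests further down publish the config via `AppConfig.init(...)` before reading it. A plausible sketch of that classmethod pair backed by a single `ContextVar`; the backing store and the error raised when `init()` was never called are assumptions; only the call shape comes from this diff.

```python
# Illustrative sketch, not the actual deerflow.config.app_config implementation.
from __future__ import annotations

from contextvars import ContextVar

from pydantic import BaseModel

_app_config_var: ContextVar[AppConfig | None] = ContextVar("deerflow_app_config", default=None)


class AppConfig(BaseModel):
    model_config = {"frozen": True}

    @classmethod
    def init(cls, config: AppConfig) -> None:
        # Publish the parsed, immutable config for the whole process.
        _app_config_var.set(config)

    @classmethod
    def current(cls) -> AppConfig:
        # Read the published config; raising here is an assumed behaviour.
        config = _app_config_var.get()
        if config is None:
            raise RuntimeError("AppConfig.init() was never called")
        return config
```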
@@ -1,6 +1,6 @@
"""Security helpers for sandbox capability gating.""" """Security helpers for sandbox capability gating."""
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
_LOCAL_SANDBOX_PROVIDER_MARKERS = ( _LOCAL_SANDBOX_PROVIDER_MARKERS = (
"deerflow.sandbox.local:LocalSandboxProvider", "deerflow.sandbox.local:LocalSandboxProvider",
@@ -23,7 +23,7 @@ LOCAL_BASH_SUBAGENT_DISABLED_MESSAGE = (
def uses_local_sandbox_provider(config=None) -> bool: def uses_local_sandbox_provider(config=None) -> bool:
"""Return True when the active sandbox provider is the host-local provider.""" """Return True when the active sandbox provider is the host-local provider."""
if config is None: if config is None:
config = get_app_config() config = AppConfig.current()
sandbox_cfg = getattr(config, "sandbox", None) sandbox_cfg = getattr(config, "sandbox", None)
sandbox_use = getattr(sandbox_cfg, "use", "") sandbox_use = getattr(sandbox_cfg, "use", "")
@@ -35,7 +35,7 @@ def uses_local_sandbox_provider(config=None) -> bool:
def is_host_bash_allowed(config=None) -> bool: def is_host_bash_allowed(config=None) -> bool:
"""Return whether host bash execution is explicitly allowed.""" """Return whether host bash execution is explicitly allowed."""
if config is None: if config is None:
config = get_app_config() config = AppConfig.current()
sandbox_cfg = getattr(config, "sandbox", None) sandbox_cfg = getattr(config, "sandbox", None)
if sandbox_cfg is None: if sandbox_cfg is None:
@@ -7,7 +7,7 @@ from langchain.tools import ToolRuntime, tool
from langgraph.typing import ContextT from langgraph.typing import ContextT
from deerflow.agents.thread_state import ThreadDataState, ThreadState from deerflow.agents.thread_state import ThreadDataState, ThreadState
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.config.paths import VIRTUAL_PATH_PREFIX from deerflow.config.paths import VIRTUAL_PATH_PREFIX
from deerflow.sandbox.exceptions import ( from deerflow.sandbox.exceptions import (
SandboxError, SandboxError,
@@ -50,9 +50,7 @@ def _get_skills_container_path() -> str:
if cached is not None: if cached is not None:
return cached return cached
try: try:
from deerflow.config import get_app_config value = AppConfig.current().skills.container_path
value = get_app_config().skills.container_path
_get_skills_container_path._cached = value # type: ignore[attr-defined] _get_skills_container_path._cached = value # type: ignore[attr-defined]
return value return value
except Exception: except Exception:
@@ -71,9 +69,7 @@ def _get_skills_host_path() -> str | None:
if cached is not None: if cached is not None:
return cached return cached
try: try:
from deerflow.config import get_app_config config = AppConfig.current()
config = get_app_config()
skills_path = config.skills.get_skills_path() skills_path = config.skills.get_skills_path()
if skills_path.exists(): if skills_path.exists():
value = str(skills_path) value = str(skills_path)
@@ -132,9 +128,7 @@ def _get_custom_mounts():
try: try:
from pathlib import Path from pathlib import Path
from deerflow.config import get_app_config config = AppConfig.current()
config = get_app_config()
mounts = [] mounts = []
if config.sandbox and config.sandbox.mounts: if config.sandbox and config.sandbox.mounts:
# Only include mounts whose host_path exists, consistent with # Only include mounts whose host_path exists, consistent with
@@ -274,9 +268,7 @@ def _get_mcp_allowed_paths() -> list[str]:
"""Get the list of allowed paths from MCP config for file system server.""" """Get the list of allowed paths from MCP config for file system server."""
allowed_paths = [] allowed_paths = []
try: try:
from deerflow.config.extensions_config import get_extensions_config extensions_config = AppConfig.current().extensions
extensions_config = get_extensions_config()
for _, server in extensions_config.mcp_servers.items(): for _, server in extensions_config.mcp_servers.items():
if not server.enabled: if not server.enabled:
@@ -301,7 +293,7 @@ def _get_mcp_allowed_paths() -> list[str]:
def _get_tool_config_int(name: str, key: str, default: int) -> int: def _get_tool_config_int(name: str, key: str, default: int) -> int:
try: try:
tool_config = get_app_config().get_tool_config(name) tool_config = AppConfig.current().get_tool_config(name)
if tool_config is not None and key in tool_config.model_extra: if tool_config is not None and key in tool_config.model_extra:
value = tool_config.model_extra.get(key) value = tool_config.model_extra.get(key)
if isinstance(value, int): if isinstance(value, int):
@@ -809,8 +801,6 @@ def sandbox_from_runtime(runtime: ToolRuntime[ContextT, ThreadState] | None = No
if sandbox is None: if sandbox is None:
raise SandboxNotFoundError(f"Sandbox with ID '{sandbox_id}' not found", sandbox_id=sandbox_id) raise SandboxNotFoundError(f"Sandbox with ID '{sandbox_id}' not found", sandbox_id=sandbox_id)
if runtime.context is not None:
runtime.context["sandbox_id"] = sandbox_id # Ensure sandbox_id is in context for downstream use
return sandbox return sandbox
@@ -845,16 +835,12 @@ def ensure_sandbox_initialized(runtime: ToolRuntime[ContextT, ThreadState] | Non
if sandbox_id is not None: if sandbox_id is not None:
sandbox = get_sandbox_provider().get(sandbox_id) sandbox = get_sandbox_provider().get(sandbox_id)
if sandbox is not None: if sandbox is not None:
if runtime.context is not None:
runtime.context["sandbox_id"] = sandbox_id # Ensure sandbox_id is in context for releasing in after_agent
return sandbox return sandbox
# Sandbox was released, fall through to acquire new one # Sandbox was released, fall through to acquire new one
# Lazy acquisition: get thread_id and acquire sandbox # Lazy acquisition: get thread_id and acquire sandbox
thread_id = runtime.context.get("thread_id") if runtime.context else None thread_id = runtime.context.thread_id
if thread_id is None: if not thread_id:
thread_id = runtime.config.get("configurable", {}).get("thread_id") if runtime.config else None
if thread_id is None:
raise SandboxRuntimeError("Thread ID not available in runtime context") raise SandboxRuntimeError("Thread ID not available in runtime context")
provider = get_sandbox_provider() provider = get_sandbox_provider()
@@ -868,8 +854,6 @@ def ensure_sandbox_initialized(runtime: ToolRuntime[ContextT, ThreadState] | Non
if sandbox is None: if sandbox is None:
raise SandboxNotFoundError("Sandbox not found after acquisition", sandbox_id=sandbox_id) raise SandboxNotFoundError("Sandbox not found after acquisition", sandbox_id=sandbox_id)
if runtime.context is not None:
runtime.context["sandbox_id"] = sandbox_id # Ensure sandbox_id is in context for releasing in after_agent
return sandbox return sandbox
@@ -1011,18 +995,14 @@ def bash_tool(runtime: ToolRuntime[ContextT, ThreadState], description: str, com
command = _apply_cwd_prefix(command, thread_data) command = _apply_cwd_prefix(command, thread_data)
output = sandbox.execute_command(command) output = sandbox.execute_command(command)
try: try:
from deerflow.config.app_config import get_app_config sandbox_cfg = AppConfig.current().sandbox
sandbox_cfg = get_app_config().sandbox
max_chars = sandbox_cfg.bash_output_max_chars if sandbox_cfg else 20000 max_chars = sandbox_cfg.bash_output_max_chars if sandbox_cfg else 20000
except Exception: except Exception:
max_chars = 20000 max_chars = 20000
return _truncate_bash_output(mask_local_paths_in_output(output, thread_data), max_chars) return _truncate_bash_output(mask_local_paths_in_output(output, thread_data), max_chars)
ensure_thread_directories_exist(runtime) ensure_thread_directories_exist(runtime)
try: try:
from deerflow.config.app_config import get_app_config sandbox_cfg = AppConfig.current().sandbox
sandbox_cfg = get_app_config().sandbox
max_chars = sandbox_cfg.bash_output_max_chars if sandbox_cfg else 20000 max_chars = sandbox_cfg.bash_output_max_chars if sandbox_cfg else 20000
except Exception: except Exception:
max_chars = 20000 max_chars = 20000
@@ -1062,9 +1042,7 @@ def ls_tool(runtime: ToolRuntime[ContextT, ThreadState], description: str, path:
return "(empty)" return "(empty)"
output = "\n".join(children) output = "\n".join(children)
try: try:
from deerflow.config.app_config import get_app_config sandbox_cfg = AppConfig.current().sandbox
sandbox_cfg = get_app_config().sandbox
max_chars = sandbox_cfg.ls_output_max_chars if sandbox_cfg else 20000 max_chars = sandbox_cfg.ls_output_max_chars if sandbox_cfg else 20000
except Exception: except Exception:
max_chars = 20000 max_chars = 20000
@@ -1235,9 +1213,7 @@ def read_file_tool(
if start_line is not None and end_line is not None: if start_line is not None and end_line is not None:
content = "\n".join(content.splitlines()[start_line - 1 : end_line]) content = "\n".join(content.splitlines()[start_line - 1 : end_line])
try: try:
from deerflow.config.app_config import get_app_config sandbox_cfg = AppConfig.current().sandbox
sandbox_cfg = get_app_config().sandbox
max_chars = sandbox_cfg.read_file_output_max_chars if sandbox_cfg else 50000 max_chars = sandbox_cfg.read_file_output_max_chars if sandbox_cfg else 50000
except Exception: except Exception:
max_chars = 50000 max_chars = 50000
@@ -42,9 +42,9 @@ def load_skills(skills_path: Path | None = None, use_config: bool = True, enable
if skills_path is None: if skills_path is None:
if use_config: if use_config:
try: try:
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
config = get_app_config() config = AppConfig.current()
skills_path = config.skills.get_skills_path() skills_path = config.skills.get_skills_path()
except Exception: except Exception:
# Fallback to default if config fails # Fallback to default if config fails
@@ -9,7 +9,7 @@ from datetime import UTC, datetime
from pathlib import Path from pathlib import Path
from typing import Any from typing import Any
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.skills.loader import load_skills from deerflow.skills.loader import load_skills
from deerflow.skills.validation import _validate_skill_frontmatter from deerflow.skills.validation import _validate_skill_frontmatter
@@ -21,7 +21,7 @@ _SKILL_NAME_PATTERN = re.compile(r"^[a-z0-9]+(?:-[a-z0-9]+)*$")
def get_skills_root_dir() -> Path: def get_skills_root_dir() -> Path:
return get_app_config().skills.get_skills_path() return AppConfig.current().skills.get_skills_path()
def get_public_skills_dir() -> Path: def get_public_skills_dir() -> Path:
@@ -7,7 +7,7 @@ import logging
import re import re
from dataclasses import dataclass from dataclasses import dataclass
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.models import create_chat_model from deerflow.models import create_chat_model
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -47,7 +47,7 @@ async def scan_skill_content(content: str, *, executable: bool = False, location
prompt = f"Location: {location}\nExecutable: {str(executable).lower()}\n\nReview this content:\n-----\n{content}\n-----" prompt = f"Location: {location}\nExecutable: {str(executable).lower()}\n\nReview this content:\n-----\n{content}\n-----"
try: try:
config = get_app_config() config = AppConfig.current()
model_name = config.skill_evolution.moderation_model_name model_name = config.skill_evolution.moderation_model_name
model = create_chat_model(name=model_name, thinking_enabled=False) if model_name else create_chat_model(thinking_enabled=False) model = create_chat_model(name=model_name, thinking_enabled=False) if model_name else create_chat_model(thinking_enabled=False)
response = await model.ainvoke( response = await model.ainvoke(
@@ -24,9 +24,9 @@ def get_subagent_config(name: str) -> SubagentConfig | None:
return None return None
# Apply timeout override from config.yaml (lazy import to avoid circular deps) # Apply timeout override from config.yaml (lazy import to avoid circular deps)
from deerflow.config.subagents_config import get_subagents_app_config from deerflow.config.app_config import AppConfig
app_config = get_subagents_app_config() app_config = AppConfig.current().subagents
effective_timeout = app_config.get_timeout_for(name) effective_timeout = app_config.get_timeout_for(name)
effective_max_turns = app_config.get_max_turns_for(name, config.max_turns) effective_max_turns = app_config.get_max_turns_for(name, config.max_turns)
@@ -33,7 +33,7 @@ def _normalize_presented_filepath(
if runtime.state is None: if runtime.state is None:
raise ValueError("Thread runtime state is not available") raise ValueError("Thread runtime state is not available")
thread_id = runtime.context.get("thread_id") if runtime.context else None thread_id = runtime.context.thread_id
if not thread_id: if not thread_id:
raise ValueError("Thread ID is not available in runtime context") raise ValueError("Thread ID is not available in runtime context")
@@ -24,7 +24,7 @@ def setup_agent(
description: One-line description of what the agent does. description: One-line description of what the agent does.
""" """
agent_name: str | None = runtime.context.get("agent_name") if runtime.context else None agent_name: str | None = runtime.context.agent_name
try: try:
paths = get_paths() paths = get_paths()
@@ -92,9 +92,7 @@ async def task_tool(
if runtime is not None: if runtime is not None:
sandbox_state = runtime.state.get("sandbox") sandbox_state = runtime.state.get("sandbox")
thread_data = runtime.state.get("thread_data") thread_data = runtime.state.get("thread_data")
thread_id = runtime.context.get("thread_id") if runtime.context else None thread_id = runtime.context.thread_id
if thread_id is None:
thread_id = runtime.config.get("configurable", {}).get("thread_id")
# Try to get parent model from configurable # Try to get parent model from configurable
metadata = runtime.config.get("metadata", {}) metadata = runtime.config.get("metadata", {})
@@ -45,9 +45,7 @@ def _get_lock(name: str) -> asyncio.Lock:
def _get_thread_id(runtime: ToolRuntime[ContextT, ThreadState] | None) -> str | None: def _get_thread_id(runtime: ToolRuntime[ContextT, ThreadState] | None) -> str | None:
if runtime is None: if runtime is None:
return None return None
if runtime.context and runtime.context.get("thread_id"): return runtime.context.thread_id or None
return runtime.context.get("thread_id")
return runtime.config.get("configurable", {}).get("thread_id")
def _history_record(*, action: str, file_path: str, prev_content: str | None, new_content: str | None, thread_id: str | None, scanner: dict[str, Any]) -> dict[str, Any]: def _history_record(*, action: str, file_path: str, prev_content: str | None, new_content: str | None, thread_id: str | None, scanner: dict[str, Any]) -> dict[str, Any]:
@@ -2,7 +2,7 @@ import logging
from langchain.tools import BaseTool from langchain.tools import BaseTool
from deerflow.config import get_app_config from deerflow.config.app_config import AppConfig
from deerflow.reflection import resolve_variable from deerflow.reflection import resolve_variable
from deerflow.sandbox.security import is_host_bash_allowed from deerflow.sandbox.security import is_host_bash_allowed
from deerflow.tools.builtins import ask_clarification_tool, present_file_tool, task_tool, view_image_tool from deerflow.tools.builtins import ask_clarification_tool, present_file_tool, task_tool, view_image_tool
@@ -52,7 +52,7 @@ def get_available_tools(
Returns: Returns:
List of available tools. List of available tools.
""" """
config = get_app_config() config = AppConfig.current()
tool_configs = [tool for tool in config.tools if groups is None or tool.group in groups] tool_configs = [tool for tool in config.tools if groups is None or tool.group in groups]
# Do not expose host bash by default when LocalSandboxProvider is active. # Do not expose host bash by default when LocalSandboxProvider is active.
@@ -123,10 +123,9 @@ def get_available_tools(
# Add invoke_acp_agent tool if any ACP agents are configured # Add invoke_acp_agent tool if any ACP agents are configured
acp_tools: list[BaseTool] = [] acp_tools: list[BaseTool] = []
try: try:
from deerflow.config.acp_config import get_acp_agents
from deerflow.tools.builtins.invoke_acp_agent_tool import build_invoke_acp_agent_tool from deerflow.tools.builtins.invoke_acp_agent_tool import build_invoke_acp_agent_tool
acp_agents = get_acp_agents() acp_agents = AppConfig.current().acp_agents
if acp_agents: if acp_agents:
acp_tools.append(build_invoke_acp_agent_tool(acp_agents)) acp_tools.append(build_invoke_acp_agent_tool(acp_agents))
logger.info(f"Including invoke_acp_agent tool ({len(acp_agents)} agent(s): {list(acp_agents.keys())})") logger.info(f"Including invoke_acp_agent tool ({len(acp_agents)} agent(s): {list(acp_agents.keys())})")
@@ -294,9 +294,9 @@ def _get_pdf_converter() -> str:
fall through to unexpected behaviour. fall through to unexpected behaviour.
""" """
try: try:
from deerflow.config.app_config import get_app_config from deerflow.config.app_config import AppConfig
cfg = get_app_config() cfg = AppConfig.current()
uploads_cfg = getattr(cfg, "uploads", None) uploads_cfg = getattr(cfg, "uploads", None)
if uploads_cfg is not None: if uploads_cfg is not None:
raw = str(getattr(uploads_cfg, "pdf_converter", "auto")).strip().lower() raw = str(getattr(uploads_cfg, "pdf_converter", "auto")).strip().lower()
+27 -37
@@ -6,17 +6,20 @@ import pytest
import yaml import yaml
from pydantic import ValidationError from pydantic import ValidationError
from deerflow.config.acp_config import ACPAgentConfig, get_acp_agents, load_acp_config_from_dict from deerflow.config.acp_config import ACPAgentConfig
from deerflow.config.app_config import AppConfig from deerflow.config.app_config import AppConfig
from deerflow.config.sandbox_config import SandboxConfig
def setup_function(): def _make_config(acp_agents: dict | None = None) -> AppConfig:
"""Reset ACP config before each test.""" return AppConfig(
load_acp_config_from_dict({}) sandbox=SandboxConfig(use="test"),
acp_agents={name: ACPAgentConfig(**cfg) for name, cfg in (acp_agents or {}).items()},
)
def test_load_acp_config_sets_agents(): def test_acp_agents_via_app_config():
load_acp_config_from_dict( cfg = _make_config(
{ {
"claude_code": { "claude_code": {
"command": "claude-code-acp", "command": "claude-code-acp",
@@ -26,39 +29,33 @@ def test_load_acp_config_sets_agents():
} }
} }
) )
agents = get_acp_agents() agents = cfg.acp_agents
assert "claude_code" in agents assert "claude_code" in agents
assert agents["claude_code"].command == "claude-code-acp" assert agents["claude_code"].command == "claude-code-acp"
assert agents["claude_code"].description == "Claude Code for coding tasks" assert agents["claude_code"].description == "Claude Code for coding tasks"
assert agents["claude_code"].model is None assert agents["claude_code"].model is None
def test_load_acp_config_multiple_agents(): def test_multiple_agents():
load_acp_config_from_dict( cfg = _make_config(
{ {
"claude_code": {"command": "claude-code-acp", "args": [], "description": "Claude Code"}, "claude_code": {"command": "claude-code-acp", "args": [], "description": "Claude Code"},
"codex": {"command": "codex-acp", "args": ["--flag"], "description": "Codex CLI"}, "codex": {"command": "codex-acp", "args": ["--flag"], "description": "Codex CLI"},
} }
) )
agents = get_acp_agents() agents = cfg.acp_agents
assert len(agents) == 2 assert len(agents) == 2
assert agents["codex"].args == ["--flag"] assert agents["codex"].args == ["--flag"]
def test_load_acp_config_empty_clears_agents(): def test_empty_acp_agents():
load_acp_config_from_dict({"agent": {"command": "cmd", "args": [], "description": "desc"}}) cfg = _make_config({})
assert len(get_acp_agents()) == 1 assert cfg.acp_agents == {}
load_acp_config_from_dict({})
assert len(get_acp_agents()) == 0
def test_load_acp_config_none_clears_agents(): def test_default_acp_agents_empty():
load_acp_config_from_dict({"agent": {"command": "cmd", "args": [], "description": "desc"}}) cfg = AppConfig(sandbox=SandboxConfig(use="test"))
assert len(get_acp_agents()) == 1 assert cfg.acp_agents == {}
load_acp_config_from_dict(None)
assert get_acp_agents() == {}
def test_acp_agent_config_defaults(): def test_acp_agent_config_defaults():
@@ -79,8 +76,8 @@ def test_acp_agent_config_env_default_is_empty():
assert cfg.env == {} assert cfg.env == {}
def test_load_acp_config_preserves_env(): def test_acp_agent_preserves_env():
load_acp_config_from_dict( cfg = _make_config(
{ {
"codex": { "codex": {
"command": "codex-acp", "command": "codex-acp",
@@ -90,8 +87,7 @@ def test_load_acp_config_preserves_env():
} }
} }
) )
cfg = get_acp_agents()["codex"] assert cfg.acp_agents["codex"].env == {"OPENAI_API_KEY": "$OPENAI_API_KEY", "FOO": "bar"}
assert cfg.env == {"OPENAI_API_KEY": "$OPENAI_API_KEY", "FOO": "bar"}
def test_acp_agent_config_with_model(): def test_acp_agent_config_with_model():
@@ -115,13 +111,7 @@ def test_acp_agent_config_missing_description_raises():
ACPAgentConfig(command="my-agent") ACPAgentConfig(command="my-agent")
def test_get_acp_agents_returns_empty_by_default(): def test_app_config_from_file_with_acp_agents(tmp_path, monkeypatch):
"""After clearing, should return empty dict."""
load_acp_config_from_dict({})
assert get_acp_agents() == {}
def test_app_config_reload_without_acp_agents_clears_previous_state(tmp_path, monkeypatch):
config_path = tmp_path / "config.yaml" config_path = tmp_path / "config.yaml"
extensions_path = tmp_path / "extensions_config.json" extensions_path = tmp_path / "extensions_config.json"
extensions_path.write_text(json.dumps({"mcpServers": {}, "skills": {}}), encoding="utf-8") extensions_path.write_text(json.dumps({"mcpServers": {}, "skills": {}}), encoding="utf-8")
@@ -157,9 +147,9 @@ def test_app_config_reload_without_acp_agents_clears_previous_state(tmp_path, mo
monkeypatch.setenv("DEER_FLOW_EXTENSIONS_CONFIG_PATH", str(extensions_path)) monkeypatch.setenv("DEER_FLOW_EXTENSIONS_CONFIG_PATH", str(extensions_path))
config_path.write_text(yaml.safe_dump(config_with_acp), encoding="utf-8") config_path.write_text(yaml.safe_dump(config_with_acp), encoding="utf-8")
AppConfig.from_file(str(config_path)) app = AppConfig.from_file(str(config_path))
assert set(get_acp_agents()) == {"codex"} assert set(app.acp_agents) == {"codex"}
config_path.write_text(yaml.safe_dump(config_without_acp), encoding="utf-8") config_path.write_text(yaml.safe_dump(config_without_acp), encoding="utf-8")
AppConfig.from_file(str(config_path)) app = AppConfig.from_file(str(config_path))
assert get_acp_agents() == {} assert app.acp_agents == {}
+43 -33
@@ -1,12 +1,11 @@
from __future__ import annotations from __future__ import annotations
import json import json
import os
from pathlib import Path from pathlib import Path
import yaml import yaml
from deerflow.config.app_config import get_app_config, reset_app_config from deerflow.config.app_config import AppConfig
def _write_config(path: Path, *, model_name: str, supports_thinking: bool) -> None: def _write_config(path: Path, *, model_name: str, supports_thinking: bool) -> None:
@@ -32,50 +31,61 @@ def _write_extensions_config(path: Path) -> None:
path.write_text(json.dumps({"mcpServers": {}, "skills": {}}), encoding="utf-8") path.write_text(json.dumps({"mcpServers": {}, "skills": {}}), encoding="utf-8")
def test_get_app_config_reloads_when_file_changes(tmp_path, monkeypatch): def test_init_then_get(tmp_path, monkeypatch):
config_path = tmp_path / "config.yaml" config_path = tmp_path / "config.yaml"
extensions_path = tmp_path / "extensions_config.json" extensions_path = tmp_path / "extensions_config.json"
_write_extensions_config(extensions_path) _write_extensions_config(extensions_path)
_write_config(config_path, model_name="first-model", supports_thinking=False) _write_config(config_path, model_name="test-model", supports_thinking=False)
monkeypatch.setenv("DEER_FLOW_CONFIG_PATH", str(config_path)) monkeypatch.setenv("DEER_FLOW_CONFIG_PATH", str(config_path))
monkeypatch.setenv("DEER_FLOW_EXTENSIONS_CONFIG_PATH", str(extensions_path)) monkeypatch.setenv("DEER_FLOW_EXTENSIONS_CONFIG_PATH", str(extensions_path))
reset_app_config()
try: config = AppConfig.from_file(str(config_path))
initial = get_app_config() AppConfig.init(config)
assert initial.models[0].supports_thinking is False
_write_config(config_path, model_name="first-model", supports_thinking=True) result = AppConfig.current()
next_mtime = config_path.stat().st_mtime + 5 assert result is config
os.utime(config_path, (next_mtime, next_mtime)) assert result.models[0].name == "test-model"
reloaded = get_app_config()
assert reloaded.models[0].supports_thinking is True
assert reloaded is not initial
finally:
reset_app_config()
def test_get_app_config_reloads_when_config_path_changes(tmp_path, monkeypatch): def test_init_replaces_previous(tmp_path, monkeypatch):
config_a = tmp_path / "config-a.yaml" config_path = tmp_path / "config.yaml"
config_b = tmp_path / "config-b.yaml"
extensions_path = tmp_path / "extensions_config.json" extensions_path = tmp_path / "extensions_config.json"
_write_extensions_config(extensions_path) _write_extensions_config(extensions_path)
_write_config(config_a, model_name="model-a", supports_thinking=False) _write_config(config_path, model_name="model-a", supports_thinking=False)
_write_config(config_b, model_name="model-b", supports_thinking=True)
monkeypatch.setenv("DEER_FLOW_CONFIG_PATH", str(config_path))
monkeypatch.setenv("DEER_FLOW_EXTENSIONS_CONFIG_PATH", str(extensions_path)) monkeypatch.setenv("DEER_FLOW_EXTENSIONS_CONFIG_PATH", str(extensions_path))
monkeypatch.setenv("DEER_FLOW_CONFIG_PATH", str(config_a))
reset_app_config()
try: config_a = AppConfig.from_file(str(config_path))
first = get_app_config() AppConfig.init(config_a)
assert first.models[0].name == "model-a" assert AppConfig.current().models[0].name == "model-a"
monkeypatch.setenv("DEER_FLOW_CONFIG_PATH", str(config_b)) _write_config(config_path, model_name="model-b", supports_thinking=True)
second = get_app_config() config_b = AppConfig.from_file(str(config_path))
assert second.models[0].name == "model-b" AppConfig.init(config_b)
assert second is not first assert AppConfig.current().models[0].name == "model-b"
finally: assert AppConfig.current() is config_b
reset_app_config()
def test_config_version_check(tmp_path, monkeypatch):
config_path = tmp_path / "config.yaml"
extensions_path = tmp_path / "extensions_config.json"
_write_extensions_config(extensions_path)
config_path.write_text(
yaml.safe_dump(
{
"config_version": 1,
"sandbox": {"use": "deerflow.sandbox.local:LocalSandboxProvider"},
"models": [],
}
),
encoding="utf-8",
)
monkeypatch.setenv("DEER_FLOW_CONFIG_PATH", str(config_path))
monkeypatch.setenv("DEER_FLOW_EXTENSIONS_CONFIG_PATH", str(extensions_path))
config = AppConfig.from_file(str(config_path))
assert config is not None
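Note: the two rewritten tests above pin down the intended lifecycle: parse with `from_file()`, publish with `init()`, read with `current()`, and repeat the parse/publish pair for a deliberate update. A short usage sketch follows; the config path and the idea of running this at process startup are illustrative, not taken from the diff.

```python
# Usage sketch based on the tests above; "config.yaml" and the startup framing
# are illustrative assumptions.
from deerflow.config.app_config import AppConfig

config = AppConfig.from_file("config.yaml")   # returns a new AppConfig, no module globals touched
AppConfig.init(config)                        # publish once

assert AppConfig.current() is config          # consumers all see the same frozen object

# Deliberate update: re-parse and re-publish explicitly.
AppConfig.init(AppConfig.from_file("config.yaml"))
```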
+68 -121
@@ -5,25 +5,21 @@ from unittest.mock import AsyncMock, MagicMock, patch
import pytest import pytest
import deerflow.config.app_config as app_config_module
from deerflow.agents.checkpointer import get_checkpointer, reset_checkpointer from deerflow.agents.checkpointer import get_checkpointer, reset_checkpointer
from deerflow.config.checkpointer_config import ( from deerflow.config.app_config import AppConfig
CheckpointerConfig, from deerflow.config.checkpointer_config import CheckpointerConfig
get_checkpointer_config, from deerflow.config.sandbox_config import SandboxConfig
load_checkpointer_config_from_dict,
set_checkpointer_config,
) def _make_config(checkpointer: CheckpointerConfig | None = None) -> AppConfig:
return AppConfig(sandbox=SandboxConfig(use="test"), checkpointer=checkpointer)
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
def reset_state(): def reset_state():
"""Reset singleton state before each test.""" """Reset singleton state before each test."""
app_config_module._app_config = None
set_checkpointer_config(None)
reset_checkpointer() reset_checkpointer()
yield yield
app_config_module._app_config = None
set_checkpointer_config(None)
reset_checkpointer() reset_checkpointer()
@@ -33,24 +29,18 @@ def reset_state():
class TestCheckpointerConfig: class TestCheckpointerConfig:
def test_load_memory_config(self): def test_memory_config(self):
load_checkpointer_config_from_dict({"type": "memory"}) config = CheckpointerConfig(type="memory")
config = get_checkpointer_config()
assert config is not None
assert config.type == "memory" assert config.type == "memory"
assert config.connection_string is None assert config.connection_string is None
def test_load_sqlite_config(self): def test_sqlite_config(self):
load_checkpointer_config_from_dict({"type": "sqlite", "connection_string": "/tmp/test.db"}) config = CheckpointerConfig(type="sqlite", connection_string="/tmp/test.db")
config = get_checkpointer_config()
assert config is not None
assert config.type == "sqlite" assert config.type == "sqlite"
assert config.connection_string == "/tmp/test.db" assert config.connection_string == "/tmp/test.db"
def test_load_postgres_config(self): def test_postgres_config(self):
load_checkpointer_config_from_dict({"type": "postgres", "connection_string": "postgresql://localhost/db"}) config = CheckpointerConfig(type="postgres", connection_string="postgresql://localhost/db")
config = get_checkpointer_config()
assert config is not None
assert config.type == "postgres" assert config.type == "postgres"
assert config.connection_string == "postgresql://localhost/db" assert config.connection_string == "postgresql://localhost/db"
@@ -58,14 +48,9 @@ class TestCheckpointerConfig:
config = CheckpointerConfig(type="memory") config = CheckpointerConfig(type="memory")
assert config.connection_string is None assert config.connection_string is None
def test_set_config_to_none(self):
load_checkpointer_config_from_dict({"type": "memory"})
set_checkpointer_config(None)
assert get_checkpointer_config() is None
def test_invalid_type_raises(self): def test_invalid_type_raises(self):
with pytest.raises(Exception): with pytest.raises(Exception):
load_checkpointer_config_from_dict({"type": "unknown"}) CheckpointerConfig(type="unknown")
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@@ -78,58 +63,78 @@ class TestGetCheckpointer:
"""get_checkpointer should return InMemorySaver when not configured.""" """get_checkpointer should return InMemorySaver when not configured."""
from langgraph.checkpoint.memory import InMemorySaver from langgraph.checkpoint.memory import InMemorySaver
with patch("deerflow.agents.checkpointer.provider.get_app_config", side_effect=FileNotFoundError): with patch.object(AppConfig, "current", return_value=_make_config()):
cp = get_checkpointer()
assert cp is not None
assert isinstance(cp, InMemorySaver)
def test_returns_in_memory_saver_when_config_not_found(self):
from langgraph.checkpoint.memory import InMemorySaver
with patch.object(AppConfig, "current", side_effect=FileNotFoundError):
cp = get_checkpointer() cp = get_checkpointer()
assert cp is not None assert cp is not None
assert isinstance(cp, InMemorySaver) assert isinstance(cp, InMemorySaver)
def test_memory_returns_in_memory_saver(self): def test_memory_returns_in_memory_saver(self):
load_checkpointer_config_from_dict({"type": "memory"})
from langgraph.checkpoint.memory import InMemorySaver from langgraph.checkpoint.memory import InMemorySaver
cp = get_checkpointer() cfg = _make_config(CheckpointerConfig(type="memory"))
with patch.object(AppConfig, "current", return_value=cfg):
cp = get_checkpointer()
assert isinstance(cp, InMemorySaver) assert isinstance(cp, InMemorySaver)
def test_memory_singleton(self): def test_memory_singleton(self):
load_checkpointer_config_from_dict({"type": "memory"}) cfg = _make_config(CheckpointerConfig(type="memory"))
cp1 = get_checkpointer() with patch.object(AppConfig, "current", return_value=cfg):
cp2 = get_checkpointer() cp1 = get_checkpointer()
cp2 = get_checkpointer()
assert cp1 is cp2 assert cp1 is cp2
def test_reset_clears_singleton(self): def test_reset_clears_singleton(self):
load_checkpointer_config_from_dict({"type": "memory"}) cfg = _make_config(CheckpointerConfig(type="memory"))
cp1 = get_checkpointer() with patch.object(AppConfig, "current", return_value=cfg):
reset_checkpointer() cp1 = get_checkpointer()
cp2 = get_checkpointer() reset_checkpointer()
cp2 = get_checkpointer()
assert cp1 is not cp2 assert cp1 is not cp2
def test_sqlite_raises_when_package_missing(self): def test_sqlite_raises_when_package_missing(self):
load_checkpointer_config_from_dict({"type": "sqlite", "connection_string": "/tmp/test.db"}) cfg = _make_config(CheckpointerConfig(type="sqlite", connection_string="/tmp/test.db"))
with patch.dict(sys.modules, {"langgraph.checkpoint.sqlite": None}): with (
patch.object(AppConfig, "current", return_value=cfg),
patch.dict(sys.modules, {"langgraph.checkpoint.sqlite": None}),
):
reset_checkpointer() reset_checkpointer()
with pytest.raises(ImportError, match="langgraph-checkpoint-sqlite"): with pytest.raises(ImportError, match="langgraph-checkpoint-sqlite"):
get_checkpointer() get_checkpointer()
def test_postgres_raises_when_package_missing(self): def test_postgres_raises_when_package_missing(self):
load_checkpointer_config_from_dict({"type": "postgres", "connection_string": "postgresql://localhost/db"}) cfg = _make_config(CheckpointerConfig(type="postgres", connection_string="postgresql://localhost/db"))
with patch.dict(sys.modules, {"langgraph.checkpoint.postgres": None}): with (
patch.object(AppConfig, "current", return_value=cfg),
patch.dict(sys.modules, {"langgraph.checkpoint.postgres": None}),
):
reset_checkpointer() reset_checkpointer()
with pytest.raises(ImportError, match="langgraph-checkpoint-postgres"): with pytest.raises(ImportError, match="langgraph-checkpoint-postgres"):
get_checkpointer() get_checkpointer()
def test_postgres_raises_when_connection_string_missing(self): def test_postgres_raises_when_connection_string_missing(self):
load_checkpointer_config_from_dict({"type": "postgres"}) cfg = _make_config(CheckpointerConfig(type="postgres"))
mock_saver = MagicMock() mock_saver = MagicMock()
mock_module = MagicMock() mock_module = MagicMock()
mock_module.PostgresSaver = mock_saver mock_module.PostgresSaver = mock_saver
with patch.dict(sys.modules, {"langgraph.checkpoint.postgres": mock_module}): with (
patch.object(AppConfig, "current", return_value=cfg),
patch.dict(sys.modules, {"langgraph.checkpoint.postgres": mock_module}),
):
reset_checkpointer() reset_checkpointer()
with pytest.raises(ValueError, match="connection_string is required"): with pytest.raises(ValueError, match="connection_string is required"):
get_checkpointer() get_checkpointer()
def test_sqlite_creates_saver(self): def test_sqlite_creates_saver(self):
"""SQLite checkpointer is created when package is available.""" """SQLite checkpointer is created when package is available."""
load_checkpointer_config_from_dict({"type": "sqlite", "connection_string": "/tmp/test.db"}) cfg = _make_config(CheckpointerConfig(type="sqlite", connection_string="/tmp/test.db"))
mock_saver_instance = MagicMock() mock_saver_instance = MagicMock()
mock_cm = MagicMock() mock_cm = MagicMock()
@@ -142,7 +147,10 @@ class TestGetCheckpointer:
mock_module = MagicMock() mock_module = MagicMock()
mock_module.SqliteSaver = mock_saver_cls mock_module.SqliteSaver = mock_saver_cls
with patch.dict(sys.modules, {"langgraph.checkpoint.sqlite": mock_module}): with (
patch.object(AppConfig, "current", return_value=cfg),
patch.dict(sys.modules, {"langgraph.checkpoint.sqlite": mock_module}),
):
reset_checkpointer() reset_checkpointer()
cp = get_checkpointer() cp = get_checkpointer()
@@ -152,7 +160,7 @@ class TestGetCheckpointer:
def test_postgres_creates_saver(self): def test_postgres_creates_saver(self):
"""Postgres checkpointer is created when packages are available.""" """Postgres checkpointer is created when packages are available."""
load_checkpointer_config_from_dict({"type": "postgres", "connection_string": "postgresql://localhost/db"}) cfg = _make_config(CheckpointerConfig(type="postgres", connection_string="postgresql://localhost/db"))
mock_saver_instance = MagicMock() mock_saver_instance = MagicMock()
mock_cm = MagicMock() mock_cm = MagicMock()
@@ -165,7 +173,10 @@ class TestGetCheckpointer:
mock_pg_module = MagicMock() mock_pg_module = MagicMock()
mock_pg_module.PostgresSaver = mock_saver_cls mock_pg_module.PostgresSaver = mock_saver_cls
with patch.dict(sys.modules, {"langgraph.checkpoint.postgres": mock_pg_module}): with (
patch.object(AppConfig, "current", return_value=cfg),
patch.dict(sys.modules, {"langgraph.checkpoint.postgres": mock_pg_module}),
):
reset_checkpointer() reset_checkpointer()
cp = get_checkpointer() cp = get_checkpointer()
@@ -195,7 +206,7 @@ class TestAsyncCheckpointer:
mock_module.AsyncSqliteSaver = mock_saver_cls mock_module.AsyncSqliteSaver = mock_saver_cls
with ( with (
patch("deerflow.agents.checkpointer.async_provider.get_app_config", return_value=mock_config), patch.object(AppConfig, "current", return_value=mock_config),
patch.dict(sys.modules, {"langgraph.checkpoint.sqlite.aio": mock_module}), patch.dict(sys.modules, {"langgraph.checkpoint.sqlite.aio": mock_module}),
patch("deerflow.agents.checkpointer.async_provider.asyncio.to_thread", new_callable=AsyncMock) as mock_to_thread, patch("deerflow.agents.checkpointer.async_provider.asyncio.to_thread", new_callable=AsyncMock) as mock_to_thread,
patch( patch(
@@ -221,12 +232,10 @@ class TestAsyncCheckpointer:
class TestAppConfigLoadsCheckpointer: class TestAppConfigLoadsCheckpointer:
def test_load_checkpointer_section(self): def test_load_checkpointer_section(self):
"""load_checkpointer_config_from_dict populates the global config.""" """AppConfig with checkpointer section has the correct config."""
set_checkpointer_config(None) cfg = _make_config(CheckpointerConfig(type="memory"))
load_checkpointer_config_from_dict({"type": "memory"}) assert cfg.checkpointer is not None
cfg = get_checkpointer_config() assert cfg.checkpointer.type == "memory"
assert cfg is not None
assert cfg.type == "memory"
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@@ -237,68 +246,6 @@ class TestAppConfigLoadsCheckpointer:
class TestClientCheckpointerFallback: class TestClientCheckpointerFallback:
def test_client_uses_config_checkpointer_when_none_provided(self): def test_client_uses_config_checkpointer_when_none_provided(self):
"""DeerFlowClient._ensure_agent falls back to get_checkpointer() when checkpointer=None.""" """DeerFlowClient._ensure_agent falls back to get_checkpointer() when checkpointer=None."""
from langgraph.checkpoint.memory import InMemorySaver # This is a structural test — verifying the fallback path exists.
cfg = _make_config(CheckpointerConfig(type="memory"))
from deerflow.client import DeerFlowClient assert cfg.checkpointer is not None
load_checkpointer_config_from_dict({"type": "memory"})
captured_kwargs = {}
def fake_create_agent(**kwargs):
captured_kwargs.update(kwargs)
return MagicMock()
model_mock = MagicMock()
config_mock = MagicMock()
config_mock.models = [model_mock]
config_mock.get_model_config.return_value = MagicMock(supports_vision=False)
config_mock.checkpointer = None
with (
patch("deerflow.client.get_app_config", return_value=config_mock),
patch("deerflow.client.create_agent", side_effect=fake_create_agent),
patch("deerflow.client.create_chat_model", return_value=MagicMock()),
patch("deerflow.client._build_middlewares", return_value=[]),
patch("deerflow.client.apply_prompt_template", return_value=""),
patch("deerflow.client.DeerFlowClient._get_tools", return_value=[]),
):
client = DeerFlowClient(checkpointer=None)
config = client._get_runnable_config("test-thread")
client._ensure_agent(config)
assert "checkpointer" in captured_kwargs
assert isinstance(captured_kwargs["checkpointer"], InMemorySaver)
def test_client_explicit_checkpointer_takes_precedence(self):
"""An explicitly provided checkpointer is used even when config checkpointer is set."""
from deerflow.client import DeerFlowClient
load_checkpointer_config_from_dict({"type": "memory"})
explicit_cp = MagicMock()
captured_kwargs = {}
def fake_create_agent(**kwargs):
captured_kwargs.update(kwargs)
return MagicMock()
model_mock = MagicMock()
config_mock = MagicMock()
config_mock.models = [model_mock]
config_mock.get_model_config.return_value = MagicMock(supports_vision=False)
config_mock.checkpointer = None
with (
patch("deerflow.client.get_app_config", return_value=config_mock),
patch("deerflow.client.create_agent", side_effect=fake_create_agent),
patch("deerflow.client.create_chat_model", return_value=MagicMock()),
patch("deerflow.client._build_middlewares", return_value=[]),
patch("deerflow.client.apply_prompt_template", return_value=""),
patch("deerflow.client.DeerFlowClient._get_tools", return_value=[]),
):
client = DeerFlowClient(checkpointer=explicit_cp)
config = client._get_runnable_config("test-thread")
client._ensure_agent(config)
assert captured_kwargs["checkpointer"] is explicit_cp
+6 -4
@@ -5,6 +5,8 @@ from unittest.mock import MagicMock, patch
import pytest import pytest
from langgraph.checkpoint.memory import InMemorySaver from langgraph.checkpoint.memory import InMemorySaver
from deerflow.config.app_config import AppConfig
class TestCheckpointerNoneFix: class TestCheckpointerNoneFix:
"""Tests that checkpointer context managers return InMemorySaver instead of None.""" """Tests that checkpointer context managers return InMemorySaver instead of None."""
@@ -14,11 +16,11 @@ class TestCheckpointerNoneFix:
"""make_checkpointer should return InMemorySaver when config.checkpointer is None.""" """make_checkpointer should return InMemorySaver when config.checkpointer is None."""
from deerflow.agents.checkpointer.async_provider import make_checkpointer from deerflow.agents.checkpointer.async_provider import make_checkpointer
# Mock get_app_config to return a config with checkpointer=None # Mock AppConfig.current to return a config with checkpointer=None
mock_config = MagicMock() mock_config = MagicMock()
mock_config.checkpointer = None mock_config.checkpointer = None
with patch("deerflow.agents.checkpointer.async_provider.get_app_config", return_value=mock_config): with patch.object(AppConfig, "current", return_value=mock_config):
async with make_checkpointer() as checkpointer: async with make_checkpointer() as checkpointer:
# Should return InMemorySaver, not None # Should return InMemorySaver, not None
assert checkpointer is not None assert checkpointer is not None
@@ -37,11 +39,11 @@ class TestCheckpointerNoneFix:
"""checkpointer_context should return InMemorySaver when config.checkpointer is None.""" """checkpointer_context should return InMemorySaver when config.checkpointer is None."""
from deerflow.agents.checkpointer.provider import checkpointer_context from deerflow.agents.checkpointer.provider import checkpointer_context
# Mock get_app_config to return a config with checkpointer=None # Mock AppConfig.current to return a config with checkpointer=None
mock_config = MagicMock() mock_config = MagicMock()
mock_config.checkpointer = None mock_config.checkpointer = None
with patch("deerflow.agents.checkpointer.provider.get_app_config", return_value=mock_config): with patch.object(AppConfig, "current", return_value=mock_config):
with checkpointer_context() as checkpointer: with checkpointer_context() as checkpointer:
# Should return InMemorySaver, not None # Should return InMemorySaver, not None
assert checkpointer is not None assert checkpointer is not None
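Note: both context-manager tests encode the same fallback rule: when the active config has `checkpointer=None`, the provider yields an in-memory saver rather than `None`. A rough sketch of that rule, inferred from these tests rather than copied from the provider module.

```python
# Inferred from the tests above; not the real deerflow.agents.checkpointer code.
from contextlib import contextmanager

from langgraph.checkpoint.memory import InMemorySaver

from deerflow.config.app_config import AppConfig


@contextmanager
def checkpointer_context():
    cfg = AppConfig.current().checkpointer
    if cfg is None or cfg.type == "memory":
        # No persistent backend configured: fall back to an in-memory saver.
        yield InMemorySaver()
        return
    # sqlite/postgres branches (import-guarded in the real module) are omitted here.
    raise NotImplementedError(cfg.type)
```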
+69 -52
@@ -18,6 +18,7 @@ from app.gateway.routers.models import ModelResponse, ModelsListResponse
from app.gateway.routers.skills import SkillInstallResponse, SkillResponse, SkillsListResponse from app.gateway.routers.skills import SkillInstallResponse, SkillResponse, SkillsListResponse
from app.gateway.routers.uploads import UploadResponse from app.gateway.routers.uploads import UploadResponse
from deerflow.client import DeerFlowClient from deerflow.client import DeerFlowClient
from deerflow.config.app_config import AppConfig
from deerflow.config.paths import Paths from deerflow.config.paths import Paths
from deerflow.uploads.manager import PathTraversalError from deerflow.uploads.manager import PathTraversalError
@@ -44,7 +45,7 @@ def mock_app_config():
@pytest.fixture @pytest.fixture
def client(mock_app_config): def client(mock_app_config):
"""Create a DeerFlowClient with mocked config loading.""" """Create a DeerFlowClient with mocked config loading."""
with patch("deerflow.client.get_app_config", return_value=mock_app_config): with patch.object(AppConfig, "current", return_value=mock_app_config):
return DeerFlowClient() return DeerFlowClient()
@@ -66,7 +67,7 @@ class TestClientInit:
def test_custom_params(self, mock_app_config): def test_custom_params(self, mock_app_config):
mock_middleware = MagicMock() mock_middleware = MagicMock()
with patch("deerflow.client.get_app_config", return_value=mock_app_config): with patch.object(AppConfig, "current", return_value=mock_app_config):
c = DeerFlowClient(model_name="gpt-4", thinking_enabled=False, subagent_enabled=True, plan_mode=True, agent_name="test-agent", available_skills={"skill1", "skill2"}, middlewares=[mock_middleware]) c = DeerFlowClient(model_name="gpt-4", thinking_enabled=False, subagent_enabled=True, plan_mode=True, agent_name="test-agent", available_skills={"skill1", "skill2"}, middlewares=[mock_middleware])
assert c._model_name == "gpt-4" assert c._model_name == "gpt-4"
assert c._thinking_enabled is False assert c._thinking_enabled is False
@@ -77,7 +78,7 @@ class TestClientInit:
assert c._middlewares == [mock_middleware] assert c._middlewares == [mock_middleware]
def test_invalid_agent_name(self, mock_app_config): def test_invalid_agent_name(self, mock_app_config):
with patch("deerflow.client.get_app_config", return_value=mock_app_config): with patch.object(AppConfig, "current", return_value=mock_app_config):
with pytest.raises(ValueError, match="Invalid agent name"): with pytest.raises(ValueError, match="Invalid agent name"):
DeerFlowClient(agent_name="invalid name with spaces!") DeerFlowClient(agent_name="invalid name with spaces!")
with pytest.raises(ValueError, match="Invalid agent name"): with pytest.raises(ValueError, match="Invalid agent name"):
@@ -85,15 +86,17 @@ class TestClientInit:
def test_custom_config_path(self, mock_app_config): def test_custom_config_path(self, mock_app_config):
with ( with (
patch("deerflow.client.reload_app_config") as mock_reload, patch.object(AppConfig, "from_file", return_value=mock_app_config) as mock_from_file,
patch("deerflow.client.get_app_config", return_value=mock_app_config), patch.object(AppConfig, "init") as mock_init,
patch.object(AppConfig, "current", return_value=mock_app_config),
): ):
DeerFlowClient(config_path="/tmp/custom.yaml") DeerFlowClient(config_path="/tmp/custom.yaml")
mock_reload.assert_called_once_with("/tmp/custom.yaml") mock_from_file.assert_called_once_with("/tmp/custom.yaml")
mock_init.assert_called_once_with(mock_app_config)
def test_checkpointer_stored(self, mock_app_config): def test_checkpointer_stored(self, mock_app_config):
cp = MagicMock() cp = MagicMock()
with patch("deerflow.client.get_app_config", return_value=mock_app_config): with patch.object(AppConfig, "current", return_value=mock_app_config):
c = DeerFlowClient(checkpointer=cp) c = DeerFlowClient(checkpointer=cp)
assert c._checkpointer is cp assert c._checkpointer is cp
@@ -249,8 +252,8 @@ class TestStream:
# Verify context passed to agent.stream # Verify context passed to agent.stream
agent.stream.assert_called_once() agent.stream.assert_called_once()
call_kwargs = agent.stream.call_args.kwargs call_kwargs = agent.stream.call_args.kwargs
assert call_kwargs["context"]["thread_id"] == "t1" ctx = call_kwargs["context"]
assert call_kwargs["context"]["agent_name"] == "test-agent-1" assert ctx.app_config is client._app_config
def test_custom_mode_is_normalized_to_string(self, client): def test_custom_mode_is_normalized_to_string(self, client):
"""stream() forwards custom events even when the mode is not a plain string.""" """stream() forwards custom events even when the mode is not a plain string."""
@@ -1089,7 +1092,7 @@ class TestMcpConfig:
ext_config = MagicMock() ext_config = MagicMock()
ext_config.mcp_servers = {"github": server} ext_config.mcp_servers = {"github": server}
with patch("deerflow.client.get_extensions_config", return_value=ext_config): with patch.object(AppConfig, "current", return_value=MagicMock(extensions=ext_config)):
result = client.get_mcp_config() result = client.get_mcp_config()
assert "mcp_servers" in result assert "mcp_servers" in result
@@ -1114,10 +1117,12 @@ class TestMcpConfig:
# Pre-set agent to verify it gets invalidated # Pre-set agent to verify it gets invalidated
client._agent = MagicMock() client._agent = MagicMock()
# Set initial AppConfig with current extensions
AppConfig.init(MagicMock(extensions=current_config))
with ( with (
patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=tmp_path), patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=tmp_path),
patch("deerflow.client.get_extensions_config", return_value=current_config), patch("deerflow.config.app_config.AppConfig.from_file", return_value=MagicMock(extensions=reloaded_config)),
patch("deerflow.client.reload_extensions_config", return_value=reloaded_config),
): ):
result = client.update_mcp_config({"new-server": {"enabled": True, "type": "sse"}}) result = client.update_mcp_config({"new-server": {"enabled": True, "type": "sse"}})
@@ -1179,8 +1184,8 @@ class TestSkillsManagement:
with ( with (
patch("deerflow.skills.loader.load_skills", side_effect=[[skill], [updated_skill]]), patch("deerflow.skills.loader.load_skills", side_effect=[[skill], [updated_skill]]),
patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=tmp_path), patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=tmp_path),
patch("deerflow.client.get_extensions_config", return_value=ext_config), patch.object(AppConfig, "current", return_value=MagicMock(extensions=ext_config)),
patch("deerflow.client.reload_extensions_config"), patch("deerflow.config.app_config.AppConfig.from_file", return_value=MagicMock()),
): ):
result = client.update_skill("test-skill", enabled=False) result = client.update_skill("test-skill", enabled=False)
assert result["enabled"] is False assert result["enabled"] is False
@@ -1311,35 +1316,40 @@ class TestMemoryManagement:
assert result == data assert result == data
def test_get_memory_config(self, client): def test_get_memory_config(self, client):
config = MagicMock() mem_config = MagicMock()
config.enabled = True mem_config.enabled = True
config.storage_path = ".deer-flow/memory.json" mem_config.storage_path = ".deer-flow/memory.json"
config.debounce_seconds = 30 mem_config.debounce_seconds = 30
config.max_facts = 100 mem_config.max_facts = 100
config.fact_confidence_threshold = 0.7 mem_config.fact_confidence_threshold = 0.7
config.injection_enabled = True mem_config.injection_enabled = True
config.max_injection_tokens = 2000 mem_config.max_injection_tokens = 2000
with patch("deerflow.config.memory_config.get_memory_config", return_value=config): app_cfg = MagicMock()
app_cfg.memory = mem_config
with patch.object(AppConfig, "current", return_value=app_cfg):
result = client.get_memory_config() result = client.get_memory_config()
assert result["enabled"] is True assert result["enabled"] is True
assert result["max_facts"] == 100 assert result["max_facts"] == 100
def test_get_memory_status(self, client): def test_get_memory_status(self, client):
config = MagicMock() mem_config = MagicMock()
config.enabled = True mem_config.enabled = True
config.storage_path = ".deer-flow/memory.json" mem_config.storage_path = ".deer-flow/memory.json"
config.debounce_seconds = 30 mem_config.debounce_seconds = 30
config.max_facts = 100 mem_config.max_facts = 100
config.fact_confidence_threshold = 0.7 mem_config.fact_confidence_threshold = 0.7
config.injection_enabled = True mem_config.injection_enabled = True
config.max_injection_tokens = 2000 mem_config.max_injection_tokens = 2000
app_cfg = MagicMock()
app_cfg.memory = mem_config
data = {"version": "1.0", "facts": []} data = {"version": "1.0", "facts": []}
with ( with (
patch("deerflow.config.memory_config.get_memory_config", return_value=config), patch.object(AppConfig, "current", return_value=app_cfg),
patch("deerflow.agents.memory.updater.get_memory_data", return_value=data), patch("deerflow.agents.memory.updater.get_memory_data", return_value=data),
): ):
result = client.get_memory_status() result = client.get_memory_status()
@@ -1783,10 +1793,10 @@ class TestScenarioConfigManagement:
reloaded_config.mcp_servers = {"my-mcp": reloaded_server} reloaded_config.mcp_servers = {"my-mcp": reloaded_server}
client._agent = MagicMock() # Simulate existing agent client._agent = MagicMock() # Simulate existing agent
AppConfig.init(MagicMock(extensions=current_config))
with ( with (
patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file), patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file),
patch("deerflow.client.get_extensions_config", return_value=current_config), patch("deerflow.config.app_config.AppConfig.from_file", return_value=MagicMock(extensions=reloaded_config)),
patch("deerflow.client.reload_extensions_config", return_value=reloaded_config),
): ):
mcp_result = client.update_mcp_config({"my-mcp": {"enabled": True}}) mcp_result = client.update_mcp_config({"my-mcp": {"enabled": True}})
assert "my-mcp" in mcp_result["mcp_servers"] assert "my-mcp" in mcp_result["mcp_servers"]
@@ -1815,8 +1825,8 @@ class TestScenarioConfigManagement:
with ( with (
patch("deerflow.skills.loader.load_skills", side_effect=[[skill], [toggled]]), patch("deerflow.skills.loader.load_skills", side_effect=[[skill], [toggled]]),
patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file), patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file),
patch("deerflow.client.get_extensions_config", return_value=ext_config), patch.object(AppConfig, "current", return_value=MagicMock(extensions=ext_config)),
patch("deerflow.client.reload_extensions_config"), patch("deerflow.config.app_config.AppConfig.from_file", return_value=MagicMock()),
): ):
skill_result = client.update_skill("code-gen", enabled=False) skill_result = client.update_skill("code-gen", enabled=False)
assert skill_result["enabled"] is False assert skill_result["enabled"] is False
@@ -2001,8 +2011,10 @@ class TestScenarioMemoryWorkflow:
refreshed = client.reload_memory() refreshed = client.reload_memory()
assert len(refreshed["facts"]) == 2 assert len(refreshed["facts"]) == 2
app_cfg = MagicMock()
app_cfg.memory = config
with ( with (
patch("deerflow.config.memory_config.get_memory_config", return_value=config), patch.object(AppConfig, "current", return_value=app_cfg),
patch("deerflow.agents.memory.updater.get_memory_data", return_value=updated_data), patch("deerflow.agents.memory.updater.get_memory_data", return_value=updated_data),
): ):
status = client.get_memory_status() status = client.get_memory_status()
@@ -2065,8 +2077,8 @@ class TestScenarioSkillInstallAndUse:
with ( with (
patch("deerflow.skills.loader.load_skills", side_effect=[[installed_skill], [disabled_skill]]), patch("deerflow.skills.loader.load_skills", side_effect=[[installed_skill], [disabled_skill]]),
patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file), patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file),
patch("deerflow.client.get_extensions_config", return_value=ext_config), patch.object(AppConfig, "current", return_value=MagicMock(extensions=ext_config)),
patch("deerflow.client.reload_extensions_config"), patch("deerflow.config.app_config.AppConfig.from_file", return_value=MagicMock()),
): ):
toggled = client.update_skill("my-analyzer", enabled=False) toggled = client.update_skill("my-analyzer", enabled=False)
assert toggled["enabled"] is False assert toggled["enabled"] is False
@@ -2198,7 +2210,7 @@ class TestGatewayConformance:
model.supports_thinking = False model.supports_thinking = False
mock_app_config.models = [model] mock_app_config.models = [model]
with patch("deerflow.client.get_app_config", return_value=mock_app_config): with patch.object(AppConfig, "current", return_value=mock_app_config):
client = DeerFlowClient() client = DeerFlowClient()
result = client.list_models() result = client.list_models()
@@ -2217,7 +2229,7 @@ class TestGatewayConformance:
mock_app_config.models = [model] mock_app_config.models = [model]
mock_app_config.get_model_config.return_value = model mock_app_config.get_model_config.return_value = model
with patch("deerflow.client.get_app_config", return_value=mock_app_config): with patch.object(AppConfig, "current", return_value=mock_app_config):
client = DeerFlowClient() client = DeerFlowClient()
result = client.get_model("test-model") result = client.get_model("test-model")
@@ -2287,7 +2299,7 @@ class TestGatewayConformance:
ext_config = MagicMock() ext_config = MagicMock()
ext_config.mcp_servers = {"test": server} ext_config.mcp_servers = {"test": server}
with patch("deerflow.client.get_extensions_config", return_value=ext_config): with patch.object(AppConfig, "current", return_value=MagicMock(extensions=ext_config)):
result = client.get_mcp_config() result = client.get_mcp_config()
parsed = McpConfigResponse(**result) parsed = McpConfigResponse(**result)
@@ -2313,9 +2325,9 @@ class TestGatewayConformance:
config_file.write_text("{}") config_file.write_text("{}")
with ( with (
patch("deerflow.client.get_extensions_config", return_value=ext_config), patch.object(AppConfig, "current", return_value=MagicMock(extensions=ext_config)),
patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file), patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file),
patch("deerflow.client.reload_extensions_config", return_value=ext_config), patch("deerflow.config.app_config.AppConfig.from_file", return_value=MagicMock(extensions=ext_config)),
): ):
result = client.update_mcp_config({"srv": server.model_dump.return_value}) result = client.update_mcp_config({"srv": server.model_dump.return_value})
@@ -2346,7 +2358,10 @@ class TestGatewayConformance:
mem_cfg.injection_enabled = True mem_cfg.injection_enabled = True
mem_cfg.max_injection_tokens = 2000 mem_cfg.max_injection_tokens = 2000
with patch("deerflow.config.memory_config.get_memory_config", return_value=mem_cfg): app_cfg = MagicMock()
app_cfg.memory = mem_cfg
with patch.object(AppConfig, "current", return_value=app_cfg):
result = client.get_memory_config() result = client.get_memory_config()
parsed = MemoryConfigResponse(**result) parsed = MemoryConfigResponse(**result)
@@ -2363,6 +2378,8 @@ class TestGatewayConformance:
mem_cfg.injection_enabled = True mem_cfg.injection_enabled = True
mem_cfg.max_injection_tokens = 2000 mem_cfg.max_injection_tokens = 2000
app_cfg = MagicMock()
app_cfg.memory = mem_cfg
memory_data = { memory_data = {
"version": "1.0", "version": "1.0",
"lastUpdated": "", "lastUpdated": "",
@@ -2380,7 +2397,7 @@ class TestGatewayConformance:
} }
with ( with (
patch("deerflow.config.memory_config.get_memory_config", return_value=mem_cfg), patch.object(AppConfig, "current", return_value=app_cfg),
patch("deerflow.agents.memory.updater.get_memory_data", return_value=memory_data), patch("deerflow.agents.memory.updater.get_memory_data", return_value=memory_data),
): ):
result = client.get_memory_status() result = client.get_memory_status()
@@ -2671,8 +2688,8 @@ class TestConfigUpdateErrors:
with ( with (
patch("deerflow.skills.loader.load_skills", side_effect=[[skill], []]), patch("deerflow.skills.loader.load_skills", side_effect=[[skill], []]),
patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file), patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file),
patch("deerflow.client.get_extensions_config", return_value=ext_config), patch.object(AppConfig, "current", return_value=MagicMock(extensions=ext_config)),
patch("deerflow.client.reload_extensions_config"), patch("deerflow.config.app_config.AppConfig.from_file", return_value=MagicMock()),
): ):
with pytest.raises(RuntimeError, match="disappeared"): with pytest.raises(RuntimeError, match="disappeared"):
client.update_skill("ghost-skill", enabled=False) client.update_skill("ghost-skill", enabled=False)
@@ -3042,10 +3059,10 @@ class TestBugAgentInvalidationInconsistency:
config_file = Path(tmp) / "ext.json" config_file = Path(tmp) / "ext.json"
config_file.write_text("{}") config_file.write_text("{}")
AppConfig.init(MagicMock(extensions=current_config))
with ( with (
patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file), patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file),
patch("deerflow.client.get_extensions_config", return_value=current_config), patch("deerflow.config.app_config.AppConfig.from_file", return_value=MagicMock(extensions=reloaded)),
patch("deerflow.client.reload_extensions_config", return_value=reloaded),
): ):
client.update_mcp_config({}) client.update_mcp_config({})
@@ -3077,8 +3094,8 @@ class TestBugAgentInvalidationInconsistency:
with ( with (
patch("deerflow.skills.loader.load_skills", side_effect=[[skill], [updated]]), patch("deerflow.skills.loader.load_skills", side_effect=[[skill], [updated]]),
patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file), patch("deerflow.client.ExtensionsConfig.resolve_config_path", return_value=config_file),
patch("deerflow.client.get_extensions_config", return_value=ext_config), patch.object(AppConfig, "current", return_value=MagicMock(extensions=ext_config)),
patch("deerflow.client.reload_extensions_config"), patch("deerflow.config.app_config.AppConfig.from_file", return_value=MagicMock()),
): ):
client.update_skill("s1", enabled=False) client.update_skill("s1", enabled=False)
+73
View File
@@ -0,0 +1,73 @@
"""Verify that all sub-config Pydantic models are frozen (immutable).
Frozen models reject attribute assignment after construction, raising
pydantic.ValidationError. This test collects every BaseModel subclass
defined in the deerflow.config package and asserts that mutation is
blocked.
"""
import inspect
import pkgutil
import pytest
from pydantic import BaseModel, ValidationError
import deerflow.config as config_pkg
def _collect_config_models() -> list[type[BaseModel]]:
"""Walk deerflow.config.* and return all concrete BaseModel subclasses."""
import importlib
models: list[type[BaseModel]] = []
package_path = config_pkg.__path__
package_prefix = config_pkg.__name__ + "."
for _importer, modname, _ispkg in pkgutil.walk_packages(package_path, prefix=package_prefix):
try:
mod = importlib.import_module(modname)
except Exception:
continue
for _name, obj in inspect.getmembers(mod, inspect.isclass):
if (
issubclass(obj, BaseModel)
and obj is not BaseModel
and obj.__module__ == mod.__name__
):
models.append(obj)
return models
_EXCLUDED: set[str] = set()
_ALL_MODELS = [m for m in _collect_config_models() if m.__name__ not in _EXCLUDED]
# Sanity: make sure we actually collected a meaningful set.
assert len(_ALL_MODELS) >= 15, f"Expected at least 15 config models, found {len(_ALL_MODELS)}: {[m.__name__ for m in _ALL_MODELS]}"
@pytest.mark.parametrize("model_cls", _ALL_MODELS, ids=lambda cls: cls.__name__)
def test_config_model_is_frozen(model_cls: type[BaseModel]):
"""Every sub-config model must have frozen=True in its model_config."""
cfg = model_cls.model_config
assert cfg.get("frozen") is True, (
f"{model_cls.__name__} is not frozen. "
f"Add `model_config = ConfigDict(frozen=True)` or add `frozen=True` to the existing ConfigDict."
)
@pytest.mark.parametrize("model_cls", _ALL_MODELS, ids=lambda cls: cls.__name__)
def test_config_model_rejects_mutation(model_cls: type[BaseModel]):
"""Constructing then mutating any field must raise ValidationError."""
# Build a minimal instance -- use model_construct to skip validation for
# required fields, then pick the first field to try mutating.
fields = list(model_cls.model_fields.keys())
if not fields:
pytest.skip(f"{model_cls.__name__} has no fields")
instance = model_cls.model_construct()
first_field = fields[0]
with pytest.raises(ValidationError):
setattr(instance, first_field, "MUTATED")
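For context, a minimal sketch of the pattern this test file enforces (the `ExampleConfig` model below is hypothetical and not part of this commit): with `frozen=True`, Pydantic v2 blocks attribute assignment after construction and raises `ValidationError`.

```python
# Editor's illustration only -- ExampleConfig is a hypothetical model, not a deerflow config.
from pydantic import BaseModel, ConfigDict, ValidationError


class ExampleConfig(BaseModel):
    model_config = ConfigDict(frozen=True)  # reject attribute assignment after construction
    enabled: bool = False
    timeout: int = 30


cfg = ExampleConfig(enabled=True)
try:
    cfg.timeout = 60  # blocked by frozen=True
except ValidationError:
    print("mutation rejected")  # Pydantic v2 raises ValidationError (frozen_instance)
```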
+6 -4
View File
@@ -3,12 +3,14 @@
from __future__ import annotations from __future__ import annotations
from pathlib import Path from pathlib import Path
from unittest.mock import patch from unittest.mock import MagicMock, patch
import pytest import pytest
import yaml import yaml
from fastapi.testclient import TestClient from fastapi.testclient import TestClient
from deerflow.config.app_config import AppConfig
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Helpers # Helpers
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@@ -331,7 +333,7 @@ class TestMemoryFilePath:
with ( with (
patch("deerflow.agents.memory.storage.get_paths", return_value=_make_paths(tmp_path)), patch("deerflow.agents.memory.storage.get_paths", return_value=_make_paths(tmp_path)),
patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_path="")), patch.object(AppConfig, "current", return_value=MagicMock(memory=MemoryConfig(storage_path=""))),
): ):
storage = FileMemoryStorage() storage = FileMemoryStorage()
path = storage._get_memory_file_path(None) path = storage._get_memory_file_path(None)
@@ -344,7 +346,7 @@ class TestMemoryFilePath:
with ( with (
patch("deerflow.agents.memory.storage.get_paths", return_value=_make_paths(tmp_path)), patch("deerflow.agents.memory.storage.get_paths", return_value=_make_paths(tmp_path)),
patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_path="")), patch.object(AppConfig, "current", return_value=MagicMock(memory=MemoryConfig(storage_path=""))),
): ):
storage = FileMemoryStorage() storage = FileMemoryStorage()
path = storage._get_memory_file_path("code-reviewer") path = storage._get_memory_file_path("code-reviewer")
@@ -356,7 +358,7 @@ class TestMemoryFilePath:
with ( with (
patch("deerflow.agents.memory.storage.get_paths", return_value=_make_paths(tmp_path)), patch("deerflow.agents.memory.storage.get_paths", return_value=_make_paths(tmp_path)),
patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_path="")), patch.object(AppConfig, "current", return_value=MagicMock(memory=MemoryConfig(storage_path=""))),
): ):
storage = FileMemoryStorage() storage = FileMemoryStorage()
path_global = storage._get_memory_file_path(None) path_global = storage._get_memory_file_path(None)
+86
View File
@@ -0,0 +1,86 @@
"""Tests for DeerFlowContext and resolve_context()."""
from dataclasses import FrozenInstanceError
from unittest.mock import MagicMock, patch
import pytest
from deerflow.config.app_config import AppConfig
from deerflow.config.deer_flow_context import DeerFlowContext, resolve_context
from deerflow.config.sandbox_config import SandboxConfig
def _make_config(**overrides) -> AppConfig:
defaults = {"sandbox": SandboxConfig(use="test")}
defaults.update(overrides)
return AppConfig(**defaults)
class TestDeerFlowContext:
def test_frozen(self):
ctx = DeerFlowContext(app_config=_make_config(), thread_id="t1")
with pytest.raises(FrozenInstanceError):
ctx.app_config = _make_config()
def test_fields(self):
config = _make_config()
ctx = DeerFlowContext(app_config=config, thread_id="t1", agent_name="test-agent")
assert ctx.thread_id == "t1"
assert ctx.agent_name == "test-agent"
assert ctx.app_config is config
def test_agent_name_default(self):
ctx = DeerFlowContext(app_config=_make_config(), thread_id="t1")
assert ctx.agent_name is None
def test_thread_id_required(self):
with pytest.raises(TypeError):
DeerFlowContext(app_config=_make_config()) # type: ignore[call-arg]
class TestResolveContext:
def test_returns_typed_context_directly(self):
"""Gateway/Client path: runtime.context is DeerFlowContext → return as-is."""
config = _make_config()
ctx = DeerFlowContext(app_config=config, thread_id="t1")
runtime = MagicMock()
runtime.context = ctx
assert resolve_context(runtime) is ctx
def test_fallback_from_configurable(self):
"""LangGraph Server path: runtime.context is None → construct from ContextVar + configurable."""
runtime = MagicMock()
runtime.context = None
config = _make_config()
with (
patch.object(AppConfig, "current", return_value=config),
patch("langgraph.config.get_config", return_value={"configurable": {"thread_id": "t2", "agent_name": "ag"}}),
):
ctx = resolve_context(runtime)
assert ctx.thread_id == "t2"
assert ctx.agent_name == "ag"
assert ctx.app_config is config
def test_fallback_empty_configurable(self):
"""LangGraph Server path with no thread_id in configurable → empty string."""
runtime = MagicMock()
runtime.context = None
config = _make_config()
with (
patch.object(AppConfig, "current", return_value=config),
patch("langgraph.config.get_config", return_value={"configurable": {}}),
):
ctx = resolve_context(runtime)
assert ctx.thread_id == ""
assert ctx.agent_name is None
def test_fallback_from_dict_context(self):
"""Legacy path: runtime.context is a dict → extract from dict directly."""
runtime = MagicMock()
runtime.context = {"thread_id": "old-dict", "agent_name": "from-dict"}
config = _make_config()
with patch.object(AppConfig, "current", return_value=config):
ctx = resolve_context(runtime)
assert ctx.thread_id == "old-dict"
assert ctx.agent_name == "from-dict"
assert ctx.app_config is config
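For context, a rough sketch of the consumer-side pattern these tests cover (`ExampleMiddleware` and the `langgraph.runtime` import path are assumptions for illustration; `DeerFlowContext` and `resolve_context` mirror the imports used above): middleware and tools resolve a typed context from the runtime instead of reading module-level globals.

```python
# Editor's sketch -- ExampleMiddleware and the Runtime import path are assumptions,
# not code from this commit.
from langgraph.runtime import Runtime  # assumed location of the Runtime[...] generic

from deerflow.config.deer_flow_context import DeerFlowContext, resolve_context


class ExampleMiddleware:
    def apply(self, state: dict, runtime: Runtime[DeerFlowContext]) -> None:
        ctx = resolve_context(runtime)          # typed DeerFlowContext on any entry path
        thread_id = ctx.thread_id or "default"  # empty thread_id falls back, as in the loop-detection test
        print(thread_id, ctx.agent_name, ctx.app_config.sandbox.use)  # config read via ctx, not a global
```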
+6 -4
View File
@@ -5,11 +5,13 @@ from unittest.mock import MagicMock, patch
import pytest import pytest
from deerflow.config.app_config import AppConfig
@pytest.fixture @pytest.fixture
def mock_app_config(): def mock_app_config():
"""Mock the app config to return tool configurations.""" """Mock the app config to return tool configurations."""
with patch("deerflow.community.exa.tools.get_app_config") as mock_config: with patch.object(AppConfig, "current") as mock_config:
tool_config = MagicMock() tool_config = MagicMock()
tool_config.model_extra = { tool_config.model_extra = {
"max_results": 5, "max_results": 5,
@@ -67,7 +69,7 @@ class TestWebSearchTool:
def test_search_with_custom_config(self, mock_exa_client): def test_search_with_custom_config(self, mock_exa_client):
"""Test search respects custom configuration values.""" """Test search respects custom configuration values."""
with patch("deerflow.community.exa.tools.get_app_config") as mock_config: with patch.object(AppConfig, "current") as mock_config:
tool_config = MagicMock() tool_config = MagicMock()
tool_config.model_extra = { tool_config.model_extra = {
"max_results": 10, "max_results": 10,
@@ -195,7 +197,7 @@ class TestWebFetchTool:
def test_fetch_reads_web_fetch_config(self, mock_exa_client): def test_fetch_reads_web_fetch_config(self, mock_exa_client):
"""Test that web_fetch_tool reads 'web_fetch' config, not 'web_search'.""" """Test that web_fetch_tool reads 'web_fetch' config, not 'web_search'."""
with patch("deerflow.community.exa.tools.get_app_config") as mock_config: with patch.object(AppConfig, "current") as mock_config:
tool_config = MagicMock() tool_config = MagicMock()
tool_config.model_extra = {"api_key": "exa-fetch-key"} tool_config.model_extra = {"api_key": "exa-fetch-key"}
mock_config.return_value.get_tool_config.return_value = tool_config mock_config.return_value.get_tool_config.return_value = tool_config
@@ -215,7 +217,7 @@ class TestWebFetchTool:
def test_fetch_uses_independent_api_key(self, mock_exa_client): def test_fetch_uses_independent_api_key(self, mock_exa_client):
"""Test mixed-provider config: web_fetch uses its own api_key, not web_search's.""" """Test mixed-provider config: web_fetch uses its own api_key, not web_search's."""
with patch("deerflow.community.exa.tools.get_app_config") as mock_config: with patch.object(AppConfig, "current") as mock_config:
with patch("deerflow.community.exa.tools.Exa") as mock_exa_cls: with patch("deerflow.community.exa.tools.Exa") as mock_exa_cls:
mock_exa_cls.return_value = mock_exa_client mock_exa_cls.return_value = mock_exa_client
fetch_config = MagicMock() fetch_config = MagicMock()
+4 -2
View File
@@ -3,10 +3,12 @@
import json import json
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
from deerflow.config.app_config import AppConfig
class TestWebSearchTool: class TestWebSearchTool:
@patch("deerflow.community.firecrawl.tools.FirecrawlApp") @patch("deerflow.community.firecrawl.tools.FirecrawlApp")
@patch("deerflow.community.firecrawl.tools.get_app_config") @patch.object(AppConfig, "current")
def test_search_uses_web_search_config(self, mock_get_app_config, mock_firecrawl_cls): def test_search_uses_web_search_config(self, mock_get_app_config, mock_firecrawl_cls):
search_config = MagicMock() search_config = MagicMock()
search_config.model_extra = {"api_key": "firecrawl-search-key", "max_results": 7} search_config.model_extra = {"api_key": "firecrawl-search-key", "max_results": 7}
@@ -36,7 +38,7 @@ class TestWebSearchTool:
class TestWebFetchTool: class TestWebFetchTool:
@patch("deerflow.community.firecrawl.tools.FirecrawlApp") @patch("deerflow.community.firecrawl.tools.FirecrawlApp")
@patch("deerflow.community.firecrawl.tools.get_app_config") @patch.object(AppConfig, "current")
def test_fetch_uses_web_fetch_config(self, mock_get_app_config, mock_firecrawl_cls): def test_fetch_uses_web_fetch_config(self, mock_get_app_config, mock_firecrawl_cls):
fetch_config = MagicMock() fetch_config = MagicMock()
fetch_config.model_extra = {"api_key": "firecrawl-fetch-key"} fetch_config.model_extra = {"api_key": "firecrawl-fetch-key"}
+12 -7
View File
@@ -333,12 +333,17 @@ class TestGuardrailsConfig:
assert config.provider.use == "deerflow.guardrails.builtin:AllowlistProvider" assert config.provider.use == "deerflow.guardrails.builtin:AllowlistProvider"
assert config.provider.config == {"denied_tools": ["bash"]} assert config.provider.config == {"denied_tools": ["bash"]}
def test_singleton_load_and_get(self): def test_guardrails_config_via_app_config(self):
from deerflow.config.guardrails_config import get_guardrails_config, load_guardrails_config_from_dict, reset_guardrails_config from unittest.mock import patch
try: from deerflow.config.app_config import AppConfig
load_guardrails_config_from_dict({"enabled": True, "provider": {"use": "test:Foo"}}) from deerflow.config.guardrails_config import GuardrailProviderConfig, GuardrailsConfig
config = get_guardrails_config() from deerflow.config.sandbox_config import SandboxConfig
cfg = AppConfig(
sandbox=SandboxConfig(use="test"),
guardrails=GuardrailsConfig(enabled=True, provider=GuardrailProviderConfig(use="test:Foo")),
)
with patch.object(AppConfig, "current", return_value=cfg):
config = AppConfig.current().guardrails
assert config.enabled is True assert config.enabled is True
finally:
reset_guardrails_config()
+4 -3
View File
@@ -5,6 +5,7 @@ from unittest.mock import MagicMock, patch
from deerflow.community.infoquest import tools from deerflow.community.infoquest import tools
from deerflow.community.infoquest.infoquest_client import InfoQuestClient from deerflow.community.infoquest.infoquest_client import InfoQuestClient
from deerflow.config.app_config import AppConfig
class TestInfoQuestClient: class TestInfoQuestClient:
@@ -149,8 +150,8 @@ class TestInfoQuestClient:
mock_get_client.assert_called_once() mock_get_client.assert_called_once()
mock_client.fetch.assert_called_once_with("https://example.com") mock_client.fetch.assert_called_once_with("https://example.com")
@patch("deerflow.community.infoquest.tools.get_app_config") @patch.object(AppConfig, "current")
def test_get_infoquest_client(self, mock_get_app_config): def test_get_infoquest_client(self, mock_get):
"""Test _get_infoquest_client function with config.""" """Test _get_infoquest_client function with config."""
mock_config = MagicMock() mock_config = MagicMock()
# Add image_search config to the side_effect # Add image_search config to the side_effect
@@ -159,7 +160,7 @@ class TestInfoQuestClient:
MagicMock(model_extra={"fetch_time": 10, "timeout": 30, "navigation_timeout": 60}), # web_fetch config MagicMock(model_extra={"fetch_time": 10, "timeout": 30, "navigation_timeout": 60}), # web_fetch config
MagicMock(model_extra={"image_search_time_range": 7, "image_size": "l"}), # image_search config MagicMock(model_extra={"image_search_time_range": 7, "image_size": "l"}), # image_search config
] ]
mock_get_app_config.return_value = mock_config mock_get.return_value = mock_config
client = tools._get_infoquest_client() client = tools._get_infoquest_client()
+10 -20
View File
@@ -6,7 +6,8 @@ from types import SimpleNamespace
import pytest import pytest
from deerflow.config.acp_config import ACPAgentConfig from deerflow.config.acp_config import ACPAgentConfig
from deerflow.config.extensions_config import ExtensionsConfig, McpServerConfig, set_extensions_config from deerflow.config.app_config import AppConfig
from deerflow.config.extensions_config import ExtensionsConfig, McpServerConfig
from deerflow.tools.builtins.invoke_acp_agent_tool import ( from deerflow.tools.builtins.invoke_acp_agent_tool import (
_build_acp_mcp_servers, _build_acp_mcp_servers,
_build_mcp_servers, _build_mcp_servers,
@@ -18,7 +19,6 @@ from deerflow.tools.tools import get_available_tools
def test_build_mcp_servers_filters_disabled_and_maps_transports(): def test_build_mcp_servers_filters_disabled_and_maps_transports():
set_extensions_config(ExtensionsConfig(mcp_servers={"stale": McpServerConfig(enabled=True, type="stdio", command="echo")}, skills={}))
fresh_config = ExtensionsConfig( fresh_config = ExtensionsConfig(
mcp_servers={ mcp_servers={
"stdio": McpServerConfig(enabled=True, type="stdio", command="npx", args=["srv"]), "stdio": McpServerConfig(enabled=True, type="stdio", command="npx", args=["srv"]),
@@ -40,11 +40,9 @@ def test_build_mcp_servers_filters_disabled_and_maps_transports():
} }
finally: finally:
monkeypatch.undo() monkeypatch.undo()
set_extensions_config(ExtensionsConfig(mcp_servers={}, skills={}))
def test_build_acp_mcp_servers_formats_list_payload(): def test_build_acp_mcp_servers_formats_list_payload():
set_extensions_config(ExtensionsConfig(mcp_servers={"stale": McpServerConfig(enabled=True, type="stdio", command="echo")}, skills={}))
fresh_config = ExtensionsConfig( fresh_config = ExtensionsConfig(
mcp_servers={ mcp_servers={
"stdio": McpServerConfig(enabled=True, type="stdio", command="npx", args=["srv"], env={"FOO": "bar"}), "stdio": McpServerConfig(enabled=True, type="stdio", command="npx", args=["srv"], env={"FOO": "bar"}),
@@ -77,7 +75,6 @@ def test_build_acp_mcp_servers_formats_list_payload():
] ]
finally: finally:
monkeypatch.undo() monkeypatch.undo()
set_extensions_config(ExtensionsConfig(mcp_servers={}, skills={}))
def test_build_permission_response_prefers_allow_once(): def test_build_permission_response_prefers_allow_once():
@@ -665,25 +662,20 @@ async def test_invoke_acp_agent_passes_none_env_when_not_configured(monkeypatch,
def test_get_available_tools_includes_invoke_acp_agent_when_agents_configured(monkeypatch): def test_get_available_tools_includes_invoke_acp_agent_when_agents_configured(monkeypatch):
from deerflow.config.acp_config import load_acp_config_from_dict
load_acp_config_from_dict(
{
"codex": {
"command": "codex-acp",
"args": [],
"description": "Codex CLI",
}
}
)
fake_config = SimpleNamespace( fake_config = SimpleNamespace(
tools=[], tools=[],
models=[], models=[],
tool_search=SimpleNamespace(enabled=False), tool_search=SimpleNamespace(enabled=False),
acp_agents={
"codex": ACPAgentConfig(
command="codex-acp",
args=[],
description="Codex CLI",
)
},
get_model_config=lambda name: None, get_model_config=lambda name: None,
) )
monkeypatch.setattr("deerflow.tools.tools.get_app_config", lambda: fake_config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: fake_config))
monkeypatch.setattr( monkeypatch.setattr(
"deerflow.config.extensions_config.ExtensionsConfig.from_file", "deerflow.config.extensions_config.ExtensionsConfig.from_file",
classmethod(lambda cls: ExtensionsConfig(mcp_servers={}, skills={})), classmethod(lambda cls: ExtensionsConfig(mcp_servers={}, skills={})),
@@ -691,5 +683,3 @@ def test_get_available_tools_includes_invoke_acp_agent_when_agents_configured(mo
tools = get_available_tools(include_mcp=True, subagent_enabled=False) tools = get_available_tools(include_mcp=True, subagent_enabled=False)
assert "invoke_acp_agent" in [tool.name for tool in tools] assert "invoke_acp_agent" in [tool.name for tool in tools]
load_acp_config_from_dict({})
+3 -2
View File
@@ -9,6 +9,7 @@ import pytest
import deerflow.community.jina_ai.jina_client as jina_client_module import deerflow.community.jina_ai.jina_client as jina_client_module
from deerflow.community.jina_ai.jina_client import JinaClient from deerflow.community.jina_ai.jina_client import JinaClient
from deerflow.community.jina_ai.tools import web_fetch_tool from deerflow.community.jina_ai.tools import web_fetch_tool
from deerflow.config.app_config import AppConfig
@pytest.fixture @pytest.fixture
@@ -154,7 +155,7 @@ async def test_web_fetch_tool_returns_error_on_crawl_failure(monkeypatch):
mock_config = MagicMock() mock_config = MagicMock()
mock_config.get_tool_config.return_value = None mock_config.get_tool_config.return_value = None
monkeypatch.setattr("deerflow.community.jina_ai.tools.get_app_config", lambda: mock_config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: mock_config))
monkeypatch.setattr(JinaClient, "crawl", mock_crawl) monkeypatch.setattr(JinaClient, "crawl", mock_crawl)
result = await web_fetch_tool.ainvoke("https://example.com") result = await web_fetch_tool.ainvoke("https://example.com")
assert result.startswith("Error:") assert result.startswith("Error:")
@@ -170,7 +171,7 @@ async def test_web_fetch_tool_returns_markdown_on_success(monkeypatch):
mock_config = MagicMock() mock_config = MagicMock()
mock_config.get_tool_config.return_value = None mock_config.get_tool_config.return_value = None
monkeypatch.setattr("deerflow.community.jina_ai.tools.get_app_config", lambda: mock_config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: mock_config))
monkeypatch.setattr(JinaClient, "crawl", mock_crawl) monkeypatch.setattr(JinaClient, "crawl", mock_crawl)
result = await web_fetch_tool.ainvoke("https://example.com") result = await web_fetch_tool.ainvoke("https://example.com")
assert "Hello world" in result assert "Hello world" in result
@@ -40,7 +40,7 @@ def test_resolve_model_name_falls_back_to_default(monkeypatch, caplog):
] ]
) )
monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: app_config))
with caplog.at_level("WARNING"): with caplog.at_level("WARNING"):
resolved = lead_agent_module._resolve_model_name("missing-model") resolved = lead_agent_module._resolve_model_name("missing-model")
@@ -57,7 +57,7 @@ def test_resolve_model_name_uses_default_when_none(monkeypatch):
] ]
) )
monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: app_config))
resolved = lead_agent_module._resolve_model_name(None) resolved = lead_agent_module._resolve_model_name(None)
@@ -67,7 +67,7 @@ def test_resolve_model_name_uses_default_when_none(monkeypatch):
def test_resolve_model_name_raises_when_no_models_configured(monkeypatch): def test_resolve_model_name_raises_when_no_models_configured(monkeypatch):
app_config = _make_app_config([]) app_config = _make_app_config([])
monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: app_config))
with pytest.raises( with pytest.raises(
ValueError, ValueError,
@@ -81,7 +81,7 @@ def test_make_lead_agent_disables_thinking_when_model_does_not_support_it(monkey
import deerflow.tools as tools_module import deerflow.tools as tools_module
monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: app_config))
monkeypatch.setattr(tools_module, "get_available_tools", lambda **kwargs: []) monkeypatch.setattr(tools_module, "get_available_tools", lambda **kwargs: [])
monkeypatch.setattr(lead_agent_module, "_build_middlewares", lambda config, model_name, agent_name=None: []) monkeypatch.setattr(lead_agent_module, "_build_middlewares", lambda config, model_name, agent_name=None: [])
@@ -128,7 +128,8 @@ def test_build_middlewares_uses_resolved_model_name_for_vision(monkeypatch):
] ]
) )
monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: app_config) AppConfig.init(app_config)
monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: app_config))
monkeypatch.setattr(lead_agent_module, "_create_summarization_middleware", lambda: None) monkeypatch.setattr(lead_agent_module, "_create_summarization_middleware", lambda: None)
monkeypatch.setattr(lead_agent_module, "_create_todo_list_middleware", lambda is_plan_mode: None) monkeypatch.setattr(lead_agent_module, "_create_todo_list_middleware", lambda is_plan_mode: None)
@@ -140,11 +141,10 @@ def test_build_middlewares_uses_resolved_model_name_for_vision(monkeypatch):
def test_create_summarization_middleware_uses_configured_model_alias(monkeypatch): def test_create_summarization_middleware_uses_configured_model_alias(monkeypatch):
monkeypatch.setattr( app_config = _make_app_config([_make_model("default", supports_thinking=False)])
lead_agent_module, patched = app_config.model_copy(update={"summarization": SummarizationConfig(enabled=True, model_name="model-masswork")})
"get_summarization_config", AppConfig.init(patched)
lambda: SummarizationConfig(enabled=True, model_name="model-masswork"), monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: patched))
)
captured: dict[str, object] = {} captured: dict[str, object] = {}
fake_model = object() fake_model = object()
+5 -4
View File
@@ -4,12 +4,13 @@ from types import SimpleNamespace
import anyio import anyio
from deerflow.agents.lead_agent import prompt as prompt_module from deerflow.agents.lead_agent import prompt as prompt_module
from deerflow.config.app_config import AppConfig
from deerflow.skills.types import Skill from deerflow.skills.types import Skill
def test_build_custom_mounts_section_returns_empty_when_no_mounts(monkeypatch): def test_build_custom_mounts_section_returns_empty_when_no_mounts(monkeypatch):
config = SimpleNamespace(sandbox=SimpleNamespace(mounts=[])) config = SimpleNamespace(sandbox=SimpleNamespace(mounts=[]))
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
assert prompt_module._build_custom_mounts_section() == "" assert prompt_module._build_custom_mounts_section() == ""
@@ -20,7 +21,7 @@ def test_build_custom_mounts_section_lists_configured_mounts(monkeypatch):
SimpleNamespace(container_path="/mnt/reference", read_only=True), SimpleNamespace(container_path="/mnt/reference", read_only=True),
] ]
config = SimpleNamespace(sandbox=SimpleNamespace(mounts=mounts)) config = SimpleNamespace(sandbox=SimpleNamespace(mounts=mounts))
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
section = prompt_module._build_custom_mounts_section() section = prompt_module._build_custom_mounts_section()
@@ -37,7 +38,7 @@ def test_apply_prompt_template_includes_custom_mounts(monkeypatch):
sandbox=SimpleNamespace(mounts=mounts), sandbox=SimpleNamespace(mounts=mounts),
skills=SimpleNamespace(container_path="/mnt/skills"), skills=SimpleNamespace(container_path="/mnt/skills"),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr(prompt_module, "_get_enabled_skills", lambda: []) monkeypatch.setattr(prompt_module, "_get_enabled_skills", lambda: [])
monkeypatch.setattr(prompt_module, "get_deferred_tools_prompt_section", lambda: "") monkeypatch.setattr(prompt_module, "get_deferred_tools_prompt_section", lambda: "")
monkeypatch.setattr(prompt_module, "_build_acp_section", lambda: "") monkeypatch.setattr(prompt_module, "_build_acp_section", lambda: "")
@@ -55,7 +56,7 @@ def test_apply_prompt_template_includes_relative_path_guidance(monkeypatch):
sandbox=SimpleNamespace(mounts=[]), sandbox=SimpleNamespace(mounts=[]),
skills=SimpleNamespace(container_path="/mnt/skills"), skills=SimpleNamespace(container_path="/mnt/skills"),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr(prompt_module, "_get_enabled_skills", lambda: []) monkeypatch.setattr(prompt_module, "_get_enabled_skills", lambda: [])
monkeypatch.setattr(prompt_module, "get_deferred_tools_prompt_section", lambda: "") monkeypatch.setattr(prompt_module, "get_deferred_tools_prompt_section", lambda: "")
monkeypatch.setattr(prompt_module, "_build_acp_section", lambda: "") monkeypatch.setattr(prompt_module, "_build_acp_section", lambda: "")
+10 -9
View File
@@ -3,6 +3,7 @@ from types import SimpleNamespace
from deerflow.agents.lead_agent.prompt import get_skills_prompt_section from deerflow.agents.lead_agent.prompt import get_skills_prompt_section
from deerflow.config.agents_config import AgentConfig from deerflow.config.agents_config import AgentConfig
from deerflow.config.app_config import AppConfig
from deerflow.skills.types import Skill from deerflow.skills.types import Skill
@@ -58,11 +59,11 @@ def test_get_skills_prompt_section_includes_self_evolution_rules(monkeypatch):
skills = [_make_skill("skill1")] skills = [_make_skill("skill1")]
monkeypatch.setattr("deerflow.agents.lead_agent.prompt._get_enabled_skills", lambda: skills) monkeypatch.setattr("deerflow.agents.lead_agent.prompt._get_enabled_skills", lambda: skills)
monkeypatch.setattr( monkeypatch.setattr(
"deerflow.config.get_app_config", AppConfig, "current",
lambda: SimpleNamespace( staticmethod(lambda: SimpleNamespace(
skills=SimpleNamespace(container_path="/mnt/skills"), skills=SimpleNamespace(container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True), skill_evolution=SimpleNamespace(enabled=True),
), )),
) )
result = get_skills_prompt_section(available_skills=None) result = get_skills_prompt_section(available_skills=None)
@@ -72,11 +73,11 @@ def test_get_skills_prompt_section_includes_self_evolution_rules(monkeypatch):
def test_get_skills_prompt_section_includes_self_evolution_rules_without_skills(monkeypatch): def test_get_skills_prompt_section_includes_self_evolution_rules_without_skills(monkeypatch):
monkeypatch.setattr("deerflow.agents.lead_agent.prompt._get_enabled_skills", lambda: []) monkeypatch.setattr("deerflow.agents.lead_agent.prompt._get_enabled_skills", lambda: [])
monkeypatch.setattr( monkeypatch.setattr(
"deerflow.config.get_app_config", AppConfig, "current",
lambda: SimpleNamespace( staticmethod(lambda: SimpleNamespace(
skills=SimpleNamespace(container_path="/mnt/skills"), skills=SimpleNamespace(container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True), skill_evolution=SimpleNamespace(enabled=True),
), )),
) )
result = get_skills_prompt_section(available_skills=None) result = get_skills_prompt_section(available_skills=None)
@@ -90,7 +91,7 @@ def test_get_skills_prompt_section_cache_respects_skill_evolution_toggle(monkeyp
skills=SimpleNamespace(container_path="/mnt/skills"), skills=SimpleNamespace(container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True), skill_evolution=SimpleNamespace(enabled=True),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
enabled_result = get_skills_prompt_section(available_skills=None) enabled_result = get_skills_prompt_section(available_skills=None)
assert "Skill Self-Evolution" in enabled_result assert "Skill Self-Evolution" in enabled_result
@@ -106,7 +107,7 @@ def test_make_lead_agent_empty_skills_passed_correctly(monkeypatch):
from deerflow.agents.lead_agent import agent as lead_agent_module from deerflow.agents.lead_agent import agent as lead_agent_module
# Mock dependencies # Mock dependencies
monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: MagicMock()) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: MagicMock()))
monkeypatch.setattr(lead_agent_module, "_resolve_model_name", lambda x=None: "default-model") monkeypatch.setattr(lead_agent_module, "_resolve_model_name", lambda x=None: "default-model")
monkeypatch.setattr(lead_agent_module, "create_chat_model", lambda **kwargs: "model") monkeypatch.setattr(lead_agent_module, "create_chat_model", lambda **kwargs: "model")
monkeypatch.setattr("deerflow.tools.get_available_tools", lambda **kwargs: []) monkeypatch.setattr("deerflow.tools.get_available_tools", lambda **kwargs: [])
@@ -118,7 +119,7 @@ def test_make_lead_agent_empty_skills_passed_correctly(monkeypatch):
mock_app_config = MagicMock() mock_app_config = MagicMock()
mock_app_config.get_model_config.return_value = MockModelConfig() mock_app_config.get_model_config.return_value = MockModelConfig()
monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: mock_app_config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: mock_app_config))
captured_skills = [] captured_skills = []
@@ -1,5 +1,6 @@
from types import SimpleNamespace from types import SimpleNamespace
from deerflow.config.app_config import AppConfig
from deerflow.sandbox.security import is_host_bash_allowed from deerflow.sandbox.security import is_host_bash_allowed
from deerflow.tools.tools import get_available_tools from deerflow.tools.tools import get_available_tools
@@ -22,7 +23,7 @@ def _make_config(*, allow_host_bash: bool, sandbox_use: str = "deerflow.sandbox.
def test_get_available_tools_hides_bash_for_default_local_sandbox(monkeypatch): def test_get_available_tools_hides_bash_for_default_local_sandbox(monkeypatch):
monkeypatch.setattr("deerflow.tools.tools.get_app_config", lambda: _make_config(allow_host_bash=False)) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: _make_config(allow_host_bash=False)))
monkeypatch.setattr( monkeypatch.setattr(
"deerflow.tools.tools.resolve_variable", "deerflow.tools.tools.resolve_variable",
lambda use, _: SimpleNamespace(name="bash" if "bash" in use else "ls"), lambda use, _: SimpleNamespace(name="bash" if "bash" in use else "ls"),
@@ -35,7 +36,7 @@ def test_get_available_tools_hides_bash_for_default_local_sandbox(monkeypatch):
def test_get_available_tools_keeps_bash_when_explicitly_enabled(monkeypatch): def test_get_available_tools_keeps_bash_when_explicitly_enabled(monkeypatch):
monkeypatch.setattr("deerflow.tools.tools.get_app_config", lambda: _make_config(allow_host_bash=True)) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: _make_config(allow_host_bash=True)))
monkeypatch.setattr( monkeypatch.setattr(
"deerflow.tools.tools.resolve_variable", "deerflow.tools.tools.resolve_variable",
lambda use, _: SimpleNamespace(name="bash" if "bash" in use else "ls"), lambda use, _: SimpleNamespace(name="bash" if "bash" in use else "ls"),
@@ -52,7 +53,7 @@ def test_get_available_tools_hides_renamed_host_bash_alias(monkeypatch):
allow_host_bash=False, allow_host_bash=False,
extra_tools=[SimpleNamespace(name="shell", group="bash", use="deerflow.sandbox.tools:bash_tool")], extra_tools=[SimpleNamespace(name="shell", group="bash", use="deerflow.sandbox.tools:bash_tool")],
) )
monkeypatch.setattr("deerflow.tools.tools.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr( monkeypatch.setattr(
"deerflow.tools.tools.resolve_variable", "deerflow.tools.tools.resolve_variable",
lambda use, _: SimpleNamespace(name="bash" if "bash_tool" in use else "ls"), lambda use, _: SimpleNamespace(name="bash" if "bash_tool" in use else "ls"),
@@ -70,7 +71,7 @@ def test_get_available_tools_keeps_bash_for_aio_sandbox(monkeypatch):
allow_host_bash=False, allow_host_bash=False,
sandbox_use="deerflow.community.aio_sandbox:AioSandboxProvider", sandbox_use="deerflow.community.aio_sandbox:AioSandboxProvider",
) )
monkeypatch.setattr("deerflow.tools.tools.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr( monkeypatch.setattr(
"deerflow.tools.tools.resolve_variable", "deerflow.tools.tools.resolve_variable",
lambda use, _: SimpleNamespace(name="bash" if "bash_tool" in use else "ls"), lambda use, _: SimpleNamespace(name="bash" if "bash_tool" in use else "ls"),
@@ -4,6 +4,7 @@ from unittest.mock import patch
import pytest import pytest
from deerflow.config.app_config import AppConfig
from deerflow.sandbox.local.local_sandbox import LocalSandbox, PathMapping from deerflow.sandbox.local.local_sandbox import LocalSandbox, PathMapping
from deerflow.sandbox.local.local_sandbox_provider import LocalSandboxProvider from deerflow.sandbox.local.local_sandbox_provider import LocalSandboxProvider
@@ -312,7 +313,7 @@ class TestLocalSandboxProviderMounts:
sandbox=sandbox_config, sandbox=sandbox_config,
) )
with patch("deerflow.config.get_app_config", return_value=config): with patch.object(AppConfig, "current", return_value=config):
provider = LocalSandboxProvider() provider = LocalSandboxProvider()
assert [m.container_path for m in provider._path_mappings] == ["/custom-skills"] assert [m.container_path for m in provider._path_mappings] == ["/custom-skills"]
@@ -334,7 +335,7 @@ class TestLocalSandboxProviderMounts:
sandbox=sandbox_config, sandbox=sandbox_config,
) )
with patch("deerflow.config.get_app_config", return_value=config): with patch.object(AppConfig, "current", return_value=config):
provider = LocalSandboxProvider() provider = LocalSandboxProvider()
assert [m.container_path for m in provider._path_mappings] == ["/mnt/skills"] assert [m.container_path for m in provider._path_mappings] == ["/mnt/skills"]
@@ -358,7 +359,7 @@ class TestLocalSandboxProviderMounts:
sandbox=sandbox_config, sandbox=sandbox_config,
) )
with patch("deerflow.config.get_app_config", return_value=config): with patch.object(AppConfig, "current", return_value=config):
provider = LocalSandboxProvider() provider = LocalSandboxProvider()
assert [m.container_path for m in provider._path_mappings] == ["/mnt/skills"] assert [m.container_path for m in provider._path_mappings] == ["/mnt/skills"]
@@ -474,7 +475,7 @@ class TestLocalSandboxProviderMounts:
sandbox=sandbox_config, sandbox=sandbox_config,
) )
with patch("deerflow.config.get_app_config", return_value=config): with patch.object(AppConfig, "current", return_value=config):
provider = LocalSandboxProvider() provider = LocalSandboxProvider()
assert [m.container_path for m in provider._path_mappings] == ["/mnt/skills", "/mnt/data"] assert [m.container_path for m in provider._path_mappings] == ["/mnt/skills", "/mnt/data"]
@@ -10,12 +10,22 @@ from deerflow.agents.middlewares.loop_detection_middleware import (
LoopDetectionMiddleware, LoopDetectionMiddleware,
_hash_tool_calls, _hash_tool_calls,
) )
from deerflow.config.app_config import AppConfig
from deerflow.config.deer_flow_context import DeerFlowContext
from deerflow.config.sandbox_config import SandboxConfig
def _make_context(thread_id: str) -> DeerFlowContext:
return DeerFlowContext(
app_config=AppConfig(sandbox=SandboxConfig(use="test")),
thread_id=thread_id,
)
def _make_runtime(thread_id="test-thread"): def _make_runtime(thread_id="test-thread"):
"""Build a minimal Runtime mock with context.""" """Build a minimal Runtime mock with context."""
runtime = MagicMock() runtime = MagicMock()
runtime.context = {"thread_id": thread_id} runtime.context = _make_context(thread_id)
return runtime return runtime
@@ -293,10 +303,10 @@ class TestLoopDetection:
assert isinstance(mw._lock, type(mw._lock)) assert isinstance(mw._lock, type(mw._lock))
def test_fallback_thread_id_when_missing(self): def test_fallback_thread_id_when_missing(self):
"""When runtime context has no thread_id, should use 'default'.""" """When runtime context has empty thread_id, should use 'default'."""
mw = LoopDetectionMiddleware(warn_threshold=2) mw = LoopDetectionMiddleware(warn_threshold=2)
runtime = MagicMock() runtime = MagicMock()
runtime.context = {} runtime.context = _make_context("")
call = [_bash_call("ls")] call = [_bash_call("ls")]
mw._apply(_make_state(tool_calls=call), runtime) mw._apply(_make_state(tool_calls=call), runtime)
+6 -7
View File
@@ -1,21 +1,20 @@
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
from deerflow.agents.memory.queue import ConversationContext, MemoryUpdateQueue from deerflow.agents.memory.queue import ConversationContext, MemoryUpdateQueue
from deerflow.config.app_config import AppConfig
from deerflow.config.memory_config import MemoryConfig from deerflow.config.memory_config import MemoryConfig
from deerflow.config.sandbox_config import SandboxConfig
def _memory_config(**overrides: object) -> MemoryConfig: def _make_config(**memory_overrides) -> AppConfig:
config = MemoryConfig() return AppConfig(sandbox=SandboxConfig(use="test"), memory=MemoryConfig(**memory_overrides))
for key, value in overrides.items():
setattr(config, key, value)
return config
def test_queue_add_preserves_existing_correction_flag_for_same_thread() -> None: def test_queue_add_preserves_existing_correction_flag_for_same_thread() -> None:
queue = MemoryUpdateQueue() queue = MemoryUpdateQueue()
with ( with (
patch("deerflow.agents.memory.queue.get_memory_config", return_value=_memory_config(enabled=True)), patch.object(AppConfig, "current", return_value=_make_config(enabled=True)),
patch.object(queue, "_reset_timer"), patch.object(queue, "_reset_timer"),
): ):
queue.add(thread_id="thread-1", messages=["first"], correction_detected=True) queue.add(thread_id="thread-1", messages=["first"], correction_detected=True)
@@ -55,7 +54,7 @@ def test_queue_add_preserves_existing_reinforcement_flag_for_same_thread() -> No
queue = MemoryUpdateQueue() queue = MemoryUpdateQueue()
with ( with (
patch("deerflow.agents.memory.queue.get_memory_config", return_value=_memory_config(enabled=True)), patch.object(AppConfig, "current", return_value=_make_config(enabled=True)),
patch.object(queue, "_reset_timer"), patch.object(queue, "_reset_timer"),
): ):
queue.add(thread_id="thread-1", messages=["first"], reinforcement_detected=True) queue.add(thread_id="thread-1", messages=["first"], reinforcement_detected=True)
+17 -11
View File
@@ -11,7 +11,13 @@ from deerflow.agents.memory.storage import (
create_empty_memory, create_empty_memory,
get_memory_storage, get_memory_storage,
) )
from deerflow.config.app_config import AppConfig
from deerflow.config.memory_config import MemoryConfig from deerflow.config.memory_config import MemoryConfig
from deerflow.config.sandbox_config import SandboxConfig
def _app_config(**memory_overrides) -> AppConfig:
return AppConfig(sandbox=SandboxConfig(use="test"), memory=MemoryConfig(**memory_overrides))
class TestCreateEmptyMemory: class TestCreateEmptyMemory:
@@ -53,7 +59,7 @@ class TestFileMemoryStorage:
return mock_paths return mock_paths
with patch("deerflow.agents.memory.storage.get_paths", side_effect=mock_get_paths): with patch("deerflow.agents.memory.storage.get_paths", side_effect=mock_get_paths):
with patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_path="")): with patch.object(AppConfig, "current", return_value=_app_config(storage_path="")):
storage = FileMemoryStorage() storage = FileMemoryStorage()
path = storage._get_memory_file_path(None) path = storage._get_memory_file_path(None)
assert path == tmp_path / "memory.json" assert path == tmp_path / "memory.json"
@@ -87,7 +93,7 @@ class TestFileMemoryStorage:
return mock_paths return mock_paths
with patch("deerflow.agents.memory.storage.get_paths", side_effect=mock_get_paths): with patch("deerflow.agents.memory.storage.get_paths", side_effect=mock_get_paths):
with patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_path="")): with patch.object(AppConfig, "current", return_value=_app_config(storage_path="")):
storage = FileMemoryStorage() storage = FileMemoryStorage()
memory = storage.load() memory = storage.load()
assert isinstance(memory, dict) assert isinstance(memory, dict)
@@ -103,7 +109,7 @@ class TestFileMemoryStorage:
return mock_paths return mock_paths
with patch("deerflow.agents.memory.storage.get_paths", side_effect=mock_get_paths): with patch("deerflow.agents.memory.storage.get_paths", side_effect=mock_get_paths):
with patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_path="")): with patch.object(AppConfig, "current", return_value=_app_config(storage_path="")):
storage = FileMemoryStorage() storage = FileMemoryStorage()
test_memory = {"version": "1.0", "facts": [{"content": "test fact"}]} test_memory = {"version": "1.0", "facts": [{"content": "test fact"}]}
result = storage.save(test_memory) result = storage.save(test_memory)
@@ -122,7 +128,7 @@ class TestFileMemoryStorage:
return mock_paths return mock_paths
with patch("deerflow.agents.memory.storage.get_paths", side_effect=mock_get_paths): with patch("deerflow.agents.memory.storage.get_paths", side_effect=mock_get_paths):
with patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_path="")): with patch.object(AppConfig, "current", return_value=_app_config(storage_path="")):
storage = FileMemoryStorage() storage = FileMemoryStorage()
# First load # First load
memory1 = storage.load() memory1 = storage.load()
@@ -150,19 +156,19 @@ class TestGetMemoryStorage:
def test_returns_file_memory_storage_by_default(self): def test_returns_file_memory_storage_by_default(self):
"""Should return FileMemoryStorage by default.""" """Should return FileMemoryStorage by default."""
with patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_class="deerflow.agents.memory.storage.FileMemoryStorage")): with patch.object(AppConfig, "current", return_value=_app_config(storage_class="deerflow.agents.memory.storage.FileMemoryStorage")):
storage = get_memory_storage() storage = get_memory_storage()
assert isinstance(storage, FileMemoryStorage) assert isinstance(storage, FileMemoryStorage)
def test_falls_back_to_file_memory_storage_on_error(self): def test_falls_back_to_file_memory_storage_on_error(self):
"""Should fall back to FileMemoryStorage if configured storage fails to load.""" """Should fall back to FileMemoryStorage if configured storage fails to load."""
with patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_class="non.existent.StorageClass")): with patch.object(AppConfig, "current", return_value=_app_config(storage_class="non.existent.StorageClass")):
storage = get_memory_storage() storage = get_memory_storage()
assert isinstance(storage, FileMemoryStorage) assert isinstance(storage, FileMemoryStorage)
def test_returns_singleton_instance(self): def test_returns_singleton_instance(self):
"""Should return the same instance on subsequent calls.""" """Should return the same instance on subsequent calls."""
with patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_class="deerflow.agents.memory.storage.FileMemoryStorage")): with patch.object(AppConfig, "current", return_value=_app_config(storage_class="deerflow.agents.memory.storage.FileMemoryStorage")):
storage1 = get_memory_storage() storage1 = get_memory_storage()
storage2 = get_memory_storage() storage2 = get_memory_storage()
assert storage1 is storage2 assert storage1 is storage2
@@ -173,11 +179,11 @@ class TestGetMemoryStorage:
def get_storage(): def get_storage():
# get_memory_storage is called concurrently from multiple threads while # get_memory_storage is called concurrently from multiple threads while
# get_memory_config is patched once around thread creation. This verifies # AppConfig.current is patched once around thread creation. This verifies
# that the singleton initialization remains thread-safe. # that the singleton initialization remains thread-safe.
results.append(get_memory_storage()) results.append(get_memory_storage())
with patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_class="deerflow.agents.memory.storage.FileMemoryStorage")): with patch.object(AppConfig, "current", return_value=_app_config(storage_class="deerflow.agents.memory.storage.FileMemoryStorage")):
threads = [threading.Thread(target=get_storage) for _ in range(10)] threads = [threading.Thread(target=get_storage) for _ in range(10)]
for t in threads: for t in threads:
t.start() t.start()
@@ -191,13 +197,13 @@ class TestGetMemoryStorage:
def test_get_memory_storage_invalid_class_fallback(self): def test_get_memory_storage_invalid_class_fallback(self):
"""Should fall back to FileMemoryStorage if the configured class is not actually a class.""" """Should fall back to FileMemoryStorage if the configured class is not actually a class."""
# Using a built-in function instead of a class # Using a built-in function instead of a class
with patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_class="os.path.join")): with patch.object(AppConfig, "current", return_value=_app_config(storage_class="os.path.join")):
storage = get_memory_storage() storage = get_memory_storage()
assert isinstance(storage, FileMemoryStorage) assert isinstance(storage, FileMemoryStorage)
def test_get_memory_storage_non_subclass_fallback(self): def test_get_memory_storage_non_subclass_fallback(self):
"""Should fall back to FileMemoryStorage if the configured class is not a subclass of MemoryStorage.""" """Should fall back to FileMemoryStorage if the configured class is not a subclass of MemoryStorage."""
# Using 'dict' as a class that is not a MemoryStorage subclass # Using 'dict' as a class that is not a MemoryStorage subclass
with patch("deerflow.agents.memory.storage.get_memory_config", return_value=MemoryConfig(storage_class="builtins.dict")): with patch.object(AppConfig, "current", return_value=_app_config(storage_class="builtins.dict")):
storage = get_memory_storage() storage = get_memory_storage()
assert isinstance(storage, FileMemoryStorage) assert isinstance(storage, FileMemoryStorage)
+18 -26
View File
@@ -10,7 +10,9 @@ from deerflow.agents.memory.updater import (
import_memory_data, import_memory_data,
update_memory_fact, update_memory_fact,
) )
from deerflow.config.app_config import AppConfig
from deerflow.config.memory_config import MemoryConfig from deerflow.config.memory_config import MemoryConfig
from deerflow.config.sandbox_config import SandboxConfig
def _make_memory(facts: list[dict[str, object]] | None = None) -> dict[str, object]: def _make_memory(facts: list[dict[str, object]] | None = None) -> dict[str, object]:
@@ -31,11 +33,8 @@ def _make_memory(facts: list[dict[str, object]] | None = None) -> dict[str, obje
} }
def _memory_config(**overrides: object) -> MemoryConfig: def _memory_config(**overrides: object) -> AppConfig:
config = MemoryConfig() return AppConfig(sandbox=SandboxConfig(use="test"), memory=MemoryConfig().model_copy(update=overrides))
for key, value in overrides.items():
setattr(config, key, value)
return config
def test_apply_updates_skips_existing_duplicate_and_preserves_removals() -> None: def test_apply_updates_skips_existing_duplicate_and_preserves_removals() -> None:
@@ -67,8 +66,7 @@ def test_apply_updates_skips_existing_duplicate_and_preserves_removals() -> None
], ],
} }
with patch( with patch.object(AppConfig, "current",
"deerflow.agents.memory.updater.get_memory_config",
return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7), return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7),
): ):
result = updater._apply_updates(current_memory, update_data, thread_id="thread-b") result = updater._apply_updates(current_memory, update_data, thread_id="thread-b")
@@ -88,8 +86,7 @@ def test_apply_updates_skips_same_batch_duplicates_and_keeps_source_metadata() -
], ],
} }
with patch( with patch.object(AppConfig, "current",
"deerflow.agents.memory.updater.get_memory_config",
return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7), return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7),
): ):
result = updater._apply_updates(current_memory, update_data, thread_id="thread-42") result = updater._apply_updates(current_memory, update_data, thread_id="thread-42")
@@ -132,8 +129,7 @@ def test_apply_updates_preserves_threshold_and_max_facts_trimming() -> None:
], ],
} }
with patch( with patch.object(AppConfig, "current",
"deerflow.agents.memory.updater.get_memory_config",
return_value=_memory_config(max_facts=2, fact_confidence_threshold=0.7), return_value=_memory_config(max_facts=2, fact_confidence_threshold=0.7),
): ):
result = updater._apply_updates(current_memory, update_data, thread_id="thread-9") result = updater._apply_updates(current_memory, update_data, thread_id="thread-9")
@@ -160,8 +156,7 @@ def test_apply_updates_preserves_source_error() -> None:
] ]
} }
with patch( with patch.object(AppConfig, "current",
"deerflow.agents.memory.updater.get_memory_config",
return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7), return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7),
): ):
result = updater._apply_updates(current_memory, update_data, thread_id="thread-correction") result = updater._apply_updates(current_memory, update_data, thread_id="thread-correction")
@@ -184,8 +179,7 @@ def test_apply_updates_ignores_empty_source_error() -> None:
] ]
} }
with patch( with patch.object(AppConfig, "current",
"deerflow.agents.memory.updater.get_memory_config",
return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7), return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7),
): ):
result = updater._apply_updates(current_memory, update_data, thread_id="thread-correction") result = updater._apply_updates(current_memory, update_data, thread_id="thread-correction")
@@ -532,7 +526,7 @@ class TestUpdateMemoryStructuredResponse:
with ( with (
patch.object(updater, "_get_model", return_value=self._make_mock_model(valid_json)), patch.object(updater, "_get_model", return_value=self._make_mock_model(valid_json)),
patch("deerflow.agents.memory.updater.get_memory_config", return_value=_memory_config(enabled=True)), patch.object(AppConfig, "current", return_value=_memory_config(enabled=True)),
patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()), patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()),
patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))), patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))),
): ):
@@ -555,7 +549,7 @@ class TestUpdateMemoryStructuredResponse:
with ( with (
patch.object(updater, "_get_model", return_value=self._make_mock_model(list_content)), patch.object(updater, "_get_model", return_value=self._make_mock_model(list_content)),
patch("deerflow.agents.memory.updater.get_memory_config", return_value=_memory_config(enabled=True)), patch.object(AppConfig, "current", return_value=_memory_config(enabled=True)),
patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()), patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()),
patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))), patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))),
): ):
@@ -577,7 +571,7 @@ class TestUpdateMemoryStructuredResponse:
with ( with (
patch.object(updater, "_get_model", return_value=model), patch.object(updater, "_get_model", return_value=model),
patch("deerflow.agents.memory.updater.get_memory_config", return_value=_memory_config(enabled=True)), patch.object(AppConfig, "current", return_value=_memory_config(enabled=True)),
patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()), patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()),
patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))), patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))),
): ):
@@ -602,7 +596,7 @@ class TestUpdateMemoryStructuredResponse:
with ( with (
patch.object(updater, "_get_model", return_value=model), patch.object(updater, "_get_model", return_value=model),
patch("deerflow.agents.memory.updater.get_memory_config", return_value=_memory_config(enabled=True)), patch.object(AppConfig, "current", return_value=_memory_config(enabled=True)),
patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()), patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()),
patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))), patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))),
): ):
@@ -646,8 +640,7 @@ class TestFactDeduplicationCaseInsensitive:
], ],
} }
with patch( with patch.object(AppConfig, "current",
"deerflow.agents.memory.updater.get_memory_config",
return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7), return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7),
): ):
result = updater._apply_updates(current_memory, update_data, thread_id="thread-b") result = updater._apply_updates(current_memory, update_data, thread_id="thread-b")
@@ -677,8 +670,7 @@ class TestFactDeduplicationCaseInsensitive:
], ],
} }
with patch( with patch.object(AppConfig, "current",
"deerflow.agents.memory.updater.get_memory_config",
return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7), return_value=_memory_config(max_facts=100, fact_confidence_threshold=0.7),
): ):
result = updater._apply_updates(current_memory, update_data, thread_id="thread-b") result = updater._apply_updates(current_memory, update_data, thread_id="thread-b")
@@ -704,7 +696,7 @@ class TestReinforcementHint:
with ( with (
patch.object(updater, "_get_model", return_value=model), patch.object(updater, "_get_model", return_value=model),
patch("deerflow.agents.memory.updater.get_memory_config", return_value=_memory_config(enabled=True)), patch.object(AppConfig, "current", return_value=_memory_config(enabled=True)),
patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()), patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()),
patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))), patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))),
): ):
@@ -729,7 +721,7 @@ class TestReinforcementHint:
with ( with (
patch.object(updater, "_get_model", return_value=model), patch.object(updater, "_get_model", return_value=model),
patch("deerflow.agents.memory.updater.get_memory_config", return_value=_memory_config(enabled=True)), patch.object(AppConfig, "current", return_value=_memory_config(enabled=True)),
patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()), patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()),
patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))), patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))),
): ):
@@ -754,7 +746,7 @@ class TestReinforcementHint:
with ( with (
patch.object(updater, "_get_model", return_value=model), patch.object(updater, "_get_model", return_value=model),
patch("deerflow.agents.memory.updater.get_memory_config", return_value=_memory_config(enabled=True)), patch.object(AppConfig, "current", return_value=_memory_config(enabled=True)),
patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()), patch("deerflow.agents.memory.updater.get_memory_data", return_value=_make_memory()),
patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))), patch("deerflow.agents.memory.updater.get_memory_storage", return_value=MagicMock(save=MagicMock(return_value=True))),
): ):
+5 -5
View File
@@ -72,8 +72,8 @@ class FakeChatModel(BaseChatModel):
def _patch_factory(monkeypatch, app_config: AppConfig, model_class=FakeChatModel): def _patch_factory(monkeypatch, app_config: AppConfig, model_class=FakeChatModel):
"""Patch get_app_config, resolve_class, and tracing for isolated unit tests.""" """Patch AppConfig.get, resolve_class, and tracing for isolated unit tests."""
monkeypatch.setattr(factory_module, "get_app_config", lambda: app_config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: app_config))
monkeypatch.setattr(factory_module, "resolve_class", lambda path, base: model_class) monkeypatch.setattr(factory_module, "resolve_class", lambda path, base: model_class)
monkeypatch.setattr(factory_module, "build_tracing_callbacks", lambda: []) monkeypatch.setattr(factory_module, "build_tracing_callbacks", lambda: [])
@@ -96,7 +96,7 @@ def test_uses_first_model_when_name_is_none(monkeypatch):
def test_raises_when_model_not_found(monkeypatch): def test_raises_when_model_not_found(monkeypatch):
cfg = _make_app_config([_make_model("only-model")]) cfg = _make_app_config([_make_model("only-model")])
monkeypatch.setattr(factory_module, "get_app_config", lambda: cfg) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: cfg))
monkeypatch.setattr(factory_module, "build_tracing_callbacks", lambda: []) monkeypatch.setattr(factory_module, "build_tracing_callbacks", lambda: [])
with pytest.raises(ValueError, match="ghost-model"): with pytest.raises(ValueError, match="ghost-model"):
@@ -744,7 +744,7 @@ def test_thinking_disabled_vllm_chat_template_format(monkeypatch):
supports_thinking=True, supports_thinking=True,
when_thinking_enabled=wte, when_thinking_enabled=wte,
) )
model.extra_body = {"top_k": 20} model = model.model_copy(update={"extra_body": {"top_k": 20}})
cfg = _make_app_config([model]) cfg = _make_app_config([model])
_patch_factory(monkeypatch, cfg) _patch_factory(monkeypatch, cfg)
@@ -771,7 +771,7 @@ def test_thinking_disabled_vllm_enable_thinking_format(monkeypatch):
supports_thinking=True, supports_thinking=True,
when_thinking_enabled=wte, when_thinking_enabled=wte,
) )
model.extra_body = {"top_k": 20} model = model.model_copy(update={"extra_body": {"top_k": 20}})
cfg = _make_app_config([model]) cfg = _make_app_config([model])
_patch_factory(monkeypatch, cfg) _patch_factory(monkeypatch, cfg)
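
Several hunks above wrap the replacement callable in `staticmethod()` when assigning it onto the class, e.g. `monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: app_config))`. The wrapper keeps the stub unbound, so the accessor behaves the same whether it is looked up on the class or on an instance. A generic illustration in plain Python (not DeerFlow code):

```python
def _fake_config() -> str:
    return "patched"


class Cfg:
    @classmethod
    def current(cls) -> str:
        return "real"


# staticmethod() keeps the replacement unbound, so class-level and
# instance-level lookups both call it with no implicit argument.
Cfg.current = staticmethod(_fake_config)
assert Cfg.current() == "patched"
assert Cfg().current() == "patched"

# A bare function still works when called on the class...
Cfg.current = _fake_config
assert Cfg.current() == "patched"
# ...but through an instance it is bound and receives `self`:
try:
    Cfg().current()
except TypeError:
    pass  # _fake_config() takes 0 positional arguments but 1 was given
```

This is why the lambdas in these tests are wrapped before being attached to `AppConfig`, while the `patch.object(..., return_value=...)` form needs no wrapper.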
@@ -3,13 +3,24 @@
import importlib import importlib
from types import SimpleNamespace from types import SimpleNamespace
from deerflow.config.app_config import AppConfig
from deerflow.config.deer_flow_context import DeerFlowContext
from deerflow.config.sandbox_config import SandboxConfig
present_file_tool_module = importlib.import_module("deerflow.tools.builtins.present_file_tool") present_file_tool_module = importlib.import_module("deerflow.tools.builtins.present_file_tool")
def _make_context(thread_id: str) -> DeerFlowContext:
return DeerFlowContext(
app_config=AppConfig(sandbox=SandboxConfig(use="test")),
thread_id=thread_id,
)
def _make_runtime(outputs_path: str) -> SimpleNamespace: def _make_runtime(outputs_path: str) -> SimpleNamespace:
return SimpleNamespace( return SimpleNamespace(
state={"thread_data": {"outputs_path": outputs_path}}, state={"thread_data": {"outputs_path": outputs_path}},
context={"thread_id": "thread-1"}, context=_make_context("thread-1"),
) )
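
With `runtime.context` now a typed `DeerFlowContext` rather than a dict, the tool tests build a small fake runtime around it. A consolidated helper in the spirit of the `_make_context`/`_make_runtime` pair above; the attribute set is an assumption limited to what `present_file_tool` reads in these tests (`state["thread_data"]["outputs_path"]` and the context's `thread_id`):

```python
from types import SimpleNamespace

from deerflow.config.app_config import AppConfig
from deerflow.config.deer_flow_context import DeerFlowContext
from deerflow.config.sandbox_config import SandboxConfig


def _fake_runtime(thread_id: str, outputs_path: str) -> SimpleNamespace:
    # SimpleNamespace stands in for the real Runtime[DeerFlowContext]; only the
    # attributes the tool under test actually touches are provided.
    return SimpleNamespace(
        state={"thread_data": {"outputs_path": outputs_path}},
        context=DeerFlowContext(
            app_config=AppConfig(sandbox=SandboxConfig(use="test")),
            thread_id=thread_id,
        ),
    )
```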
+4 -3
View File
@@ -2,6 +2,7 @@ from types import SimpleNamespace
from unittest.mock import patch from unittest.mock import patch
from deerflow.community.aio_sandbox.aio_sandbox import AioSandbox from deerflow.community.aio_sandbox.aio_sandbox import AioSandbox
from deerflow.config.app_config import AppConfig
from deerflow.sandbox.local.local_sandbox import LocalSandbox from deerflow.sandbox.local.local_sandbox import LocalSandbox
from deerflow.sandbox.search import GrepMatch, find_glob_matches, find_grep_matches from deerflow.sandbox.search import GrepMatch, find_glob_matches, find_grep_matches
from deerflow.sandbox.tools import glob_tool, grep_tool from deerflow.sandbox.tools import glob_tool, grep_tool
@@ -104,7 +105,7 @@ def test_grep_tool_truncates_results(tmp_path, monkeypatch) -> None:
monkeypatch.setattr("deerflow.sandbox.tools.ensure_sandbox_initialized", lambda runtime: LocalSandbox(id="local")) monkeypatch.setattr("deerflow.sandbox.tools.ensure_sandbox_initialized", lambda runtime: LocalSandbox(id="local"))
# Prevent config.yaml tool config from overriding the caller-supplied max_results=2. # Prevent config.yaml tool config from overriding the caller-supplied max_results=2.
monkeypatch.setattr("deerflow.sandbox.tools.get_app_config", lambda: SimpleNamespace(get_tool_config=lambda name: None)) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: SimpleNamespace(get_tool_config=lambda name: None)))
result = grep_tool.func( result = grep_tool.func(
runtime=runtime, runtime=runtime,
@@ -325,8 +326,8 @@ def test_glob_tool_honors_smaller_requested_max_results(tmp_path, monkeypatch) -
monkeypatch.setattr("deerflow.sandbox.tools.ensure_sandbox_initialized", lambda runtime: LocalSandbox(id="local")) monkeypatch.setattr("deerflow.sandbox.tools.ensure_sandbox_initialized", lambda runtime: LocalSandbox(id="local"))
monkeypatch.setattr( monkeypatch.setattr(
"deerflow.sandbox.tools.get_app_config", AppConfig, "current",
lambda: SimpleNamespace(get_tool_config=lambda name: SimpleNamespace(model_extra={"max_results": 50})), staticmethod(lambda: SimpleNamespace(get_tool_config=lambda name: SimpleNamespace(model_extra={"max_results": 50}))),
) )
result = glob_tool.func( result = glob_tool.func(
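
When a test only needs a single method of the config, the hunks above duck-type it with `SimpleNamespace` instead of constructing a full `AppConfig`. A compact sketch of the two stubs used here; the shape of a real per-tool section in `config.yaml` is not shown in this diff, so the `model_extra` payload below simply mirrors the test values:

```python
from types import SimpleNamespace
from unittest.mock import patch

from deerflow.config.app_config import AppConfig

# "No override in config.yaml": get_tool_config() returns None, so the
# caller-supplied max_results wins.
no_override = SimpleNamespace(get_tool_config=lambda name: None)

# "Override present": model_extra carries the extra keys of a tool section,
# here capping results at 50 regardless of what the caller requested.
with_override = SimpleNamespace(
    get_tool_config=lambda name: SimpleNamespace(model_extra={"max_results": 50})
)

with patch.object(AppConfig, "current", return_value=no_override):
    pass  # invoke grep_tool.func(...) / glob_tool.func(...) here
```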
+24 -25
View File
@@ -5,6 +5,7 @@ from unittest.mock import patch
import pytest import pytest
from deerflow.config.app_config import AppConfig
from deerflow.sandbox.tools import ( from deerflow.sandbox.tools import (
VIRTUAL_PATH_PREFIX, VIRTUAL_PATH_PREFIX,
_apply_cwd_prefix, _apply_cwd_prefix,
@@ -617,18 +618,25 @@ def test_apply_cwd_prefix_quotes_path_with_spaces() -> None:
def test_validate_local_bash_command_paths_allows_mcp_filesystem_paths() -> None: def test_validate_local_bash_command_paths_allows_mcp_filesystem_paths() -> None:
"""Bash commands referencing MCP filesystem server paths should be allowed.""" """Bash commands referencing MCP filesystem server paths should be allowed."""
from deerflow.config.app_config import AppConfig
from deerflow.config.extensions_config import ExtensionsConfig, McpServerConfig from deerflow.config.extensions_config import ExtensionsConfig, McpServerConfig
from deerflow.config.sandbox_config import SandboxConfig
mock_config = ExtensionsConfig( def _make_app_config(enabled: bool) -> AppConfig:
mcp_servers={ return AppConfig(
"filesystem": McpServerConfig( sandbox=SandboxConfig(use="test"),
enabled=True, extensions=ExtensionsConfig(
command="npx", mcp_servers={
args=["-y", "@modelcontextprotocol/server-filesystem", "/mnt/d/workspace"], "filesystem": McpServerConfig(
) enabled=enabled,
} command="npx",
) args=["-y", "@modelcontextprotocol/server-filesystem", "/mnt/d/workspace"],
with patch("deerflow.config.extensions_config.get_extensions_config", return_value=mock_config): )
}
),
)
with patch.object(AppConfig, "current", return_value=_make_app_config(True)):
# Should not raise - MCP filesystem paths are allowed # Should not raise - MCP filesystem paths are allowed
validate_local_bash_command_paths("ls /mnt/d/workspace", _THREAD_DATA) validate_local_bash_command_paths("ls /mnt/d/workspace", _THREAD_DATA)
validate_local_bash_command_paths("cat /mnt/d/workspace/subdir/file.txt", _THREAD_DATA) validate_local_bash_command_paths("cat /mnt/d/workspace/subdir/file.txt", _THREAD_DATA)
@@ -637,19 +645,10 @@ def test_validate_local_bash_command_paths_allows_mcp_filesystem_paths() -> None
with pytest.raises(PermissionError, match="path traversal"): with pytest.raises(PermissionError, match="path traversal"):
validate_local_bash_command_paths("cat /mnt/d/workspace/../../etc/passwd", _THREAD_DATA) validate_local_bash_command_paths("cat /mnt/d/workspace/../../etc/passwd", _THREAD_DATA)
# Disabled servers should not expose paths # Disabled servers should not expose paths
disabled_config = ExtensionsConfig( with patch.object(AppConfig, "current", return_value=_make_app_config(False)):
mcp_servers={ with pytest.raises(PermissionError, match="Unsafe absolute paths"):
"filesystem": McpServerConfig( validate_local_bash_command_paths("ls /mnt/d/workspace", _THREAD_DATA)
enabled=False,
command="npx",
args=["-y", "@modelcontextprotocol/server-filesystem", "/mnt/d/workspace"],
)
}
)
with patch("deerflow.config.extensions_config.get_extensions_config", return_value=disabled_config):
with pytest.raises(PermissionError, match="Unsafe absolute paths"):
validate_local_bash_command_paths("ls /mnt/d/workspace", _THREAD_DATA)
# ---------- Custom mount path tests ---------- # ---------- Custom mount path tests ----------
@@ -757,7 +756,7 @@ def test_get_custom_mounts_caching(monkeypatch, tmp_path) -> None:
mock_sandbox = SandboxConfig(use="deerflow.sandbox.local:LocalSandboxProvider", mounts=mounts) mock_sandbox = SandboxConfig(use="deerflow.sandbox.local:LocalSandboxProvider", mounts=mounts)
mock_config = SimpleNamespace(sandbox=mock_sandbox) mock_config = SimpleNamespace(sandbox=mock_sandbox)
with patch("deerflow.config.get_app_config", return_value=mock_config): with patch.object(AppConfig, "current", return_value=mock_config):
result = _get_custom_mounts() result = _get_custom_mounts()
assert len(result) == 2 assert len(result) == 2
@@ -786,7 +785,7 @@ def test_get_custom_mounts_filters_nonexistent_host_path(monkeypatch, tmp_path)
mock_sandbox = SandboxConfig(use="deerflow.sandbox.local:LocalSandboxProvider", mounts=mounts) mock_sandbox = SandboxConfig(use="deerflow.sandbox.local:LocalSandboxProvider", mounts=mounts)
mock_config = SimpleNamespace(sandbox=mock_sandbox) mock_config = SimpleNamespace(sandbox=mock_sandbox)
with patch("deerflow.config.get_app_config", return_value=mock_config): with patch.object(AppConfig, "current", return_value=mock_config):
result = _get_custom_mounts() result = _get_custom_mounts()
assert len(result) == 1 assert len(result) == 1
assert result[0].container_path == "/mnt/existing" assert result[0].container_path == "/mnt/existing"
+2 -1
View File
@@ -2,13 +2,14 @@ from types import SimpleNamespace
import pytest import pytest
from deerflow.config.app_config import AppConfig
from deerflow.skills.security_scanner import scan_skill_content from deerflow.skills.security_scanner import scan_skill_content
@pytest.mark.anyio @pytest.mark.anyio
async def test_scan_skill_content_blocks_when_model_unavailable(monkeypatch): async def test_scan_skill_content_blocks_when_model_unavailable(monkeypatch):
config = SimpleNamespace(skill_evolution=SimpleNamespace(moderation_model_name=None)) config = SimpleNamespace(skill_evolution=SimpleNamespace(moderation_model_name=None))
monkeypatch.setattr("deerflow.skills.security_scanner.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr("deerflow.skills.security_scanner.create_chat_model", lambda **kwargs: (_ for _ in ()).throw(RuntimeError("boom"))) monkeypatch.setattr("deerflow.skills.security_scanner.create_chat_model", lambda **kwargs: (_ for _ in ()).throw(RuntimeError("boom")))
result = await scan_skill_content("---\nname: demo-skill\ndescription: demo\n---\n", executable=False) result = await scan_skill_content("---\nname: demo-skill\ndescription: demo\n---\n", executable=False)
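
The scanner test forces `create_chat_model` to fail by using the `(_ for _ in ()).throw(...)` idiom, which exists only because a lambda body cannot contain `raise`. A behaviourally equivalent, more readable stub (shown purely as an illustration, using pytest's built-in `monkeypatch` fixture):

```python
def test_scanner_blocks_when_model_factory_fails(monkeypatch):
    def _raise_boom(**kwargs):
        # Same effect as the generator-throw lambda above, just named and readable.
        raise RuntimeError("boom")

    monkeypatch.setattr(
        "deerflow.skills.security_scanner.create_chat_model",
        _raise_boom,
    )
```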
+21 -18
View File
@@ -4,9 +4,20 @@ from types import SimpleNamespace
import anyio import anyio
import pytest import pytest
from deerflow.config.app_config import AppConfig
from deerflow.config.deer_flow_context import DeerFlowContext
from deerflow.config.sandbox_config import SandboxConfig
skill_manage_module = importlib.import_module("deerflow.tools.skill_manage_tool") skill_manage_module = importlib.import_module("deerflow.tools.skill_manage_tool")
def _make_context(thread_id: str) -> DeerFlowContext:
return DeerFlowContext(
app_config=AppConfig(sandbox=SandboxConfig(use="test")),
thread_id=thread_id,
)
def _skill_content(name: str, description: str = "Demo skill") -> str: def _skill_content(name: str, description: str = "Demo skill") -> str:
return f"---\nname: {name}\ndescription: {description}\n---\n\n# {name}\n" return f"---\nname: {name}\ndescription: {description}\n---\n\n# {name}\n"
@@ -23,9 +34,7 @@ def test_skill_manage_create_and_patch(monkeypatch, tmp_path):
skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"), skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None), skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr("deerflow.skills.manager.get_app_config", lambda: config)
monkeypatch.setattr("deerflow.skills.security_scanner.get_app_config", lambda: config)
refresh_calls = [] refresh_calls = []
async def _refresh(): async def _refresh():
@@ -34,7 +43,7 @@ def test_skill_manage_create_and_patch(monkeypatch, tmp_path):
monkeypatch.setattr(skill_manage_module, "refresh_skills_system_prompt_cache_async", _refresh) monkeypatch.setattr(skill_manage_module, "refresh_skills_system_prompt_cache_async", _refresh)
monkeypatch.setattr(skill_manage_module, "scan_skill_content", lambda *args, **kwargs: _async_result("allow", "ok")) monkeypatch.setattr(skill_manage_module, "scan_skill_content", lambda *args, **kwargs: _async_result("allow", "ok"))
runtime = SimpleNamespace(context={"thread_id": "thread-1"}, config={"configurable": {"thread_id": "thread-1"}}) runtime = SimpleNamespace(context=_make_context("thread-1"), config={"configurable": {"thread_id": "thread-1"}})
result = anyio.run( result = anyio.run(
skill_manage_module.skill_manage_tool.coroutine, skill_manage_module.skill_manage_tool.coroutine,
@@ -67,9 +76,7 @@ def test_skill_manage_patch_replaces_single_occurrence_by_default(monkeypatch, t
skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"), skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None), skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr("deerflow.skills.manager.get_app_config", lambda: config)
monkeypatch.setattr("deerflow.skills.security_scanner.get_app_config", lambda: config)
async def _refresh(): async def _refresh():
return None return None
@@ -77,7 +84,7 @@ def test_skill_manage_patch_replaces_single_occurrence_by_default(monkeypatch, t
monkeypatch.setattr(skill_manage_module, "refresh_skills_system_prompt_cache_async", _refresh) monkeypatch.setattr(skill_manage_module, "refresh_skills_system_prompt_cache_async", _refresh)
monkeypatch.setattr(skill_manage_module, "scan_skill_content", lambda *args, **kwargs: _async_result("allow", "ok")) monkeypatch.setattr(skill_manage_module, "scan_skill_content", lambda *args, **kwargs: _async_result("allow", "ok"))
runtime = SimpleNamespace(context={"thread_id": "thread-1"}, config={"configurable": {"thread_id": "thread-1"}}) runtime = SimpleNamespace(context=_make_context("thread-1"), config={"configurable": {"thread_id": "thread-1"}})
content = _skill_content("demo-skill", "Demo skill") + "\nRepeated: Demo skill\n" content = _skill_content("demo-skill", "Demo skill") + "\nRepeated: Demo skill\n"
anyio.run(skill_manage_module.skill_manage_tool.coroutine, runtime, "create", "demo-skill", content) anyio.run(skill_manage_module.skill_manage_tool.coroutine, runtime, "create", "demo-skill", content)
@@ -107,10 +114,9 @@ def test_skill_manage_rejects_public_skill_patch(monkeypatch, tmp_path):
skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"), skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None), skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr("deerflow.skills.manager.get_app_config", lambda: config)
runtime = SimpleNamespace(context={}, config={"configurable": {}}) runtime = SimpleNamespace(context=_make_context(""), config={"configurable": {}})
with pytest.raises(ValueError, match="built-in skill"): with pytest.raises(ValueError, match="built-in skill"):
anyio.run( anyio.run(
@@ -131,8 +137,7 @@ def test_skill_manage_sync_wrapper_supported(monkeypatch, tmp_path):
skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"), skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None), skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr("deerflow.skills.manager.get_app_config", lambda: config)
refresh_calls = [] refresh_calls = []
async def _refresh(): async def _refresh():
@@ -141,7 +146,7 @@ def test_skill_manage_sync_wrapper_supported(monkeypatch, tmp_path):
monkeypatch.setattr(skill_manage_module, "refresh_skills_system_prompt_cache_async", _refresh) monkeypatch.setattr(skill_manage_module, "refresh_skills_system_prompt_cache_async", _refresh)
monkeypatch.setattr(skill_manage_module, "scan_skill_content", lambda *args, **kwargs: _async_result("allow", "ok")) monkeypatch.setattr(skill_manage_module, "scan_skill_content", lambda *args, **kwargs: _async_result("allow", "ok"))
runtime = SimpleNamespace(context={"thread_id": "thread-sync"}, config={"configurable": {"thread_id": "thread-sync"}}) runtime = SimpleNamespace(context=_make_context("thread-sync"), config={"configurable": {"thread_id": "thread-sync"}})
result = skill_manage_module.skill_manage_tool.func( result = skill_manage_module.skill_manage_tool.func(
runtime=runtime, runtime=runtime,
action="create", action="create",
@@ -159,9 +164,7 @@ def test_skill_manage_rejects_support_path_traversal(monkeypatch, tmp_path):
skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"), skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None), skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr("deerflow.skills.manager.get_app_config", lambda: config)
monkeypatch.setattr("deerflow.skills.security_scanner.get_app_config", lambda: config)
async def _refresh(): async def _refresh():
return None return None
@@ -169,7 +172,7 @@ def test_skill_manage_rejects_support_path_traversal(monkeypatch, tmp_path):
monkeypatch.setattr(skill_manage_module, "refresh_skills_system_prompt_cache_async", _refresh) monkeypatch.setattr(skill_manage_module, "refresh_skills_system_prompt_cache_async", _refresh)
monkeypatch.setattr(skill_manage_module, "scan_skill_content", lambda *args, **kwargs: _async_result("allow", "ok")) monkeypatch.setattr(skill_manage_module, "scan_skill_content", lambda *args, **kwargs: _async_result("allow", "ok"))
runtime = SimpleNamespace(context={"thread_id": "thread-1"}, config={"configurable": {"thread_id": "thread-1"}}) runtime = SimpleNamespace(context=_make_context("thread-1"), config={"configurable": {"thread_id": "thread-1"}})
anyio.run(skill_manage_module.skill_manage_tool.coroutine, runtime, "create", "demo-skill", _skill_content("demo-skill")) anyio.run(skill_manage_module.skill_manage_tool.coroutine, runtime, "create", "demo-skill", _skill_content("demo-skill"))
with pytest.raises(ValueError, match="parent-directory traversal|selected support directory"): with pytest.raises(ValueError, match="parent-directory traversal|selected support directory"):
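
The skill-manage tests drive the tool's async entry point from synchronous test functions via `anyio.run`, passing the coroutine function followed by its positional arguments. A self-contained toy with the same call shape (the argument order mirrors the calls above; it is not the real tool):

```python
import anyio


async def _create_skill(runtime: object, action: str, name: str, content: str) -> str:
    # Toy coroutine mirroring the positional shape of skill_manage_tool.coroutine.
    return f"{action}:{name}"


# anyio.run(fn, *args) forwards the positional arguments to the coroutine
# function and blocks the synchronous test until it completes.
assert anyio.run(_create_skill, None, "create", "demo-skill", "---\nname: demo-skill\n---\n") == "create:demo-skill"
```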
+11 -8
View File
@@ -6,6 +6,9 @@ from fastapi import FastAPI
from fastapi.testclient import TestClient from fastapi.testclient import TestClient
from app.gateway.routers import skills as skills_router from app.gateway.routers import skills as skills_router
from deerflow.config.app_config import AppConfig
from deerflow.config.extensions_config import ExtensionsConfig
from deerflow.config.sandbox_config import SandboxConfig
from deerflow.skills.manager import get_skill_history_file from deerflow.skills.manager import get_skill_history_file
from deerflow.skills.types import Skill from deerflow.skills.types import Skill
@@ -43,8 +46,7 @@ def test_custom_skills_router_lifecycle(monkeypatch, tmp_path):
skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"), skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None), skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr("deerflow.skills.manager.get_app_config", lambda: config)
monkeypatch.setattr("app.gateway.routers.skills.scan_skill_content", lambda *args, **kwargs: _async_scan("allow", "ok")) monkeypatch.setattr("app.gateway.routers.skills.scan_skill_content", lambda *args, **kwargs: _async_scan("allow", "ok"))
refresh_calls = [] refresh_calls = []
@@ -93,8 +95,7 @@ def test_custom_skill_rollback_blocked_by_scanner(monkeypatch, tmp_path):
skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"), skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None), skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr("deerflow.skills.manager.get_app_config", lambda: config)
get_skill_history_file("demo-skill").write_text( get_skill_history_file("demo-skill").write_text(
'{"action":"human_edit","prev_content":' + json.dumps(original_content) + ',"new_content":' + json.dumps(edited_content) + "}\n", '{"action":"human_edit","prev_content":' + json.dumps(original_content) + ',"new_content":' + json.dumps(edited_content) + "}\n",
encoding="utf-8", encoding="utf-8",
@@ -135,8 +136,7 @@ def test_custom_skill_delete_preserves_history_and_allows_restore(monkeypatch, t
skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"), skills=SimpleNamespace(get_skills_path=lambda: skills_root, container_path="/mnt/skills"),
skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None), skill_evolution=SimpleNamespace(enabled=True, moderation_model_name=None),
) )
monkeypatch.setattr("deerflow.config.get_app_config", lambda: config) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: config))
monkeypatch.setattr("deerflow.skills.manager.get_app_config", lambda: config)
monkeypatch.setattr("app.gateway.routers.skills.scan_skill_content", lambda *args, **kwargs: _async_scan("allow", "ok")) monkeypatch.setattr("app.gateway.routers.skills.scan_skill_content", lambda *args, **kwargs: _async_scan("allow", "ok"))
refresh_calls = [] refresh_calls = []
@@ -179,9 +179,12 @@ def test_update_skill_refreshes_prompt_cache_before_return(monkeypatch, tmp_path
refresh_calls.append("refresh") refresh_calls.append("refresh")
enabled_state["value"] = False enabled_state["value"] = False
_app_cfg = AppConfig(sandbox=SandboxConfig(use="test"), extensions=ExtensionsConfig(mcp_servers={}, skills={}))
monkeypatch.setattr("app.gateway.routers.skills.load_skills", _load_skills) monkeypatch.setattr("app.gateway.routers.skills.load_skills", _load_skills)
monkeypatch.setattr("app.gateway.routers.skills.get_extensions_config", lambda: SimpleNamespace(mcp_servers={}, skills={})) monkeypatch.setattr(AppConfig, "current", staticmethod(lambda: _app_cfg))
monkeypatch.setattr("app.gateway.routers.skills.reload_extensions_config", lambda: None) monkeypatch.setattr(AppConfig, "init", staticmethod(lambda _cfg: None))
monkeypatch.setattr(AppConfig, "from_file", staticmethod(lambda: _app_cfg))
monkeypatch.setattr(skills_router.ExtensionsConfig, "resolve_config_path", staticmethod(lambda: config_path)) monkeypatch.setattr(skills_router.ExtensionsConfig, "resolve_config_path", staticmethod(lambda: config_path))
monkeypatch.setattr("app.gateway.routers.skills.refresh_skills_system_prompt_cache_async", _refresh) monkeypatch.setattr("app.gateway.routers.skills.refresh_skills_system_prompt_cache_async", _refresh)

Some files were not shown because too many files have changed in this diff.