Files
deer-flow/backend/packages/harness/deerflow/config/memory_config.py
T
greatmengqi edf345cd72 refactor(config): eliminate global mutable state, wire DeerFlowContext into runtime
- Freeze all config models (AppConfig + 15 sub-configs) with frozen=True
- Purify from_file() — remove 9 load_*_from_dict() side-effect calls
- Replace mtime/reload/push/pop machinery with single ContextVar + init_app_config()
- Delete 10 sub-module globals and their getters/setters/loaders
- Migrate 50+ consumers from get_*_config() to get_app_config().xxx

- Expand DeerFlowContext: app_config + thread_id + agent_name (frozen dataclass)
- Wire into Gateway runtime (worker.py) and DeerFlowClient via context= parameter
- Remove sandbox_id from runtime.context — flows through ThreadState.sandbox only
- Middleware/tools access runtime.context directly via Runtime[DeerFlowContext] generic
- resolve_context() retained at server entry points for LangGraph Server fallback
2026-04-14 01:18:19 +08:00

64 lines
2.0 KiB
Python

"""Configuration for memory mechanism."""
from pydantic import BaseModel, ConfigDict, Field
class MemoryConfig(BaseModel):
    """Configuration for global memory mechanism."""

    # Instances are immutable after construction (pydantic v2 frozen model).
    model_config = ConfigDict(frozen=True)

    # Master switch for the whole memory subsystem.
    enabled: bool = Field(
        description="Whether to enable memory mechanism",
        default=True,
    )

    # Where memory data is persisted; empty string means "use the default
    # location" (resolution semantics are spelled out in the description).
    storage_path: str = Field(
        description=(
            "Path to store memory data. If empty, defaults to "
            "`{base_dir}/memory.json` (see Paths.memory_file). Absolute paths are "
            "used as-is. Relative paths are resolved against `Paths.base_dir` (not "
            "the backend working directory). Note: if you previously set this to "
            "`.deer-flow/memory.json`, the file will now be resolved as "
            "`{base_dir}/.deer-flow/memory.json`; migrate existing data or use an "
            "absolute path to preserve the old location."
        ),
        default="",
    )

    # Dotted import path of the storage backend implementation.
    storage_class: str = Field(
        description="The class path for memory storage provider",
        default="deerflow.agents.memory.storage.FileMemoryStorage",
    )

    # Debounce window for batching queued memory updates (1–300 s).
    debounce_seconds: int = Field(
        description="Seconds to wait before processing queued updates (debounce)",
        default=30,
        ge=1,
        le=300,
    )

    # Optional model override for memory-update calls; None falls back to
    # the application's default model.
    model_name: str | None = Field(
        description="Model name to use for memory updates (None = use default model)",
        default=None,
    )

    # Hard cap on the number of stored facts (10–500).
    max_facts: int = Field(
        description="Maximum number of facts to store",
        default=100,
        ge=10,
        le=500,
    )

    # Facts below this confidence are discarded rather than stored.
    fact_confidence_threshold: float = Field(
        description="Minimum confidence threshold for storing facts",
        default=0.7,
        ge=0.0,
        le=1.0,
    )

    # Whether stored memory is injected into the system prompt at all.
    injection_enabled: bool = Field(
        description="Whether to inject memory into system prompt",
        default=True,
    )

    # Token budget for the injected memory text (100–8000).
    max_injection_tokens: int = Field(
        description="Maximum tokens to use for memory injection",
        default=2000,
        ge=100,
        le=8000,
    )