From 8b61c94e1ddce6d2c0dd7b5d234b3c3987935054 Mon Sep 17 00:00:00 2001
From: greatmengqi
Date: Fri, 1 May 2026 15:43:28 +0800
Subject: [PATCH] fix: keep lead agent graph factory signature compatible
 (#2678)

Co-authored-by: greatmengqi
---
 .../deerflow/agents/lead_agent/agent.py       |  9 ++++-
 .../tests/test_lead_agent_model_resolution.py | 39 +++++++++++++++++++
 2 files changed, 46 insertions(+), 2 deletions(-)

diff --git a/backend/packages/harness/deerflow/agents/lead_agent/agent.py b/backend/packages/harness/deerflow/agents/lead_agent/agent.py
index 555d992f..12fedd5b 100644
--- a/backend/packages/harness/deerflow/agents/lead_agent/agent.py
+++ b/backend/packages/harness/deerflow/agents/lead_agent/agent.py
@@ -314,13 +314,18 @@ def _build_middlewares(
     return middlewares
 
 
-def make_lead_agent(config: RunnableConfig, app_config: AppConfig | None = None):
+def make_lead_agent(config: RunnableConfig):
+    """LangGraph graph factory; keep the signature compatible with LangGraph Server."""
+    return _make_lead_agent(config, app_config=get_app_config())
+
+
+def _make_lead_agent(config: RunnableConfig, *, app_config: AppConfig):
     # Lazy import to avoid circular dependency
     from deerflow.tools import get_available_tools
     from deerflow.tools.builtins import setup_agent
 
     cfg = _get_runtime_config(config)
-    resolved_app_config = app_config or get_app_config()
+    resolved_app_config = app_config
     thinking_enabled = cfg.get("thinking_enabled", True)
     reasoning_effort = cfg.get("reasoning_effort", None)
 
diff --git a/backend/tests/test_lead_agent_model_resolution.py b/backend/tests/test_lead_agent_model_resolution.py
index 797f17f2..c22377b8 100644
--- a/backend/tests/test_lead_agent_model_resolution.py
+++ b/backend/tests/test_lead_agent_model_resolution.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import inspect
 from unittest.mock import MagicMock
 
 import pytest
@@ -33,6 +34,44 @@ def _make_model(name: str, *, supports_thinking: bool) -> ModelConfig:
     )
 
 
+def test_make_lead_agent_signature_matches_langgraph_server_factory_abi():
+    assert list(inspect.signature(lead_agent_module.make_lead_agent).parameters) == ["config"]
+
+
+def test_internal_make_lead_agent_uses_explicit_app_config(monkeypatch):
+    app_config = _make_app_config([_make_model("explicit-model", supports_thinking=False)])
+
+    import deerflow.tools as tools_module
+
+    def _raise_get_app_config():
+        raise AssertionError("ambient get_app_config() must not be used when app_config is explicit")
+
+    monkeypatch.setattr(lead_agent_module, "get_app_config", _raise_get_app_config)
+    monkeypatch.setattr(tools_module, "get_available_tools", lambda **kwargs: [])
+    monkeypatch.setattr(lead_agent_module, "_build_middlewares", lambda config, model_name, agent_name=None, **kwargs: [])
+
+    captured: dict[str, object] = {}
+
+    def _fake_create_chat_model(*, name, thinking_enabled, reasoning_effort=None, app_config=None):
+        captured["name"] = name
+        captured["app_config"] = app_config
+        return object()
+
+    monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)
+    monkeypatch.setattr(lead_agent_module, "create_agent", lambda **kwargs: kwargs)
+
+    result = lead_agent_module._make_lead_agent(
+        {"configurable": {"model_name": "explicit-model"}},
+        app_config=app_config,
+    )
+
+    assert captured == {
+        "name": "explicit-model",
+        "app_config": app_config,
+    }
+    assert result["model"] is not None
+
+
 def test_resolve_model_name_falls_back_to_default(monkeypatch, caplog):
     app_config = _make_app_config(
         [