fix: address PR review feedback (accounts_prompt, tests, and remove markdown)

vakrahul
2026-02-23 17:38:04 +05:30
parent a0d14b8a25
commit 31700fa8da
3 changed files with 59 additions and 4 deletions
+7
@@ -827,6 +827,12 @@ class AgentRunner:
         if has_llm_nodes:
             from framework.credentials.models import CredentialError
 
+            if self._is_local_model(self.model):
+                raise CredentialError(
+                    f"Failed to initialize LLM for local model '{self.model}'. "
+                    f"Ensure your local LLM server is running "
+                    f"(e.g. 'ollama serve' for Ollama)."
+                )
             api_key_env = self._get_api_key_env_var(self.model)
             hint = (
                 f"Set it with: export {api_key_env}=your-api-key"
@@ -1003,6 +1009,7 @@ class AgentRunner:
             checkpoint_config=checkpoint_config,
             config=runtime_config,
             graph_id=self.graph.id or self.agent_path.name,
+            accounts_prompt=accounts_prompt,
         )
 
         # Pass intro_message through for TUI display
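The guard above assumes _is_local_model() can tell local backends apart by their provider prefix. A minimal sketch of such a check, with the prefix set inferred from the TestIsLocalModel cases added later in this commit (the shipped implementation in framework/runner/runner.py may differ):

# Hedged sketch, not the shipped implementation: the prefix set is
# inferred from the parameterized tests added in this commit.
LOCAL_MODEL_PREFIXES = ("ollama/", "ollama_chat/", "vllm/", "lm_studio/", "llamacpp/")

def _is_local_model(model: str) -> bool:
    # Case-insensitive prefix match; str.startswith accepts a tuple.
    return model.lower().startswith(LOCAL_MODEL_PREFIXES)

Under this sketch, "Ollama/Llama3" matches after lowercasing, while bare cloud names such as "gpt-4o-mini" fall through to the existing API-key hint path.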
+3 -4
@@ -142,13 +142,14 @@ class AgentRuntime:
             runtime_log_store: Optional RuntimeLogStore for per-execution logging
             checkpoint_config: Optional checkpoint configuration for resumable sessions
             graph_id: Optional identifier for the primary graph (defaults to "primary")
-            accounts_prompt: Connected accounts block for system prompt injection
+            accounts_prompt: Optional connected-accounts context for system prompt injection
         """
         self.graph = graph
         self.goal = goal
         self._config = config or AgentRuntimeConfig()
         self._runtime_log_store = runtime_log_store
         self._checkpoint_config = checkpoint_config
+        self.accounts_prompt = accounts_prompt
 
         # Primary graph identity
         self._graph_id: str = graph_id or "primary"
@@ -180,7 +181,6 @@ class AgentRuntime:
         self._llm = llm
         self._tools = tools or []
         self._tool_executor = tool_executor
-        self._accounts_prompt = accounts_prompt
 
         # Entry points and streams (primary graph)
         self._entry_points: dict[str, EntryPointSpec] = {}
@@ -276,7 +276,6 @@ class AgentRuntime:
                 session_store=self._session_store,
                 checkpoint_config=self._checkpoint_config,
                 graph_id=self._graph_id,
-                accounts_prompt=self._accounts_prompt,
             )
             await stream.start()
             self._streams[ep_id] = stream
@@ -678,7 +677,6 @@ class AgentRuntime:
             session_store=self._session_store,
             checkpoint_config=self._checkpoint_config,
             graph_id=graph_id,
-            accounts_prompt=self._accounts_prompt,
         )
         if self._running:
             await stream.start()
@@ -1176,6 +1174,7 @@ def create_agent_runtime(
         checkpoint_config: Optional checkpoint configuration for resumable sessions.
            If None, uses default checkpointing behavior.
         graph_id: Optional identifier for the primary graph (defaults to "primary").
+        accounts_prompt: Optional connected-accounts context for system prompt injection.
 
     Returns:
         Configured AgentRuntime (not yet started)
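For context, a hypothetical caller-side sketch of the new plumbing; graph and goal stand in for whatever the runner already builds, and only the accounts_prompt keyword is the subject of this commit:

# Hypothetical usage sketch; argument names other than accounts_prompt
# are assumptions, not confirmed by this diff.
runtime = create_agent_runtime(
    graph=graph,
    goal=goal,
    graph_id="primary",
    accounts_prompt="Connected accounts: github, slack",
)
# After this commit the value is a public attribute (the private
# _accounts_prompt is removed) and is no longer forwarded to each stream.
print(runtime.accounts_prompt)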
+49
@@ -826,3 +826,52 @@ class TestAsyncComplete:
         assert call_thread_ids[0] != main_thread_id, (
             "Base acomplete() should offload sync complete() to a thread pool"
         )
+
+
+# ---------------------------------------------------------------------------
+# AgentRunner._is_local_model — parameterized tests
+# ---------------------------------------------------------------------------
+class TestIsLocalModel:
+    """Parameterized tests for AgentRunner._is_local_model()."""
+
+    @pytest.mark.parametrize(
+        "model",
+        [
+            "ollama/llama3",
+            "ollama/mistral",
+            "ollama_chat/llama3",
+            "vllm/mistral",
+            "lm_studio/phi3",
+            "llamacpp/llama-7b",
+            "Ollama/Llama3",  # case-insensitive
+            "VLLM/Mistral",
+        ],
+    )
+    def test_local_models_return_true(self, model):
+        """Local model prefixes should be recognized."""
+        from framework.runner.runner import AgentRunner
+
+        assert AgentRunner._is_local_model(model) is True
+
+    @pytest.mark.parametrize(
+        "model",
+        [
+            "anthropic/claude-3-haiku",
+            "openai/gpt-4o",
+            "gpt-4o-mini",
+            "claude-3-haiku-20240307",
+            "gemini/gemini-1.5-flash",
+            "groq/llama3-70b",
+            "mistral/mistral-large",
+            "azure/gpt-4",
+            "cohere/command-r",
+            "together/llama3-70b",
+        ],
+    )
+    def test_cloud_models_return_false(self, model):
+        """Cloud model prefixes should not be treated as local."""
+        from framework.runner.runner import AgentRunner
+
+        assert AgentRunner._is_local_model(model) is False
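The new suite needs no network access or credentials, so it can be run in isolation with pytest's keyword filter (invocation is illustrative; adjust to the repo's test layout):

pytest -k TestIsLocalModel -v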