Merge pull request #1577 from adenhq/fix/ruff-tests1

fixed ruff format --check
Authored by Bryan @ Aden on 2026-01-27 20:12:03 -08:00; committed by GitHub.
6 changed files with 165 additions and 47 deletions
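The failing check was ruff's formatter run in check mode (ruff format --check); presumably the fix was produced by running ruff format over the repository. Every hunk below is a mechanical rewrap: long calls and dict literals split across lines at the formatter's default 88-column limit, single quotes normalized to double quotes, and blank lines enforced after module docstrings and import blocks.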
@@ -108,8 +108,10 @@ async def _interactive_shell(verbose=False):
     try:
         while True:
             try:
-                topic = await asyncio.get_event_loop().run_in_executor(None, input, "Topic> ")
-                if topic.lower() in ['quit', 'exit', 'q']:
+                topic = await asyncio.get_event_loop().run_in_executor(
+                    None, input, "Topic> "
+                )
+                if topic.lower() in ["quit", "exit", "q"]:
                     click.echo("Goodbye!")
                     break
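The wrapped call above is the standard pattern for reading stdin without blocking the event loop: input() runs in the default thread-pool executor while other tasks keep running. A minimal self-contained sketch of the same pattern (the prompt string is illustrative, and it uses the modern get_running_loop() spelling rather than the get_event_loop() call in the diff):

import asyncio

async def read_line(prompt: str) -> str:
    # input() blocks the calling thread, so run it in the default
    # thread-pool executor; the event loop stays free meanwhile.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, input, prompt)

async def main() -> None:
    line = await read_line("Topic> ")
    print(f"got: {line!r}")

asyncio.run(main())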
@@ -130,7 +132,11 @@ async def _interactive_shell(verbose=False):
                     click.echo(f"\nReport saved to: {output['file_path']}\n")
                 if "final_report" in output:
                     click.echo("\n--- Report Preview ---\n")
-                    preview = output["final_report"][:500] + "..." if len(output.get("final_report", "")) > 500 else output.get("final_report", "")
+                    preview = (
+                        output["final_report"][:500] + "..."
+                        if len(output.get("final_report", "")) > 500
+                        else output.get("final_report", "")
+                    )
                     click.echo(preview)
                     click.echo("\n")
                 else:
@@ -142,6 +148,7 @@ async def _interactive_shell(verbose=False):
             except Exception as e:
                 click.echo(f"Error: {e}", err=True)
                 import traceback
+
                 traceback.print_exc()
     finally:
         await agent.stop()
@@ -1,4 +1,5 @@
 """Agent graph construction for Online Research Agent."""
+
 from framework.graph import EdgeSpec, EdgeCondition, Goal, SuccessCriterion, Constraint
 from framework.graph.edge import GraphSpec
 from framework.graph.executor import ExecutionResult
@@ -194,13 +195,15 @@ class OnlineResearchAgent:
                 trigger_type = "manual"
             name = ep_id.replace("-", " ").title()
-            specs.append(EntryPointSpec(
-                id=ep_id,
-                name=name,
-                entry_node=node_id,
-                trigger_type=trigger_type,
-                isolation_level="shared",
-            ))
+            specs.append(
+                EntryPointSpec(
+                    id=ep_id,
+                    name=name,
+                    entry_node=node_id,
+                    trigger_type=trigger_type,
+                    isolation_level="shared",
+                )
+            )
         return specs

     def _create_runtime(self, mock_mode=False) -> AgentRuntime:
@@ -225,7 +228,10 @@ class OnlineResearchAgent:
         for server_name, server_config in mcp_servers.items():
             server_config["name"] = server_name
             # Resolve relative cwd paths
-            if "cwd" in server_config and not Path(server_config["cwd"]).is_absolute():
+            if (
+                "cwd" in server_config
+                and not Path(server_config["cwd"]).is_absolute()
+            ):
                 server_config["cwd"] = str(agent_dir / server_config["cwd"])
             tool_registry.register_mcp_server(server_config)
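The reflowed condition above anchors each MCP server's relative working directory at the agent's own directory. A minimal sketch of that logic, assuming a hypothetical agent_dir; the real code pulls these values from the agent's MCP config:

from pathlib import Path

agent_dir = Path("/opt/agents/online-research")  # hypothetical location

def resolve_cwd(server_config: dict) -> dict:
    # Leave absolute paths alone; anchor relative ones at the agent dir.
    if "cwd" in server_config and not Path(server_config["cwd"]).is_absolute():
        server_config["cwd"] = str(agent_dir / server_config["cwd"])
    return server_config

print(resolve_cwd({"name": "search", "cwd": "servers/search"}))
# {'name': 'search', 'cwd': '/opt/agents/online-research/servers/search'}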
@@ -297,7 +303,9 @@ class OnlineResearchAgent:
         """
         if self._runtime is None or not self._runtime.is_running:
             raise RuntimeError("Agent runtime not started. Call start() first.")
-        return await self._runtime.trigger(entry_point, input_data, correlation_id, session_state=session_state)
+        return await self._runtime.trigger(
+            entry_point, input_data, correlation_id, session_state=session_state
+        )

     async def trigger_and_wait(
         self,
@@ -320,9 +328,13 @@ class OnlineResearchAgent:
         """
         if self._runtime is None or not self._runtime.is_running:
             raise RuntimeError("Agent runtime not started. Call start() first.")
-        return await self._runtime.trigger_and_wait(entry_point, input_data, timeout, session_state=session_state)
+        return await self._runtime.trigger_and_wait(
+            entry_point, input_data, timeout, session_state=session_state
+        )

-    async def run(self, context: dict, mock_mode=False, session_state=None) -> ExecutionResult:
+    async def run(
+        self, context: dict, mock_mode=False, session_state=None
+    ) -> ExecutionResult:
         """
         Run the agent (convenience method for simple single execution).
@@ -341,7 +353,9 @@ class OnlineResearchAgent:
             else:
                 entry_point = "start"

-            result = await self.trigger_and_wait(entry_point, context, session_state=session_state)
+            result = await self.trigger_and_wait(
+                entry_point, context, session_state=session_state
+            )
             return result or ExecutionResult(success=False, error="Execution timeout")
         finally:
             await self.stop()
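Given the run() signature above, single-shot use would presumably look like the sketch below; the "topic" key matches the parse_query node's input_keys later in this diff, but the import path and construction call are assumptions, not shown here:

import asyncio

from agent.graph import OnlineResearchAgent  # hypothetical import path

async def main() -> None:
    agent = OnlineResearchAgent()  # assumed default construction
    # run() starts the runtime, triggers the "start" entry point,
    # waits for completion, and stops the runtime in its finally block.
    result = await agent.run({"topic": "solid-state batteries"})
    print(result.success)

asyncio.run(main())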
@@ -403,7 +417,9 @@ class OnlineResearchAgent:
         # Validate entry points
         for ep_id, node_id in self.entry_points.items():
             if node_id not in node_ids:
-                errors.append(f"Entry point '{ep_id}' references unknown node '{node_id}'")
+                errors.append(
+                    f"Entry point '{ep_id}' references unknown node '{node_id}'"
+                )

         return {
             "valid": len(errors) == 0,
@@ -1,4 +1,5 @@
 """Runtime configuration."""
+
 from dataclasses import dataclass
@@ -13,6 +14,7 @@ class RuntimeConfig:

 default_config = RuntimeConfig()

+
 # Agent metadata
 @dataclass
 class AgentMetadata:
@@ -1,4 +1,5 @@
 """Node definitions for Online Research Agent."""
+
 from framework.graph import NodeSpec

 # Node 1: Parse Query
@@ -10,9 +11,21 @@ parse_query_node = NodeSpec(
     input_keys=["topic"],
     output_keys=["search_queries", "research_focus", "key_aspects"],
     output_schema={
-        "research_focus": {"type": "string", "required": True, "description": "Brief statement of what we're researching"},
-        "key_aspects": {"type": "array", "required": True, "description": "List of 3-5 key aspects to investigate"},
-        "search_queries": {"type": "array", "required": True, "description": "List of 3-5 search queries"},
+        "research_focus": {
+            "type": "string",
+            "required": True,
+            "description": "Brief statement of what we're researching",
+        },
+        "key_aspects": {
+            "type": "array",
+            "required": True,
+            "description": "List of 3-5 key aspects to investigate",
+        },
+        "search_queries": {
+            "type": "array",
+            "required": True,
+            "description": "List of 3-5 search queries",
+        },
     },
     system_prompt="""\
 You are a research query strategist. Given a research topic, analyze it and generate search queries.
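All of the reformatted output_schema blocks in this file share one shape: a map from output key to {"type", "required", "description"}. A hedged sketch of how a result dict could be checked against that shape; validate_output is illustrative, not the framework's API:

def validate_output(output: dict, schema: dict) -> list[str]:
    # Collect human-readable problems rather than raising on the first one.
    errors = []
    for key, spec in schema.items():
        if spec.get("required") and key not in output:
            errors.append(f"missing required output '{key}'")
    return errors

schema = {
    "research_focus": {
        "type": "string",
        "required": True,
        "description": "Brief statement of what we're researching",
    },
}
print(validate_output({}, schema))  # ["missing required output 'research_focus'"]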
@@ -50,8 +63,16 @@ search_sources_node = NodeSpec(
     input_keys=["search_queries", "research_focus"],
     output_keys=["source_urls", "search_results_summary"],
     output_schema={
-        "source_urls": {"type": "array", "required": True, "description": "List of source URLs found"},
-        "search_results_summary": {"type": "string", "required": True, "description": "Brief summary of what was found"},
+        "source_urls": {
+            "type": "array",
+            "required": True,
+            "description": "List of source URLs found",
+        },
+        "search_results_summary": {
+            "type": "string",
+            "required": True,
+            "description": "Brief summary of what was found",
+        },
     },
     system_prompt="""\
 You are a research assistant executing web searches. Use the web_search tool to find sources.
@@ -80,8 +101,16 @@ fetch_content_node = NodeSpec(
     input_keys=["source_urls", "research_focus"],
     output_keys=["fetched_sources", "fetch_errors"],
     output_schema={
-        "fetched_sources": {"type": "array", "required": True, "description": "List of fetched source objects with url, title, content"},
-        "fetch_errors": {"type": "array", "required": True, "description": "List of URLs that failed to fetch"},
+        "fetched_sources": {
+            "type": "array",
+            "required": True,
+            "description": "List of fetched source objects with url, title, content",
+        },
+        "fetch_errors": {
+            "type": "array",
+            "required": True,
+            "description": "List of URLs that failed to fetch",
+        },
     },
     system_prompt="""\
 You are a content fetcher. Use web_scrape tool to retrieve content from URLs.
@@ -113,8 +142,16 @@ evaluate_sources_node = NodeSpec(
     input_keys=["fetched_sources", "research_focus", "key_aspects"],
     output_keys=["ranked_sources", "source_analysis"],
     output_schema={
-        "ranked_sources": {"type": "array", "required": True, "description": "List of ranked sources with scores"},
-        "source_analysis": {"type": "string", "required": True, "description": "Overview of source quality and coverage"},
+        "ranked_sources": {
+            "type": "array",
+            "required": True,
+            "description": "List of ranked sources with scores",
+        },
+        "source_analysis": {
+            "type": "string",
+            "required": True,
+            "description": "Overview of source quality and coverage",
+        },
     },
     system_prompt="""\
 You are a source evaluator. Assess each source for quality and relevance.
@@ -153,9 +190,21 @@ synthesize_findings_node = NodeSpec(
     input_keys=["ranked_sources", "research_focus", "key_aspects"],
     output_keys=["key_findings", "themes", "source_citations"],
     output_schema={
-        "key_findings": {"type": "array", "required": True, "description": "List of key findings with sources and confidence"},
-        "themes": {"type": "array", "required": True, "description": "List of themes with descriptions and supporting sources"},
-        "source_citations": {"type": "object", "required": True, "description": "Map of facts to supporting URLs"},
+        "key_findings": {
+            "type": "array",
+            "required": True,
+            "description": "List of key findings with sources and confidence",
+        },
+        "themes": {
+            "type": "array",
+            "required": True,
+            "description": "List of themes with descriptions and supporting sources",
+        },
+        "source_citations": {
+            "type": "object",
+            "required": True,
+            "description": "Map of facts to supporting URLs",
+        },
     },
     system_prompt="""\
 You are a research synthesizer. Analyze multiple sources to extract insights.
@@ -192,11 +241,25 @@ write_report_node = NodeSpec(
     name="Write Report",
     description="Generate a narrative report with proper citations",
     node_type="llm_generate",
-    input_keys=["key_findings", "themes", "source_citations", "research_focus", "ranked_sources"],
+    input_keys=[
+        "key_findings",
+        "themes",
+        "source_citations",
+        "research_focus",
+        "ranked_sources",
+    ],
     output_keys=["report_content", "references"],
     output_schema={
-        "report_content": {"type": "string", "required": True, "description": "Full markdown report text with citations"},
-        "references": {"type": "array", "required": True, "description": "List of reference objects with number, url, title"},
+        "report_content": {
+            "type": "string",
+            "required": True,
+            "description": "Full markdown report text with citations",
+        },
+        "references": {
+            "type": "array",
+            "required": True,
+            "description": "List of reference objects with number, url, title",
+        },
     },
     system_prompt="""\
 You are a research report writer. Create a well-structured narrative report.
@@ -239,9 +302,21 @@ quality_check_node = NodeSpec(
     input_keys=["report_content", "references", "source_citations"],
     output_keys=["quality_score", "issues", "final_report"],
     output_schema={
-        "quality_score": {"type": "number", "required": True, "description": "Quality score 0-1"},
-        "issues": {"type": "array", "required": True, "description": "List of issues found and fixed"},
-        "final_report": {"type": "string", "required": True, "description": "Corrected full report"},
+        "quality_score": {
+            "type": "number",
+            "required": True,
+            "description": "Quality score 0-1",
+        },
+        "issues": {
+            "type": "array",
+            "required": True,
+            "description": "List of issues found and fixed",
+        },
+        "final_report": {
+            "type": "string",
+            "required": True,
+            "description": "Corrected full report",
+        },
     },
     system_prompt="""\
 You are a quality assurance reviewer. Check the research report for issues.
@@ -278,8 +353,16 @@ save_report_node = NodeSpec(
     input_keys=["final_report", "references", "research_focus"],
     output_keys=["file_path", "save_status"],
     output_schema={
-        "file_path": {"type": "string", "required": True, "description": "Path where report was saved"},
-        "save_status": {"type": "string", "required": True, "description": "Status of save operation"},
+        "file_path": {
+            "type": "string",
+            "required": True,
+            "description": "Path where report was saved",
+        },
+        "save_status": {
+            "type": "string",
+            "required": True,
+            "description": "Status of save operation",
+        },
     },
     system_prompt="""\
 You are a file manager. Save the research report to disk.
@@ -524,11 +524,13 @@ def add_node(
         tools_list = json.loads(tools)
         routes_dict = json.loads(routes)
     except json.JSONDecodeError as e:
-        return json.dumps({
-            "valid": False,
-            "errors": [f"Invalid JSON input: {e}"],
-            "warnings": [],
-        })
+        return json.dumps(
+            {
+                "valid": False,
+                "errors": [f"Invalid JSON input: {e}"],
+                "warnings": [],
+            }
+        )

     # Validate credentials for tools BEFORE adding the node
     cred_error = _validate_tool_credentials(tools_list)
@@ -717,11 +719,13 @@ def update_node(
         tools_list = json.loads(tools) if tools else None
         routes_dict = json.loads(routes) if routes else None
     except json.JSONDecodeError as e:
-        return json.dumps({
-            "valid": False,
-            "errors": [f"Invalid JSON input: {e}"],
-            "warnings": [],
-        })
+        return json.dumps(
+            {
+                "valid": False,
+                "errors": [f"Invalid JSON input: {e}"],
+                "warnings": [],
+            }
+        )

     # Validate credentials for new tools BEFORE updating
     if tools_list:
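add_node and update_node return the same failure envelope on bad JSON; a small helper would remove the duplication (the _json_error name is hypothetical, not from the diff):

import json

def _json_error(message: str) -> str:
    # Shared failure envelope for the node-editing tools: never raises,
    # always returns a JSON string the caller can pass through unchanged.
    return json.dumps({"valid": False, "errors": [message], "warnings": []})

print(_json_error("Invalid JSON input: Expecting value: line 1 column 1 (char 0)"))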
@@ -5,7 +5,7 @@ This test verifies the fix for Issue #363 where GraphExecutor was ignoring
 the max_retries field in NodeSpec and using a hardcoded value of 3.
 """

-from unittest.mock import MagicMock
+from unittest.mock import AsyncMock, MagicMock

 import pytest
@@ -47,6 +47,12 @@ class AlwaysFailsNode(NodeProtocol):
         return NodeResult(success=False, error=f"Permanent error (attempt {self.attempt_count})")


+@pytest.fixture(autouse=True)
+def fast_sleep(monkeypatch):
+    """Mock asyncio.sleep to avoid real delays from exponential backoff."""
+    monkeypatch.setattr("asyncio.sleep", AsyncMock())
+
+
 @pytest.fixture
 def runtime():
     """Create a mock Runtime for testing."""