markdown support, removed subgraph, stop button

This commit is contained in:
bryan
2026-02-24 10:40:24 -08:00
parent 28a71b70a8
commit 7a7ece1805
8 changed files with 1796 additions and 1172 deletions
-86
View File
@@ -5,7 +5,6 @@ is tracked as an AgentSlot holding a runner, runtime, and metadata.
"""
import asyncio
import json
import logging
import time
from dataclasses import dataclass
@@ -15,84 +14,6 @@ from typing import Any
logger = logging.getLogger(__name__)
async def _extract_subgraph_steps(nodes: list, llm: Any) -> None:
    """Extract workflow steps from system prompts for frontend visualization.

    Called once during agent load. Scans the graph for event_loop nodes that
    have a system prompt but no cached subgraph, asks the LLM to decompose
    each prompt into a DAG of steps, and stores the validated result on
    ``node.subgraph_steps``. Failures are non-critical: they are logged at
    warning level and the node is skipped.
    """
    targets = [
        node
        for node in nodes
        if node.node_type == "event_loop" and node.system_prompt and not node.subgraph_steps
    ]
    for node in targets:
        try:
            request = (
                f"Analyze this system prompt for an AI agent node "
                f"and extract the workflow steps.\n\n"
                f"The node has these tools available: {json.dumps(node.tools)}\n"
                f"The node reads these inputs: {json.dumps(node.input_keys)}\n"
                f"The node must produce these outputs: {json.dumps(node.output_keys)}\n\n"
                f"System prompt:\n---\n{node.system_prompt}\n---\n\n"
                f"Extract a JSON array of workflow steps. For each step:\n"
                f'- "id": short snake_case identifier\n'
                f'- "label": human-readable description (5-10 words)\n'
                f'- "tool": the tool name this step uses, or null for reasoning/decision steps\n'
                f'- "depends_on": list of step ids that must complete before this one starts\n'
                f'- "type": "action" (does work), "decision" '
                f'(branches/loops), "loop" (repeats), or '
                f'"output" (sets output)\n\n'
                f"IMPORTANT:\n"
                f"- Look for parallelism: if multiple tools can run "
                f"independently after the same step, "
                f"give them the SAME depends_on — this creates fan-out\n"
                f"- Look for convergence: if a step needs results from multiple prior steps, "
                f"list ALL of them in depends_on — this creates fan-in\n"
                f"- Look for loops: if the prompt says 'repeat', 'go back to', 'if more then...', "
                f"model it as a decision step\n"
                f"- Do NOT make a simple linear chain unless the "
                f"prompt truly describes a strictly sequential "
                f"process\n\n"
                f"Return ONLY a JSON array of step objects. No explanation."
            )
            response = await llm.acomplete(
                messages=[{"role": "user", "content": request}],
                max_tokens=1000,
                json_mode=True,
            )
            # The model may answer with a bare array or wrap it as {"steps": [...]}.
            payload = json.loads(response.content.strip())
            if isinstance(payload, dict) and "steps" in payload:
                extracted = payload["steps"]
            elif isinstance(payload, list):
                extracted = payload
            else:
                logger.warning(f"Subgraph extraction for '{node.id}': unexpected format")
                continue

            # Minimal structural validation: every step must be a dict carrying a
            # truthy id and label plus a depends_on key (which may be empty).
            well_formed = isinstance(extracted, list) and all(
                isinstance(step, dict) and step.get("id") and step.get("label") and "depends_on" in step
                for step in extracted
            )
            if not well_formed:
                logger.warning(f"Subgraph extraction for '{node.id}': invalid step structure")
                continue

            node.subgraph_steps = extracted
            logger.info(f"Extracted {len(extracted)} subgraph steps for node '{node.id}'")
        except Exception as e:
            logger.warning(f"Subgraph extraction failed for node '{node.id}': {e}")
            continue
@dataclass
class AgentSlot:
"""A loaded agent with its runtime resources."""
@@ -172,13 +93,6 @@ class AgentManager:
if runner._agent_runtime is None:
await loop.run_in_executor(None, runner._setup)
# Extract subgraph steps for frontend visualization (non-critical)
if runner.graph and runner._llm:
try:
await _extract_subgraph_steps(runner.graph.nodes, runner._llm)
except Exception as e:
logger.warning(f"Subgraph extraction skipped: {e}")
runtime = runner._agent_runtime
# Start runtime on event loop
+1469 -6
View File
File diff suppressed because it is too large Load Diff
+2
View File
@@ -14,7 +14,9 @@
"lucide-react": "^0.575.0",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-markdown": "^10.1.0",
"react-router-dom": "^7.1.0",
"remark-gfm": "^4.0.1",
"tailwind-merge": "^3.5.0"
},
"devDependencies": {
+40 -28
View File
@@ -1,6 +1,6 @@
import { useMemo, useState, useRef, useEffect } from "react";
import ReactDOM from "react-dom";
import { Play, Loader2, CheckCircle2, GitBranch, Zap, Layers } from "lucide-react";
import { Play, Pause, Loader2, CheckCircle2, GitBranch, Zap, Layers } from "lucide-react";
export type NodeStatus = "running" | "complete" | "pending" | "error" | "looping";
@@ -25,6 +25,7 @@ interface AgentGraphProps {
onNodeClick?: (node: GraphNode) => void;
onVersionBump?: (type: VersionBump) => void;
onRun?: () => void;
onPause?: () => void;
version?: string;
runState?: RunState;
}
@@ -79,7 +80,7 @@ function formatLabel(id: string): string {
.join(" ");
}
export default function AgentGraph({ nodes, title: _title, onNodeClick, onVersionBump, onRun, version, runState: externalRunState }: AgentGraphProps) {
export default function AgentGraph({ nodes, title: _title, onNodeClick, onVersionBump, onRun, onPause, version, runState: externalRunState }: AgentGraphProps) {
const [localRunState, setLocalRunState] = useState<RunState>("idle");
const runState = externalRunState ?? localRunState;
const [versionPopover, setVersionPopover] = useState<"hidden" | "confirm" | "pick">("hidden");
@@ -225,31 +226,42 @@ export default function AgentGraph({ nodes, title: _title, onNodeClick, onVersio
return { layers, cols, maxCols, nodeW, colSpacing, firstColX };
}, [nodes, forwardEdges]);
// Run button (pre-pause version): triggers handleRun when idle and turns
// into a passive status chip while a run is deploying or in flight.
// Disabled unless runState is "idle" and the graph has at least one node.
const RunButton = () => (
<button
ref={runBtnRef}
onClick={handleRun}
disabled={runState !== "idle" || nodes.length === 0}
className={`flex items-center gap-1.5 px-2.5 py-1 rounded-md text-[11px] font-semibold transition-all duration-200 ${
runState === "running"
? "bg-green-500/15 text-green-400 border border-green-500/30 cursor-default"
: runState === "deploying"
? "bg-primary/10 text-primary border border-primary/20 cursor-default"
: nodes.length === 0
? "bg-muted/30 text-muted-foreground/40 border border-border/20 cursor-not-allowed"
: "bg-primary/10 text-primary border border-primary/20 hover:bg-primary/20 hover:border-primary/40 active:scale-95"
}`}
>
{/* Icon mirrors the label logic below: spinner → check → play */}
{runState === "deploying" ? (
<Loader2 className="w-3 h-3 animate-spin" />
) : runState === "running" ? (
<CheckCircle2 className="w-3 h-3" />
) : (
<Play className="w-3 h-3 fill-current" />
)}
{runState === "deploying" ? "Deploying\u2026" : runState === "running" ? "Running" : "Run"}
</button>
);
// Run/Pause button. When idle it starts a run; while a run is active,
// hovering swaps the green "Running" chip for an amber "Pause" affordance
// and clicking invokes onPause instead of handleRun. Only disabled during
// deploy or when the graph is empty.
//
// NOTE(review): RunButton is declared inside the parent component and now
// calls useState. If it is rendered as <RunButton />, its function identity
// changes on every parent render, so React remounts it and `hovered` resets
// to false — hover state can flicker off whenever the parent re-renders
// (frequent during a run, as SSE events arrive). Consider hoisting the
// hover state into the parent or moving RunButton to module scope with
// props. TODO confirm how it is rendered (<RunButton /> vs {RunButton()}).
const RunButton = () => {
const [hovered, setHovered] = useState(false);
// Pause affordance is only shown while running AND hovered.
const showPause = runState === "running" && hovered;
return (
<button
ref={runBtnRef}
onClick={runState === "running" ? onPause : handleRun}
disabled={runState === "deploying" || nodes.length === 0}
onMouseEnter={() => setHovered(true)}
onMouseLeave={() => setHovered(false)}
className={`flex items-center gap-1.5 px-2.5 py-1 rounded-md text-[11px] font-semibold transition-all duration-200 ${
showPause
? "bg-amber-500/15 text-amber-400 border border-amber-500/40 hover:bg-amber-500/25 active:scale-95 cursor-pointer"
: runState === "running"
? "bg-green-500/15 text-green-400 border border-green-500/30 cursor-pointer"
: runState === "deploying"
? "bg-primary/10 text-primary border border-primary/20 cursor-default"
: nodes.length === 0
? "bg-muted/30 text-muted-foreground/40 border border-border/20 cursor-not-allowed"
: "bg-primary/10 text-primary border border-primary/20 hover:bg-primary/20 hover:border-primary/40 active:scale-95"
}`}
>
{/* Icon priority: spinner (deploying) → pause (hovered run) → check (running) → play */}
{runState === "deploying" ? (
<Loader2 className="w-3 h-3 animate-spin" />
) : showPause ? (
<Pause className="w-3 h-3 fill-current" />
) : runState === "running" ? (
<CheckCircle2 className="w-3 h-3" />
) : (
<Play className="w-3 h-3 fill-current" />
)}
{runState === "deploying" ? "Deploying\u2026" : showPause ? "Pause" : runState === "running" ? "Running" : "Run"}
</button>
);
};
// Version bump popover (portalled)
const VersionPopover = () => {
@@ -587,7 +599,7 @@ export default function AgentGraph({ nodes, title: _title, onNodeClick, onVersio
<VersionPopover />
{/* Graph */}
<div className="flex-1 overflow-auto px-3 pb-5">
<div className="flex-1 overflow-y-auto overflow-x-hidden px-3 pb-5">
<svg
width={svgWidth}
height={svgHeight}
+5 -4
View File
@@ -1,6 +1,7 @@
import { useState, useRef, useEffect } from "react";
import { memo, useState, useRef, useEffect } from "react";
import { Send, Crown, Cpu } from "lucide-react";
import { formatAgentDisplayName } from "@/lib/chat-helpers";
import MarkdownContent from "@/components/MarkdownContent";
export interface ChatMessage {
id: string;
@@ -32,7 +33,7 @@ function getColor(_agent: string, role?: "queen" | "worker"): string {
return "hsl(220,60%,55%)";
}
function MessageBubble({ msg }: { msg: ChatMessage }) {
const MessageBubble = memo(function MessageBubble({ msg }: { msg: ChatMessage }) {
const isUser = msg.type === "user";
const isQueen = msg.role === "queen";
const color = getColor(msg.agent, msg.role);
@@ -91,12 +92,12 @@ function MessageBubble({ msg }: { msg: ChatMessage }) {
isQueen ? "border border-primary/20 bg-primary/5" : "bg-muted/60"
}`}
>
<p className="whitespace-pre-wrap break-words text-foreground">{msg.content}</p>
<MarkdownContent content={msg.content} />
</div>
</div>
</div>
);
}
}, (prev, next) => prev.msg.id === next.msg.id && prev.msg.content === next.msg.content);
export default function ChatPanel({ messages, onSend, isWaiting, activeThread, awaitingInput, disabled }: ChatPanelProps) {
const [input, setInput] = useState("");
@@ -0,0 +1,97 @@
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";
import type { Components } from "react-markdown";
import { cn } from "@/lib/utils";
// Element renderers passed to <ReactMarkdown>. Defined once at module scope
// so every render of MarkdownContent reuses the same object (stable identity
// avoids needless re-renders of memoized children — presumably; confirm
// against react-markdown's reconciliation behavior).
const components: Components = {
// Headers: same size as body text, just bold — keeps chat bubbles compact
h1: ({ children }) => <h1 className="font-bold mt-3 mb-1 first:mt-0">{children}</h1>,
h2: ({ children }) => <h2 className="font-bold mt-2 mb-1 first:mt-0">{children}</h2>,
h3: ({ children }) => <h3 className="font-semibold mt-2 mb-1 first:mt-0">{children}</h3>,
// Paragraphs: preserve whitespace and line breaks (matches existing plain-text behavior)
p: ({ children }) => <p className="whitespace-pre-wrap break-words mb-2 last:mb-0">{children}</p>,
// Lists
ul: ({ children }) => <ul className="list-disc pl-4 mb-2 last:mb-0 space-y-0.5">{children}</ul>,
ol: ({ children }) => <ol className="list-decimal pl-4 mb-2 last:mb-0 space-y-0.5">{children}</ol>,
li: ({ children }) => <li>{children}</li>,
// Inline code
// react-markdown marks fenced code with a "language-*" class; anything
// without it is treated as inline code and gets the pill styling.
code: ({ className, children, ...props }) => {
const isBlock = className?.includes("language-");
if (isBlock) {
return (
<code className={cn("text-xs", className)} {...props}>
{children}
</code>
);
}
return (
<code className="bg-muted px-1 py-0.5 rounded text-[13px] font-mono">
{children}
</code>
);
},
// Code blocks
pre: ({ children }) => (
<pre className="bg-muted/80 rounded-lg p-3 overflow-x-auto text-xs font-mono my-2 last:mb-0">
{children}
</pre>
),
// Links — open in a new tab; noopener/noreferrer guards against reverse tabnabbing
a: ({ href, children }) => (
<a
href={href}
target="_blank"
rel="noopener noreferrer"
className="text-primary underline underline-offset-2 hover:opacity-80"
>
{children}
</a>
),
// Tables — wrapped in a scroll container so wide tables don't blow out the bubble
table: ({ children }) => (
<div className="overflow-x-auto my-2 last:mb-0">
<table className="text-xs border-collapse w-full">{children}</table>
</div>
),
th: ({ children }) => (
<th className="border border-border px-2 py-1 text-left font-semibold bg-muted/40">
{children}
</th>
),
td: ({ children }) => <td className="border border-border px-2 py-1">{children}</td>,
// Blockquotes
blockquote: ({ children }) => (
<blockquote className="border-l-2 border-primary/40 pl-3 my-2 text-muted-foreground italic">
{children}
</blockquote>
),
// Horizontal rules
hr: () => <hr className="border-border my-3" />,
// Strong & emphasis inherit naturally from <strong>/<em> defaults — no overrides needed
};
// remark-gfm enables GitHub-flavored markdown (tables, strikethrough, task
// lists, autolinks). Hoisted so the array identity is stable across renders.
const remarkPlugins = [remarkGfm];
/** Props for {@link MarkdownContent}. */
interface MarkdownContentProps {
/** Raw markdown source to render. */
content: string;
/** Optional extra classes merged onto the wrapper div via `cn`. */
className?: string;
}
/**
 * Shared markdown renderer for chat content: GitHub-flavored markdown with
 * the chat-bubble-tuned element styles defined in `components` above.
 */
export default function MarkdownContent({ content, className }: MarkdownContentProps) {
  const wrapperClass = cn("break-words text-foreground", className);
  return (
    <div className={wrapperClass}>
      <ReactMarkdown components={components} remarkPlugins={remarkPlugins}>
        {content}
      </ReactMarkdown>
    </div>
  );
}
File diff suppressed because it is too large Load Diff
+144 -32
View File
@@ -178,6 +178,19 @@ function NewTabPopover({ open, onClose, anchorRef, discoverAgents, onFromScratch
);
}
// Format an ISO timestamp as a compact "[HH:MM:SS]" log prefix (local time).
// Returns the "[--:--:--]" placeholder when the input cannot be parsed.
//
// Bug fix: `new Date(bad)` does not throw — it yields an Invalid Date whose
// getters return NaN — so the original try/catch was dead code and malformed
// timestamps rendered as "[NaN:NaN:NaN]". Detect invalidity via getTime().
function fmtLogTs(ts: string): string {
  const d = new Date(ts);
  if (Number.isNaN(d.getTime())) return "[--:--:--]";
  const pad = (n: number) => String(n).padStart(2, "0");
  return `[${pad(d.getHours())}:${pad(d.getMinutes())}:${pad(d.getSeconds())}]`;
}
// Clamp a string to `max` characters, appending "..." when it was cut.
function truncate(s: string, max: number): string {
  if (s.length <= max) return s;
  return `${s.slice(0, max)}...`;
}
export default function Workspace() {
const navigate = useNavigate();
const [searchParams] = useSearchParams();
@@ -244,6 +257,10 @@ export default function Workspace() {
const sessionsRef = useRef(sessionsByAgent);
sessionsRef.current = sessionsByAgent;
// Tracks the latest LLM text snapshot per node so we can flush it as a
// log line when the first tool call starts (avoids per-token log spam).
const llmSnapshotRef = useRef<Record<string, string>>({});
// --- Backend state ---
const [backendAgentId, setBackendAgentId] = useState<string | null>(null);
const [backendLoading, setBackendLoading] = useState(true);
@@ -252,6 +269,10 @@ export default function Workspace() {
const [awaitingInput, setAwaitingInput] = useState(false);
// Run button state — driven by SSE events from the worker
const [workerRunState, setWorkerRunState] = useState<"idle" | "deploying" | "running">("idle");
// Current execution ID — needed for pause API
const [currentExecutionId, setCurrentExecutionId] = useState<string | null>(null);
// Per-node live log lines accumulated from SSE events
const [nodeLogs, setNodeLogs] = useState<Record<string, string[]>>({});
// Resolved display name for the loaded agent (e.g. "Competitive Intel Agent")
const [agentDisplayName, setAgentDisplayName] = useState<string | null>(null);
// Graph context for NodeDetailPanel
@@ -277,7 +298,8 @@ export default function Workspace() {
if (!backendAgentId || !backendReady) return;
try {
setWorkerRunState("deploying");
await executionApi.trigger(backendAgentId, "default", {});
const result = await executionApi.trigger(backendAgentId, "default", {});
setCurrentExecutionId(result.execution_id);
// State transitions from here are driven by SSE events (step 7)
} catch (err) {
const errMsg = err instanceof Error ? err.message : String(err);
@@ -386,27 +408,6 @@ export default function Workspace() {
};
});
// Inject intro_message as seed message (only if session is empty)
if (agent.intro_message) {
const introMsg: ChatMessage = {
id: `intro-${agent.id}`,
agent: displayName,
agentColor: "",
content: agent.intro_message,
timestamp: "",
role: "worker" as const,
thread: initialAgent,
};
setSessionsByAgent((prev) => {
const sessions = prev[initialAgent] || [];
if (!sessions.length || sessions[0].messages.length > 0) return prev;
return {
...prev,
[initialAgent]: [{ ...sessions[0], messages: [introMsg] }, ...sessions.slice(1)],
};
});
}
// Check for existing sessions and load message history
try {
const { sessions } = await sessionsApi.list(agent.id);
@@ -539,6 +540,42 @@ export default function Workspace() {
[activeWorker, activeSessionByAgent],
);
// Pause the in-flight execution via the backend API, then optimistically
// reset local UI state (run button back to idle, active/looping graph nodes
// back to pending). On failure, surface a system-style error message into
// the active chat session instead of throwing.
// No-op when there is no loaded agent or no tracked execution id.
const handlePause = useCallback(async () => {
if (!backendAgentId || !currentExecutionId) return;
try {
await executionApi.pause(backendAgentId, currentExecutionId);
// Optimistic reset — we do not wait for an SSE confirmation here.
setWorkerRunState("idle");
setCurrentExecutionId(null);
markAllNodesAs(["running", "looping"], "pending");
} catch (err) {
const errMsg = err instanceof Error ? err.message : String(err);
// Append the failure as a chat message in the currently active session
// of the active worker; all other sessions pass through unchanged.
setSessionsByAgent((prev) => {
const sessions = prev[activeWorker] || [];
return {
...prev,
[activeWorker]: sessions.map((s) => {
const activeId = activeSessionByAgent[activeWorker] || sessions[0]?.id;
if (s.id !== activeId) return s;
const errorMsg: ChatMessage = {
id: makeId(), agent: "System", agentColor: "",
content: `Failed to pause: ${errMsg}`,
timestamp: "", type: "system", thread: activeWorker,
};
return { ...s, messages: [...s.messages, errorMsg] };
}),
};
});
}
}, [backendAgentId, currentExecutionId, activeWorker, activeSessionByAgent, markAllNodesAs]);
// --- Node log helpers (live SSE → per-node log lines) ---
// Append one log line to a node's live log buffer, capping retention at the
// 200 most recent lines per node so long runs don't grow memory unboundedly.
const appendNodeLog = useCallback((nodeId: string, line: string) => {
  setNodeLogs((prev) => {
    const updated = (prev[nodeId] || []).concat(line);
    return { ...prev, [nodeId]: updated.slice(-200) };
  });
}, []);
// --- SSE event handler (Phase 5) ---
// Helper: upsert a chat message into the active session
const upsertChatMessage = useCallback(
@@ -578,18 +615,21 @@ export default function Workspace() {
const displayName = isQueen ? "Queen Bee" : (agentDisplayName || undefined);
const role = isQueen ? "queen" as const : "worker" as const;
const ts = fmtLogTs(event.timestamp);
switch (event.type) {
case "execution_started":
streamTurnRef.current += 1;
if (isQueen) {
// Queen execution starting — show typing indicator
setIsTyping(true);
} else {
// Worker execution starting — update run button + graph
setIsTyping(true);
setAwaitingInput(false);
setWorkerRunState("running");
if (event.execution_id) setCurrentExecutionId(event.execution_id);
markAllNodesAs(["running", "looping", "complete", "error"], "pending");
setNodeLogs({});
llmSnapshotRef.current = {};
}
break;
@@ -597,10 +637,10 @@ export default function Workspace() {
if (isQueen) {
setIsTyping(false);
} else {
// Worker finished
setIsTyping(false);
setAwaitingInput(false);
setWorkerRunState("idle");
setCurrentExecutionId(null);
markAllNodesAs(["running", "looping"], "complete");
}
break;
@@ -609,14 +649,20 @@ export default function Workspace() {
case "client_output_delta":
case "client_input_requested":
case "llm_text_delta": {
// Convert event to a chat message — queen events get role:"queen"
const chatMsg = sseEventToChatMessage(event, activeWorker, displayName, streamTurnRef.current);
if (chatMsg) {
// Override role for queen events
if (isQueen) chatMsg.role = role;
upsertChatMessage(chatMsg);
}
// Track LLM text snapshots for node logs (flushed on tool_call_started)
if (event.type === "llm_text_delta" && !isQueen && event.node_id) {
const snapshot = (event.data?.snapshot as string) || "";
if (snapshot) {
llmSnapshotRef.current[event.node_id] = snapshot;
}
}
if (event.type === "client_input_requested") {
setAwaitingInput(true);
setIsTyping(false);
@@ -626,7 +672,12 @@ export default function Workspace() {
setAwaitingInput(false);
if (!isQueen) {
setWorkerRunState("idle");
if (event.node_id) updateGraphNodeStatus(event.node_id, "error");
setCurrentExecutionId(null);
if (event.node_id) {
updateGraphNodeStatus(event.node_id, "error");
const errMsg = (event.data?.error as string) || "unknown error";
appendNodeLog(event.node_id, `${ts} ERROR Execution failed: ${errMsg}`);
}
markAllNodesAs(["running", "looping"], "pending");
}
}
@@ -645,21 +696,35 @@ export default function Workspace() {
updateGraphNodeStatus(event.node_id, isRevisit ? "looping" : "running", {
maxIterations: (event.data?.max_iterations as number) ?? undefined,
});
appendNodeLog(event.node_id, `${ts} INFO Node started`);
}
break;
case "node_loop_iteration":
streamTurnRef.current += 1;
if (!isQueen && event.node_id) {
updateGraphNodeStatus(event.node_id, "looping", {
iterations: (event.data?.iteration as number) ?? undefined,
});
// Flush any accumulated LLM text before starting next iteration
const pendingText = llmSnapshotRef.current[event.node_id];
if (pendingText?.trim()) {
appendNodeLog(event.node_id, `${ts} INFO LLM: ${truncate(pendingText.trim(), 300)}`);
delete llmSnapshotRef.current[event.node_id];
}
const iter = (event.data?.iteration as number) ?? undefined;
updateGraphNodeStatus(event.node_id, "looping", { iterations: iter });
appendNodeLog(event.node_id, `${ts} INFO Iteration ${iter ?? "?"}`);
}
break;
case "node_loop_completed":
if (!isQueen && event.node_id) {
// Flush any remaining LLM text
const pendingText = llmSnapshotRef.current[event.node_id];
if (pendingText?.trim()) {
appendNodeLog(event.node_id, `${ts} INFO LLM: ${truncate(pendingText.trim(), 300)}`);
delete llmSnapshotRef.current[event.node_id];
}
updateGraphNodeStatus(event.node_id, "complete");
appendNodeLog(event.node_id, `${ts} INFO Node completed`);
}
break;
@@ -673,11 +738,56 @@ export default function Workspace() {
break;
}
case "tool_call_started":
if (!isQueen && event.node_id) {
// Flush accumulated LLM reasoning before listing tool calls
const pendingText = llmSnapshotRef.current[event.node_id];
if (pendingText?.trim()) {
appendNodeLog(event.node_id, `${ts} INFO LLM: ${truncate(pendingText.trim(), 300)}`);
delete llmSnapshotRef.current[event.node_id];
}
const toolName = (event.data?.tool_name as string) || "unknown";
const toolInput = event.data?.tool_input;
const argsStr = toolInput ? truncate(JSON.stringify(toolInput), 200) : "";
appendNodeLog(event.node_id, `${ts} INFO Calling ${toolName}(${argsStr})`);
}
break;
case "tool_call_completed":
if (!isQueen && event.node_id) {
const toolName = (event.data?.tool_name as string) || "unknown";
const isError = event.data?.is_error as boolean | undefined;
const result = event.data?.result as string | undefined;
if (isError) {
appendNodeLog(event.node_id, `${ts} ERROR ${toolName} failed: ${truncate(result || "unknown error", 200)}`);
} else {
const resultStr = result ? ` (${truncate(result, 200)})` : "";
appendNodeLog(event.node_id, `${ts} INFO ${toolName} done${resultStr}`);
}
}
break;
case "node_internal_output":
if (!isQueen && event.node_id) {
const content = (event.data?.content as string) || "";
if (content.trim()) {
appendNodeLog(event.node_id, `${ts} INFO ${content}`);
}
}
break;
case "node_stalled":
if (!isQueen && event.node_id) {
const reason = (event.data?.reason as string) || "unknown";
appendNodeLog(event.node_id, `${ts} WARN Stalled: ${reason}`);
}
break;
default:
break;
}
},
[activeWorker, activeSessionByAgent, agentDisplayName, updateGraphNodeStatus, markAllNodesAs, upsertChatMessage],
[activeWorker, activeSessionByAgent, agentDisplayName, updateGraphNodeStatus, markAllNodesAs, upsertChatMessage, appendNodeLog],
);
// SSE subscription
@@ -917,6 +1027,7 @@ export default function Workspace() {
onNodeClick={(node) => setSelectedNode(prev => prev?.id === node.id ? null : node)}
onVersionBump={handleVersionBump}
onRun={handleRun}
onPause={handlePause}
version={`v${agentVersions[activeWorker]?.[0] ?? 1}.${agentVersions[activeWorker]?.[1] ?? 0}`}
runState={workerRunState}
/>
@@ -961,6 +1072,7 @@ export default function Workspace() {
agentId={backendAgentId || undefined}
graphId={backendGraphId || undefined}
sessionId={null}
nodeLogs={nodeLogs[selectedNode.id] || []}
onClose={() => setSelectedNode(null)}
/>
</div>