Updated output to clean JSON, updated set_goal, changed llm to llm_generate

This commit is contained in:
bryan
2026-01-23 14:27:45 -08:00
parent ea4c56108b
commit b0e870d1db
9 changed files with 473 additions and 88 deletions
+12
View File
@@ -78,6 +78,7 @@ class LiteLLMProvider(LLMProvider):
system: str = "",
tools: list[Tool] | None = None,
max_tokens: int = 1024,
json_mode: bool = False,
) -> LLMResponse:
"""Generate a completion using LiteLLM."""
# Prepare messages with system prompt
@@ -86,6 +87,17 @@ class LiteLLMProvider(LLMProvider):
full_messages.append({"role": "system", "content": system})
full_messages.extend(messages)
# Add JSON mode via prompt engineering (works across all providers)
if json_mode:
json_instruction = (
"\n\nPlease respond with a valid JSON object."
)
# Append to system message if present, otherwise add as system message
if full_messages and full_messages[0]["role"] == "system":
full_messages[0]["content"] += json_instruction
else:
full_messages.insert(0, {"role": "system", "content": json_instruction.strip()})
# Build kwargs
kwargs: dict[str, Any] = {
"model": self.model,