fixes to merge
@@ -1,29 +1,12 @@
 """Anthropic Claude LLM provider - backward compatible wrapper around LiteLLM."""

 import os
 from typing import Any

 from framework.llm.provider import LLMProvider, LLMResponse, Tool
 from framework.llm.litellm import LiteLLMProvider


-def _get_api_key_from_credential_manager() -> str | None:
-    """Get API key from CredentialManager or environment.
-
-    Priority:
-    1. CredentialManager (supports .env hot-reload)
-    2. os.environ fallback
-    """
-    try:
-        from aden_tools.credentials import CredentialManager
-
-        creds = CredentialManager()
-        if creds.is_available("anthropic"):
-            return creds.get("anthropic")
-    except ImportError:
-        pass
-    return os.environ.get("ANTHROPIC_API_KEY")
-
-
 def _get_api_key_from_credential_manager() -> str | None:
     """Get API key from CredentialManager or environment.
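Review note: this hunk deletes one of two identical copies of the lookup helper that the bad merge left behind; the surviving copy keeps the documented priority, CredentialManager first, then the ANTHROPIC_API_KEY environment variable. A minimal usage sketch under stated assumptions (the module path is never named in this diff, and the sketch assumes aden_tools is not installed so the env var wins):

    import os

    os.environ["ANTHROPIC_API_KEY"] = "sk-ant-placeholder"  # stand-in value for the sketch

    # Module path is an assumption; the diff does not name the file.
    from framework.llm.anthropic import AnthropicProvider

    # With no explicit api_key, __init__ falls back to
    # _get_api_key_from_credential_manager(): CredentialManager first,
    # then os.environ. With aden_tools absent, the env var is used.
    provider = AnthropicProvider()
    assert provider.api_key == "sk-ant-placeholder"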
@@ -64,7 +47,7 @@ class AnthropicProvider(LLMProvider):
                 or ANTHROPIC_API_KEY env var.
             model: Model to use (default: claude-haiku-4-5-20251001)
         """
         # Delegate to LiteLLMProvider internally.
-        # Delegate to LiteLLMProvider internally.
         self.api_key = api_key or _get_api_key_from_credential_manager()
         if not self.api_key:
             raise ValueError(
@@ -78,12 +61,6 @@ class AnthropicProvider(LLMProvider):
             api_key=self.api_key,
         )
-
-
-
-
-        self.model = model
-        self.api_key = api_key

     def complete(
         self,
         messages: list[dict[str, Any]],
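Review note: dropping the trailing `self.api_key = api_key` is a real fix, not just cleanup. After the fallback lookup above has succeeded, this leftover merge line would reset the key to None whenever the caller passed no explicit key. (`self.model` presumably remains assigned earlier in `__init__`, since the test below still asserts it.) A plain-Python illustration of the clobber, with illustrative variable names and no framework imports:

    api_key = None                               # caller passed nothing
    resolved = api_key or "key-from-credstore"   # fallback lookup succeeds
    resolved = api_key                           # leftover merge line clobbers it
    assert resolved is None                      # provider would later raise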
@@ -108,88 +85,10 @@ class AnthropicProvider(LLMProvider):
         max_iterations: int = 10,
     ) -> LLMResponse:
         """Run a tool-use loop until Claude produces a final response."""
-        current_messages = list(messages)
-        total_input_tokens = 0
-        total_output_tokens = 0
-
-        for _ in range(max_iterations):
-            response = self.client.messages.create(
-                model=self.model,
-                max_tokens=1024,
-                system=system,
-                messages=current_messages,
-                tools=[self._tool_to_dict(t) for t in tools],
-            )
-
-            total_input_tokens += response.usage.input_tokens
-            total_output_tokens += response.usage.output_tokens
-
-            # Check if we're done (no more tool use)
-            if response.stop_reason == "end_turn":
-                content = ""
-                for block in response.content:
-                    if block.type == "text":
-                        content += block.text
-
-                return LLMResponse(
-                    content=content,
-                    model=response.model,
-                    input_tokens=total_input_tokens,
-                    output_tokens=total_output_tokens,
-                    stop_reason=response.stop_reason,
-                    raw_response=response,
-                )
-
-            # Process tool uses
-            tool_uses = []
-            assistant_content = []
-            for block in response.content:
-                if block.type == "tool_use":
-                    tool_uses.append(
-                        ToolUse(id=block.id, name=block.name, input=block.input)
-                    )
-                    assistant_content.append({
-                        "type": "tool_use",
-                        "id": block.id,
-                        "name": block.name,
-                        "input": block.input,
-                    })
-                elif block.type == "text":
-                    assistant_content.append({
-                        "type": "text",
-                        "text": block.text,
-                    })
-
-            # Add assistant message with tool uses
-            current_messages.append({
-                "role": "assistant",
-                "content": assistant_content,
-            })
-
-            # Execute tools and add results
-            tool_results = []
-            for tool_use in tool_uses:
-                result = tool_executor(tool_use)
-                # Ensure content is never empty (Anthropic API requires non-empty content)
-                content = result.content if result.content else "(empty result)"
-                tool_results.append({
-                    "type": "tool_result",
-                    "tool_use_id": result.tool_use_id,
-                    "content": content,
-                    "is_error": result.is_error,
-                })
-
-            current_messages.append({
-                "role": "user",
-                "content": tool_results,
-            })
-
-        # Max iterations reached
-        return LLMResponse(
-            content="Max tool iterations reached",
-            model=self.model,
-            input_tokens=total_input_tokens,
-            output_tokens=total_output_tokens,
-            stop_reason="max_iterations",
-            raw_response=None,
-        )
+        return self._provider.complete_with_tools(
+            messages=messages,
+            system=system,
+            tools=tools,
+            tool_executor=tool_executor,
+            max_iterations=max_iterations,
+        )
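Review note: taken together, hunks 2-4 leave `AnthropicProvider` as a thin backward-compatible facade: the inline Anthropic SDK tool loop is gone and everything forwards to LiteLLM. A minimal sketch of the resulting shape, assuming the `LiteLLMProvider(model=..., api_key=...)` constructor (only `api_key=` is visible in this diff) and the ValueError message (truncated in the hunk):

    class AnthropicProvider(LLMProvider):
        """Backward-compatible facade; transport goes through LiteLLM."""

        def __init__(self, api_key: str | None = None,
                     model: str = "claude-haiku-4-5-20251001") -> None:
            self.model = model
            self.api_key = api_key or _get_api_key_from_credential_manager()
            if not self.api_key:
                raise ValueError("Anthropic API key not found")  # message assumed
            # Delegate to LiteLLMProvider internally.
            self._provider = LiteLLMProvider(
                model=self.model,       # kwarg assumed
                api_key=self.api_key,
            )

        def complete_with_tools(self, messages, system, tools, tool_executor,
                                max_iterations: int = 10) -> LLMResponse:
            # One delegation point replaces the removed inline tool loop.
            return self._provider.complete_with_tools(
                messages=messages,
                system=system,
                tools=tools,
                tool_executor=tool_executor,
                max_iterations=max_iterations,
            )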
@@ -250,7 +250,7 @@ class TestAnthropicProviderBackwardCompatibility:
     def test_anthropic_provider_init_defaults(self):
         """Test AnthropicProvider initialization with defaults."""
         provider = AnthropicProvider(api_key="test-key")
-        assert provider.model == "claude-sonnet-4-20250514"
+        assert provider.model == "claude-haiku-4-5-20251001"
         assert provider.api_key == "test-key"

     def test_anthropic_provider_init_custom_model(self):
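Review note: the expected default moves from claude-sonnet-4-20250514 to claude-haiku-4-5-20251001, matching the haiku default documented in the constructor docstring (hunk 2). Callers that relied on the old default now have to pin it explicitly, e.g. (module path assumed, as before):

    from framework.llm.anthropic import AnthropicProvider  # path not shown in diff

    provider = AnthropicProvider(api_key="test-key", model="claude-sonnet-4-20250514")
    assert provider.model == "claude-sonnet-4-20250514"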