fix: remove hardcoded Anthropic logic

This commit is contained in:
Timothy
2026-02-27 10:23:59 -08:00
parent 6a8286d4cf
commit e1db3a4af9
14 changed files with 298 additions and 388 deletions
+13 -1
View File
@@ -20,8 +20,20 @@ check: ## Run all checks without modifying files (CI-safe)
cd core && ruff format --check .
cd tools && ruff format --check .
test: ## Run all tests
test: ## Run all tests (core + tools, excludes live)
cd core && uv run python -m pytest tests/ -v
cd tools && uv run python -m pytest -v
test-tools: ## Run tool tests only (mocked, no credentials needed)
cd tools && uv run python -m pytest -v
test-live: ## Run live integration tests (requires real API credentials)
cd tools && uv run python -m pytest -m live -s -o "addopts=" --log-cli-level=INFO
test-all: ## Run everything including live tests
cd core && uv run python -m pytest tests/ -v
cd tools && uv run python -m pytest -v
cd tools && uv run python -m pytest -m live -s -o "addopts=" --log-cli-level=INFO
install-hooks: ## Install pre-commit hooks
uv pip install pre-commit
+1 -2
View File
@@ -427,8 +427,7 @@ class GraphSpec(BaseModel):
max_tokens: int = Field(default=None) # resolved by _resolve_max_tokens validator
# Cleanup LLM for JSON extraction fallback (fast/cheap model preferred)
# If not set, uses CEREBRAS_API_KEY -> cerebras/llama-3.3-70b or
# ANTHROPIC_API_KEY -> claude-haiku-4-5 as fallback
# If not set, uses CEREBRAS_API_KEY -> cerebras/llama-3.3-70b
cleanup_llm_model: str | None = None
# Execution limits
+3 -2
View File
@@ -179,11 +179,12 @@ class GraphExecutor:
self.accounts_data = accounts_data
self.tool_provider_map = tool_provider_map
# Initialize output cleaner
# Initialize output cleaner — uses its own dedicated fast model (CEREBRAS_API_KEY),
# never the main agent LLM. Passing the main LLM here would cause expensive
# Anthropic calls for output cleaning whenever ANTHROPIC_API_KEY is set.
self.cleansing_config = cleansing_config or CleansingConfig()
self.output_cleaner = OutputCleaner(
config=self.cleansing_config,
llm_provider=llm,
)
# Parallel execution settings
+4 -56
View File
@@ -154,69 +154,17 @@ class HITLProtocol:
"""
Parse human's raw input into structured response.
Uses Haiku to intelligently extract answers for each question.
Maps the raw input to the first question. For multi-question HITL,
the caller should present one question at a time.
"""
import os
response = HITLResponse(request_id=request.request_id, raw_input=raw_input)
# If no questions, just return raw input
if not request.questions:
return response
# Try to use Haiku for intelligent parsing
api_key = os.environ.get("ANTHROPIC_API_KEY")
if not use_haiku or not api_key:
# Simple fallback: treat as answer to first question
if request.questions:
response.answers[request.questions[0].id] = raw_input
return response
# Use Haiku to extract answers
try:
import json
import anthropic
questions_str = "\n".join(
[f"{i + 1}. {q.question} (id: {q.id})" for i, q in enumerate(request.questions)]
)
prompt = f"""Parse the user's response and extract answers for each question.
Questions asked:
{questions_str}
User's response:
{raw_input}
Extract the answer for each question. Output JSON with question IDs as keys.
Example format:
{{"question-1": "answer here", "question-2": "answer here"}}"""
client = anthropic.Anthropic(api_key=api_key)
message = client.messages.create(
model="claude-haiku-4-5-20251001",
max_tokens=500,
messages=[{"role": "user", "content": prompt}],
)
# Parse Haiku's response
import re
response_text = message.content[0].text.strip()
json_match = re.search(r"\{[^{}]*\}", response_text, re.DOTALL)
if json_match:
parsed = json.loads(json_match.group())
response.answers = parsed
except Exception:
# Fallback: use raw input for first question
if request.questions:
response.answers[request.questions[0].id] = raw_input
# Map raw input to first question
response.answers[request.questions[0].id] = raw_input
return response
@staticmethod
+7 -54
View File
@@ -556,7 +556,6 @@ class NodeResult:
Generate a human-readable summary of this node's execution and output.
This is like toString() - it describes what the node produced in its current state.
Uses Haiku to intelligently summarize complex outputs.
"""
if not self.success:
return f"❌ Failed: {self.error}"
@@ -564,59 +563,13 @@ class NodeResult:
if not self.output:
return "✓ Completed (no output)"
# Use Haiku to generate intelligent summary
import os
api_key = os.environ.get("ANTHROPIC_API_KEY")
if not api_key:
# Fallback: simple key-value listing
parts = [f"✓ Completed with {len(self.output)} outputs:"]
for key, value in list(self.output.items())[:5]: # Limit to 5 keys
value_str = str(value)[:100]
if len(str(value)) > 100:
value_str += "..."
parts.append(f"{key}: {value_str}")
return "\n".join(parts)
# Use Haiku to generate intelligent summary
try:
import json
import anthropic
node_context = ""
if node_spec:
node_context = f"\nNode: {node_spec.name}\nPurpose: {node_spec.description}"
output_json = json.dumps(self.output, indent=2, default=str)[:2000]
prompt = (
f"Generate a 1-2 sentence human-readable summary of "
f"what this node produced.{node_context}\n\n"
f"Node output:\n{output_json}\n\n"
"Provide a concise, clear summary that a human can quickly "
"understand. Focus on the key information produced."
)
client = anthropic.Anthropic(api_key=api_key)
message = client.messages.create(
model="claude-haiku-4-5-20251001",
max_tokens=200,
messages=[{"role": "user", "content": prompt}],
)
summary = message.content[0].text.strip()
return f"{summary}"
except Exception:
# Fallback on error
parts = [f"✓ Completed with {len(self.output)} outputs:"]
for key, value in list(self.output.items())[:3]:
value_str = str(value)[:80]
if len(str(value)) > 80:
value_str += "..."
parts.append(f"{key}: {value_str}")
return "\n".join(parts)
parts = [f"✓ Completed with {len(self.output)} outputs:"]
for key, value in list(self.output.items())[:5]: # Limit to 5 keys
value_str = str(value)[:100]
if len(str(value)) > 100:
value_str += "..."
parts.append(f"{key}: {value_str}")
return "\n".join(parts)
class NodeProtocol(ABC):
+8 -51
View File
@@ -1053,62 +1053,19 @@ def _interactive_approval(request):
def _format_natural_language_to_json(
user_input: str, input_keys: list[str], agent_description: str, session_context: dict = None
) -> dict:
"""Use Haiku to convert natural language input to JSON based on agent's input schema."""
import os
"""Convert natural language input to JSON based on agent's input schema.
import anthropic
Maps user input to the primary input field. For follow-up inputs,
appends to the existing value.
"""
main_field = input_keys[0] if input_keys else "objective"
client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
# Build prompt for Haiku
session_info = ""
if session_context:
# Extract the main field (usually 'objective') that we'll append to
main_field = input_keys[0] if input_keys else "objective"
existing_value = session_context.get(main_field, "")
if existing_value:
return {main_field: f"{existing_value}\n\n{user_input}"}
session_info = (
f'\n\nExisting {main_field}: "{existing_value}"\n\n'
f"The user is providing ADDITIONAL information. Append this new "
f"information to the existing {main_field} to create an enriched, "
"more detailed version."
)
prompt = f"""You are formatting user input for an agent that requires specific input fields.
Agent: {agent_description}
Required input fields: {", ".join(input_keys)}{session_info}
User input: {user_input}
{"If this is a follow-up, APPEND new info to the existing field value." if session_context else ""}
Output ONLY valid JSON, no explanation:"""
try:
message = client.messages.create(
model="claude-haiku-4-5-20251001", # Fast and cheap
max_tokens=500,
messages=[{"role": "user", "content": prompt}],
)
json_str = message.content[0].text.strip()
# Remove markdown code blocks if present
if json_str.startswith("```"):
json_str = json_str.split("```")[1]
if json_str.startswith("json"):
json_str = json_str[4:]
json_str = json_str.strip()
return json.loads(json_str)
except Exception:
# Fallback: try to infer the main field
if len(input_keys) == 1:
return {input_keys[0]: user_input}
else:
# Put it in the first field as fallback
return {input_keys[0]: user_input}
return {main_field: user_input}
def cmd_shell(args: argparse.Namespace) -> int:
+4
View File
@@ -106,6 +106,10 @@ lint.isort.section-order = [
[tool.pytest.ini_options]
testpaths = ["tests"]
asyncio_mode = "auto"
addopts = "-m 'not live'"
markers = [
"live: Tests that call real external APIs (require credentials, never run in CI)",
]
[dependency-groups]
dev = [
@@ -33,7 +33,6 @@ Usage:
})
Credential categories:
- llm.py: LLM provider credentials (anthropic, openai, etc.)
- search.py: Search tool credentials (brave_search, google_search, etc.)
- email.py: Email provider credentials (resend, google/gmail)
- apollo.py: Apollo.io API credentials
@@ -73,7 +72,6 @@ from .health_check import (
validate_integration_wiring,
)
from .hubspot import HUBSPOT_CREDENTIALS
from .llm import LLM_CREDENTIALS
from .news import NEWS_CREDENTIALS
from .postgres import POSTGRES_CREDENTIALS
from .razorpay import RAZORPAY_CREDENTIALS
@@ -92,7 +90,6 @@ from .telegram import TELEGRAM_CREDENTIALS
# Merged registry of all credentials
CREDENTIAL_SPECS = {
**LLM_CREDENTIALS,
**NEWS_CREDENTIALS,
**SEARCH_CREDENTIALS,
**EMAIL_CREDENTIALS,
@@ -139,7 +136,6 @@ __all__ = [
# Merged registry
"CREDENTIAL_SPECS",
# Category registries (for direct access if needed)
"LLM_CREDENTIALS",
"NEWS_CREDENTIALS",
"SEARCH_CREDENTIALS",
"EMAIL_CREDENTIALS",
@@ -563,83 +563,6 @@ class SlackHealthChecker:
)
class AnthropicHealthChecker:
    """Health checker for Anthropic API credentials."""

    ENDPOINT = "https://api.anthropic.com/v1/messages"
    TIMEOUT = 10.0

    def check(self, api_key: str) -> HealthCheckResult:
        """
        Validate an Anthropic API key without consuming tokens.

        Posts a deliberately malformed request (empty ``messages``) to the
        messages endpoint. A 401 means the key is invalid; a 400 means the
        key authenticated but the payload was rejected, confirming the key
        is valid without generating any tokens; a 429 (rate limited) also
        indicates a valid key.
        """
        request_headers = {
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01",
            "Content-Type": "application/json",
        }
        # Empty messages triggers 400 (not 200), so no tokens are consumed.
        request_body = {
            "model": "claude-sonnet-4-20250514",
            "max_tokens": 1,
            "messages": [],
        }

        try:
            with httpx.Client(timeout=self.TIMEOUT) as client:
                response = client.post(
                    self.ENDPOINT,
                    headers=request_headers,
                    json=request_body,
                )
        except httpx.TimeoutException:
            return HealthCheckResult(
                valid=False,
                message="Anthropic API request timed out",
                details={"error": "timeout"},
            )
        except httpx.RequestError as e:
            return HealthCheckResult(
                valid=False,
                message=f"Failed to connect to Anthropic API: {e}",
                details={"error": str(e)},
            )

        status = response.status_code
        if status == 200:
            return HealthCheckResult(
                valid=True,
                message="Anthropic API key valid",
            )
        if status == 401:
            return HealthCheckResult(
                valid=False,
                message="Anthropic API key is invalid",
                details={"status_code": 401},
            )
        if status == 429:
            # Rate limited, but authentication succeeded -- the key is valid.
            return HealthCheckResult(
                valid=True,
                message="Anthropic API key valid (rate limited)",
                details={"status_code": 429, "rate_limited": True},
            )
        if status == 400:
            # Bad request after successful authentication -- the key is valid.
            return HealthCheckResult(
                valid=True,
                message="Anthropic API key valid",
                details={"status_code": 400},
            )
        return HealthCheckResult(
            valid=False,
            message=f"Anthropic API returned status {status}",
            details={"status_code": status},
        )
class GitHubHealthChecker:
"""Health checker for GitHub Personal Access Token."""
@@ -1068,7 +991,6 @@ HEALTH_CHECKERS: dict[str, CredentialHealthChecker] = {
"slack": SlackHealthChecker(),
"google_search": GoogleSearchHealthChecker(),
"google_maps": GoogleMapsHealthChecker(),
"anthropic": AnthropicHealthChecker(),
"github": GitHubHealthChecker(),
"resend": ResendHealthChecker(),
"stripe": StripeHealthChecker(),
-44
View File
@@ -1,44 +0,0 @@
"""
LLM provider credentials.
Contains credentials for language model providers like Anthropic, OpenAI, etc.
"""
from .base import CredentialSpec
# Registry of LLM provider credential specs, keyed by provider id.
LLM_CREDENTIALS = {
    "anthropic": CredentialSpec(
        env_var="ANTHROPIC_API_KEY",
        tools=[],  # empty: not tied to any specific tool
        node_types=["event_loop"],
        required=False,  # Not required - agents can use other providers via LiteLLM
        startup_required=False,  # MCP server doesn't need LLM credentials
        help_url="https://console.anthropic.com/settings/keys",
        description="API key for Anthropic Claude models",
        # Auth method support
        direct_api_key_supported=True,
        api_key_instructions="""To get an Anthropic API key:
1. Go to https://console.anthropic.com/settings/keys
2. Sign in or create an Anthropic account
3. Click "Create Key"
4. Give your key a descriptive name (e.g., "Hive Agent")
5. Copy the API key (starts with sk-ant-)
6. Store it securely - you won't be able to see the full key again!""",
        # Health check configuration
        health_check_endpoint="https://api.anthropic.com/v1/messages",
        health_check_method="POST",
        # Credential store mapping
        credential_id="anthropic",
        credential_key="api_key",
    ),
    # Future LLM providers:
    # "openai": CredentialSpec(
    #     env_var="OPENAI_API_KEY",
    #     tools=[],
    #     node_types=["openai_generate"],
    #     required=False,
    #     startup_required=False,
    #     help_url="https://platform.openai.com/api-keys",
    #     description="API key for OpenAI models",
    # ),
}
+60 -1
View File
@@ -1,11 +1,18 @@
"""Shared fixtures for tools tests."""
from __future__ import annotations
import logging
import os
from pathlib import Path
from typing import Callable
import pytest
from fastmcp import FastMCP
from aden_tools.credentials import CredentialStoreAdapter
from aden_tools.credentials import CREDENTIAL_SPECS, CredentialStoreAdapter
logger = logging.getLogger(__name__)
@pytest.fixture
@@ -56,3 +63,55 @@ def large_text_file(tmp_path: Path) -> Path:
large_file = tmp_path / "large.txt"
large_file.write_text("x" * 20_000_000) # 20MB
return large_file
@pytest.fixture(scope="session")
def live_credential_resolver() -> Callable[[str], str | None]:
    """Resolve live credentials for integration tests.

    Tries two sources in order:

    1. Environment variable (spec.env_var)
    2. CredentialStoreAdapter.default() (encrypted store + env fallback)

    Returns a callable: resolver(credential_name) -> str | None.
    Credential values are never logged or exposed in test output.
    """
    # Lazily built adapter, cached for the whole session; "failed" remembers
    # that construction raised so we only ever attempt it once.
    state: dict[str, object] = {"adapter": None, "failed": False}

    def _adapter_or_none() -> CredentialStoreAdapter | None:
        if state["adapter"] is not None:
            return state["adapter"]
        if state["failed"]:
            return None
        try:
            state["adapter"] = CredentialStoreAdapter.default()
        except Exception as exc:
            logger.debug("Could not initialize CredentialStoreAdapter: %s", exc)
            state["failed"] = True
        return state["adapter"]

    def resolve(credential_name: str) -> str | None:
        spec = CREDENTIAL_SPECS.get(credential_name)
        if spec is None:
            return None
        # 1. Direct environment variable lookup.
        env_value = os.environ.get(spec.env_var)
        if env_value:
            return env_value
        # 2. Encrypted store via the adapter (which has its own env fallback).
        store = _adapter_or_none()
        if store is not None:
            try:
                stored = store.get(credential_name)
                if stored:
                    return stored
            except Exception:
                # Best-effort: an unreadable store means "no credential".
                pass
        return None

    return resolve
-13
View File
@@ -308,19 +308,6 @@ class TestCredentialSpecs:
assert spec.startup_required is False
assert "brave.com" in spec.help_url
    def test_anthropic_spec_exists(self):
        """CREDENTIAL_SPECS includes anthropic with startup_required=False."""
        # The anthropic credential is optional (required=False) and not needed
        # at MCP server startup (startup_required=False).
        assert "anthropic" in CREDENTIAL_SPECS
        spec = CREDENTIAL_SPECS["anthropic"]
        assert spec.env_var == "ANTHROPIC_API_KEY"
        assert spec.tools == []
        assert "event_loop" in spec.node_types
        assert spec.required is False
        assert spec.startup_required is False
        assert "anthropic.com" in spec.help_url
class TestNodeTypeValidation:
"""Tests for node type credential validation."""
-82
View File
@@ -6,7 +6,6 @@ import httpx
from aden_tools.credentials.health_check import (
HEALTH_CHECKERS,
AnthropicHealthChecker,
ApolloHealthChecker,
BrevoHealthChecker,
CalcomHealthChecker,
@@ -35,11 +34,6 @@ class TestHealthCheckerRegistry:
assert "google_search" in HEALTH_CHECKERS
assert isinstance(HEALTH_CHECKERS["google_search"], GoogleSearchHealthChecker)
def test_anthropic_registered(self):
"""AnthropicHealthChecker is registered in HEALTH_CHECKERS."""
assert "anthropic" in HEALTH_CHECKERS
assert isinstance(HEALTH_CHECKERS["anthropic"], AnthropicHealthChecker)
def test_github_registered(self):
"""GitHubHealthChecker is registered in HEALTH_CHECKERS."""
assert "github" in HEALTH_CHECKERS
@@ -72,7 +66,6 @@ class TestHealthCheckerRegistry:
"brave_search",
"google_search",
"google_maps",
"anthropic",
"github",
"resend",
"google_calendar_oauth",
@@ -93,81 +86,6 @@ class TestHealthCheckerRegistry:
assert set(HEALTH_CHECKERS.keys()) == expected
class TestAnthropicHealthChecker:
    """Tests for AnthropicHealthChecker."""

    def _mock_response(self, status_code, json_data=None):
        # Minimal httpx.Response stand-in with only the attributes the
        # checker reads.
        response = MagicMock(spec=httpx.Response)
        response.status_code = status_code
        if json_data:
            response.json.return_value = json_data
        return response

    def _wire_client(self, client_cls):
        # Make the patched httpx.Client class act as a context manager that
        # yields a mock client; returns that client for per-test wiring.
        client = MagicMock()
        client_cls.return_value.__enter__ = MagicMock(return_value=client)
        client_cls.return_value.__exit__ = MagicMock(return_value=False)
        return client

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_valid_key_200(self, client_cls):
        client = self._wire_client(client_cls)
        client.post.return_value = self._mock_response(200)

        result = AnthropicHealthChecker().check("sk-ant-test-key")

        assert result.valid is True
        assert "valid" in result.message.lower()

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_invalid_key_401(self, client_cls):
        client = self._wire_client(client_cls)
        client.post.return_value = self._mock_response(401)

        result = AnthropicHealthChecker().check("invalid-key")

        assert result.valid is False
        assert result.details["status_code"] == 401

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_rate_limited_429(self, client_cls):
        client = self._wire_client(client_cls)
        client.post.return_value = self._mock_response(429)

        result = AnthropicHealthChecker().check("sk-ant-test-key")

        # 429 proves the key authenticated, so it is reported as valid.
        assert result.valid is True
        assert result.details.get("rate_limited") is True

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_bad_request_400_still_valid(self, client_cls):
        client = self._wire_client(client_cls)
        client.post.return_value = self._mock_response(400)

        result = AnthropicHealthChecker().check("sk-ant-test-key")

        # 400 means the key authenticated but the probe payload was rejected.
        assert result.valid is True

    @patch("aden_tools.credentials.health_check.httpx.Client")
    def test_timeout(self, client_cls):
        client = self._wire_client(client_cls)
        client.post.side_effect = httpx.TimeoutException("timed out")

        result = AnthropicHealthChecker().check("sk-ant-test-key")

        assert result.valid is False
        assert result.details["error"] == "timeout"
class TestGitHubHealthChecker:
"""Tests for GitHubHealthChecker."""
+198
View File
@@ -0,0 +1,198 @@
"""Live integration tests for credential health checkers.
These tests make REAL API calls. They are gated behind the ``live`` marker
and never run in CI. Run them manually::
pytest -m live -s --log-cli-level=INFO # all live tests
pytest -m live -k anthropic -s # just anthropic
pytest -m live -k "not google" -s # skip google variants
pytest -m live --tb=short -q # quick summary
Prerequisites:
- Credentials available via env vars or ~/.hive/credentials/ encrypted store
- Tests skip gracefully when credentials are unavailable
- Rate-limited responses (429) are treated as PASS (credential is valid)
"""
from __future__ import annotations
import logging
import pytest
from aden_tools.credentials import CREDENTIAL_SPECS
from aden_tools.credentials.health_check import (
HEALTH_CHECKERS,
check_credential_health,
validate_integration_wiring,
)
logger = logging.getLogger(__name__)
# All credential names that have registered health checkers
CHECKER_NAMES = sorted(HEALTH_CHECKERS.keys())
def _redact(value: str) -> str:
"""Redact a credential for safe logging."""
if len(value) <= 8:
return "****"
return f"{value[:4]}...{value[-2:]}"
# ---------------------------------------------------------------------------
# 1. Direct checker tests
# ---------------------------------------------------------------------------
@pytest.mark.live
class TestLiveHealthCheckers:
    """Call each health checker against the real API."""

    @pytest.mark.parametrize("credential_name", CHECKER_NAMES, ids=CHECKER_NAMES)
    def test_checker_returns_valid(self, credential_name, live_credential_resolver):
        """Health checker returns valid=True with a real credential."""
        secret = live_credential_resolver(credential_name)
        if secret is None:
            spec = CREDENTIAL_SPECS.get(credential_name)
            env_var = spec.env_var if spec else "???"
            pytest.skip(f"No credential available ({env_var})")

        result = HEALTH_CHECKERS[credential_name].check(secret)
        logger.info(
            "Live check %s: valid=%s message=%r",
            credential_name,
            result.valid,
            result.message,
        )
        assert result.valid is True, (
            f"Health check for '{credential_name}' returned valid=False: "
            f"{result.message} (details: {result.details})"
        )
        assert result.message

    @pytest.mark.parametrize("credential_name", CHECKER_NAMES, ids=CHECKER_NAMES)
    def test_checker_extracts_identity(self, credential_name, live_credential_resolver):
        """Identity metadata (when present) contains non-empty strings."""
        secret = live_credential_resolver(credential_name)
        if secret is None:
            pytest.skip(f"No credential available for '{credential_name}'")

        result = HEALTH_CHECKERS[credential_name].check(secret)
        assert result.valid is True, (
            f"Cannot verify identity -- health check failed: {result.message}"
        )

        identity = result.details.get("identity", {})
        if not identity:
            # Not every API exposes identity metadata; absence is acceptable.
            logger.info("No identity metadata for %s (OK for some APIs)", credential_name)
            return
        logger.info("Identity for %s: %s", credential_name, identity)
        for key, value in identity.items():
            assert isinstance(value, str), (
                f"Identity key '{key}' is not a string: {type(value)}"
            )
            assert value, f"Identity key '{key}' is empty"
# ---------------------------------------------------------------------------
# 2. Dispatcher path (check_credential_health)
# ---------------------------------------------------------------------------
@pytest.mark.live
class TestLiveDispatcher:
    """Verify the full check_credential_health() dispatch path."""

    @pytest.mark.parametrize("credential_name", CHECKER_NAMES, ids=CHECKER_NAMES)
    def test_dispatcher_returns_valid(self, credential_name, live_credential_resolver):
        """check_credential_health() returns valid=True via dispatcher."""
        secret = live_credential_resolver(credential_name)
        if secret is None:
            pytest.skip(f"No credential available for '{credential_name}'")

        # Exercise the dispatcher entry point rather than the checker object.
        outcome = check_credential_health(credential_name, secret)
        logger.info(
            "Dispatcher check %s: valid=%s message=%r",
            credential_name,
            outcome.valid,
            outcome.message,
        )
        assert outcome.valid is True, (
            f"Dispatcher check for '{credential_name}' returned valid=False: "
            f"{outcome.message} (details: {outcome.details})"
        )
# ---------------------------------------------------------------------------
# 3. Integration wiring verification
# ---------------------------------------------------------------------------
@pytest.mark.live
class TestLiveIntegrationWiring:
    """validate_integration_wiring() passes for every registered checker."""

    @pytest.mark.parametrize("credential_name", CHECKER_NAMES, ids=CHECKER_NAMES)
    def test_wiring_valid(self, credential_name):
        """No wiring issues for credentials with health checkers."""
        # Wiring validation is static -- it needs no live credential.
        problems = validate_integration_wiring(credential_name)
        detail = "\n".join(f"  - {issue}" for issue in problems)
        assert not problems, f"Wiring issues for '{credential_name}':\n" + detail
# ---------------------------------------------------------------------------
# 4. Summary reporter
# ---------------------------------------------------------------------------
@pytest.mark.live
class TestLiveCredentialSummary:
    """Print a human-readable summary of tested vs skipped credentials."""

    def test_credential_availability_summary(self, live_credential_resolver):
        """Report which credentials were available for live testing."""
        # Partition (name, env_var) pairs by whether a credential resolved;
        # dict insertion order preserves CHECKER_NAMES ordering.
        resolved: dict[tuple[str, str], bool] = {}
        for name in CHECKER_NAMES:
            spec = CREDENTIAL_SPECS.get(name)
            env_var = spec.env_var if spec else "???"
            resolved[(name, env_var)] = bool(live_credential_resolver(name))
        available = [pair for pair, ok in resolved.items() if ok]
        skipped = [pair for pair, ok in resolved.items() if not ok]

        lines = [
            "",
            "=" * 60,
            "LIVE CREDENTIAL TEST SUMMARY",
            "=" * 60,
            f" Available: {len(available)} / {len(CHECKER_NAMES)}",
            f" Skipped: {len(skipped)} / {len(CHECKER_NAMES)}",
            "",
        ]
        if available:
            lines.append(" TESTED:")
            lines.extend(f" [PASS] {name} ({env_var})" for name, env_var in available)
        if skipped:
            lines.extend(["", " SKIPPED (no credential):"])
            lines.extend(f" [SKIP] {name} ({env_var})" for name, env_var in skipped)
        lines.append("=" * 60)

        summary = "\n".join(lines)
        logger.info(summary)
        print(summary)  # noqa: T201 -- visible with pytest -s