Compare commits
23 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 4ad0d0e077 | |||
| b55a77634b | |||
| f7db603922 | |||
| b4a47a12ff | |||
| 2228851b16 | |||
| ed0a211906 | |||
| 63744ddaef | |||
| 82331acb77 | |||
| b96bbcaa72 | |||
| edfa49bf7a | |||
| eb9e4ed23c | |||
| fed9e90271 | |||
| ca565ae664 | |||
| 42ce97e0fc | |||
| bea17b5f79 | |||
| ab0d5ce8d3 | |||
| b374d5119a | |||
| fbbbaadd1e | |||
| 37651e534f | |||
| df63c3e781 | |||
| 838da4a16e | |||
| e916d573f6 | |||
| fa5ebf19a4 |
@@ -1,31 +0,0 @@
|
||||
name: Link Discord Account
|
||||
description: Connect your GitHub and Discord for the bounty program
|
||||
title: "link: @{{ github.actor }}"
|
||||
labels: ["link-discord"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Link your Discord account to receive XP and role rewards when your bounty PRs are merged.
|
||||
|
||||
**How to find your Discord ID:**
|
||||
1. Open Discord Settings > Advanced > Enable **Developer Mode**
|
||||
2. Right-click your username > **Copy User ID**
|
||||
|
||||
- type: input
|
||||
id: discord_id
|
||||
attributes:
|
||||
label: Discord User ID
|
||||
description: "Your numeric Discord ID (not your username). Example: 123456789012345678"
|
||||
placeholder: "123456789012345678"
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: input
|
||||
id: display_name
|
||||
attributes:
|
||||
label: Display Name (optional)
|
||||
description: How you'd like to be credited
|
||||
placeholder: "Jane Doe"
|
||||
validations:
|
||||
required: false
|
||||
@@ -5,7 +5,7 @@ on:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
@@ -24,6 +24,8 @@ jobs:
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v4
|
||||
with:
|
||||
enable-cache: true
|
||||
|
||||
- name: Install dependencies
|
||||
run: uv sync --project core --group dev
|
||||
@@ -54,10 +56,12 @@ jobs:
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v4
|
||||
with:
|
||||
enable-cache: true
|
||||
|
||||
- name: Install dependencies and run tests
|
||||
working-directory: core
|
||||
run: |
|
||||
cd core
|
||||
uv sync
|
||||
uv run pytest tests/ -v
|
||||
|
||||
@@ -77,10 +81,12 @@ jobs:
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v4
|
||||
with:
|
||||
enable-cache: true
|
||||
|
||||
- name: Install dependencies and run tests
|
||||
working-directory: tools
|
||||
run: |
|
||||
cd tools
|
||||
uv sync --extra dev
|
||||
uv run pytest tests/ -v
|
||||
|
||||
@@ -98,10 +104,12 @@ jobs:
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v4
|
||||
|
||||
with:
|
||||
enable-cache: true
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: core
|
||||
run: |
|
||||
cd core
|
||||
uv sync
|
||||
|
||||
- name: Validate exported agents
|
||||
|
||||
@@ -0,0 +1,54 @@
|
||||
# Closes PRs that still have the `pr-requirements-warning` label
|
||||
# after contributors were warned in pr-requirements.yml.
|
||||
name: PR Requirements Enforcement
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * *" # runs every day once at midnight
|
||||
jobs:
|
||||
enforce:
|
||||
name: Close PRs still failing contribution requirements
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
pull-requests: write
|
||||
issues: write
|
||||
steps:
|
||||
- name: Close PRs still failing requirements
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const { owner, repo } = context.repo;
|
||||
const prs = await github.paginate(github.rest.pulls.list, {
|
||||
owner,
|
||||
repo,
|
||||
state: "open",
|
||||
per_page: 100
|
||||
});
|
||||
for (const pr of prs) {
|
||||
// Skip draft PRs — author may still be actively working toward compliance
|
||||
if (pr.draft) continue;
|
||||
const labels = pr.labels.map(l => l.name);
|
||||
if (!labels.includes("pr-requirements-warning")) continue;
|
||||
const gracePeriod = 24 * 60 * 60 * 1000;
|
||||
const lastUpdated = new Date(pr.created_at);
|
||||
const now = new Date();
|
||||
if (now - lastUpdated < gracePeriod) {
|
||||
console.log(`Skipping PR #${pr.number} — still within grace period`);
|
||||
continue;
|
||||
}
|
||||
const prNumber = pr.number;
|
||||
const prAuthor = pr.user.login;
|
||||
await github.rest.issues.createComment({
|
||||
owner,
|
||||
repo,
|
||||
issue_number: prNumber,
|
||||
body: `Closing PR because the contribution requirements were not resolved within the 24-hour grace period.
|
||||
If this was closed in error, feel free to reopen the PR after fixing the requirements.`
|
||||
});
|
||||
await github.rest.pulls.update({
|
||||
owner,
|
||||
repo,
|
||||
pull_number: prNumber,
|
||||
state: "closed"
|
||||
});
|
||||
console.log(`Closed PR #${prNumber} by ${prAuthor} (PR requirements were not met)`);
|
||||
}
|
||||
@@ -43,9 +43,10 @@ jobs:
|
||||
console.log(` Found issue references: ${issueNumbers.length > 0 ? issueNumbers.join(', ') : 'none'}`);
|
||||
|
||||
if (issueNumbers.length === 0) {
|
||||
const message = `## PR Closed - Requirements Not Met
|
||||
const message = `## PR Requirements Warning
|
||||
|
||||
This PR has been automatically closed because it doesn't meet the requirements.
|
||||
This PR does not meet the contribution requirements.
|
||||
If the issue is not fixed within ~24 hours, it may be automatically closed.
|
||||
|
||||
**Missing:** No linked issue found.
|
||||
|
||||
@@ -67,14 +68,15 @@ jobs:
|
||||
|
||||
**Why is this required?** See #472 for details.`;
|
||||
|
||||
const comments = await github.rest.issues.listComments({
|
||||
const comments = await github.paginate(github.rest.issues.listComments, {
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: prNumber,
|
||||
per_page: 100,
|
||||
});
|
||||
|
||||
const botComment = comments.data.find(
|
||||
(c) => c.user.type === 'Bot' && c.body.includes('PR Closed - Requirements Not Met')
|
||||
const botComment = comments.find(
|
||||
(c) => c.user.type === 'Bot' && c.body.includes('PR Requirements Warning')
|
||||
);
|
||||
|
||||
if (!botComment) {
|
||||
@@ -86,11 +88,11 @@ jobs:
|
||||
});
|
||||
}
|
||||
|
||||
await github.rest.pulls.update({
|
||||
await github.rest.issues.addLabels({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
pull_number: prNumber,
|
||||
state: 'closed',
|
||||
issue_number: prNumber,
|
||||
labels: ['pr-requirements-warning'],
|
||||
});
|
||||
|
||||
core.setFailed('PR must reference an issue');
|
||||
@@ -132,9 +134,10 @@ jobs:
|
||||
`#${i.number} (assignees: ${i.assignees.length > 0 ? i.assignees.join(', ') : 'none'})`
|
||||
).join(', ');
|
||||
|
||||
const message = `## PR Closed - Requirements Not Met
|
||||
const message = `## PR Requirements Warning
|
||||
|
||||
This PR has been automatically closed because it doesn't meet the requirements.
|
||||
This PR does not meet the contribution requirements.
|
||||
If the issue is not fixed within ~24 hours, it may be automatically closed.
|
||||
|
||||
**PR Author:** @${prAuthor}
|
||||
**Found issues:** ${issueList}
|
||||
@@ -157,14 +160,15 @@ jobs:
|
||||
|
||||
**Why is this required?** See #472 for details.`;
|
||||
|
||||
const comments = await github.rest.issues.listComments({
|
||||
const comments = await github.paginate(github.rest.issues.listComments, {
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: prNumber,
|
||||
per_page: 100,
|
||||
});
|
||||
|
||||
const botComment = comments.data.find(
|
||||
(c) => c.user.type === 'Bot' && c.body.includes('PR Closed - Requirements Not Met')
|
||||
const botComment = comments.find(
|
||||
(c) => c.user.type === 'Bot' && c.body.includes('PR Requirements Warning')
|
||||
);
|
||||
|
||||
if (!botComment) {
|
||||
@@ -176,14 +180,24 @@ jobs:
|
||||
});
|
||||
}
|
||||
|
||||
await github.rest.pulls.update({
|
||||
await github.rest.issues.addLabels({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
pull_number: prNumber,
|
||||
state: 'closed',
|
||||
issue_number: prNumber,
|
||||
labels: ['pr-requirements-warning'],
|
||||
});
|
||||
|
||||
core.setFailed('PR author must be assigned to the linked issue');
|
||||
} else {
|
||||
console.log(`PR requirements met! Issue #${issueWithAuthorAssigned} has ${prAuthor} as assignee.`);
|
||||
}
|
||||
try {
|
||||
await github.rest.issues.removeLabel({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: prNumber,
|
||||
name: "pr-requirements-warning"
|
||||
});
|
||||
}catch (error){
|
||||
//ignore if label doesn't exist
|
||||
}
|
||||
}
|
||||
@@ -1312,6 +1312,15 @@ def register_queen_lifecycle_tools(
|
||||
Returns credential IDs, aliases, status, and identity metadata.
|
||||
Never returns secret values. Optionally filter by credential_id.
|
||||
"""
|
||||
# Load shell config vars into os.environ — same first step as check-agent.
|
||||
# Ensures keys set in ~/.zshrc/~/.bashrc are visible to is_available() checks.
|
||||
try:
|
||||
from framework.credentials.validation import ensure_credential_key_env
|
||||
|
||||
ensure_credential_key_env()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
# Primary: CredentialStoreAdapter sees both Aden OAuth and local accounts
|
||||
from aden_tools.credentials import CredentialStoreAdapter
|
||||
@@ -1319,13 +1328,24 @@ def register_queen_lifecycle_tools(
|
||||
store = CredentialStoreAdapter.default()
|
||||
all_accounts = store.get_all_account_info()
|
||||
|
||||
# Filter by credential_id / provider if requested
|
||||
# Filter by credential_id / provider if requested.
|
||||
# A spec name like "gmail_oauth" maps to provider "google" via
|
||||
# credential_id field — resolve that alias before filtering.
|
||||
if credential_id:
|
||||
try:
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
|
||||
spec = CREDENTIAL_SPECS.get(credential_id)
|
||||
resolved_provider = (
|
||||
(spec.credential_id or credential_id) if spec else credential_id
|
||||
)
|
||||
except Exception:
|
||||
resolved_provider = credential_id
|
||||
all_accounts = [
|
||||
a
|
||||
for a in all_accounts
|
||||
if a.get("credential_id", "").startswith(credential_id)
|
||||
or a.get("provider", "") == credential_id
|
||||
or a.get("provider", "") in (credential_id, resolved_provider)
|
||||
]
|
||||
|
||||
return json.dumps(
|
||||
@@ -1342,13 +1362,43 @@ def register_queen_lifecycle_tools(
|
||||
|
||||
# Fallback: local encrypted store only
|
||||
try:
|
||||
from framework.credentials.local.models import LocalAccountInfo
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
from framework.credentials.storage import EncryptedFileStorage
|
||||
|
||||
registry = LocalCredentialRegistry.default()
|
||||
accounts = registry.list_accounts(
|
||||
credential_id=credential_id or None,
|
||||
)
|
||||
|
||||
# Also include flat-file credentials saved by the GUI (no "/" separator).
|
||||
# LocalCredentialRegistry.list_accounts() skips these — read them directly.
|
||||
seen_cred_ids = {info.credential_id for info in accounts}
|
||||
storage = EncryptedFileStorage()
|
||||
for storage_id in storage.list_all():
|
||||
if "/" in storage_id:
|
||||
continue # already handled by LocalCredentialRegistry above
|
||||
if credential_id and storage_id != credential_id:
|
||||
continue
|
||||
if storage_id in seen_cred_ids:
|
||||
continue
|
||||
try:
|
||||
cred_obj = storage.load(storage_id)
|
||||
except Exception:
|
||||
continue
|
||||
if cred_obj is None:
|
||||
continue
|
||||
accounts.append(
|
||||
LocalAccountInfo(
|
||||
credential_id=storage_id,
|
||||
alias="default",
|
||||
status="unknown",
|
||||
identity=cred_obj.identity,
|
||||
last_validated=cred_obj.last_refreshed,
|
||||
created_at=cred_obj.created_at,
|
||||
)
|
||||
)
|
||||
|
||||
credentials = []
|
||||
for info in accounts:
|
||||
entry: dict[str, Any] = {
|
||||
|
||||
@@ -108,7 +108,9 @@ def main() -> None:
|
||||
if api_base:
|
||||
# Custom API base (ZAI or other OpenAI-compatible)
|
||||
endpoint = api_base.rstrip("/") + "/models"
|
||||
name = {"zai": "ZAI", "minimax": "MiniMax"}.get(provider_id, "Custom provider")
|
||||
name = {"zai": "ZAI", "minimax": "MiniMax"}.get(
|
||||
provider_id, "Custom provider"
|
||||
)
|
||||
result = check_openai_compatible(api_key, endpoint, name)
|
||||
elif provider_id in PROVIDERS:
|
||||
result = PROVIDERS[provider_id](api_key)
|
||||
|
||||
@@ -19,6 +19,7 @@ import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import textwrap
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
@@ -1220,11 +1221,13 @@ def run_agent_tests(
|
||||
def validate_agent_package(agent_name: str) -> str:
|
||||
"""Run structural validation checks on a built agent package in one call.
|
||||
|
||||
Executes 4 steps and reports all results (does not stop on first failure):
|
||||
Executes 5 steps and reports all results (does not stop on first failure):
|
||||
1. Class validation — checks graph structure and entry_points contract
|
||||
2. Graph validation — loads the agent graph without credential checks
|
||||
3. Tool validation — checks declared tools exist in MCP servers
|
||||
4. Tests — runs the agent's pytest suite
|
||||
2. Node completeness — every NodeSpec in nodes/ must be in the nodes list,
|
||||
and GCU nodes must be referenced in a parent's sub_agents
|
||||
3. Graph validation — loads the agent graph without credential checks
|
||||
4. Tool validation — checks declared tools exist in MCP servers
|
||||
5. Tests — runs the agent's pytest suite
|
||||
|
||||
Note: Credential validation is intentionally skipped here (building phase).
|
||||
Credentials are validated at run time by run_agent_with_input() preflight.
|
||||
@@ -1276,6 +1279,62 @@ def validate_agent_package(agent_name: str) -> str:
|
||||
except Exception as e:
|
||||
steps["class_validation"] = {"passed": False, "error": str(e)}
|
||||
|
||||
# Step A2: Node completeness — every NodeSpec in nodes/ must be in the nodes list
|
||||
try:
|
||||
_check_template = textwrap.dedent("""\
|
||||
import importlib, json
|
||||
agent = importlib.import_module('{agent_name}')
|
||||
nodes_mod = importlib.import_module('{agent_name}.nodes')
|
||||
graph_ids = {{n.id for n in agent.nodes}}
|
||||
defined = {{}}
|
||||
for attr in dir(nodes_mod):
|
||||
obj = getattr(nodes_mod, attr)
|
||||
if hasattr(obj, 'id') and hasattr(obj, 'node_type'):
|
||||
defined[obj.id] = attr
|
||||
orphaned = set(defined) - graph_ids
|
||||
errors = [
|
||||
f"Node '{{nid}}' ({{defined[nid]}}) defined in nodes/ but not in nodes list"
|
||||
for nid in sorted(orphaned)
|
||||
]
|
||||
sub_refs = set()
|
||||
for n in agent.nodes:
|
||||
for sa in getattr(n, 'sub_agents', []) or []:
|
||||
sub_refs.add(sa)
|
||||
for n in agent.nodes:
|
||||
if n.node_type == 'gcu' and n.id not in sub_refs:
|
||||
errors.append(
|
||||
f"GCU node '{{n.id}}' not referenced in any node's sub_agents list"
|
||||
)
|
||||
print(json.dumps({{'valid': len(errors) == 0, 'errors': errors}}))
|
||||
""")
|
||||
check_script = _check_template.format(agent_name=agent_name)
|
||||
proc = subprocess.run(
|
||||
["uv", "run", "python", "-c", check_script],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=30,
|
||||
env=env,
|
||||
cwd=PROJECT_ROOT,
|
||||
stdin=subprocess.DEVNULL,
|
||||
)
|
||||
if proc.returncode == 0:
|
||||
result = json.loads(proc.stdout.strip())
|
||||
steps["node_completeness"] = {
|
||||
"passed": result["valid"],
|
||||
"output": "; ".join(result["errors"])
|
||||
if result["errors"]
|
||||
else "All defined nodes are in the graph",
|
||||
}
|
||||
if not result["valid"]:
|
||||
steps["node_completeness"]["errors"] = result["errors"]
|
||||
else:
|
||||
steps["node_completeness"] = {
|
||||
"passed": False,
|
||||
"error": proc.stderr.strip()[:2000],
|
||||
}
|
||||
except Exception as e:
|
||||
steps["node_completeness"] = {"passed": False, "error": str(e)}
|
||||
|
||||
# Step B: Graph validation (subprocess for import isolation)
|
||||
# Credentials are checked at run time (run_agent_with_input preflight),
|
||||
# not at build time.
|
||||
@@ -1743,6 +1802,7 @@ class {class_name}:
|
||||
for ep_id, nid in self.entry_points.items():
|
||||
if nid not in node_ids:
|
||||
errors.append(f"Entry point '{{ep_id}}' references unknown node '{{nid}}'")
|
||||
|
||||
return {{"valid": len(errors) == 0, "errors": errors, "warnings": warnings}}
|
||||
|
||||
|
||||
|
||||
@@ -36,8 +36,8 @@ EMAIL_CREDENTIALS = {
|
||||
"google": CredentialSpec(
|
||||
env_var="GOOGLE_ACCESS_TOKEN",
|
||||
tools=[
|
||||
# send_email is excluded: it's a multi-provider tool that checks
|
||||
# credentials at runtime based on the provider parameter.
|
||||
# send_email is a multi-provider tool; also listed under resend
|
||||
"send_email",
|
||||
# Gmail tools
|
||||
"gmail_reply_email",
|
||||
"gmail_list_messages",
|
||||
|
||||
@@ -249,14 +249,16 @@ def register_tools(
|
||||
return _send_email_impl(to, subject, html, provider, from_email, cc, bcc, account)
|
||||
|
||||
def _fetch_original_message(access_token: str, message_id: str) -> dict:
|
||||
"""Fetch the original message to extract threading info."""
|
||||
"""Fetch the original message to extract threading info and body."""
|
||||
import base64
|
||||
|
||||
response = httpx.get(
|
||||
f"https://gmail.googleapis.com/gmail/v1/users/me/messages/{message_id}",
|
||||
headers={
|
||||
"Authorization": f"Bearer {access_token}",
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
params={"format": "metadata", "metadataHeaders": ["Message-ID", "Subject", "From"]},
|
||||
params={"format": "full"},
|
||||
timeout=30.0,
|
||||
)
|
||||
|
||||
@@ -273,14 +275,40 @@ def register_tools(
|
||||
}
|
||||
|
||||
data = response.json()
|
||||
headers = {h["name"]: h["value"] for h in data.get("payload", {}).get("headers", [])}
|
||||
payload = data.get("payload", {})
|
||||
headers = {h["name"]: h["value"] for h in payload.get("headers", [])}
|
||||
|
||||
def _extract_body(part: dict, mime_type: str) -> str | None:
|
||||
"""Recursively find and decode a body part by mime type."""
|
||||
if part.get("mimeType") == mime_type:
|
||||
body_data = part.get("body", {}).get("data", "")
|
||||
if body_data:
|
||||
return base64.urlsafe_b64decode(body_data).decode("utf-8", errors="replace")
|
||||
for sub in part.get("parts", []):
|
||||
result = _extract_body(sub, mime_type)
|
||||
if result:
|
||||
return result
|
||||
return None
|
||||
|
||||
body_html = _extract_body(payload, "text/html")
|
||||
body_text = _extract_body(payload, "text/plain") if not body_html else None
|
||||
|
||||
return {
|
||||
"thread_id": data.get("threadId"),
|
||||
"message_id_header": headers.get("Message-ID", headers.get("Message-Id", "")),
|
||||
"subject": headers.get("Subject", ""),
|
||||
"from": headers.get("From", ""),
|
||||
"date": headers.get("Date", ""),
|
||||
"body_html": body_html,
|
||||
"body_text": body_text,
|
||||
}
|
||||
|
||||
def _plain_to_html(text: str) -> str:
|
||||
"""Wrap plain text in a <pre> tag for safe HTML embedding."""
|
||||
import html as html_module
|
||||
|
||||
return f"<pre>{html_module.escape(text)}</pre>"
|
||||
|
||||
@mcp.tool()
|
||||
def gmail_reply_email(
|
||||
message_id: str,
|
||||
@@ -337,12 +365,26 @@ def register_tools(
|
||||
original_message_id = original["message_id_header"]
|
||||
original_subject = original["subject"]
|
||||
reply_to_address = original["from"]
|
||||
original_date = original.get("date", "")
|
||||
|
||||
# Build reply subject
|
||||
subject = original_subject
|
||||
if not subject.lower().startswith("re:"):
|
||||
subject = f"Re: {subject}"
|
||||
|
||||
# Append quoted original body so the thread is visible in the reply
|
||||
original_body = original.get("body_html") or _plain_to_html(original.get("body_text") or "")
|
||||
quoted_html = (
|
||||
f"<br><br>"
|
||||
f'<div class="gmail_quote">'
|
||||
f"<div>On {original_date}, {reply_to_address} wrote:</div>"
|
||||
f'<blockquote style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">'
|
||||
f"{original_body}"
|
||||
f"</blockquote>"
|
||||
f"</div>"
|
||||
)
|
||||
full_html = html + quoted_html
|
||||
|
||||
# Build MIME message with threading headers
|
||||
msg = MIMEMultipart("alternative")
|
||||
msg["To"] = reply_to_address
|
||||
@@ -358,7 +400,7 @@ def register_tools(
|
||||
if bcc_list:
|
||||
msg["Bcc"] = ", ".join(bcc_list)
|
||||
|
||||
msg.attach(MIMEText(html, "html"))
|
||||
msg.attach(MIMEText(full_html, "html"))
|
||||
|
||||
raw = base64.urlsafe_b64encode(msg.as_bytes()).decode("ascii")
|
||||
|
||||
|
||||
@@ -488,29 +488,36 @@ def register_tools(
|
||||
|
||||
@mcp.tool()
|
||||
def gmail_create_draft(
|
||||
to: str,
|
||||
subject: str,
|
||||
html: str,
|
||||
to: str = "",
|
||||
subject: str = "",
|
||||
account: str = "",
|
||||
reply_to_message_id: str = "",
|
||||
) -> dict:
|
||||
"""
|
||||
Create a draft email in the user's Gmail Drafts folder.
|
||||
|
||||
The draft can be reviewed and sent manually from Gmail.
|
||||
|
||||
To create a real threaded reply (not a new thread), provide
|
||||
reply_to_message_id. The tool will fetch the original message,
|
||||
derive recipient and subject automatically, and set the correct
|
||||
In-Reply-To/References headers so the draft appears in the same thread.
|
||||
|
||||
Args:
|
||||
to: Recipient email address.
|
||||
subject: Email subject line.
|
||||
html: Email body as HTML string.
|
||||
to: Recipient email address. Required when reply_to_message_id is not set.
|
||||
Ignored when reply_to_message_id is set (derived from original message).
|
||||
subject: Email subject line. Required when reply_to_message_id is not set.
|
||||
Ignored when reply_to_message_id is set (derived from original message).
|
||||
account: Account alias for multi-account routing. Optional.
|
||||
reply_to_message_id: Gmail message ID to reply to. When provided, creates
|
||||
the draft as a threaded reply with proper headers.
|
||||
|
||||
Returns:
|
||||
Dict with "success", "draft_id", and "message_id",
|
||||
Dict with "success", "draft_id", "message_id", and optionally "thread_id",
|
||||
or error dict with "error" and optional "help" keys.
|
||||
"""
|
||||
if not to or not to.strip():
|
||||
return {"error": "Recipient email (to) is required"}
|
||||
if not subject or not subject.strip():
|
||||
return {"error": "Subject is required"}
|
||||
if not html:
|
||||
return {"error": "Email body (html) is required"}
|
||||
|
||||
@@ -518,20 +525,101 @@ def register_tools(
|
||||
if isinstance(token, dict):
|
||||
return token
|
||||
|
||||
import html as html_module
|
||||
from email.mime.multipart import MIMEMultipart
|
||||
from email.mime.text import MIMEText
|
||||
|
||||
msg = MIMEText(html, "html")
|
||||
msg["To"] = to
|
||||
msg["Subject"] = subject
|
||||
thread_id: str | None = None
|
||||
in_reply_to: str | None = None
|
||||
full_html = html
|
||||
|
||||
if reply_to_message_id:
|
||||
# Fetch original message with full body for threading + quoted content
|
||||
try:
|
||||
orig_response = _gmail_request(
|
||||
"GET",
|
||||
f"messages/{_sanitize_path_param(reply_to_message_id, 'reply_to_message_id')}",
|
||||
token,
|
||||
params={"format": "full"},
|
||||
)
|
||||
except httpx.HTTPError as e:
|
||||
return {"error": f"Failed to fetch original message: {e}"}
|
||||
|
||||
orig_error = _handle_error(orig_response)
|
||||
if orig_error:
|
||||
return orig_error
|
||||
|
||||
orig_data = orig_response.json()
|
||||
thread_id = orig_data.get("threadId", "")
|
||||
payload = orig_data.get("payload", {})
|
||||
orig_headers = {h["name"]: h["value"] for h in payload.get("headers", [])}
|
||||
|
||||
in_reply_to = orig_headers.get("Message-ID") or orig_headers.get("Message-Id", "")
|
||||
orig_subject = orig_headers.get("Subject", "")
|
||||
orig_from = orig_headers.get("From", "")
|
||||
orig_date = orig_headers.get("Date", "")
|
||||
to = orig_from or to
|
||||
subject = (
|
||||
orig_subject if orig_subject.lower().startswith("re:") else f"Re: {orig_subject}"
|
||||
)
|
||||
|
||||
# Extract body recursively (prefer HTML, fall back to plain text)
|
||||
def _extract_body(part: dict, mime_type: str) -> str | None:
|
||||
if part.get("mimeType") == mime_type:
|
||||
body_data = part.get("body", {}).get("data", "")
|
||||
if body_data:
|
||||
return base64.urlsafe_b64decode(body_data).decode("utf-8", errors="replace")
|
||||
for sub in part.get("parts", []):
|
||||
result = _extract_body(sub, mime_type)
|
||||
if result:
|
||||
return result
|
||||
return None
|
||||
|
||||
orig_body_html = _extract_body(payload, "text/html")
|
||||
if not orig_body_html:
|
||||
orig_body_text = _extract_body(payload, "text/plain") or ""
|
||||
orig_body_html = f"<pre>{html_module.escape(orig_body_text)}</pre>"
|
||||
|
||||
quoted = (
|
||||
f"<br><br>"
|
||||
f'<div class="gmail_quote">'
|
||||
f"<div>On {orig_date}, {orig_from} wrote:</div>"
|
||||
"<blockquote"
|
||||
' style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">'
|
||||
f"{orig_body_html}"
|
||||
f"</blockquote>"
|
||||
f"</div>"
|
||||
)
|
||||
full_html = html + quoted
|
||||
else:
|
||||
if not to or not to.strip():
|
||||
return {"error": "Recipient email (to) is required"}
|
||||
if not subject or not subject.strip():
|
||||
return {"error": "Subject is required"}
|
||||
|
||||
if in_reply_to:
|
||||
msg: MIMEMultipart | MIMEText = MIMEMultipart("alternative")
|
||||
msg["To"] = to
|
||||
msg["Subject"] = subject
|
||||
msg["In-Reply-To"] = in_reply_to
|
||||
msg["References"] = in_reply_to
|
||||
msg.attach(MIMEText(full_html, "html")) # type: ignore[attr-defined]
|
||||
else:
|
||||
msg = MIMEText(full_html, "html")
|
||||
msg["To"] = to
|
||||
msg["Subject"] = subject
|
||||
|
||||
raw = base64.urlsafe_b64encode(msg.as_bytes()).decode("ascii")
|
||||
message_body: dict = {"raw": raw}
|
||||
if thread_id:
|
||||
message_body["threadId"] = thread_id
|
||||
|
||||
try:
|
||||
response = _gmail_request(
|
||||
"POST",
|
||||
"drafts",
|
||||
token,
|
||||
json={"message": {"raw": raw}},
|
||||
json={"message": message_body},
|
||||
)
|
||||
except httpx.HTTPError as e:
|
||||
return {"error": f"Request failed: {e}"}
|
||||
@@ -541,11 +629,14 @@ def register_tools(
|
||||
return error
|
||||
|
||||
data = response.json()
|
||||
return {
|
||||
result: dict = {
|
||||
"success": True,
|
||||
"draft_id": data.get("id", ""),
|
||||
"message_id": data.get("message", {}).get("id", ""),
|
||||
}
|
||||
if thread_id:
|
||||
result["thread_id"] = thread_id
|
||||
return result
|
||||
|
||||
@mcp.tool()
|
||||
def gmail_list_labels(account: str = "") -> dict:
|
||||
|
||||
@@ -501,19 +501,26 @@ _HTTPX_GET = "aden_tools.tools.email_tool.email_tool.httpx.get"
|
||||
_HTTPX_POST = "aden_tools.tools.email_tool.email_tool.httpx.post"
|
||||
|
||||
|
||||
def _mock_original_message_response():
|
||||
"""Helper: mock response for fetching the original message."""
|
||||
def _mock_original_message_response(body_html: str = "<p>Original message body</p>"):
|
||||
"""Helper: mock response for fetching the original message (format=full)."""
|
||||
import base64
|
||||
|
||||
resp = MagicMock()
|
||||
resp.status_code = 200
|
||||
resp.json.return_value = {
|
||||
"id": "orig_123",
|
||||
"threadId": "thread_abc",
|
||||
"payload": {
|
||||
"mimeType": "text/html",
|
||||
"headers": [
|
||||
{"name": "Message-ID", "value": "<orig@mail.gmail.com>"},
|
||||
{"name": "Subject", "value": "Hello there"},
|
||||
{"name": "From", "value": "sender@example.com"},
|
||||
]
|
||||
{"name": "Date", "value": "Mon, 1 Jan 2024 12:00:00 +0000"},
|
||||
],
|
||||
"body": {
|
||||
"data": base64.urlsafe_b64encode(body_html.encode()).decode(),
|
||||
},
|
||||
},
|
||||
}
|
||||
return resp
|
||||
@@ -666,3 +673,29 @@ class TestGmailReplyEmail:
|
||||
|
||||
assert "error" in result
|
||||
assert "403" in result["error"]
|
||||
|
||||
def test_reply_includes_quoted_original(self, reply_email_fn, monkeypatch):
|
||||
"""Reply body includes a blockquote with the original message content."""
|
||||
import base64
|
||||
|
||||
monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "test_token")
|
||||
|
||||
original_body = "<p>This is the original email content</p>"
|
||||
mock_get_resp = _mock_original_message_response(body_html=original_body)
|
||||
mock_send_resp = MagicMock()
|
||||
mock_send_resp.status_code = 200
|
||||
mock_send_resp.json.return_value = {"id": "reply_456", "threadId": "thread_abc"}
|
||||
|
||||
with patch(_HTTPX_GET, return_value=mock_get_resp):
|
||||
with patch(_HTTPX_POST, return_value=mock_send_resp) as mock_post:
|
||||
result = reply_email_fn(message_id="orig_123", html="<p>My reply</p>")
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
# Decode the raw MIME to verify the quoted body is present
|
||||
raw_b64 = mock_post.call_args[1]["json"]["raw"]
|
||||
raw_bytes = base64.urlsafe_b64decode(raw_b64)
|
||||
raw_str = raw_bytes.decode("utf-8", errors="replace")
|
||||
assert "<blockquote" in raw_str
|
||||
assert "This is the original email content" in raw_str
|
||||
assert "sender@example.com wrote:" in raw_str
|
||||
|
||||
@@ -542,3 +542,193 @@ class TestCreateLabel:
|
||||
|
||||
assert "error" in result
|
||||
assert "Request failed" in result["error"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# gmail_create_draft
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def create_draft_fn(gmail_tools):
|
||||
return gmail_tools["gmail_create_draft"]
|
||||
|
||||
|
||||
def _orig_message_response(
|
||||
thread_id: str = "thread123",
|
||||
message_id_header: str = "<orig-msg-id@mail.gmail.com>",
|
||||
subject: str = "Hello there",
|
||||
from_addr: str = "sender@example.com",
|
||||
body_html: str = "<p>Original body</p>",
|
||||
) -> MagicMock:
|
||||
"""Mock response for fetching an original message (format=full)."""
|
||||
import base64
|
||||
|
||||
encoded_body = base64.urlsafe_b64encode(body_html.encode()).decode()
|
||||
return _mock_response(
|
||||
200,
|
||||
{
|
||||
"threadId": thread_id,
|
||||
"payload": {
|
||||
"mimeType": "text/html",
|
||||
"headers": [
|
||||
{"name": "Message-ID", "value": message_id_header},
|
||||
{"name": "Subject", "value": subject},
|
||||
{"name": "From", "value": from_addr},
|
||||
{"name": "Date", "value": "Mon, 1 Jan 2024 12:00:00 +0000"},
|
||||
],
|
||||
"body": {"data": encoded_body},
|
||||
"parts": [],
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
class TestGmailCreateDraft:
    """Tests for gmail_create_draft tool."""

    # ---- new draft (no reply) ----------------------------------------------

    def test_no_credentials(self, create_draft_fn, monkeypatch):
        # No token in the environment -> the tool must bail out early.
        monkeypatch.delenv("GOOGLE_ACCESS_TOKEN", raising=False)
        result = create_draft_fn(html="<p>Hi</p>", to="a@b.com", subject="Hey")
        assert "error" in result
        assert "Gmail credentials not configured" in result["error"]

    def test_missing_to(self, create_draft_fn, monkeypatch):
        # Omitting the recipient yields a field-specific validation error.
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        result = create_draft_fn(html="<p>Hi</p>", subject="Hey")
        assert "error" in result
        assert "to" in result["error"].lower()

    def test_missing_subject(self, create_draft_fn, monkeypatch):
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        result = create_draft_fn(html="<p>Hi</p>", to="a@b.com")
        assert "error" in result
        assert "subject" in result["error"].lower()

    def test_missing_html(self, create_draft_fn, monkeypatch):
        # An empty body string counts as missing html.
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        result = create_draft_fn(html="", to="a@b.com", subject="Hey")
        assert "error" in result
        assert "html" in result["error"].lower()

    def test_new_draft_happy_path(self, create_draft_fn, monkeypatch):
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        api_resp = _mock_response(200, {"id": "draft1", "message": {"id": "msg1"}})
        with patch(HTTPX_MODULE, return_value=api_resp) as mock_req:
            result = create_draft_fn(html="<p>Hi</p>", to="a@b.com", subject="Hey")

        assert result["success"] is True
        assert result["draft_id"] == "draft1"
        assert result["message_id"] == "msg1"
        assert "thread_id" not in result
        # A brand-new draft must not carry a threadId in the request payload.
        sent = mock_req.call_args[1]["json"]
        assert "threadId" not in sent["message"]

    # ---- reply draft --------------------------------------------------------

    def test_reply_draft_happy_path(self, create_draft_fn, monkeypatch):
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        responses = [
            _orig_message_response(),
            _mock_response(200, {"id": "draft2", "message": {"id": "msg2"}}),
        ]
        with patch(HTTPX_MODULE, side_effect=responses) as mock_req:
            result = create_draft_fn(
                html="<p>Reply</p>",
                reply_to_message_id="origmsg123",
            )

        assert result["success"] is True
        assert result["draft_id"] == "draft2"
        assert result["thread_id"] == "thread123"

        # The second request (draft creation) must be pinned to the thread.
        sent = mock_req.call_args_list[1][1]["json"]
        assert sent["message"]["threadId"] == "thread123"

        # Decode the raw RFC 2822 message and check the reply headers.
        import base64
        import email

        mime = email.message_from_bytes(
            base64.urlsafe_b64decode(sent["message"]["raw"])
        )
        assert mime["In-Reply-To"] == "<orig-msg-id@mail.gmail.com>"
        assert mime["References"] == "<orig-msg-id@mail.gmail.com>"
        assert mime["To"] == "sender@example.com"
        assert mime["Subject"] == "Re: Hello there"

        # The quoted original must be embedded beneath the new content.
        payload_bytes = mime.get_payload(decode=True)
        if payload_bytes is None:
            # Multipart: take the first text/html part instead.
            html_part = next(
                (p for p in mime.walk() if p.get_content_type() == "text/html"),
                None,
            )
            if html_part is not None:
                payload_bytes = html_part.get_payload(decode=True)
        decoded = payload_bytes.decode("utf-8") if payload_bytes else ""
        assert "<p>Reply</p>" in decoded
        assert "gmail_quote" in decoded
        assert "<p>Original body</p>" in decoded
        assert "blockquote" in decoded

    def test_reply_draft_subject_already_re(self, create_draft_fn, monkeypatch):
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        responses = [
            _orig_message_response(subject="Re: Hello there"),
            _mock_response(200, {"id": "d3", "message": {"id": "m3"}}),
        ]
        with patch(HTTPX_MODULE, side_effect=responses):
            result = create_draft_fn(html="<p>x</p>", reply_to_message_id="origmsg")

        # Subject must not become "Re: Re: Hello there"; the MIME-level check
        # lives in test_reply_draft_subject_no_double_re.
        assert result["success"] is True

    def test_reply_draft_subject_no_double_re(self, create_draft_fn, monkeypatch):
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        responses = [
            _orig_message_response(subject="Re: Hello there"),
            _mock_response(200, {"id": "d4", "message": {"id": "m4"}}),
        ]
        with patch(HTTPX_MODULE, side_effect=responses) as mock_req:
            create_draft_fn(html="<p>x</p>", reply_to_message_id="origmsg")

        import base64
        import email

        sent = mock_req.call_args_list[1][1]["json"]
        mime = email.message_from_bytes(
            base64.urlsafe_b64decode(sent["message"]["raw"])
        )
        # An existing "Re:" prefix is preserved, not duplicated.
        assert mime["Subject"] == "Re: Hello there"

    def test_reply_draft_fetch_401(self, create_draft_fn, monkeypatch):
        # Expired/invalid token while fetching the original message.
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        with patch(HTTPX_MODULE, return_value=_mock_response(401)):
            result = create_draft_fn(html="<p>x</p>", reply_to_message_id="origmsg")
        assert "error" in result
        assert "token" in result["error"].lower()

    def test_reply_draft_fetch_404(self, create_draft_fn, monkeypatch):
        # The referenced original message does not exist.
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        with patch(HTTPX_MODULE, return_value=_mock_response(404)):
            result = create_draft_fn(html="<p>x</p>", reply_to_message_id="origmsg")
        assert "error" in result

    def test_reply_draft_network_error_on_fetch(self, create_draft_fn, monkeypatch):
        # Transport-level failure while fetching the original message.
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        with patch(HTTPX_MODULE, side_effect=httpx.HTTPError("timeout")):
            result = create_draft_fn(html="<p>x</p>", reply_to_message_id="origmsg")
        assert "error" in result
        assert "fetch" in result["error"].lower()

    def test_reply_draft_api_error_on_create(self, create_draft_fn, monkeypatch):
        # Fetch succeeds but the draft-create call fails server-side.
        monkeypatch.setenv("GOOGLE_ACCESS_TOKEN", "tok")
        responses = [
            _orig_message_response(),
            _mock_response(500, text="internal error"),
        ]
        with patch(HTTPX_MODULE, side_effect=responses):
            result = create_draft_fn(html="<p>x</p>", reply_to_message_id="origmsg")
        assert "error" in result
||||
Reference in New Issue
Block a user