Merge pull request #6097 from aden-hive/feature/integration-extended

Expand integration tool coverage across 40 vendors
This commit is contained in:
RichardTang-Aden
2026-03-09 19:47:34 -07:00
committed by GitHub
81 changed files with 6635 additions and 2 deletions
@@ -17,6 +17,9 @@ AIRTABLE_CREDENTIALS = {
"airtable_update_records",
"airtable_list_bases",
"airtable_get_base_schema",
"airtable_delete_records",
"airtable_search_records",
"airtable_list_collaborators",
],
required=True,
startup_required=False,
@@ -14,6 +14,9 @@ APOLLO_CREDENTIALS = {
"apollo_enrich_company",
"apollo_search_people",
"apollo_search_companies",
"apollo_get_person_activities",
"apollo_list_email_accounts",
"apollo_bulk_enrich_people",
],
required=True,
startup_required=False,
@@ -16,6 +16,9 @@ ASANA_CREDENTIALS = {
"asana_get_task",
"asana_create_task",
"asana_search_tasks",
"asana_update_task",
"asana_add_comment",
"asana_create_subtask",
],
required=True,
startup_required=False,
@@ -16,6 +16,9 @@ AWS_S3_CREDENTIALS = {
"s3_get_object",
"s3_put_object",
"s3_delete_object",
"s3_copy_object",
"s3_get_object_metadata",
"s3_generate_presigned_url",
],
required=True,
startup_required=False,
@@ -42,6 +45,9 @@ AWS_S3_CREDENTIALS = {
"s3_get_object",
"s3_put_object",
"s3_delete_object",
"s3_copy_object",
"s3_get_object_metadata",
"s3_generate_presigned_url",
],
required=True,
startup_required=False,
@@ -15,6 +15,9 @@ BREVO_CREDENTIALS = {
"brevo_get_contact",
"brevo_update_contact",
"brevo_get_email_stats",
"brevo_list_contacts",
"brevo_delete_contact",
"brevo_list_email_campaigns",
],
required=True,
startup_required=False,
@@ -16,6 +16,9 @@ CALENDLY_CREDENTIALS = {
"calendly_list_scheduled_events",
"calendly_get_scheduled_event",
"calendly_list_invitees",
"calendly_cancel_event",
"calendly_list_webhooks",
"calendly_get_event_type",
],
required=True,
startup_required=False,
@@ -16,6 +16,9 @@ CLOUDINARY_CREDENTIALS = {
"cloudinary_get_resource",
"cloudinary_delete_resource",
"cloudinary_search",
"cloudinary_get_usage",
"cloudinary_rename_resource",
"cloudinary_add_tag",
],
required=True,
startup_required=False,
@@ -41,6 +44,9 @@ CLOUDINARY_CREDENTIALS = {
"cloudinary_get_resource",
"cloudinary_delete_resource",
"cloudinary_search",
"cloudinary_get_usage",
"cloudinary_rename_resource",
"cloudinary_add_tag",
],
required=True,
startup_required=False,
@@ -60,6 +66,9 @@ CLOUDINARY_CREDENTIALS = {
"cloudinary_get_resource",
"cloudinary_delete_resource",
"cloudinary_search",
"cloudinary_get_usage",
"cloudinary_rename_resource",
"cloudinary_add_tag",
],
required=True,
startup_required=False,
@@ -16,6 +16,9 @@ CONFLUENCE_CREDENTIALS = {
"confluence_get_page",
"confluence_create_page",
"confluence_search",
"confluence_update_page",
"confluence_delete_page",
"confluence_get_page_children",
],
required=True,
startup_required=False,
@@ -41,6 +44,9 @@ CONFLUENCE_CREDENTIALS = {
"confluence_get_page",
"confluence_create_page",
"confluence_search",
"confluence_update_page",
"confluence_delete_page",
"confluence_get_page_children",
],
required=True,
startup_required=False,
@@ -60,6 +66,9 @@ CONFLUENCE_CREDENTIALS = {
"confluence_get_page",
"confluence_create_page",
"confluence_search",
"confluence_update_page",
"confluence_delete_page",
"confluence_get_page_children",
],
required=True,
startup_required=False,
@@ -14,6 +14,9 @@ DISCORD_CREDENTIALS = {
"discord_list_channels",
"discord_send_message",
"discord_get_messages",
"discord_get_channel",
"discord_create_reaction",
"discord_delete_message",
],
required=True,
startup_required=False,
@@ -14,6 +14,9 @@ DOCKER_HUB_CREDENTIALS = {
"docker_hub_list_repos",
"docker_hub_list_tags",
"docker_hub_get_repo",
"docker_hub_get_tag_detail",
"docker_hub_delete_tag",
"docker_hub_list_webhooks",
],
required=True,
startup_required=False,
@@ -26,6 +26,9 @@ GITHUB_CREDENTIALS = {
"github_list_stargazers",
"github_get_user_profile",
"github_get_user_emails",
"github_list_commits",
"github_create_release",
"github_list_workflow_runs",
],
required=True,
startup_required=False,
@@ -17,6 +17,9 @@ GITLAB_CREDENTIALS = {
"gitlab_get_issue",
"gitlab_create_issue",
"gitlab_list_merge_requests",
"gitlab_update_issue",
"gitlab_get_merge_request",
"gitlab_create_merge_request_note",
],
required=True,
startup_required=False,
@@ -15,6 +15,9 @@ GOOGLE_ANALYTICS_CREDENTIALS = {
"ga_get_realtime",
"ga_get_top_pages",
"ga_get_traffic_sources",
"ga_get_user_demographics",
"ga_get_conversion_events",
"ga_get_landing_pages",
],
required=True,
startup_required=False,
@@ -15,6 +15,9 @@ GOOGLE_SEARCH_CONSOLE_CREDENTIALS = {
"gsc_list_sitemaps",
"gsc_inspect_url",
"gsc_submit_sitemap",
"gsc_top_queries",
"gsc_top_pages",
"gsc_delete_sitemap",
],
required=True,
startup_required=False,
@@ -17,6 +17,9 @@ GREENHOUSE_CREDENTIALS = {
"greenhouse_get_candidate",
"greenhouse_list_applications",
"greenhouse_get_application",
"greenhouse_list_offers",
"greenhouse_add_candidate_note",
"greenhouse_list_scorecards",
],
required=True,
startup_required=False,
@@ -22,6 +22,9 @@ HUBSPOT_CREDENTIALS = {
"hubspot_get_deal",
"hubspot_create_deal",
"hubspot_update_deal",
"hubspot_delete_object",
"hubspot_list_associations",
"hubspot_create_association",
],
required=True,
startup_required=False,
@@ -18,6 +18,9 @@ INTERCOM_CREDENTIALS = {
"intercom_add_tag",
"intercom_assign_conversation",
"intercom_list_teams",
"intercom_close_conversation",
"intercom_create_contact",
"intercom_list_conversations",
],
required=True,
startup_required=False,
+9
View File
@@ -17,6 +17,9 @@ JIRA_CREDENTIALS = {
"jira_list_projects",
"jira_get_project",
"jira_add_comment",
"jira_update_issue",
"jira_list_transitions",
"jira_transition_issue",
],
required=True,
startup_required=False,
@@ -43,6 +46,9 @@ JIRA_CREDENTIALS = {
"jira_list_projects",
"jira_get_project",
"jira_add_comment",
"jira_update_issue",
"jira_list_transitions",
"jira_transition_issue",
],
required=True,
startup_required=False,
@@ -63,6 +69,9 @@ JIRA_CREDENTIALS = {
"jira_list_projects",
"jira_get_project",
"jira_add_comment",
"jira_update_issue",
"jira_list_transitions",
"jira_transition_issue",
],
required=True,
startup_required=False,
@@ -28,6 +28,9 @@ LINEAR_CREDENTIALS = {
"linear_users_list",
"linear_user_get",
"linear_viewer",
"linear_cycles_list",
"linear_issue_comments_list",
"linear_issue_relation_create",
],
required=True,
startup_required=False,
@@ -16,6 +16,9 @@ LUSHA_CREDENTIALS = {
"lusha_search_contacts",
"lusha_search_companies",
"lusha_get_usage",
"lusha_bulk_enrich_persons",
"lusha_get_technologies",
"lusha_search_decision_makers",
],
required=True,
startup_required=False,
+8 -1
View File
@@ -9,7 +9,14 @@ from .base import CredentialSpec
NEWS_CREDENTIALS = {
"newsdata": CredentialSpec(
env_var="NEWSDATA_API_KEY",
tools=["news_search", "news_headlines", "news_by_company"],
tools=[
"news_search",
"news_headlines",
"news_by_company",
"news_latest",
"news_by_source",
"news_by_topic",
],
node_types=[],
required=True,
startup_required=False,
@@ -16,6 +16,9 @@ NOTION_CREDENTIALS = {
"notion_create_page",
"notion_query_database",
"notion_get_database",
"notion_update_page",
"notion_archive_page",
"notion_append_blocks",
],
required=True,
startup_required=False,
@@ -16,6 +16,9 @@ PAGERDUTY_CREDENTIALS = {
"pagerduty_create_incident",
"pagerduty_update_incident",
"pagerduty_list_services",
"pagerduty_list_oncalls",
"pagerduty_add_incident_note",
"pagerduty_list_escalation_policies",
],
required=True,
startup_required=False,
@@ -37,6 +40,7 @@ PAGERDUTY_CREDENTIALS = {
tools=[
"pagerduty_create_incident",
"pagerduty_update_incident",
"pagerduty_add_incident_note",
],
required=False,
startup_required=False,
@@ -20,6 +20,9 @@ PIPEDRIVE_CREDENTIALS = {
"pipedrive_list_pipelines",
"pipedrive_list_stages",
"pipedrive_add_note",
"pipedrive_update_deal",
"pipedrive_create_person",
"pipedrive_create_activity",
],
required=True,
startup_required=False,
@@ -13,6 +13,9 @@ POSTGRES_CREDENTIALS = {
"pg_list_tables",
"pg_describe_table",
"pg_explain",
"pg_get_table_stats",
"pg_list_indexes",
"pg_get_foreign_keys",
],
required=True,
startup_required=False,
@@ -14,6 +14,9 @@ PUSHOVER_CREDENTIALS = {
"pushover_validate_user",
"pushover_list_sounds",
"pushover_check_receipt",
"pushover_cancel_receipt",
"pushover_send_glance",
"pushover_get_limits",
],
required=True,
startup_required=False,
@@ -16,6 +16,9 @@ QUICKBOOKS_CREDENTIALS = {
"quickbooks_create_customer",
"quickbooks_create_invoice",
"quickbooks_get_company_info",
"quickbooks_list_invoices",
"quickbooks_get_customer",
"quickbooks_create_payment",
],
required=True,
startup_required=False,
@@ -41,6 +44,9 @@ QUICKBOOKS_CREDENTIALS = {
"quickbooks_create_customer",
"quickbooks_create_invoice",
"quickbooks_get_company_info",
"quickbooks_list_invoices",
"quickbooks_get_customer",
"quickbooks_create_payment",
],
required=True,
startup_required=False,
@@ -15,6 +15,9 @@ REDDIT_CREDENTIALS = {
"reddit_get_posts",
"reddit_get_comments",
"reddit_get_user",
"reddit_get_subreddit_info",
"reddit_get_post_detail",
"reddit_get_user_posts",
],
required=True,
startup_required=False,
@@ -41,6 +44,9 @@ REDDIT_CREDENTIALS = {
"reddit_get_posts",
"reddit_get_comments",
"reddit_get_user",
"reddit_get_subreddit_info",
"reddit_get_post_detail",
"reddit_get_user_posts",
],
required=True,
startup_required=False,
@@ -17,6 +17,9 @@ SALESFORCE_CREDENTIALS = {
"salesforce_update_record",
"salesforce_describe_object",
"salesforce_list_objects",
"salesforce_delete_record",
"salesforce_search_records",
"salesforce_get_record_count",
],
required=True,
startup_required=False,
@@ -43,6 +46,9 @@ SALESFORCE_CREDENTIALS = {
"salesforce_update_record",
"salesforce_describe_object",
"salesforce_list_objects",
"salesforce_delete_record",
"salesforce_search_records",
"salesforce_get_record_count",
],
required=True,
startup_required=False,
+9 -1
View File
@@ -81,7 +81,15 @@ SEARCH_CREDENTIALS = {
),
"exa_search": CredentialSpec(
env_var="EXA_API_KEY",
tools=["exa_search", "exa_find_similar", "exa_get_contents", "exa_answer"],
tools=[
"exa_search",
"exa_find_similar",
"exa_get_contents",
"exa_answer",
"exa_search_news",
"exa_search_papers",
"exa_search_companies",
],
node_types=[],
required=True,
startup_required=False,
@@ -15,6 +15,9 @@ SERPAPI_CREDENTIALS = {
"scholar_get_author",
"patents_search",
"patents_get_details",
"scholar_cited_by",
"scholar_search_profiles",
"serpapi_google_search",
],
required=True,
startup_required=False,
@@ -17,6 +17,9 @@ SHOPIFY_CREDENTIALS = {
"shopify_get_product",
"shopify_list_customers",
"shopify_search_customers",
"shopify_update_product",
"shopify_get_customer",
"shopify_create_draft_order",
],
required=True,
startup_required=False,
@@ -43,6 +46,9 @@ SHOPIFY_CREDENTIALS = {
"shopify_get_product",
"shopify_list_customers",
"shopify_search_customers",
"shopify_update_product",
"shopify_get_customer",
"shopify_create_draft_order",
],
required=True,
startup_required=False,
@@ -58,6 +58,9 @@ SLACK_CREDENTIALS = {
"slack_kick_user_from_channel",
"slack_delete_file",
"slack_get_team_stats",
"slack_get_channel_info",
"slack_list_files",
"slack_get_file_info",
],
required=True,
startup_required=False,
@@ -60,6 +60,9 @@ STRIPE_CREDENTIALS = {
"stripe_list_payment_methods",
"stripe_get_payment_method",
"stripe_detach_payment_method",
"stripe_list_disputes",
"stripe_list_events",
"stripe_create_checkout_session",
],
required=True,
startup_required=False,
@@ -20,6 +20,9 @@ TELEGRAM_CREDENTIALS = {
"telegram_get_chat",
"telegram_pin_message",
"telegram_unpin_message",
"telegram_get_chat_member_count",
"telegram_send_video",
"telegram_set_chat_description",
],
required=True,
startup_required=False,
@@ -20,6 +20,9 @@ TRELLO_CREDENTIALS = {
"trello_update_card",
"trello_add_comment",
"trello_add_attachment",
"trello_get_card",
"trello_create_list",
"trello_search_cards",
],
required=True,
startup_required=False,
@@ -50,6 +53,9 @@ TRELLO_CREDENTIALS = {
"trello_update_card",
"trello_add_comment",
"trello_add_attachment",
"trello_get_card",
"trello_create_list",
"trello_search_cards",
],
required=True,
startup_required=False,
@@ -15,6 +15,9 @@ TWILIO_CREDENTIALS = {
"twilio_send_whatsapp",
"twilio_list_messages",
"twilio_get_message",
"twilio_list_phone_numbers",
"twilio_list_calls",
"twilio_delete_message",
],
required=True,
startup_required=False,
@@ -38,6 +41,9 @@ TWILIO_CREDENTIALS = {
"twilio_send_whatsapp",
"twilio_list_messages",
"twilio_get_message",
"twilio_list_phone_numbers",
"twilio_list_calls",
"twilio_delete_message",
],
required=True,
startup_required=False,
@@ -15,6 +15,9 @@ TWITTER_CREDENTIALS = {
"twitter_get_user",
"twitter_get_user_tweets",
"twitter_get_tweet",
"twitter_get_user_followers",
"twitter_get_tweet_replies",
"twitter_get_list_tweets",
],
required=True,
startup_required=False,
@@ -16,6 +16,9 @@ ZENDESK_CREDENTIALS = {
"zendesk_create_ticket",
"zendesk_update_ticket",
"zendesk_search_tickets",
"zendesk_get_ticket_comments",
"zendesk_add_ticket_comment",
"zendesk_list_users",
],
required=True,
startup_required=False,
@@ -41,6 +44,9 @@ ZENDESK_CREDENTIALS = {
"zendesk_create_ticket",
"zendesk_update_ticket",
"zendesk_search_tickets",
"zendesk_get_ticket_comments",
"zendesk_add_ticket_comment",
"zendesk_list_users",
],
required=True,
startup_required=False,
@@ -60,6 +66,9 @@ ZENDESK_CREDENTIALS = {
"zendesk_create_ticket",
"zendesk_update_ticket",
"zendesk_search_tickets",
"zendesk_get_ticket_comments",
"zendesk_add_ticket_comment",
"zendesk_list_users",
],
required=True,
startup_required=False,
+3
View File
@@ -17,6 +17,9 @@ ZOOM_CREDENTIALS = {
"zoom_create_meeting",
"zoom_delete_meeting",
"zoom_list_recordings",
"zoom_update_meeting",
"zoom_list_meeting_participants",
"zoom_list_meeting_registrants",
],
required=True,
startup_required=False,
@@ -50,6 +50,16 @@ def _patch(url: str, headers: dict, body: dict) -> dict:
return resp.json()
def _delete(url: str, headers: dict, params: dict | None = None) -> dict:
    """Issue a DELETE request and normalize the response.

    Args:
        url: Fully-qualified endpoint URL.
        headers: Request headers (auth included).
        params: Optional query parameters.

    Returns:
        Parsed JSON body, ``{"status": "ok"}`` when the response body is
        empty, or an ``{"error": ...}`` dict for HTTP 4xx/5xx responses.
    """
    response = httpx.delete(url, headers=headers, params=params, timeout=30)
    if response.status_code >= 400:
        return {"error": f"HTTP {response.status_code}: {response.text[:500]}"}
    return response.json() if response.content else {"status": "ok"}
def register_tools(mcp: FastMCP, credentials: Any = None) -> None:
"""Register Airtable tools."""
@@ -323,3 +333,134 @@ def register_tools(mcp: FastMCP, credentials: Any = None) -> None:
for t in tables
],
}
@mcp.tool()
def airtable_delete_records(
    base_id: str,
    table_name: str,
    record_ids: str,
) -> dict:
    """Delete records from an Airtable table (up to 10 per request).

    Args:
        base_id: The Airtable base ID (starts with 'app').
        table_name: Table name or ID.
        record_ids: Comma-separated record IDs to delete (e.g. 'recABC,recDEF').

    Returns:
        Dict with the count and IDs of records the API confirmed deleted,
        or an ``error`` dict on validation/HTTP failure.
    """
    hdrs = _get_headers()
    if hdrs is None:
        return {
            "error": "AIRTABLE_PAT is required",
            "help": "Set AIRTABLE_PAT env var with your Airtable personal access token",
        }
    if not base_id or not table_name or not record_ids:
        return {"error": "base_id, table_name, and record_ids are required"}
    ids = [rid.strip() for rid in record_ids.split(",") if rid.strip()]
    if len(ids) > 10:
        return {"error": "maximum 10 records per request"}
    url = f"{BASE_URL}/{base_id}/{table_name}"
    # Airtable DELETE uses repeated records[] query params, so the shared
    # _delete helper (plain dict params) cannot be used here.
    params = [("records[]", rid) for rid in ids]
    resp = httpx.delete(url, headers=hdrs, params=params, timeout=30)
    if resp.status_code >= 400:
        return {"error": f"HTTP {resp.status_code}: {resp.text[:500]}"}
    data = resp.json()
    # Count only entries the API confirmed as deleted; previously `count`
    # included every returned entry while `deleted_ids` filtered on the
    # `deleted` flag, so the two could disagree.
    deleted_ids = [
        r.get("id", "") for r in data.get("records", []) if r.get("deleted")
    ]
    return {
        "result": "deleted",
        "count": len(deleted_ids),
        "deleted_ids": deleted_ids,
    }
@mcp.tool()
def airtable_search_records(
    base_id: str,
    table_name: str,
    field_name: str,
    search_value: str,
    max_records: int = 100,
) -> dict:
    """Search records by matching a field value using an Airtable formula.

    Args:
        base_id: The Airtable base ID (starts with 'app').
        table_name: Table name or ID.
        field_name: The field name to search in.
        search_value: The value to search for (case-insensitive partial match).
        max_records: Maximum number of records to return (default 100).

    Returns:
        Dict with matched records (id, fields, created_time) or an error dict.
    """
    hdrs = _get_headers()
    if hdrs is None:
        return {
            "error": "AIRTABLE_PAT is required",
            "help": "Set AIRTABLE_PAT env var with your Airtable personal access token",
        }
    if not base_id or not table_name or not field_name or not search_value:
        return {"error": "base_id, table_name, field_name, and search_value are required"}
    # Use FIND for case-insensitive partial match. Escape backslashes BEFORE
    # double quotes so a literal backslash in the search value cannot break
    # out of the quoted formula string (previously only '"' was escaped).
    escaped = search_value.replace("\\", "\\\\").replace('"', '\\"')
    formula = f'FIND(LOWER("{escaped}"), LOWER({{{field_name}}}))'
    params: dict[str, Any] = {
        "filterByFormula": formula,
        "maxRecords": str(max_records),
    }
    url = f"{BASE_URL}/{base_id}/{table_name}"
    data = _get(url, hdrs, params)
    if "error" in data:
        return data
    records = data.get("records", [])
    return {
        "count": len(records),
        "records": [
            {
                "id": r["id"],
                "fields": r.get("fields", {}),
                "created_time": r.get("createdTime"),
            }
            for r in records
        ],
    }
@mcp.tool()
def airtable_list_collaborators(
    base_id: str,
) -> dict:
    """List collaborators who have access to an Airtable base.

    Args:
        base_id: The Airtable base ID (starts with 'app').

    Returns:
        Dict with collaborator count and (user_id, email, permission_level)
        entries, or an error dict.
    """
    hdrs = _get_headers()
    if hdrs is None:
        return {
            "error": "AIRTABLE_PAT is required",
            "help": "Set AIRTABLE_PAT env var with your Airtable personal access token",
        }
    if not base_id:
        return {"error": "base_id is required"}
    # Collaborator info lives under the meta API, not the per-base data API.
    meta_url = f"https://api.airtable.com/v0/meta/bases/{base_id}/collaborators"
    data = _get(meta_url, hdrs)
    if "error" in data:
        return data
    entries = data.get("collaborators", [])
    summarized = [
        {
            "user_id": entry.get("userId", ""),
            "email": entry.get("email", ""),
            "permission_level": entry.get("permissionLevel", ""),
        }
        for entry in entries
    ]
    return {"count": len(summarized), "collaborators": summarized}
@@ -269,6 +269,102 @@ class _ApolloClient:
}
return result
def get_person_activities(
    self,
    person_id: str,
) -> dict[str, Any]:
    """Fetch the activity history (emails, calls, tasks) for a contact.

    Returns at most the first 50 activities, each with its body truncated
    to 500 characters; ``count`` reflects the full list returned by Apollo.
    """
    resp = httpx.get(
        f"{APOLLO_API_BASE}/activities",
        headers=self._headers,
        params={"contact_id": person_id},
        timeout=30.0,
    )
    data = self._handle_response(resp)
    if "error" in data:
        return data
    items = data.get("activities", [])
    summarized = [
        {
            "id": act.get("id"),
            "type": act.get("type"),
            "subject": act.get("subject"),
            "body": (act.get("body") or "")[:500],
            "created_at": act.get("created_at"),
            "completed_at": act.get("completed_at"),
            "status": act.get("status"),
            "priority": act.get("priority"),
        }
        for act in items[:50]
    ]
    return {
        "contact_id": person_id,
        "count": len(items),
        "activities": summarized,
    }
def list_email_accounts(self) -> dict[str, Any]:
    """List the email accounts connected to this Apollo workspace."""
    resp = httpx.get(
        f"{APOLLO_API_BASE}/email_accounts",
        headers=self._headers,
        timeout=30.0,
    )
    data = self._handle_response(resp)
    if "error" in data:
        return data
    accounts = data.get("email_accounts", [])
    # Project each account down to the fields callers care about.
    fields = (
        "id",
        "email",
        "type",
        "active",
        "default",
        "last_synced_at",
        "sending_daily_limit",
        "emails_sent_today",
    )
    return {
        "count": len(accounts),
        "email_accounts": [
            {name: acct.get(name) for name in fields} for acct in accounts
        ],
    }
def bulk_enrich_people(
    self,
    details: list[dict[str, Any]],
) -> dict[str, Any]:
    """Bulk enrich up to 10 people at once.

    Any entries beyond the first 10 are silently dropped before the
    request; a ``None`` match from Apollo is reported as not found.
    """
    payload: dict[str, Any] = {"details": details[:10]}
    resp = httpx.post(
        f"{APOLLO_API_BASE}/people/bulk_match",
        headers=self._headers,
        json=payload,
        timeout=60.0,
    )
    data = self._handle_response(resp)
    if "error" in data:
        return data
    results: list[dict[str, Any]] = []
    for match in data.get("matches", []):
        if match is None:
            results.append({"match_found": False})
            continue
        org = match.get("organization") or {}
        results.append(
            {
                "match_found": True,
                "id": match.get("id"),
                "name": match.get("name"),
                "title": match.get("title"),
                "email": match.get("email"),
                "email_status": match.get("email_status"),
                "linkedin_url": match.get("linkedin_url"),
                "organization_name": org.get("name"),
            }
        )
    return {"count": len(results), "results": results}
def search_companies(
self,
industries: list[str] | None = None,
@@ -526,6 +622,89 @@ def register_tools(
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
# --- Person Activities ---

@mcp.tool()
def apollo_get_person_activities(person_id: str) -> dict:
    """
    Get activity history for a person in Apollo (emails, calls, tasks).

    Args:
        person_id: Apollo person/contact ID (required)

    Returns:
        Dict with activities list (type, subject, body, status, timestamps)
    """
    api = _get_client()
    if isinstance(api, dict):
        # _get_client returns an error payload when credentials are missing.
        return api
    if not person_id:
        return {"error": "person_id is required"}
    try:
        return api.get_person_activities(person_id)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
# --- Email Accounts ---

@mcp.tool()
def apollo_list_email_accounts() -> dict:
    """
    List email accounts connected to Apollo for sending sequences.

    Returns:
        Dict with email accounts (email, type, active, daily limit, sent today)
    """
    api = _get_client()
    if isinstance(api, dict):
        # Credential error payload from _get_client.
        return api
    try:
        return api.list_email_accounts()
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
# --- Bulk Enrichment ---

@mcp.tool()
def apollo_bulk_enrich_people(details_json: str) -> dict:
    """
    Bulk enrich up to 10 people at once by email or domain+name.

    Args:
        details_json: JSON array of objects, each with lookup keys.
                      e.g. '[{"email": "john@acme.com"},
                      {"first_name": "Jane", "last_name": "Doe", "domain": "acme.com"}]'

    Returns:
        Dict with enrichment results for each person
    """
    import json

    api = _get_client()
    if isinstance(api, dict):
        return api
    if not details_json:
        return {"error": "details_json is required"}
    try:
        parsed = json.loads(details_json)
    except json.JSONDecodeError:
        return {"error": "details_json must be valid JSON"}
    if not isinstance(parsed, list) or not parsed:
        return {"error": "details_json must be a non-empty JSON array"}
    if len(parsed) > 10:
        return {"error": "maximum 10 people per bulk request"}
    try:
        return api.bulk_enrich_people(parsed)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
# --- Company Search ---
@mcp.tool()
@@ -71,6 +71,25 @@ def _post(endpoint: str, token: str, body: dict | None = None) -> dict[str, Any]
return {"error": f"Asana request failed: {e!s}"}
def _put(endpoint: str, token: str, body: dict | None = None) -> dict[str, Any]:
    """Send a PUT to the Asana API; returns parsed JSON or an error dict.

    The payload is wrapped in Asana's standard ``{"data": ...}`` envelope.
    """
    try:
        response = httpx.put(
            f"{ASANA_API}/{endpoint}",
            headers=_headers(token),
            json={"data": body or {}},
            timeout=30.0,
        )
        if response.status_code == 401:
            return {"error": "Unauthorized. Check your ASANA_ACCESS_TOKEN."}
        if response.status_code in (200, 201):
            return response.json()
        return {"error": f"Asana API error {response.status_code}: {response.text[:500]}"}
    except httpx.TimeoutException:
        return {"error": "Request to Asana timed out"}
    except Exception as exc:
        return {"error": f"Asana request failed: {exc!s}"}
def _auth_error() -> dict[str, Any]:
return {
"error": "ASANA_ACCESS_TOKEN not set",
@@ -331,3 +350,134 @@ def register_tools(
}
)
return {"query": query, "tasks": tasks}
@mcp.tool()
def asana_update_task(
    task_gid: str,
    name: str = "",
    notes: str = "",
    completed: bool | None = None,
    due_on: str = "",
    assignee: str = "",
) -> dict[str, Any]:
    """
    Update an existing Asana task.

    Args:
        task_gid: Task GID (required)
        name: New task name (optional)
        notes: New task description/notes (optional)
        completed: Set completion status (optional)
        due_on: New due date YYYY-MM-DD (optional; an empty string is
            ignored and leaves the current due date unchanged — it does
            NOT clear the due date)
        assignee: New assignee GID or "me" (optional)

    Returns:
        Dict with updated task (gid, name, completed) or error
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not task_gid:
        return {"error": "task_gid is required"}
    # Only send fields the caller actually set; empty strings are skipped.
    body: dict[str, Any] = {}
    if name:
        body["name"] = name
    if notes:
        body["notes"] = notes
    if completed is not None:
        body["completed"] = completed
    if due_on:
        body["due_on"] = due_on
    if assignee:
        body["assignee"] = assignee
    if not body:
        return {"error": "At least one field to update is required"}
    data = _put(f"tasks/{task_gid}", token, body)
    if "error" in data:
        return data
    t = data.get("data", {})
    return {
        "gid": t.get("gid", ""),
        "name": t.get("name", ""),
        "completed": t.get("completed", False),
        "status": "updated",
    }
@mcp.tool()
def asana_add_comment(
    task_gid: str,
    text: str,
) -> dict[str, Any]:
    """
    Add a comment (story) to an Asana task.

    Args:
        task_gid: Task GID (required)
        text: Comment text (required). Supports rich text formatting.

    Returns:
        Dict with created comment (gid, text, created_at) or error
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not task_gid or not text:
        return {"error": "task_gid and text are required"}
    response = _post(f"tasks/{task_gid}/stories", token, {"text": text})
    if "error" in response:
        return response
    story = response.get("data", {})
    return {
        "gid": story.get("gid", ""),
        # Truncate echoed text so very long comments stay manageable.
        "text": (story.get("text", "") or "")[:500],
        "created_at": story.get("created_at", ""),
        "status": "created",
    }
@mcp.tool()
def asana_create_subtask(
    parent_task_gid: str,
    name: str,
    notes: str = "",
    assignee: str = "",
    due_on: str = "",
) -> dict[str, Any]:
    """
    Create a subtask under an existing Asana task.

    Args:
        parent_task_gid: Parent task GID (required)
        name: Subtask name (required)
        notes: Subtask description/notes (optional)
        assignee: Assignee GID or "me" (optional)
        due_on: Due date YYYY-MM-DD (optional)

    Returns:
        Dict with created subtask (gid, name) or error
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not parent_task_gid or not name:
        return {"error": "parent_task_gid and name are required"}
    # Start from the required name, then add only non-empty optional fields.
    optional = {"notes": notes, "assignee": assignee, "due_on": due_on}
    body: dict[str, Any] = {"name": name}
    body.update({key: val for key, val in optional.items() if val})
    data = _post(f"tasks/{parent_task_gid}/subtasks", token, body)
    if "error" in data:
        return data
    created = data.get("data", {})
    return {
        "gid": created.get("gid", ""),
        "name": created.get("name", ""),
        "status": "created",
    }
@@ -338,3 +338,147 @@ def register_tools(mcp: FastMCP, credentials: Any = None) -> None:
return {"error": f"HTTP {resp.status_code}: {resp.text[:500]}"}
return {"result": "deleted", "key": key}
@mcp.tool()
def s3_copy_object(
    source_bucket: str,
    source_key: str,
    dest_bucket: str,
    dest_key: str,
) -> dict:
    """Copy an object within or between S3 buckets.

    Performs a server-side copy by issuing a PUT to the destination with
    the ``x-amz-copy-source`` header; the object body is never downloaded.

    Args:
        source_bucket: Source S3 bucket name.
        source_key: Source object key (path).
        dest_bucket: Destination S3 bucket name.
        dest_key: Destination object key (path).

    Returns:
        Dict with source/destination on success, or an ``error`` dict on
        HTTP failure or missing credentials/arguments.
    """
    cfg = _get_config()
    if isinstance(cfg, dict):
        # _get_config returns an error dict when credentials are missing.
        return cfg
    access_key, secret_key, region = cfg
    if not source_bucket or not source_key or not dest_bucket or not dest_key:
        return {"error": "source_bucket, source_key, dest_bucket, and dest_key are required"}
    # NOTE(review): AWS documents x-amz-copy-source as URL-encoded; source
    # keys containing spaces/special characters may fail here. Confirm how
    # _s3_request signs extra headers before changing the encoding.
    extra = {"x-amz-copy-source": f"/{source_bucket}/{source_key}"}
    resp = _s3_request(
        "PUT", dest_bucket, dest_key, access_key, secret_key, region, extra_headers=extra
    )
    if resp.status_code >= 400:
        return {"error": f"HTTP {resp.status_code}: {resp.text[:500]}"}
    return {
        "result": "copied",
        "source": f"{source_bucket}/{source_key}",
        "destination": f"{dest_bucket}/{dest_key}",
    }
@mcp.tool()
def s3_get_object_metadata(
    bucket: str,
    key: str,
) -> dict:
    """Get object metadata without downloading content (HEAD request).

    Args:
        bucket: S3 bucket name.
        key: Object key (path).

    Returns:
        Dict of standard metadata plus any ``x-amz-meta-*`` custom entries
        (exposed as ``meta_<name>``), or an ``error`` dict.
    """
    cfg = _get_config()
    if isinstance(cfg, dict):
        return cfg
    access_key, secret_key, region = cfg
    if not bucket or not key:
        return {"error": "bucket and key are required"}
    resp = _s3_request("HEAD", bucket, key, access_key, secret_key, region)
    if resp.status_code == 404:
        return {"error": "Object not found"}
    if resp.status_code >= 400:
        return {"error": f"HTTP {resp.status_code}"}
    headers = resp.headers
    result = {
        "key": key,
        "content_type": headers.get("content-type", ""),
        "content_length": headers.get("content-length"),
        "last_modified": headers.get("last-modified"),
        "etag": headers.get("etag"),
        "storage_class": headers.get("x-amz-storage-class", "STANDARD"),
    }
    # Surface custom user metadata (x-amz-meta-*) under meta_* keys.
    prefix = "x-amz-meta-"
    for name, value in headers.items():
        lowered = name.lower()
        if lowered.startswith(prefix):
            result[f"meta_{name[len(prefix):]}"] = value
    return result
@mcp.tool()
def s3_generate_presigned_url(
    bucket: str,
    key: str,
    expires_in: int = 3600,
) -> dict:
    """Generate a pre-signed URL for temporary access to an S3 object.

    The URL allows anyone with it to download the object without
    AWS credentials, until it expires. Signing follows AWS Signature
    Version 4 query-string authentication.

    Args:
        bucket: S3 bucket name.
        key: Object key (path).
        expires_in: URL validity in seconds (default 3600 = 1 hour,
            max 604800 = 7 days); values are clamped to [1, 604800].

    Returns:
        Dict with the presigned ``url``, clamped ``expires_in``, ``key``,
        and ``bucket``, or an ``error`` dict when credentials are missing.
    """
    cfg = _get_config()
    if isinstance(cfg, dict):
        # _get_config returns an error dict when credentials are missing.
        return cfg
    access_key, secret_key, region = cfg
    if not bucket or not key:
        return {"error": "bucket and key are required"}
    expires_in = max(1, min(expires_in, 604800))
    now = datetime.datetime.now(datetime.UTC)
    datestamp = now.strftime("%Y%m%d")
    amz_date = now.strftime("%Y%m%dT%H%M%SZ")
    credential_scope = f"{datestamp}/{region}/s3/aws4_request"
    credential = f"{access_key}/{credential_scope}"
    host = f"{bucket}.s3.{region}.amazonaws.com"
    # URI-encode the key (preserving '/' separators) so keys containing
    # spaces or special characters yield a valid URL and a canonical
    # request that matches what S3 computes; the raw key previously
    # produced an invalid URL and a signature mismatch.
    path = "/" + urllib.parse.quote(key, safe="/")
    query_params = {
        "X-Amz-Algorithm": "AWS4-HMAC-SHA256",
        "X-Amz-Credential": credential,
        "X-Amz-Date": amz_date,
        "X-Amz-Expires": str(expires_in),
        "X-Amz-SignedHeaders": "host",
    }
    sorted_params = sorted(query_params.items())
    canonical_qs = "&".join(
        f"{urllib.parse.quote(k, safe='')}={urllib.parse.quote(str(v), safe='')}"
        for k, v in sorted_params
    )
    # Canonical request per SigV4: method, path, query, headers, signed
    # header names, payload hash (unsigned for presigned GETs).
    canonical_request = f"GET\n{path}\n{canonical_qs}\nhost:{host}\n\nhost\nUNSIGNED-PAYLOAD"
    string_to_sign = (
        f"AWS4-HMAC-SHA256\n{amz_date}\n{credential_scope}\n"
        f"{hashlib.sha256(canonical_request.encode()).hexdigest()}"
    )
    signing_key = _get_signing_key(secret_key, datestamp, region)
    signature = hmac.new(
        signing_key, string_to_sign.encode("utf-8"), hashlib.sha256
    ).hexdigest()
    presigned_url = f"https://{host}{path}?{canonical_qs}&X-Amz-Signature={signature}"
    return {
        "url": presigned_url,
        "expires_in": expires_in,
        "key": key,
        "bucket": bucket,
    }
@@ -178,6 +178,51 @@ class _BrevoClient:
)
return self._handle_response(response)
def list_contacts(
    self,
    limit: int = 50,
    offset: int = 0,
    modified_since: str | None = None,
) -> dict[str, Any]:
    """List contacts with pagination.

    Args:
        limit: Page size.
        offset: Pagination offset.
        modified_since: Optional ISO 8601 filter on modification date.
    """
    query: dict[str, Any] = {"limit": limit, "offset": offset}
    if modified_since:
        query["modifiedSince"] = modified_since
    return self._handle_response(
        httpx.get(
            f"{BREVO_API_BASE}/contacts",
            headers=self._headers,
            params=query,
            timeout=30.0,
        )
    )
def delete_contact(self, email: str) -> dict[str, Any]:
    """Delete a contact by email.

    The email is percent-encoded before interpolation into the URL path:
    addresses containing characters such as '#' or '?' would otherwise
    truncate or corrupt the request URL.
    """
    from urllib.parse import quote

    response = httpx.delete(
        f"{BREVO_API_BASE}/contacts/{quote(email, safe='')}",
        headers=self._headers,
        timeout=30.0,
    )
    return self._handle_response(response)
def list_email_campaigns(
    self,
    status: str | None = None,
    limit: int = 50,
    offset: int = 0,
) -> dict[str, Any]:
    """List email campaigns.

    Args:
        status: Optional status filter (e.g. 'draft', 'sent').
        limit: Page size.
        offset: Pagination offset.
    """
    query: dict[str, Any] = {"limit": limit, "offset": offset}
    if status:
        query["status"] = status
    return self._handle_response(
        httpx.get(
            f"{BREVO_API_BASE}/emailCampaigns",
            headers=self._headers,
            params=query,
            timeout=30.0,
        )
    )
def register_tools(
mcp: FastMCP,
@@ -421,6 +466,134 @@ def register_tools(
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
@mcp.tool()
def brevo_list_contacts(
    limit: int = 50,
    offset: int = 0,
    modified_since: str = "",
) -> dict:
    """
    List contacts in Brevo with pagination.

    Args:
        limit: Number of contacts per page (default 50, max 1000)
        offset: Pagination offset (default 0)
        modified_since: Filter by modification date (ISO 8601, optional)

    Returns:
        Dict with contacts list and total count
    """
    api = _get_client()
    if isinstance(api, dict):
        # Credential error payload from _get_client.
        return api
    try:
        payload = api.list_contacts(
            limit=max(1, min(limit, 1000)),
            offset=offset,
            modified_since=modified_since or None,
        )
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
    if "error" in payload:
        return payload
    contacts = payload.get("contacts", [])

    def _slim(contact: dict) -> dict:
        # Flatten the attributes blob into first/last name fields.
        attrs = contact.get("attributes") or {}
        return {
            "id": contact.get("id"),
            "email": contact.get("email"),
            "first_name": attrs.get("FIRSTNAME"),
            "last_name": attrs.get("LASTNAME"),
            "list_ids": contact.get("listIds", []),
            "email_blacklisted": contact.get("emailBlacklisted", False),
            "modified_at": contact.get("modifiedAt"),
        }

    return {
        "count": len(contacts),
        "total": payload.get("count", len(contacts)),
        "contacts": [_slim(c) for c in contacts],
    }
@mcp.tool()
def brevo_delete_contact(email: str) -> dict:
    """
    Delete a contact from Brevo by email address.

    Args:
        email: Email address of the contact to delete

    Returns:
        Dict with success status or error
    """
    client = _get_client()
    if isinstance(client, dict):
        return client
    # Cheap sanity check before spending a network round-trip.
    if not email or "@" not in email:
        return {"error": "Invalid email address"}
    try:
        outcome = client.delete_contact(email)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
    if "error" in outcome:
        return outcome
    return {"success": True, "email": email, "status": "deleted"}
@mcp.tool()
def brevo_list_email_campaigns(
    status: str = "",
    limit: int = 50,
    offset: int = 0,
) -> dict:
    """
    List email campaigns from Brevo.

    Args:
        status: Optional status filter: 'draft', 'sent', 'queued',
            'suspended', 'inProcess', 'archive'
        limit: Page size (default 50, clamped to 1-1000)
        offset: Pagination offset (default 0)

    Returns:
        Dict summarizing each campaign (name, subject, status, stats)
    """
    client = _get_client()
    if isinstance(client, dict):
        return client
    try:
        result = client.list_email_campaigns(
            status=status or None,
            limit=max(1, min(limit, 1000)),
            offset=offset,
        )
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
    if "error" in result:
        return result
    summaries = []
    for camp in result.get("campaigns", []):
        summaries.append(
            {
                "id": camp.get("id"),
                "name": camp.get("name"),
                "subject": camp.get("subject"),
                "status": camp.get("status"),
                "type": camp.get("type"),
                "created_at": camp.get("createdAt"),
                "scheduled_at": camp.get("scheduledAt"),
                # Only the aggregate stats; per-link stats are too noisy here.
                "statistics": camp.get("statistics", {}).get("globalStats", {}),
            }
        )
    return {
        "count": len(summaries),
        "total": result.get("count", len(summaries)),
        "campaigns": summaries,
    }
@mcp.tool()
def brevo_get_email_stats(message_id: str) -> dict:
"""
@@ -34,6 +34,16 @@ def _get(path: str, headers: dict, params: dict | None = None) -> dict:
return resp.json()
def _post(path: str, headers: dict, body: dict) -> dict:
    """POST *body* as JSON to the Calendly API and normalize the response."""
    resp = httpx.post(f"{BASE_URL}{path}", headers=headers, json=body, timeout=30)
    # Error statuses take priority over empty bodies (e.g. a 404 with no content).
    if resp.status_code >= 400:
        return {"error": f"HTTP {resp.status_code}: {resp.text[:500]}"}
    # Some endpoints (cancellation, deletes) return 2xx with no payload.
    return resp.json() if resp.content else {"status": "ok"}
def register_tools(mcp: FastMCP, credentials: Any = None) -> None:
"""Register Calendly tools."""
@@ -255,3 +265,124 @@ def register_tools(mcp: FastMCP, credentials: Any = None) -> None:
for inv in items
],
}
@mcp.tool()
def calendly_cancel_event(
    event_uri: str,
    reason: str = "",
) -> dict:
    """Cancel a scheduled Calendly event.

    Args:
        event_uri: Full event URI (e.g. 'https://api.calendly.com/scheduled_events/XXX').
        reason: Cancellation reason (optional).
    """
    headers = _get_headers()
    if headers is None:
        return {
            "error": "CALENDLY_PAT is required",
            "help": "Set CALENDLY_PAT environment variable",
        }
    if not event_uri:
        return {"error": "event_uri is required"}
    # The API expects the bare UUID: take the last path segment of the URI.
    uuid = event_uri.rstrip("/").rsplit("/", 1)[-1]
    payload: dict[str, Any] = {"reason": reason} if reason else {}
    data = _post(f"/scheduled_events/{uuid}/cancellation", headers, payload)
    if "error" in data:
        return data
    res = data.get("resource", {})
    return {
        "canceled_by": res.get("canceled_by", ""),
        "reason": res.get("reason", ""),
        "created_at": res.get("created_at", ""),
        "status": "canceled",
    }
@mcp.tool()
def calendly_list_webhooks(
    organization_uri: str,
    scope: str = "organization",
    count: int = 20,
) -> dict:
    """List webhook subscriptions for a Calendly organization or user.

    Args:
        organization_uri: Full organization URI from calendly_get_current_user.
        scope: Scope: 'organization' or 'user' (default 'organization').
        count: Number of results per page (clamped to 1-100).
    """
    headers = _get_headers()
    if headers is None:
        return {
            "error": "CALENDLY_PAT is required",
            "help": "Set CALENDLY_PAT environment variable",
        }
    if not organization_uri:
        return {"error": "organization_uri is required"}
    params: dict[str, Any] = {
        "organization": organization_uri,
        "scope": scope,
        # Clamp both ends: plain min(count, 100) let 0/negative values
        # through to the API; sibling tools in this file use max(1, min(...)).
        "count": max(1, min(count, 100)),
    }
    data = _get("/webhook_subscriptions", headers, params)
    if "error" in data:
        return data
    items = data.get("collection", [])
    return {
        "count": len(items),
        "webhooks": [
            {
                "uri": wh.get("uri", ""),
                "callback_url": wh.get("callback_url", ""),
                "state": wh.get("state", ""),
                "events": wh.get("events", []),
                "scope": wh.get("scope", ""),
                "created_at": wh.get("created_at", ""),
            }
            for wh in items
        ],
    }
@mcp.tool()
def calendly_get_event_type(event_type_uri: str) -> dict:
    """Get details of a specific Calendly event type (meeting template).

    Args:
        event_type_uri: Full event type URI
            (e.g. 'https://api.calendly.com/event_types/XXX').
    """
    headers = _get_headers()
    if headers is None:
        return {
            "error": "CALENDLY_PAT is required",
            "help": "Set CALENDLY_PAT environment variable",
        }
    if not event_type_uri:
        return {"error": "event_type_uri is required"}
    # The API expects the bare UUID, i.e. the last path segment of the URI.
    uuid = event_type_uri.rstrip("/").rsplit("/", 1)[-1]
    data = _get(f"/event_types/{uuid}", headers)
    if "error" in data:
        return data
    resource = data.get("resource", {})
    return {
        "uri": resource.get("uri", ""),
        "name": resource.get("name", ""),
        "slug": resource.get("slug", ""),
        "active": resource.get("active", False),
        "duration": resource.get("duration", 0),
        "kind": resource.get("kind", ""),
        "type": resource.get("type", ""),
        "color": resource.get("color", ""),
        "scheduling_url": resource.get("scheduling_url", ""),
        "description": resource.get("description_plain", ""),
        "custom_questions": resource.get("custom_questions", []),
    }
@@ -293,3 +293,128 @@ def register_tools(
"resources": resources,
"total_count": data.get("total_count", 0),
}
@mcp.tool()
def cloudinary_get_usage() -> dict[str, Any]:
    """
    Get current Cloudinary account usage and limits.

    Returns:
        Dict with storage, bandwidth, transformations usage/limits plus
        resource counts and last-updated timestamp
    """
    cloud, key, secret = _get_credentials(credentials)
    if not (cloud and key and secret):
        return _auth_error()
    data = _request("get", f"{_base_url(cloud)}/usage", key, secret)
    if "error" in data:
        return data

    def _bytes_section(section_name: str) -> dict[str, Any]:
        # Sections may be absent or null in the API response.
        section = data.get(section_name) or {}
        return {
            "used_bytes": section.get("usage", 0),
            "limit_bytes": section.get("limit", 0),
            "used_percent": section.get("used_percent", 0),
        }

    transformations = data.get("transformations") or {}
    return {
        "plan": data.get("plan", ""),
        "storage": _bytes_section("storage"),
        "bandwidth": _bytes_section("bandwidth"),
        "transformations": {
            "used": transformations.get("usage", 0),
            "limit": transformations.get("limit", 0),
            "used_percent": transformations.get("used_percent", 0),
        },
        "resources": data.get("resources", 0),
        "derived_resources": data.get("derived_resources", 0),
        "last_updated": data.get("last_updated", ""),
    }
@mcp.tool()
def cloudinary_rename_resource(
    from_public_id: str,
    to_public_id: str,
    resource_type: str = "image",
    overwrite: bool = False,
) -> dict[str, Any]:
    """
    Rename a resource in Cloudinary.

    Args:
        from_public_id: Current public ID (required)
        to_public_id: New public ID (required)
        resource_type: One of image, video, raw (default image)
        overwrite: Replace an existing resource at the target ID (default False)

    Returns:
        Dict describing the renamed resource
    """
    cloud, key, secret = _get_credentials(credentials)
    if not (cloud and key and secret):
        return _auth_error()
    if not (from_public_id and to_public_id):
        return {"error": "from_public_id and to_public_id are required"}
    form_data: dict[str, Any] = {
        "from_public_id": from_public_id,
        "to_public_id": to_public_id,
    }
    if overwrite:
        # The admin API takes form-encoded booleans as string literals.
        form_data["overwrite"] = "true"
    data = _request(
        "post", f"{_base_url(cloud)}/{resource_type}/rename", key, secret, data=form_data
    )
    if "error" in data:
        return data
    return {
        "public_id": data.get("public_id", ""),
        "secure_url": data.get("secure_url", ""),
        "format": data.get("format", ""),
        "status": "renamed",
    }
@mcp.tool()
def cloudinary_add_tag(
    tag: str,
    public_ids: str,
    resource_type: str = "image",
) -> dict[str, Any]:
    """
    Add a tag to one or more Cloudinary resources.

    Args:
        tag: Tag name to add (required)
        public_ids: Comma-separated public IDs (required, up to 1000)
        resource_type: One of image, video, raw (default image)

    Returns:
        Dict with the tag and the public IDs that were tagged
    """
    cloud, key, secret = _get_credentials(credentials)
    if not (cloud and key and secret):
        return _auth_error()
    if not (tag and public_ids):
        return {"error": "tag and public_ids are required"}
    id_list = [part.strip() for part in public_ids.split(",") if part.strip()]
    body = {"tag": tag, "public_ids": id_list, "command": "add"}
    data = _request(
        "post",
        f"{_base_url(cloud)}/{resource_type}/tags",
        key,
        secret,
        json=body,
        headers={"Content-Type": "application/json"},
    )
    if "error" in data:
        return data
    return {
        "tag": tag,
        "public_ids": data.get("public_ids", id_list),
        "status": "tagged",
    }
@@ -326,3 +326,121 @@ def register_tools(
}
)
return {"results": results, "count": len(results)}
@mcp.tool()
def confluence_update_page(
    page_id: str,
    title: str,
    body: str,
    version_number: int,
) -> dict[str, Any]:
    """
    Update an existing Confluence page.

    Args:
        page_id: Page ID (required)
        title: Page title — must be supplied even when unchanged (required)
        body: New content in Confluence storage format (XHTML) (required)
        version_number: The page's current version + 1 (required); read
            the current version with confluence_get_page first.

    Returns:
        Dict with the updated page's id, title, and new version number
    """
    domain, email, token = _get_credentials(credentials)
    if not (domain and email and token):
        return _auth_error()
    if not (page_id and title and body):
        return {"error": "page_id, title, and body are required"}
    if version_number < 1:
        return {"error": "version_number must be >= 1"}
    payload: dict[str, Any] = {
        "id": page_id,
        "status": "current",
        "title": title,
        "body": {"representation": "storage", "value": body},
        "version": {"number": version_number, "message": "Updated via API"},
    }
    data = _request(
        "put",
        f"{_base_url(domain)}/wiki/api/v2/pages/{page_id}",
        email,
        token,
        json=payload,
    )
    if "error" in data:
        return data
    version_info = data.get("version") or {}
    return {
        "id": data.get("id", ""),
        "title": data.get("title", ""),
        "version": version_info.get("number", 0),
        "status": "updated",
    }
@mcp.tool()
def confluence_delete_page(page_id: str) -> dict[str, Any]:
    """
    Delete a Confluence page.

    Args:
        page_id: ID of the page to delete (required)

    Returns:
        Dict with deletion status, or an error dict
    """
    domain, email, token = _get_credentials(credentials)
    if not (domain and email and token):
        return _auth_error()
    if not page_id:
        return {"error": "page_id is required"}
    data = _request(
        "delete", f"{_base_url(domain)}/wiki/api/v2/pages/{page_id}", email, token
    )
    if "error" in data:
        return data
    return {"page_id": page_id, "status": "deleted"}
@mcp.tool()
def confluence_get_page_children(
    page_id: str,
    limit: int = 25,
) -> dict[str, Any]:
    """
    List child pages of a Confluence page.

    Args:
        page_id: Parent page ID (required)
        limit: Max results, clamped to 1-250 (default 25)

    Returns:
        Dict with child pages (id, title, status, version) and count
    """
    domain, email, token = _get_credentials(credentials)
    if not (domain and email and token):
        return _auth_error()
    if not page_id:
        return {"error": "page_id is required"}
    data = _request(
        "get",
        f"{_base_url(domain)}/wiki/api/v2/pages/{page_id}/children",
        email,
        token,
        params={"limit": max(1, min(limit, 250))},
    )
    if "error" in data:
        return data
    children = [
        {
            "id": page.get("id", ""),
            "title": page.get("title", ""),
            "status": page.get("status", ""),
            # "version" may be null in the API payload, hence the `or {}`.
            "version": (page.get("version") or {}).get("number", 0),
        }
        for page in data.get("results", [])
    ]
    return {"children": children, "count": len(children)}
@@ -137,6 +137,46 @@ class _DiscordClient:
params=params,
)
def get_channel(self, channel_id: str) -> dict[str, Any]:
    """Fetch full metadata for a single channel.

    API ref: GET /channels/{channel.id}
    """
    url = f"{DISCORD_API_BASE}/channels/{channel_id}"
    return self._request_with_retry("GET", url)
def create_reaction(
    self,
    channel_id: str,
    message_id: str,
    emoji: str,
) -> dict[str, Any]:
    """Add the bot's reaction to a message.

    API ref: PUT /channels/{channel.id}/messages/{message.id}/reactions/{emoji}/@me
    """
    import urllib.parse

    # The emoji (unicode char or "name:id") must be percent-encoded in the path.
    encoded = urllib.parse.quote(emoji)
    url = (
        f"{DISCORD_API_BASE}/channels/{channel_id}"
        f"/messages/{message_id}/reactions/{encoded}/@me"
    )
    return self._request_with_retry("PUT", url)
def delete_message(
    self,
    channel_id: str,
    message_id: str,
) -> dict[str, Any]:
    """Remove a message from a channel.

    API ref: DELETE /channels/{channel.id}/messages/{message.id}
    """
    url = f"{DISCORD_API_BASE}/channels/{channel_id}/messages/{message_id}"
    return self._request_with_retry("DELETE", url)
def register_tools(
mcp: FastMCP,
@@ -288,3 +328,96 @@ def register_tools(
return {"error": "Request timed out"}
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
@mcp.tool()
def discord_get_channel(
    channel_id: str,
    account: str = "",
) -> dict:
    """
    Get detailed information about a Discord channel.

    Includes name, topic, type, position, permission overwrites, and
    rate-limit settings as returned by the Discord API.

    Args:
        channel_id: Channel ID (right-click channel > Copy ID in Dev Mode)

    Returns:
        Dict with channel details or error
    """
    client = _get_client(account)
    if isinstance(client, dict):
        return client
    try:
        info = client.get_channel(channel_id)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
    if "error" in info:
        return info
    return {"channel": info, "success": True}
@mcp.tool()
def discord_create_reaction(
    channel_id: str,
    message_id: str,
    emoji: str,
    account: str = "",
) -> dict:
    """
    Add a reaction to a Discord message.

    Args:
        channel_id: Channel ID where the message is
        message_id: ID of the message to react to
        emoji: Unicode emoji (e.g. "👍") or custom emoji in format "name:id"

    Returns:
        Dict with success status or error
    """
    client = _get_client(account)
    if isinstance(client, dict):
        return client
    try:
        outcome = client.create_reaction(channel_id, message_id, emoji)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
    # A successful PUT returns no body, so only dict error payloads matter.
    if isinstance(outcome, dict) and "error" in outcome:
        return outcome
    return {"success": True}
@mcp.tool()
def discord_delete_message(
    channel_id: str,
    message_id: str,
    account: str = "",
) -> dict:
    """
    Delete a message from a Discord channel.

    The bot can delete its own messages, or any message if it has
    Manage Messages permission in the channel.

    Args:
        channel_id: Channel ID where the message is
        message_id: ID of the message to delete

    Returns:
        Dict with success status or error
    """
    client = _get_client(account)
    if isinstance(client, dict):
        return client
    try:
        outcome = client.delete_message(channel_id, message_id)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
    # A successful DELETE returns no body, so only dict error payloads matter.
    if isinstance(outcome, dict) and "error" in outcome:
        return outcome
    return {"success": True, "deleted_message_id": message_id}
@@ -51,6 +51,24 @@ def _get(endpoint: str, token: str, params: dict | None = None) -> dict[str, Any
return {"error": f"Docker Hub request failed: {e!s}"}
def _delete(endpoint: str, token: str) -> dict[str, Any]:
    """DELETE *endpoint* on the Docker Hub API, mapping statuses to result dicts."""
    try:
        resp = httpx.delete(
            f"{HUB_API}/{endpoint}", headers=_headers(token), timeout=30.0
        )
        status = resp.status_code
        if status == 401:
            return {"error": "Unauthorized. Check your DOCKER_HUB_TOKEN."}
        if status == 404:
            return {"error": "Not found"}
        # Successful deletes come back 204 / empty-bodied.
        if status == 204 or not resp.content:
            return {"status": "deleted"}
        if status >= 400:
            return {"error": f"Docker Hub API error {status}: {resp.text[:500]}"}
        return resp.json()
    except httpx.TimeoutException:
        return {"error": "Request to Docker Hub timed out"}
    except Exception as e:
        return {"error": f"Docker Hub request failed: {e!s}"}
def _auth_error() -> dict[str, Any]:
return {
"error": "DOCKER_HUB_TOKEN not set",
@@ -230,3 +248,116 @@ def register_tools(
"is_private": data.get("is_private", False),
"full_description": full_desc,
}
@mcp.tool()
def docker_hub_get_tag_detail(
    repository: str,
    tag: str,
) -> dict[str, Any]:
    """
    Get detailed information about a specific image tag.

    Args:
        repository: Full repository name (e.g. "library/nginx" or "myuser/myapp")
        tag: Tag name (e.g. "latest", "v1.0")

    Returns:
        Dict with tag metadata plus per-architecture image entries
        (architecture, OS, size, digest)
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not (repository and tag):
        return {"error": "repository and tag are required"}
    data = _get(f"repositories/{repository}/tags/{tag}", token)
    if "error" in data:
        return data
    images = [
        {
            "architecture": img.get("architecture", ""),
            "os": img.get("os", ""),
            "size": img.get("size", 0),
            "digest": img.get("digest", ""),
            "status": img.get("status", ""),
            "last_pushed": img.get("last_pushed", ""),
        }
        for img in data.get("images", [])
    ]
    return {
        "repository": repository,
        "tag": data.get("name", tag),
        "full_size": data.get("full_size", 0),
        "last_updated": data.get("last_updated", ""),
        "last_updater_username": data.get("last_updater_username", ""),
        "images": images,
        "image_count": len(images),
    }
@mcp.tool()
def docker_hub_delete_tag(
    repository: str,
    tag: str,
) -> dict[str, Any]:
    """
    Delete a specific tag from a Docker Hub repository.

    Args:
        repository: Full repository name (e.g. "myuser/myapp")
        tag: Tag name to delete (e.g. "old-version")

    Returns:
        Dict with deletion status
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not (repository and tag):
        return {"error": "repository and tag are required"}
    result = _delete(f"repositories/{repository}/tags/{tag}", token)
    if "error" in result:
        return result
    return {"repository": repository, "tag": tag, "status": "deleted"}
@mcp.tool()
def docker_hub_list_webhooks(
    repository: str,
) -> dict[str, Any]:
    """
    List webhooks configured for a Docker Hub repository.

    Args:
        repository: Full repository name (e.g. "myuser/myapp")

    Returns:
        Dict with webhooks (name, hook_urls, active, expect_final_callback)
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not repository:
        return {"error": "repository is required"}
    data = _get(f"repositories/{repository}/webhooks", token)
    if "error" in data:
        return data
    webhooks = [
        {
            "id": wh.get("id", ""),
            "name": wh.get("name", ""),
            "active": wh.get("active", False),
            "expect_final_callback": wh.get("expect_final_callback", False),
            # Each webhook entry nests its callback URLs under "webhooks".
            "hook_urls": [h.get("hook_url", "") for h in wh.get("webhooks", [])],
            "created_at": wh.get("created_date", ""),
        }
        for wh in data.get("results", [])
    ]
    return {"repository": repository, "webhooks": webhooks, "count": len(webhooks)}
@@ -14,6 +14,7 @@ from __future__ import annotations
import os
import time
from datetime import UTC
from typing import TYPE_CHECKING, Literal
import httpx
@@ -399,3 +400,233 @@ def register_tools(
return {"error": f"Network error: {str(e)}"}
except Exception as e:
return {"error": f"Exa answer failed: {str(e)}"}
@mcp.tool()
def exa_search_news(
    query: str,
    num_results: int = 10,
    days_back: int = 7,
    include_text: bool = True,
) -> dict:
    """
    Search recent news articles using Exa.

    Thin wrapper over exa_search that fixes the category to "news" and
    derives a startPublishedDate from *days_back*.

    Args:
        query: News search query (1-500 chars)
        num_results: Number of results (1-20, default 10)
        days_back: Look-back window in days (default 7)
        include_text: Include article text and highlights in results

    Returns:
        Dict with news articles (title, URL, date, author, optional text)
    """
    if not query or len(query) > 500:
        return {"error": "Query must be 1-500 characters"}
    api_key = _get_api_key()
    if not api_key:
        return {
            "error": "Exa credentials not configured",
            "help": "Set EXA_API_KEY environment variable",
        }
    from datetime import datetime, timedelta

    cutoff = datetime.now(UTC) - timedelta(days=days_back)
    contents: dict = {}
    if include_text:
        contents["text"] = True
        contents["highlights"] = True
    payload: dict = {
        "query": query,
        "numResults": max(1, min(num_results, 20)),
        "category": "news",
        "startPublishedDate": cutoff.strftime("%Y-%m-%dT00:00:00.000Z"),
        "contents": contents,
    }
    try:
        data = _make_request("/search", payload, api_key)
        if "error" in data:
            return data
        articles = []
        for item in data.get("results", []):
            entry = {
                "title": item.get("title", ""),
                "url": item.get("url", ""),
                "published_date": item.get("publishedDate", ""),
                "author": item.get("author", ""),
            }
            if include_text and "text" in item:
                entry["text"] = item["text"]
            if "highlights" in item:
                entry["highlights"] = item["highlights"]
            articles.append(entry)
        return {
            "query": query,
            "days_back": days_back,
            "results": articles,
            "total": len(articles),
            "provider": "exa",
        }
    except httpx.TimeoutException:
        return {"error": "Exa news search timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {str(e)}"}
    except Exception as e:
        return {"error": f"Exa news search failed: {str(e)}"}
@mcp.tool()
def exa_search_papers(
    query: str,
    num_results: int = 10,
    year_start: int | None = None,
    include_text: bool = False,
) -> dict:
    """
    Search for research papers and academic content using Exa.

    Pre-configured wrapper that fixes the category to "research paper"
    and always requests highlights.

    Args:
        query: Research topic or paper search query (1-500 chars)
        num_results: Number of results (1-20, default 10)
        year_start: Only include papers published after this year
        include_text: Include full paper text (default False for brevity)

    Returns:
        Dict with papers (title, URL, date, author, highlights, optional text)
    """
    if not query or len(query) > 500:
        return {"error": "Query must be 1-500 characters"}
    api_key = _get_api_key()
    if not api_key:
        return {
            "error": "Exa credentials not configured",
            "help": "Set EXA_API_KEY environment variable",
        }
    request_body: dict = {
        "query": query,
        "numResults": max(1, min(num_results, 20)),
        "category": "research paper",
        "contents": {"highlights": True},
    }
    if include_text:
        request_body["contents"]["text"] = True
    if year_start:
        request_body["startPublishedDate"] = f"{year_start}-01-01T00:00:00.000Z"
    try:
        data = _make_request("/search", request_body, api_key)
        if "error" in data:
            return data
        papers = []
        for item in data.get("results", []):
            entry = {
                "title": item.get("title", ""),
                "url": item.get("url", ""),
                "published_date": item.get("publishedDate", ""),
                "author": item.get("author", ""),
            }
            if "highlights" in item:
                entry["highlights"] = item["highlights"]
            if include_text and "text" in item:
                entry["text"] = item["text"]
            papers.append(entry)
        return {
            "query": query,
            "results": papers,
            "total": len(papers),
            "provider": "exa",
        }
    except httpx.TimeoutException:
        return {"error": "Exa paper search timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {str(e)}"}
    except Exception as e:
        return {"error": f"Exa paper search failed: {str(e)}"}
@mcp.tool()
def exa_search_companies(
    query: str,
    num_results: int = 10,
    include_text: bool = True,
) -> dict:
    """
    Search for companies and startups using Exa.

    Pre-configured wrapper using Exa's "company" category filter, with
    highlights always requested.

    Args:
        query: Company search query, e.g. "AI startups in healthcare" (1-500 chars)
        num_results: Number of results (1-20, default 10)
        include_text: Include company page text in results

    Returns:
        Dict with company results (title, URL, highlights, optional text)
    """
    if not query or len(query) > 500:
        return {"error": "Query must be 1-500 characters"}
    api_key = _get_api_key()
    if not api_key:
        return {
            "error": "Exa credentials not configured",
            "help": "Set EXA_API_KEY environment variable",
        }
    request_body: dict = {
        "query": query,
        "numResults": max(1, min(num_results, 20)),
        "category": "company",
        "contents": {"highlights": True},
    }
    if include_text:
        request_body["contents"]["text"] = True
    try:
        data = _make_request("/search", request_body, api_key)
        if "error" in data:
            return data
        companies = []
        for item in data.get("results", []):
            entry = {
                "title": item.get("title", ""),
                "url": item.get("url", ""),
                "published_date": item.get("publishedDate", ""),
            }
            if "highlights" in item:
                entry["highlights"] = item["highlights"]
            if include_text and "text" in item:
                entry["text"] = item["text"]
            companies.append(entry)
        return {
            "query": query,
            "results": companies,
            "total": len(companies),
            "provider": "exa",
        }
    except httpx.TimeoutException:
        return {"error": "Exa company search timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {str(e)}"}
    except Exception as e:
        return {"error": f"Exa company search failed: {str(e)}"}
@@ -487,6 +487,117 @@ class _GitHubClient:
"total": len(emails),
}
# --- Commits ---
def list_commits(
    self,
    owner: str,
    repo: str,
    sha: str | None = None,
    author: str | None = None,
    since: str | None = None,
    until: str | None = None,
    limit: int = 30,
) -> dict[str, Any]:
    """List commits for a repository.

    API ref: GET /repos/{owner}/{repo}/commits

    Args:
        owner: Repository owner.
        repo: Repository name.
        sha: Branch name or commit SHA to start listing from.
        author: Filter by author login or email.
        since: ISO 8601 lower bound on commit date.
        until: ISO 8601 upper bound on commit date.
        limit: Page size; clamped to the API's valid 1-100 range.
    """
    owner = _sanitize_path_param(owner, "owner")
    repo = _sanitize_path_param(repo, "repo")
    # Clamp both ends: the previous min(limit, 100) allowed 0/negative
    # per_page values through to the API.
    params: dict[str, Any] = {"per_page": max(1, min(limit, 100))}
    if sha:
        params["sha"] = sha
    if author:
        params["author"] = author
    if since:
        params["since"] = since
    if until:
        params["until"] = until
    response = httpx.get(
        f"{GITHUB_API_BASE}/repos/{owner}/{repo}/commits",
        headers=self._headers,
        params=params,
        timeout=30.0,
    )
    return self._handle_response(response)
# --- Releases ---
def create_release(
    self,
    owner: str,
    repo: str,
    tag_name: str,
    name: str | None = None,
    body: str | None = None,
    draft: bool = False,
    prerelease: bool = False,
    target_commitish: str | None = None,
) -> dict[str, Any]:
    """Create a new release.

    API ref: POST /repos/{owner}/{repo}/releases
    """
    owner = _sanitize_path_param(owner, "owner")
    repo = _sanitize_path_param(repo, "repo")
    payload: dict[str, Any] = {
        "tag_name": tag_name,
        "draft": draft,
        "prerelease": prerelease,
    }
    # Optional fields are omitted entirely when falsy so the API applies defaults.
    optional = {"name": name, "body": body, "target_commitish": target_commitish}
    payload.update({field: value for field, value in optional.items() if value})
    response = httpx.post(
        f"{GITHUB_API_BASE}/repos/{owner}/{repo}/releases",
        headers=self._headers,
        json=payload,
        timeout=30.0,
    )
    return self._handle_response(response)
# --- Actions / Workflow Runs ---
def list_workflow_runs(
    self,
    owner: str,
    repo: str,
    workflow_id: str | None = None,
    branch: str | None = None,
    status: str | None = None,
    limit: int = 20,
) -> dict[str, Any]:
    """List workflow runs for a repository.

    API ref: GET /repos/{owner}/{repo}/actions/runs

    Args:
        owner: Repository owner.
        repo: Repository name.
        workflow_id: Workflow file name or ID; when given, only that
            workflow's runs are listed.
        branch: Filter by branch name.
        status: Filter by run status/conclusion.
        limit: Page size; clamped to the API's valid 1-100 range.
    """
    owner = _sanitize_path_param(owner, "owner")
    repo = _sanitize_path_param(repo, "repo")
    # Clamp both ends: the previous min(limit, 100) allowed 0/negative
    # per_page values through to the API.
    params: dict[str, Any] = {"per_page": max(1, min(limit, 100))}
    if branch:
        params["branch"] = branch
    if status:
        params["status"] = status
    if workflow_id:
        url = f"{GITHUB_API_BASE}/repos/{owner}/{repo}/actions/workflows/{workflow_id}/runs"
    else:
        url = f"{GITHUB_API_BASE}/repos/{owner}/{repo}/actions/runs"
    response = httpx.get(
        url,
        headers=self._headers,
        params=params,
        timeout=30.0,
    )
    return self._handle_response(response)
def register_tools(
mcp: FastMCP,
@@ -1007,3 +1118,120 @@ def register_tools(
return {"error": "Request timed out"}
except httpx.RequestError as e:
return {"error": _sanitize_error_message(e)}
# --- Commits ---
@mcp.tool()
def github_list_commits(
    owner: str,
    repo: str,
    sha: str | None = None,
    author: str | None = None,
    since: str | None = None,
    until: str | None = None,
    limit: int = 30,
    account: str = "",
) -> dict:
    """
    List commits for a repository.

    Args:
        owner: Repository owner
        repo: Repository name
        sha: Branch name or commit SHA to list commits from (default: default branch)
        author: GitHub username or email to filter commits by author
        since: ISO 8601 date to list commits after (e.g. "2024-01-01T00:00:00Z")
        until: ISO 8601 date to list commits before
        limit: Maximum number of commits to return (1-100, default 30)

    Returns:
        Dict with list of commits or error
    """
    client = _get_client(account)
    if isinstance(client, dict):
        # _get_client returned a credential/config error payload.
        return client
    try:
        commits = client.list_commits(
            owner,
            repo,
            sha=sha,
            author=author,
            since=since,
            until=until,
            limit=limit,
        )
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": _sanitize_error_message(e)}
    return commits
# --- Releases ---
@mcp.tool()
def github_create_release(
    owner: str,
    repo: str,
    tag_name: str,
    name: str | None = None,
    body: str | None = None,
    draft: bool = False,
    prerelease: bool = False,
    target_commitish: str | None = None,
    account: str = "",
) -> dict:
    """
    Create a new release for a repository.

    Args:
        owner: Repository owner
        repo: Repository name
        tag_name: The name of the tag for the release (e.g. "v1.0.0")
        name: Release title (optional, defaults to tag_name)
        body: Release notes in Markdown (optional)
        draft: True to create as unpublished draft
        prerelease: True to mark as pre-release
        target_commitish: Branch or commit SHA to tag (default: default branch)

    Returns:
        Dict with created release information or error
    """
    client = _get_client(account)
    if isinstance(client, dict):
        # _get_client returned a credential/config error payload.
        return client
    try:
        release = client.create_release(
            owner,
            repo,
            tag_name,
            name=name,
            body=body,
            draft=draft,
            prerelease=prerelease,
            target_commitish=target_commitish,
        )
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": _sanitize_error_message(e)}
    return release
# --- Actions / Workflow Runs ---
@mcp.tool()
def github_list_workflow_runs(
    owner: str,
    repo: str,
    workflow_id: str | None = None,
    branch: str | None = None,
    status: str | None = None,
    limit: int = 20,
    account: str = "",
) -> dict:
    """
    List GitHub Actions workflow runs for a repository.

    Args:
        owner: Repository owner
        repo: Repository name
        workflow_id: Filter by workflow file name or ID (e.g. "ci.yml")
        branch: Filter by branch name
        status: Filter by status ("completed", "in_progress", "queued",
            "success", "failure", "cancelled")
        limit: Maximum number of runs to return (1-100, default 20)

    Returns:
        Dict with workflow runs or error
    """
    client = _get_client(account)
    if isinstance(client, dict):
        # _get_client returned a credential/config error payload.
        return client
    try:
        runs = client.list_workflow_runs(
            owner,
            repo,
            workflow_id=workflow_id,
            branch=branch,
            status=status,
            limit=limit,
        )
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": _sanitize_error_message(e)}
    return runs
@@ -86,6 +86,32 @@ def _post(
return {"error": f"GitLab request failed: {e!s}"}
def _put(
    base_url: str, path: str, token: str, json: dict[str, Any] | None = None
) -> dict[str, Any] | list:
    """Make an authenticated PUT to the GitLab API."""
    # Well-known statuses mapped to friendly messages; anything else >= 400
    # falls through to the generic error below.
    error_by_status = {
        401: "Unauthorized. Check your GitLab token.",
        403: "Forbidden. Insufficient permissions.",
        404: "Not found.",
    }
    try:
        resp = httpx.put(
            f"{base_url}/api/v4{path}",
            headers={"PRIVATE-TOKEN": token, "Content-Type": "application/json"},
            json=json or {},
            timeout=30.0,
        )
        if resp.status_code in error_by_status:
            return {"error": error_by_status[resp.status_code]}
        if resp.status_code not in (200, 201):
            return {"error": f"GitLab API error {resp.status_code}: {resp.text[:500]}"}
        return resp.json()
    except httpx.TimeoutException:
        return {"error": "Request to GitLab timed out"}
    except Exception as e:
        return {"error": f"GitLab request failed: {e!s}"}
def _auth_error() -> dict[str, Any]:
return {
"error": "GITLAB_TOKEN not set",
@@ -398,3 +424,151 @@ def register_tools(
}
)
return {"merge_requests": mrs, "count": len(mrs)}
@mcp.tool()
def gitlab_update_issue(
    project_id: str,
    issue_iid: int,
    title: str = "",
    description: str = "",
    state_event: str = "",
    labels: str = "",
    assignee_ids: str = "",
) -> dict[str, Any]:
    """
    Update an existing GitLab issue.
    Args:
        project_id: Project ID or URL-encoded path (required)
        issue_iid: Issue internal ID within the project (required)
        title: New issue title (optional)
        description: New issue description (optional)
        state_event: Transition: "close" or "reopen" (optional)
        labels: Comma-separated label names to replace (optional)
        assignee_ids: Comma-separated user IDs to assign (optional)
    Returns:
        Dict with updated issue (iid, title, state, web_url)
    """
    base_url, token = _get_credentials(credentials)
    if not token:
        return _auth_error()
    if not project_id or not issue_iid:
        return {"error": "project_id and issue_iid are required"}
    # Collect only the fields the caller actually supplied.
    payload: dict[str, Any] = {
        key: value
        for key, value in (
            ("title", title),
            ("description", description),
            ("state_event", state_event),
            ("labels", labels),
        )
        if value
    }
    if assignee_ids:
        payload["assignee_ids"] = [
            int(part.strip()) for part in assignee_ids.split(",") if part.strip()
        ]
    if not payload:
        return {"error": "At least one field to update is required"}
    result = _put(base_url, f"/projects/{project_id}/issues/{issue_iid}", token, json=payload)
    if isinstance(result, dict) and "error" in result:
        return result
    if not isinstance(result, dict):
        return {"error": "Unexpected response format"}
    return {
        "iid": result.get("iid"),
        "title": result.get("title", ""),
        "state": result.get("state", ""),
        "web_url": result.get("web_url", ""),
        "status": "updated",
    }
@mcp.tool()
def gitlab_get_merge_request(
    project_id: str,
    merge_request_iid: int,
) -> dict[str, Any]:
    """
    Get details about a specific merge request.
    Args:
        project_id: Project ID or URL-encoded path (required)
        merge_request_iid: MR internal ID within the project (required)
    Returns:
        Dict with MR details (title, description, state, branches, author, reviewers)
    """
    base_url, token = _get_credentials(credentials)
    if not token:
        return _auth_error()
    if not project_id or not merge_request_iid:
        return {"error": "project_id and merge_request_iid are required"}
    mr = _get(base_url, f"/projects/{project_id}/merge_requests/{merge_request_iid}", token)
    if isinstance(mr, dict) and "error" in mr:
        return mr
    if not isinstance(mr, dict):
        return {"error": "Unexpected response format"}
    # Flatten nested author/reviewer objects down to usernames.
    author = mr.get("author") or {}
    return {
        "iid": mr.get("iid"),
        "title": mr.get("title", ""),
        "description": (mr.get("description") or "")[:1000],
        "state": mr.get("state", ""),
        "source_branch": mr.get("source_branch", ""),
        "target_branch": mr.get("target_branch", ""),
        "author": author.get("username", ""),
        "reviewers": [rev.get("username", "") for rev in mr.get("reviewers", [])],
        "merge_status": mr.get("merge_status", ""),
        "has_conflicts": mr.get("has_conflicts", False),
        "changes_count": mr.get("changes_count"),
        "web_url": mr.get("web_url", ""),
        "created_at": mr.get("created_at", ""),
        "updated_at": mr.get("updated_at", ""),
        "merged_at": mr.get("merged_at"),
    }
@mcp.tool()
def gitlab_create_merge_request_note(
    project_id: str,
    merge_request_iid: int,
    body: str,
) -> dict[str, Any]:
    """
    Add a comment (note) to a GitLab merge request.
    Args:
        project_id: Project ID or URL-encoded path (required)
        merge_request_iid: MR internal ID within the project (required)
        body: Comment text (required, supports markdown)
    Returns:
        Dict with created note (id, body, author, created_at)
    """
    base_url, token = _get_credentials(credentials)
    if not token:
        return _auth_error()
    if not project_id or not merge_request_iid or not body:
        return {"error": "project_id, merge_request_iid, and body are required"}
    notes_path = f"/projects/{project_id}/merge_requests/{merge_request_iid}/notes"
    note = _post(base_url, notes_path, token, json={"body": body})
    if isinstance(note, dict) and "error" in note:
        return note
    if not isinstance(note, dict):
        return {"error": "Unexpected response format"}
    # Trim long note bodies so the response stays compact.
    return {
        "id": note.get("id"),
        "body": (note.get("body") or "")[:500],
        "author": (note.get("author") or {}).get("username", ""),
        "created_at": note.get("created_at", ""),
        "status": "created",
    }
@@ -335,3 +335,122 @@ def register_tools(
except Exception as e:
logger.warning("ga_get_traffic_sources failed: %s", e)
return {"error": f"Google Analytics API error: {e}"}
@mcp.tool()
def ga_get_user_demographics(
    property_id: str,
    start_date: str = "28daysAgo",
    end_date: str = "today",
    limit: int = 20,
) -> dict:
    """
    Get user demographics breakdown (country, language, device).
    Args:
        property_id: GA4 property ID (e.g., "properties/123456")
        start_date: Start date (e.g., "2024-01-01" or "28daysAgo")
        end_date: End date (e.g., "today")
        limit: Max rows to return (1-10000, default 20)
    Returns:
        Dict with user counts by country, language, and device category
    """
    client = _get_client()
    if isinstance(client, dict):
        return client
    validation_error = _validate_inputs(property_id, limit=limit)
    if validation_error:
        return validation_error
    # One GA4 report, segmented across three demographic dimensions.
    report_args = dict(
        property_id=property_id,
        metrics=["totalUsers", "sessions", "engagedSessions"],
        dimensions=["country", "language", "deviceCategory"],
        start_date=start_date,
        end_date=end_date,
        limit=limit,
    )
    try:
        return client.run_report(**report_args)
    except Exception as e:
        logger.warning("ga_get_user_demographics failed: %s", e)
        return {"error": f"Google Analytics API error: {e}"}
@mcp.tool()
def ga_get_conversion_events(
    property_id: str,
    start_date: str = "28daysAgo",
    end_date: str = "today",
    limit: int = 20,
) -> dict:
    """
    Get conversion event counts and values.
    Args:
        property_id: GA4 property ID (e.g., "properties/123456")
        start_date: Start date (e.g., "2024-01-01" or "28daysAgo")
        end_date: End date (e.g., "today")
        limit: Max rows to return (1-10000, default 20)
    Returns:
        Dict with event names, counts, conversion counts, and total revenue
    """
    client = _get_client()
    if isinstance(client, dict):
        return client
    validation_error = _validate_inputs(property_id, limit=limit)
    if validation_error:
        return validation_error
    # Per-event rollup: raw counts plus conversion and revenue metrics.
    report_args = dict(
        property_id=property_id,
        metrics=["eventCount", "conversions", "totalRevenue"],
        dimensions=["eventName"],
        start_date=start_date,
        end_date=end_date,
        limit=limit,
    )
    try:
        return client.run_report(**report_args)
    except Exception as e:
        logger.warning("ga_get_conversion_events failed: %s", e)
        return {"error": f"Google Analytics API error: {e}"}
@mcp.tool()
def ga_get_landing_pages(
    property_id: str,
    start_date: str = "28daysAgo",
    end_date: str = "today",
    limit: int = 10,
) -> dict:
    """
    Get top landing pages with entrance metrics.
    Shows which pages users arrive on first and their engagement.
    Args:
        property_id: GA4 property ID (e.g., "properties/123456")
        start_date: Start date (e.g., "2024-01-01" or "28daysAgo")
        end_date: End date (e.g., "today")
        limit: Max pages to return (1-10000, default 10)
    Returns:
        Dict with landing pages, sessions, bounce rate, and conversions
    """
    client = _get_client()
    if isinstance(client, dict):
        return client
    validation_error = _validate_inputs(property_id, limit=limit)
    if validation_error:
        return validation_error
    # Dimension includes the query string so distinct entry URLs stay separate.
    report_args = dict(
        property_id=property_id,
        metrics=["sessions", "bounceRate", "conversions", "averageSessionDuration"],
        dimensions=["landingPagePlusQueryString"],
        start_date=start_date,
        end_date=end_date,
        limit=limit,
    )
    try:
        return client.run_report(**report_args)
    except Exception as e:
        logger.warning("ga_get_landing_pages failed: %s", e)
        return {"error": f"Google Analytics API error: {e}"}
@@ -289,3 +289,155 @@ def register_tools(
return {"sitemap_url": sitemap_url, "status": "submitted"}
except Exception as e:
return {"error": f"Request failed: {e!s}"}
@mcp.tool()
def gsc_top_queries(
    site_url: str,
    start_date: str,
    end_date: str,
    row_limit: int = 25,
    search_type: str = "web",
) -> dict[str, Any]:
    """
    Get the top search queries for a site sorted by clicks.
    Convenience wrapper around gsc_search_analytics with the 'query'
    dimension pre-selected and results sorted by clicks descending.
    Args:
        site_url: Site URL (e.g. "https://example.com")
        start_date: Start date (YYYY-MM-DD)
        end_date: End date (YYYY-MM-DD)
        row_limit: Number of top queries (1-25000, default 25)
        search_type: Search type: web, image, video, news (default: web)
    Returns:
        Dict with top queries ranked by clicks
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not site_url or not start_date or not end_date:
        return {"error": "site_url, start_date, and end_date are required"}
    body = {
        "startDate": start_date,
        "endDate": end_date,
        "dimensions": ["query"],
        "rowLimit": max(1, min(row_limit, 25000)),
        "type": search_type,
    }
    data = _post(f"sites/{_encode_site(site_url)}/searchAnalytics/query", token, body)
    if "error" in data:
        return data
    # Flatten API rows and rank by clicks, highest first.
    rows = sorted(
        (
            {
                "query": r.get("keys", [""])[0],
                "clicks": r.get("clicks", 0),
                "impressions": r.get("impressions", 0),
                "ctr": round(r.get("ctr", 0), 4),
                "position": round(r.get("position", 0), 1),
            }
            for r in data.get("rows", [])
        ),
        key=lambda row: row["clicks"],
        reverse=True,
    )
    return {"site_url": site_url, "queries": rows, "count": len(rows)}
@mcp.tool()
def gsc_top_pages(
    site_url: str,
    start_date: str,
    end_date: str,
    row_limit: int = 25,
    search_type: str = "web",
) -> dict[str, Any]:
    """
    Get the top-performing pages for a site sorted by clicks.
    Convenience wrapper around gsc_search_analytics with the 'page'
    dimension pre-selected and results sorted by clicks descending.
    Args:
        site_url: Site URL (e.g. "https://example.com")
        start_date: Start date (YYYY-MM-DD)
        end_date: End date (YYYY-MM-DD)
        row_limit: Number of top pages (1-25000, default 25)
        search_type: Search type: web, image, video, news (default: web)
    Returns:
        Dict with top pages ranked by clicks
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not site_url or not start_date or not end_date:
        return {"error": "site_url, start_date, and end_date are required"}
    body = {
        "startDate": start_date,
        "endDate": end_date,
        "dimensions": ["page"],
        "rowLimit": max(1, min(row_limit, 25000)),
        "type": search_type,
    }
    data = _post(f"sites/{_encode_site(site_url)}/searchAnalytics/query", token, body)
    if "error" in data:
        return data
    # Flatten API rows and rank by clicks, highest first.
    rows = sorted(
        (
            {
                "page": r.get("keys", [""])[0],
                "clicks": r.get("clicks", 0),
                "impressions": r.get("impressions", 0),
                "ctr": round(r.get("ctr", 0), 4),
                "position": round(r.get("position", 0), 1),
            }
            for r in data.get("rows", [])
        ),
        key=lambda row: row["clicks"],
        reverse=True,
    )
    return {"site_url": site_url, "pages": rows, "count": len(rows)}
@mcp.tool()
def gsc_delete_sitemap(
    site_url: str,
    sitemap_url: str,
) -> dict[str, Any]:
    """
    Delete a sitemap from Google Search Console.
    Args:
        site_url: Site URL property (e.g. "https://example.com")
        sitemap_url: Full sitemap URL to remove
    Returns:
        Dict with deletion status
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not site_url or not sitemap_url:
        return {"error": "site_url and sitemap_url are required"}
    # Both the property and the sitemap URL must be percent-encoded in the path.
    target = f"{GSC_API}/sites/{_encode_site(site_url)}/sitemaps/{_encode_site(sitemap_url)}"
    try:
        resp = httpx.delete(target, headers=_headers(token), timeout=30.0)
        if resp.status_code == 401:
            return {"error": "Unauthorized. Check your GOOGLE_SEARCH_CONSOLE_TOKEN."}
        if resp.status_code not in (200, 204):
            return {"error": f"Google API error {resp.status_code}: {resp.text[:500]}"}
        return {"sitemap_url": sitemap_url, "status": "deleted"}
    except Exception as e:
        return {"error": f"Request failed: {e!s}"}
@@ -61,6 +61,32 @@ def _get(path: str, token: str, params: dict[str, Any] | None = None) -> dict[st
return {"error": f"Greenhouse request failed: {e!s}"}
def _post(
    path: str, token: str, body: dict[str, Any], on_behalf_of: str = ""
) -> dict[str, Any]:
    """Make an authenticated POST to the Greenhouse Harvest API.

    Args:
        path: API path beginning with "/" (appended to API_BASE).
        token: Harvest API token.
        body: JSON payload for the request.
        on_behalf_of: Greenhouse user ID sent in the On-Behalf-Of header.
            NOTE(review): Harvest write endpoints require this header to
            carry a valid user ID; the previous code hard-coded "" which
            the API is likely to reject. The empty default preserves the
            old behavior — callers should pass a real user ID. Confirm
            against the Harvest API docs.

    Returns:
        Parsed JSON response, or an {"error": ...} dict on failure.
    """
    try:
        resp = httpx.post(
            f"{API_BASE}{path}",
            headers={
                "Authorization": _auth_header(token),
                "Content-Type": "application/json",
                "On-Behalf-Of": on_behalf_of,
            },
            json=body,
            timeout=30.0,
        )
        if resp.status_code == 401:
            return {"error": "Unauthorized. Check your Greenhouse API token."}
        if resp.status_code == 403:
            return {"error": "Forbidden. Your API key may lack the required permissions."}
        if resp.status_code not in (200, 201):
            return {"error": f"Greenhouse API error {resp.status_code}: {resp.text[:500]}"}
        return resp.json()
    except httpx.TimeoutException:
        return {"error": "Request to Greenhouse timed out"}
    except Exception as e:
        return {"error": f"Greenhouse request failed: {e!s}"}
def _auth_error() -> dict[str, Any]:
return {
"error": "GREENHOUSE_API_TOKEN not set",
@@ -349,3 +375,144 @@ def register_tools(
"rejected_at": data.get("rejected_at"),
"last_activity_at": data.get("last_activity_at", ""),
}
@mcp.tool()
def greenhouse_list_offers(
    application_id: int = 0,
    per_page: int = 50,
    page: int = 1,
) -> dict[str, Any]:
    """
    List offers in Greenhouse.
    Args:
        application_id: Filter by application ID (optional, 0 = all)
        per_page: Results per page (1-500, default 50)
        page: Page number (default 1)
    Returns:
        Dict with offers list (id, status, version, start_date, created_at)
    """
    token = _get_credentials(credentials)
    if not token:
        return _auth_error()
    params: dict[str, Any] = {
        "per_page": max(1, min(per_page, 500)),
        "page": max(1, page),
    }
    # Scope to one application when an ID was given, otherwise list all offers.
    path = f"/applications/{application_id}/offers" if application_id else "/offers"
    data = _get(path, token, params)
    if isinstance(data, dict) and "error" in data:
        return data
    raw_offers = data if isinstance(data, list) else []
    offers = [
        {
            "id": o.get("id"),
            "application_id": o.get("application_id"),
            "version": o.get("version"),
            "status": o.get("status", ""),
            "starts_at": o.get("starts_at", ""),
            "created_at": o.get("created_at", ""),
            "updated_at": o.get("updated_at", ""),
            "sent_at": o.get("sent_at"),
            "resolved_at": o.get("resolved_at"),
        }
        for o in raw_offers
    ]
    return {"offers": offers, "count": len(offers)}
@mcp.tool()
def greenhouse_add_candidate_note(
    candidate_id: int,
    body: str,
    visibility: str = "public",
) -> dict[str, Any]:
    """
    Add a note to a candidate in Greenhouse.
    Args:
        candidate_id: Greenhouse candidate ID (required)
        body: Note content text (required)
        visibility: Note visibility: 'public' or 'private' (default 'public')
    Returns:
        Dict with created note details
    """
    token = _get_credentials(credentials)
    if not token:
        return _auth_error()
    if not candidate_id or not body:
        return {"error": "candidate_id and body are required"}
    note_payload: dict[str, Any] = {"body": body, "visibility": visibility}
    result = _post(f"/candidates/{candidate_id}/activity_feed/notes", token, note_payload)
    if isinstance(result, dict) and "error" in result:
        return result
    return {
        "id": result.get("id"),
        "body": result.get("body", ""),
        "visibility": result.get("visibility", ""),
        "created_at": result.get("created_at", ""),
        "status": "created",
    }
@mcp.tool()
def greenhouse_list_scorecards(
    application_id: int,
    per_page: int = 50,
    page: int = 1,
) -> dict[str, Any]:
    """
    List scorecards for a specific application.
    Args:
        application_id: Greenhouse application ID (required)
        per_page: Results per page (1-500, default 50)
        page: Page number (default 1)
    Returns:
        Dict with scorecards list (id, interviewer, overall_recommendation, submitted_at)
    """
    token = _get_credentials(credentials)
    if not token:
        return _auth_error()
    if not application_id:
        return {"error": "application_id is required"}
    params: dict[str, Any] = {
        "per_page": max(1, min(per_page, 500)),
        "page": max(1, page),
    }
    data = _get(f"/applications/{application_id}/scorecards", token, params)
    if isinstance(data, dict) and "error" in data:
        return data
    raw_cards = data if isinstance(data, list) else []
    # Flatten nested interviewer/interview objects into scalar fields.
    scorecards = [
        {
            "id": sc.get("id"),
            "interviewer_name": (sc.get("interviewer") or {}).get("name", ""),
            "interviewer_id": (sc.get("interviewer") or {}).get("id"),
            "overall_recommendation": sc.get("overall_recommendation", ""),
            "submitted_at": sc.get("submitted_at", ""),
            "interview": (sc.get("interview") or {}).get("name", ""),
            "created_at": sc.get("created_at", ""),
            "updated_at": sc.get("updated_at", ""),
        }
        for sc in raw_cards
    ]
    return {"scorecards": scorecards, "count": len(scorecards)}
@@ -124,6 +124,72 @@ class _HubSpotClient:
)
return self._handle_response(response)
def delete_object(
    self,
    object_type: str,
    object_id: str,
) -> dict[str, Any]:
    """Delete (archive) a CRM object by ID.
    API ref: DELETE /crm/v3/objects/{objectType}/{objectId}
    """
    response = httpx.delete(
        f"{HUBSPOT_API_BASE}/crm/v3/objects/{object_type}/{object_id}",
        headers=self._headers,
        timeout=30.0,
    )
    # Anything other than 204 goes through the shared error handler.
    if response.status_code != 204:
        return self._handle_response(response)
    # 204 No Content: the object was archived successfully.
    return {"status": "deleted", "object_type": object_type, "object_id": object_id}
def list_associations(
    self,
    from_object_type: str,
    from_object_id: str,
    to_object_type: str,
    limit: int = 100,
) -> dict[str, Any]:
    """List associations between CRM objects.
    API ref: GET /crm/v4/objects/{fromObjectType}/{fromObjectId}/associations/{toObjectType}

    Args:
        from_object_type: Source object type (e.g. "contacts").
        from_object_id: ID of the source record.
        to_object_type: Target object type to list associations for.
        limit: Page size; clamped to 1-500 (the tool contract says 1-500,
            but the old code only clamped the upper bound, letting 0 or
            negative values through to the API).
    """
    params: dict[str, Any] = {"limit": max(1, min(limit, 500))}
    response = httpx.get(
        f"{HUBSPOT_API_BASE}/crm/v4/objects/{from_object_type}/{from_object_id}/associations/{to_object_type}",
        headers=self._headers,
        params=params,
        timeout=30.0,
    )
    return self._handle_response(response)
def create_association(
    self,
    from_object_type: str,
    from_object_id: str,
    to_object_type: str,
    to_object_id: str,
    association_category: str = "HUBSPOT_DEFINED",
    association_type_id: int = 0,
) -> dict[str, Any]:
    """Create an association between two CRM objects.
    API ref: PUT /crm/v4/objects/{fromObjectType}/{fromObjectId}/
        associations/{toObjectType}/{toObjectId}
    """
    url = (
        f"{HUBSPOT_API_BASE}/crm/v4/objects/{from_object_type}/{from_object_id}"
        f"/associations/{to_object_type}/{to_object_id}"
    )
    # The v4 endpoint expects a list of association-spec objects.
    payload = [
        {
            "associationCategory": association_category,
            "associationTypeId": association_type_id,
        }
    ]
    response = httpx.put(url, headers=self._headers, json=payload, timeout=30.0)
    return self._handle_response(response)
def register_tools(
mcp: FastMCP,
@@ -489,3 +555,122 @@ def register_tools(
return {"error": "Request timed out"}
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
# --- Delete ---
@mcp.tool()
def hubspot_delete_object(
    object_type: str,
    object_id: str,
    account: str = "",
) -> dict:
    """
    Delete (archive) a HubSpot CRM object.
    Moves the object to the recycle bin. It can be restored from HubSpot UI
    within 90 days.
    Args:
        object_type: CRM object type ("contacts", "companies", or "deals")
        object_id: The HubSpot object ID to delete
        account: Account alias for multi-account support
    Returns:
        Dict with deletion status or error
    """
    supported = ("contacts", "companies", "deals")
    if object_type not in supported:
        return {
            "error": f"Unsupported object_type: {object_type!r}. "
            "Use contacts, companies, or deals."
        }
    hs = _get_client(account)
    # A dict here is an error payload from client construction.
    if isinstance(hs, dict):
        return hs
    try:
        return hs.delete_object(object_type, object_id)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
# --- Associations ---
@mcp.tool()
def hubspot_list_associations(
    from_object_type: str,
    from_object_id: str,
    to_object_type: str,
    limit: int = 100,
    account: str = "",
) -> dict:
    """
    List associations between HubSpot CRM objects.
    Retrieve objects associated with a given record, e.g. all deals
    linked to a contact, or all contacts linked to a company.
    Args:
        from_object_type: Source object type ("contacts", "companies", or "deals")
        from_object_id: ID of the source object
        to_object_type: Target object type ("contacts", "companies", or "deals")
        limit: Maximum associations to return (1-500, default 100)
        account: Account alias for multi-account support
    Returns:
        Dict with associated object IDs and association types, or error
    """
    # Validate object types up front, consistent with hubspot_delete_object
    # and with the types this tool's docstring promises.
    supported = ("contacts", "companies", "deals")
    for object_type in (from_object_type, to_object_type):
        if object_type not in supported:
            return {
                "error": f"Unsupported object_type: {object_type!r}. "
                "Use contacts, companies, or deals."
            }
    client = _get_client(account)
    if isinstance(client, dict):
        return client
    try:
        return client.list_associations(from_object_type, from_object_id, to_object_type, limit)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def hubspot_create_association(
    from_object_type: str,
    from_object_id: str,
    to_object_type: str,
    to_object_id: str,
    association_type_id: int = 0,
    account: str = "",
) -> dict:
    """
    Create an association between two HubSpot CRM objects.
    Links two records together, e.g. associate a contact with a company
    or a deal with a contact. Common association_type_id values:
    - 1: Contact to Company (primary)
    - 3: Deal to Contact
    - 5: Deal to Company
    Use 0 for the default/primary association type.
    Args:
        from_object_type: Source object type ("contacts", "companies", or "deals")
        from_object_id: ID of the source object
        to_object_type: Target object type ("contacts", "companies", or "deals")
        to_object_id: ID of the target object
        association_type_id: HubSpot association type ID (default 0 for primary)
        account: Account alias for multi-account support
    Returns:
        Dict with association result or error
    """
    # Validate object types up front, consistent with hubspot_delete_object
    # and with the types this tool's docstring promises.
    supported = ("contacts", "companies", "deals")
    for object_type in (from_object_type, to_object_type):
        if object_type not in supported:
            return {
                "error": f"Unsupported object_type: {object_type!r}. "
                "Use contacts, companies, or deals."
            }
    client = _get_client(account)
    if isinstance(client, dict):
        return client
    try:
        return client.create_association(
            from_object_type,
            from_object_id,
            to_object_type,
            to_object_id,
            association_type_id=association_type_id,
        )
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@@ -233,6 +233,69 @@ class _IntercomClient:
)
return self._handle_response(response)
def close_conversation(self, conversation_id: str, body: str = "") -> dict[str, Any]:
    """Close a conversation, optionally sending a parting message."""
    admin_id = self._get_admin_id()
    # _get_admin_id returns an error dict when the admin lookup fails.
    if isinstance(admin_id, dict):
        return admin_id
    part: dict[str, Any] = {
        "type": "admin",
        "admin_id": admin_id,
        "message_type": "close",
    }
    if body:
        part["body"] = body
    return self._handle_response(
        httpx.post(
            f"{INTERCOM_API_BASE}/conversations/{conversation_id}/parts",
            headers=self._headers,
            json=part,
            timeout=30.0,
        )
    )
def create_contact(
    self,
    role: str = "user",
    email: str | None = None,
    name: str | None = None,
    phone: str | None = None,
    external_id: str | None = None,
) -> dict[str, Any]:
    """Create a new contact (user or lead)."""
    payload: dict[str, Any] = {"role": role}
    # Only send the optional fields the caller actually provided.
    optional_fields = (
        ("email", email),
        ("name", name),
        ("phone", phone),
        ("external_id", external_id),
    )
    payload.update({key: value for key, value in optional_fields if value})
    response = httpx.post(
        f"{INTERCOM_API_BASE}/contacts",
        headers=self._headers,
        json=payload,
        timeout=30.0,
    )
    return self._handle_response(response)
def list_conversations(
    self,
    limit: int = 20,
    starting_after: str | None = None,
) -> dict[str, Any]:
    """List conversations with pagination.

    Args:
        limit: Page size; clamped to 1-150 (the tool contract says 1-150,
            but the old code only clamped the upper bound, letting 0 or
            negative values through to the API).
        starting_after: Pagination cursor from a previous response.
    """
    params: dict[str, Any] = {
        "per_page": max(1, min(limit, 150)),
        "display_as": "plaintext",
    }
    if starting_after:
        params["starting_after"] = starting_after
    response = httpx.get(
        f"{INTERCOM_API_BASE}/conversations",
        headers=self._headers,
        params=params,
        timeout=30.0,
    )
    return self._handle_response(response)
def register_tools(
mcp: FastMCP,
@@ -558,3 +621,97 @@ def register_tools(
return {"error": "Request timed out"}
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
@mcp.tool()
def intercom_close_conversation(
    conversation_id: str,
    body: str = "",
) -> dict:
    """
    Close an Intercom conversation.
    Args:
        conversation_id: Intercom conversation ID (required)
        body: Optional closing message to the customer
    Returns:
        Dict with updated conversation or error
    """
    intercom = _get_client()
    # A dict here is an error payload from client construction.
    if isinstance(intercom, dict):
        return intercom
    if not conversation_id:
        return {"error": "conversation_id is required"}
    try:
        return intercom.close_conversation(conversation_id, body=body)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
@mcp.tool()
def intercom_create_contact(
    role: str = "user",
    email: str = "",
    name: str = "",
    phone: str = "",
    external_id: str = "",
) -> dict:
    """
    Create a new Intercom contact (user or lead).
    Args:
        role: Contact role - "user" or "lead" (default "user")
        email: Contact email address (optional but recommended)
        name: Contact full name (optional)
        phone: Contact phone number (optional)
        external_id: Your system's unique ID for this contact (optional)
    Returns:
        Dict with created contact details or error
    """
    intercom = _get_client()
    if isinstance(intercom, dict):
        return intercom
    if role not in ("user", "lead"):
        return {"error": "role must be 'user' or 'lead'"}
    try:
        # Map empty strings to None so the client omits unset fields.
        return intercom.create_contact(
            role=role,
            email=email or None,
            name=name or None,
            phone=phone or None,
            external_id=external_id or None,
        )
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
@mcp.tool()
def intercom_list_conversations(
    limit: int = 20,
    starting_after: str = "",
) -> dict:
    """
    List Intercom conversations with pagination.
    Args:
        limit: Max conversations per page (1-150, default 20)
        starting_after: Cursor for pagination from previous response (optional)
    Returns:
        Dict with conversations list and pagination info
    """
    intercom = _get_client()
    if isinstance(intercom, dict):
        return intercom
    try:
        # Empty cursor means "start from the first page".
        return intercom.list_conversations(
            limit=limit,
            starting_after=starting_after or None,
        )
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
@@ -383,3 +383,137 @@ def register_tools(
"created": data.get("created", ""),
"status": "created",
}
@mcp.tool()
def jira_update_issue(
    issue_key: str,
    summary: str = "",
    description: str = "",
    priority: str = "",
    labels: str = "",
    assignee_account_id: str = "",
) -> dict[str, Any]:
    """
    Update fields on an existing Jira issue.
    Args:
        issue_key: Issue key e.g. "PROJ-123" (required)
        summary: New summary/title (optional)
        description: New plain text description (optional)
        priority: New priority name e.g. High, Medium, Low (optional)
        labels: Comma-separated labels to replace existing labels (optional)
        assignee_account_id: Atlassian account ID to reassign to (optional)
    Returns:
        Dict with update status or error
    """
    domain, email, token = _get_credentials(credentials)
    if not domain or not email or not token:
        return _auth_error()
    if not issue_key:
        return {"error": "issue_key is required"}
    # Assemble only the fields the caller actually supplied.
    updates: dict[str, Any] = {}
    if summary:
        updates["summary"] = summary
    if description:
        updates["description"] = _text_to_adf(description)
    if priority:
        updates["priority"] = {"name": priority}
    if labels:
        updates["labels"] = [label.strip() for label in labels.split(",") if label.strip()]
    if assignee_account_id:
        updates["assignee"] = {"accountId": assignee_account_id}
    if not updates:
        return {"error": "At least one field to update is required"}
    result = _request(
        "put",
        f"{_base_url(domain)}/issue/{issue_key}",
        email,
        token,
        json={"fields": updates},
    )
    if isinstance(result, dict) and "error" in result:
        return result
    return {
        "key": issue_key,
        "status": "updated",
        "url": f"https://{domain}/browse/{issue_key}",
    }
@mcp.tool()
def jira_list_transitions(
    issue_key: str,
) -> dict[str, Any]:
    """
    List available status transitions for a Jira issue.
    Use this to discover which statuses an issue can move to before
    calling jira_transition_issue.
    Args:
        issue_key: Issue key e.g. "PROJ-123" (required)
    Returns:
        Dict with available transitions (id, name, to status)
    """
    domain, email, token = _get_credentials(credentials)
    if not domain or not email or not token:
        return _auth_error()
    if not issue_key:
        return {"error": "issue_key is required"}
    data = _request("get", f"{_base_url(domain)}/issue/{issue_key}/transitions", email, token)
    if isinstance(data, dict) and "error" in data:
        return data
    # Flatten each transition's target status into a scalar field.
    transitions = [
        {
            "id": t.get("id", ""),
            "name": t.get("name", ""),
            "to_status": (t.get("to") or {}).get("name", ""),
        }
        for t in data.get("transitions", [])
    ]
    return {"transitions": transitions, "count": len(transitions)}
@mcp.tool()
def jira_transition_issue(
    issue_key: str,
    transition_id: str,
    comment: str = "",
) -> dict[str, Any]:
    """
    Transition a Jira issue to a new status.
    Use jira_list_transitions first to find the correct transition_id.
    Args:
        issue_key: Issue key e.g. "PROJ-123" (required)
        transition_id: Transition ID from jira_list_transitions (required)
        comment: Optional comment to add with the transition
    Returns:
        Dict with transition status or error
    """
    domain, email, token = _get_credentials(credentials)
    if not domain or not email or not token:
        return _auth_error()
    if not issue_key or not transition_id:
        return {"error": "issue_key and transition_id are required"}
    payload: dict[str, Any] = {"transition": {"id": transition_id}}
    if comment:
        # Jira expects comments as ADF documents inside an "update" block.
        payload["update"] = {"comment": [{"add": {"body": _text_to_adf(comment)}}]}
    result = _request(
        "post",
        f"{_base_url(domain)}/issue/{issue_key}/transitions",
        email,
        token,
        json=payload,
    )
    if isinstance(result, dict) and "error" in result:
        return result
    return {
        "key": issue_key,
        "status": "transitioned",
        "url": f"https://{domain}/browse/{issue_key}",
    }
@@ -603,6 +603,110 @@ class _LinearClient:
"total": len(labels_data.get("nodes", [])),
}
# --- Cycles ---
def list_cycles(
    self,
    team_id: str,
    limit: int = 50,
) -> dict[str, Any]:
    """List cycles for a team."""
    # Filter cycles to the given team; page size capped at the API max.
    query = """
        query Cycles($filter: CycleFilter, $first: Int) {
          cycles(filter: $filter, first: $first) {
            nodes {
              id
              number
              name
              startsAt
              endsAt
              completedAt
              progress
              scopeHistory
              issueCountHistory
            }
            pageInfo {
              hasNextPage
              endCursor
            }
          }
        }
    """
    variables: dict[str, Any] = {
        "first": min(limit, 100),
        "filter": {"team": {"id": {"eq": team_id}}},
    }
    result = self._execute_query(query, variables)
    if "error" in result:
        return result
    cycles_data = result.get("cycles", {})
    nodes = cycles_data.get("nodes", [])
    page_info = cycles_data.get("pageInfo", {})
    return {
        "cycles": nodes,
        "total": len(nodes),
        "hasNextPage": page_info.get("hasNextPage", False),
    }
def list_issue_comments(
    self,
    issue_id: str,
    limit: int = 50,
) -> dict[str, Any]:
    """List comments on a specific issue.

    Args:
        issue_id: Issue UUID or identifier.
        limit: Maximum comments to return (clamped to 1-100, default 50).

    Returns:
        Dict with "comments" nodes and "total", or an {"error": ...} dict.
    """
    # Bug fix: the previous query hard-coded comments(first: 50), silently
    # ignoring the limit parameter. Pass it through as a GraphQL variable.
    query = """
        query Issue($id: String!, $first: Int) {
          issue(id: $id) {
            comments(first: $first) {
              nodes {
                id
                body
                createdAt
                updatedAt
                user { id name email }
              }
            }
          }
        }
    """
    variables: dict[str, Any] = {"id": issue_id, "first": max(1, min(limit, 100))}
    result = self._execute_query(query, variables)
    if "error" in result:
        return result
    # issue can come back null for an unknown ID; treat that as no comments.
    issue = result.get("issue") or {}
    comments_data = issue.get("comments", {})
    nodes = comments_data.get("nodes", [])
    return {
        "comments": nodes,
        "total": len(nodes),
    }
def create_issue_relation(
    self,
    issue_id: str,
    related_issue_id: str,
    relation_type: str = "related",
) -> dict[str, Any]:
    """Create a relation between two issues."""
    mutation = """
        mutation IssueRelationCreate($input: IssueRelationCreateInput!) {
          issueRelationCreate(input: $input) {
            success
            issueRelation {
              id
              type
              issue { id identifier title }
              relatedIssue { id identifier title }
            }
          }
        }
    """
    relation_input: dict[str, Any] = {
        "issueId": issue_id,
        "relatedIssueId": related_issue_id,
        "type": relation_type,
    }
    result = self._execute_query(mutation, {"input": relation_input})
    if "error" in result:
        return result
    # Unwrap the mutation payload; fall back to the raw result if absent.
    return result.get("issueRelationCreate", result)
# --- Users ---
def list_users(self) -> dict[str, Any]:
@@ -1230,3 +1334,82 @@ def register_tools(
return {"error": "Request timed out"}
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
# --- Cycles ---
@mcp.tool()
def linear_cycles_list(
    team_id: str,
    limit: int = 50,
) -> dict:
    """
    List cycles (sprints) for a Linear team.
    Args:
        team_id: Team UUID (required)
        limit: Maximum number of results (1-100, default 50)
    Returns:
        Dict with cycles list including id, number, name, dates, and progress
    """
    linear = _get_client()
    # A dict here is an error payload from client construction.
    if isinstance(linear, dict):
        return linear
    try:
        return linear.list_cycles(team_id=team_id, limit=limit)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
@mcp.tool()
def linear_issue_comments_list(issue_id: str) -> dict:
    """
    List comments on a Linear issue.
    Args:
        issue_id: Issue UUID or identifier (e.g., 'ENG-123')
    Returns:
        Dict with comments list including id, body, author, and timestamps
    """
    linear = _get_client()
    if isinstance(linear, dict):
        return linear
    try:
        return linear.list_issue_comments(issue_id=issue_id)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
@mcp.tool()
def linear_issue_relation_create(
    issue_id: str,
    related_issue_id: str,
    relation_type: str = "related",
) -> dict:
    """
    Create a relation between two Linear issues.

    Args:
        issue_id: Source issue UUID or identifier (required)
        related_issue_id: Target issue UUID or identifier (required)
        relation_type: Relation type - "related", "blocks", "duplicate" (default "related")

    Returns:
        Dict with created relation details
    """
    linear = _get_client()
    if isinstance(linear, dict):
        # Credential/setup failure propagated from the client factory.
        return linear
    try:
        return linear.create_issue_relation(
            issue_id=issue_id,
            related_issue_id=related_issue_id,
            relation_type=relation_type,
        )
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as exc:
        return {"error": f"Network error: {exc}"}
@@ -303,3 +303,143 @@ def register_tools(mcp: FastMCP, credentials: Any = None) -> None:
return data
return data
@mcp.tool()
def lusha_bulk_enrich_persons(
    details_json: str,
) -> dict:
    """Bulk enrich multiple persons in a single request.

    Args:
        details_json: JSON array of person objects. Each object should have
            at least one of: email, linkedinUrl, or firstName+lastName+companyDomain.
            Example: [{"email": "j@acme.com"},
            {"firstName": "Jane", "lastName": "Doe", "companyDomain": "acme.com"}]
    """
    import json as _json

    headers = _get_headers()
    if not headers:
        return {
            "error": "LUSHA_API_KEY is required",
            "help": "Set LUSHA_API_KEY environment variable",
        }
    try:
        parsed = _json.loads(details_json)
    except _json.JSONDecodeError as exc:
        return {"error": f"Invalid JSON: {exc}"}
    if not (isinstance(parsed, list) and parsed):
        return {"error": "details_json must be a non-empty JSON array"}
    # API batch ceiling.
    if len(parsed) > 50:
        return {"error": "Maximum 50 persons per request"}
    data = _post(f"{BASE_URL}/v2/person/bulk", headers, {"contacts": parsed})
    if "error" in data:
        return data
    # Response key varies; fall back from "data" to "contacts".
    enriched = [_extract_person(p) for p in data.get("data", data.get("contacts", []))]
    return {"results": enriched, "count": len(enriched)}
@mcp.tool()
def lusha_get_technologies(
    domain: str,
) -> dict:
    """Get the technology stack used by a company.

    Args:
        domain: Company domain (e.g. 'acme.com').
    """
    headers = _get_headers()
    if not headers:
        return {
            "error": "LUSHA_API_KEY is required",
            "help": "Set LUSHA_API_KEY environment variable",
        }
    if not domain:
        return {"error": "domain is required"}
    data = _get(f"{BASE_URL}/v2/company", headers, {"domain": domain})
    if "error" in data:
        return data
    # Company name key differs across API versions; try both.
    company_name = data.get("name") or data.get("companyName", "")
    return {
        "domain": domain,
        "company_name": company_name,
        "technologies": data.get("technologies", []),
        "industry": data.get("industry", ""),
    }
@mcp.tool()
def lusha_search_decision_makers(
    company_domains: str,
    country: str = "",
    page: int = 0,
    page_size: int = 20,
) -> dict:
    """Search for decision makers (VP, C-level, Director) at companies.

    Convenience wrapper around lusha_search_contacts pre-filtered for
    senior seniority levels (Director, VP, C-level, Owner/Partner).

    Args:
        company_domains: Comma-separated company domains (e.g. 'acme.com,example.com').
        country: Country name to filter by (optional).
        page: Page number (0-indexed, default 0).
        page_size: Results per page (default 20, clamped to 1-100).
    """
    headers = _get_headers()
    if not headers:
        return {
            "error": "LUSHA_API_KEY is required",
            "help": "Set LUSHA_API_KEY environment variable",
        }
    # Drop empty fragments so inputs like "acme.com," or "a,,b" do not
    # send blank domain filters to the API.
    domains = [d.strip() for d in company_domains.split(",") if d.strip()]
    if not domains:
        return {"error": "company_domains is required"}
    contacts_include: dict[str, Any] = {
        # Seniority levels: 4=Director, 5=VP, 6=C-level, 7=Owner/Partner
        "seniorities": ["4", "5", "6", "7"],
    }
    if country:
        contacts_include["locations"] = [{"country": country}]
    companies_include: dict[str, Any] = {"domains": domains}
    payload: dict[str, Any] = {
        # Clamp page_size into a sane 1-100 window (0/negative would
        # otherwise be forwarded verbatim).
        "pages": {"page": page, "size": max(1, min(page_size, 100))},
        "filters": {
            "contacts": {"include": contacts_include},
            "companies": {"include": companies_include},
        },
    }
    data = _post(f"{BASE_URL}/prospecting/contact/search", headers, payload)
    if "error" in data:
        return data
    contacts = data.get("data", [])
    return {
        "count": len(contacts),
        "total": data.get("total"),
        "contacts": [
            {
                "id": c.get("contactId"),
                "first_name": c.get("firstName"),
                "last_name": c.get("lastName"),
                "job_title": c.get("jobTitle"),
                "seniority": c.get("seniority"),
                "department": c.get("department"),
                "company_name": c.get("companyName"),
                "company_domain": c.get("companyDomain"),
                "location": c.get("location"),
            }
            for c in contacts
        ],
    }
@@ -520,3 +520,191 @@ def register_tools(
return {"error": f"Network error: {e}"}
except Exception as e:
return {"error": f"News sentiment failed: {e}"}
@mcp.tool()
def news_latest(
    language: str = "en",
    country: str | None = None,
    category: str | None = None,
    limit: int | None = 10,
) -> dict:
    """
    Get the latest breaking news without a search query.

    Args:
        language: Language code (default 'en')
        country: Country code filter (e.g. 'us', 'gb')
        category: Category filter (e.g. 'business', 'technology')
        limit: Max number of results

    Returns:
        Dict with list of latest articles and provider metadata.
    """
    creds = _get_credentials()
    newsdata_key = creds["newsdata_api_key"]
    finlight_key = creds["finlight_api_key"]
    if not (newsdata_key or finlight_key):
        return {
            "error": "No news credentials configured",
            "help": "Set NEWSDATA_API_KEY or FINLIGHT_API_KEY environment variable",
        }
    limit_value = _normalize_limit(limit)
    if newsdata_key:
        # Prefer NewsData's dedicated latest-headlines endpoint.
        query_params = _clean_params(
            {
                "apikey": newsdata_key,
                "language": language,
                "category": category,
                "country": country,
                "size": limit_value,
            }
        )

        def _fetch_latest():
            resp = httpx.get(NEWSDATA_URL, params=query_params, timeout=30.0)
            if resp.status_code != 200:
                return _newsdata_error(resp)
            articles = _parse_newsdata_results(resp.json())
            return {
                "results": articles,
                "total": len(articles),
                "provider": "newsdata",
            }

        attempt = _try_provider(_fetch_latest)
        if "error" not in attempt:
            return attempt
    # Fallback: run a broad search across whichever providers are configured.
    return _search_with_fallback(
        newsdata_key=newsdata_key,
        finlight_key=finlight_key,
        search_kwargs={
            "query": category or "breaking news",
            "from_date": None,
            "to_date": None,
            "language": language,
            "limit": limit_value,
            "sources": None,
            "category": category,
            "country": country,
        },
    )
@mcp.tool()
def news_by_source(
    sources: str,
    query: str | None = None,
    days_back: int = 7,
    language: str = "en",
    limit: int | None = 10,
) -> dict:
    """
    Get news from specific sources.

    Args:
        sources: Comma-separated source IDs (e.g. 'bbc,reuters,cnn')
        query: Optional search query to filter articles
        days_back: Days to look back (default 7)
        language: Language code (default 'en')
        limit: Max number of results

    Returns:
        Dict with list of articles from specified sources.
    """
    if not sources:
        return {"error": "sources is required (comma-separated source IDs)"}
    from_date, to_date = _build_date_range(days_back)
    creds = _get_credentials()
    newsdata_key = creds["newsdata_api_key"]
    finlight_key = creds["finlight_api_key"]
    if not (newsdata_key or finlight_key):
        return {
            "error": "No news credentials configured",
            "help": "Set NEWSDATA_API_KEY or FINLIGHT_API_KEY environment variable",
        }
    result = _search_with_fallback(
        newsdata_key=newsdata_key,
        finlight_key=finlight_key,
        search_kwargs={
            "query": query,
            "from_date": from_date,
            "to_date": to_date,
            "language": language,
            "limit": _normalize_limit(limit),
            "sources": sources,
            "category": None,
            "country": None,
        },
    )
    # Echo the request parameters back for the caller.
    result["sources"] = sources
    if query:
        result["query"] = query
    return result
@mcp.tool()
def news_by_topic(
    topic: str,
    days_back: int = 3,
    language: str = "en",
    country: str | None = None,
    limit: int | None = 10,
) -> dict:
    """
    Get news articles about a broad topic or industry.

    Similar to news_search but optimized for topic-based discovery
    with automatic date range.

    Args:
        topic: Broad topic (e.g. 'artificial intelligence', 'climate change')
        days_back: Days to look back (default 3)
        language: Language code (default 'en')
        country: Country code filter
        limit: Max number of results

    Returns:
        Dict with list of topic-relevant articles.
    """
    if not topic:
        return {"error": "topic is required"}
    from_date, to_date = _build_date_range(days_back)
    creds = _get_credentials()
    newsdata_key = creds["newsdata_api_key"]
    finlight_key = creds["finlight_api_key"]
    if not (newsdata_key or finlight_key):
        return {
            "error": "No news credentials configured",
            "help": "Set NEWSDATA_API_KEY or FINLIGHT_API_KEY environment variable",
        }
    result = _search_with_fallback(
        newsdata_key=newsdata_key,
        finlight_key=finlight_key,
        search_kwargs={
            "query": topic,
            "from_date": from_date,
            "to_date": to_date,
            "language": language,
            "limit": _normalize_limit(limit),
            "sources": None,
            "category": None,
            "country": country,
        },
    )
    # Annotate the response with the effective search window.
    result["topic"] = topic
    result["days_back"] = days_back
    return result
@@ -350,3 +350,147 @@ def register_tools(
"created_time": data.get("created_time", ""),
"last_edited_time": data.get("last_edited_time", ""),
}
@mcp.tool()
def notion_update_page(
    page_id: str,
    properties_json: str,
) -> dict[str, Any]:
    """
    Update properties on an existing Notion page.

    Args:
        page_id: Notion page ID (required)
        properties_json: Properties to update as JSON string.
            e.g. '{"Status": {"select": {"name": "Done"}}}'
            or '{"Priority": {"number": 1}}'

    Returns:
        Dict with updated page (id, url) or error
    """
    import json as json_mod

    token = _get_credentials(credentials)
    if not token:
        return _auth_error()
    if not (page_id and properties_json):
        return {"error": "page_id and properties_json are required"}
    try:
        props = json_mod.loads(properties_json)
    except json_mod.JSONDecodeError:
        return {"error": "properties_json is not valid JSON"}
    data = _request("patch", f"/pages/{page_id}", token, json={"properties": props})
    if "error" in data:
        return data
    return {
        "id": data.get("id", ""),
        "url": data.get("url", ""),
        "status": "updated",
    }
@mcp.tool()
def notion_archive_page(
    page_id: str,
    archived: bool = True,
) -> dict[str, Any]:
    """
    Archive or unarchive a Notion page.

    Args:
        page_id: Notion page ID (required)
        archived: True to archive, False to restore (default True)

    Returns:
        Dict with page status or error
    """
    token = _get_credentials(credentials)
    if not token:
        return _auth_error()
    if not page_id:
        return {"error": "page_id is required"}
    data = _request("patch", f"/pages/{page_id}", token, json={"archived": archived})
    if "error" in data:
        return data
    return {
        "id": data.get("id", ""),
        # Prefer the server-reported state; fall back to the requested one.
        "archived": data.get("archived", archived),
        "status": "archived" if archived else "restored",
    }
@mcp.tool()
def notion_append_blocks(
    page_id: str,
    content: str,
    block_type: str = "paragraph",
) -> dict[str, Any]:
    """
    Append content blocks to an existing Notion page.

    Args:
        page_id: Notion page ID to append to (required)
        content: Text content to append (required). For multiple blocks,
            separate with newlines.
        block_type: Block type to create: "paragraph", "heading_1",
            "heading_2", "heading_3", "bulleted_list_item",
            "numbered_list_item", "to_do", "quote", "callout"
            (default "paragraph")

    Returns:
        Dict with appended block info or error
    """
    token = _get_credentials(credentials)
    if not token:
        return _auth_error()
    if not page_id or not content:
        return {"error": "page_id and content are required"}
    valid_types = {
        "paragraph",
        "heading_1",
        "heading_2",
        "heading_3",
        "bulleted_list_item",
        "numbered_list_item",
        "to_do",
        "quote",
        "callout",
    }
    if block_type not in valid_types:
        return {
            "error": f"Invalid block_type: {block_type!r}",
            "help": f"Must be one of: {', '.join(sorted(valid_types))}",
        }
    lines = [line for line in content.split("\n") if line.strip()]
    children = []
    for line in lines:
        block: dict[str, Any] = {
            "object": "block",
            "type": block_type,
            block_type: {
                "rich_text": [{"type": "text", "text": {"content": line}}],
            },
        }
        if block_type == "to_do":
            # New to-do items start unchecked.
            block[block_type]["checked"] = False
        children.append(block)
    # The Notion API rejects append requests with more than 100 children,
    # so send the blocks in batches of at most 100.
    blocks_added = 0
    for start in range(0, len(children), 100):
        batch = children[start : start + 100]
        data = _request(
            "patch",
            f"/blocks/{page_id}/children",
            token,
            json={"children": batch},
        )
        if "error" in data:
            # Report partial progress alongside the error.
            data["blocks_added"] = blocks_added
            return data
        blocks_added += len(batch)
    return {
        "page_id": page_id,
        "blocks_added": blocks_added,
        "status": "appended",
    }
@@ -276,3 +276,135 @@ def register_tools(mcp: FastMCP, credentials: Any = None) -> None:
for s in services
],
}
@mcp.tool()
def pagerduty_list_oncalls(
    schedule_id: str = "",
    escalation_policy_id: str = "",
    since: str = "",
    until: str = "",
    limit: int = 25,
) -> dict:
    """List current on-call entries.

    Args:
        schedule_id: Filter by schedule ID (optional).
        escalation_policy_id: Filter by escalation policy ID (optional).
        since: Start of date range (ISO 8601, optional).
        until: End of date range (ISO 8601, optional).
        limit: Maximum entries to return (default 25, max 100).
    """
    headers = _get_headers()
    if headers is None:
        return {
            "error": "PAGERDUTY_API_KEY is required",
            "help": "Set PAGERDUTY_API_KEY environment variable",
        }
    params: dict[str, Any] = {"limit": min(limit, 100)}
    # PagerDuty expects array-style filters for schedules/policies.
    if schedule_id:
        params["schedule_ids[]"] = [schedule_id]
    if escalation_policy_id:
        params["escalation_policy_ids[]"] = [escalation_policy_id]
    if since:
        params["since"] = since
    if until:
        params["until"] = until
    data = _get("/oncalls", headers, params)
    if "error" in data:
        return data
    entries = data.get("oncalls", [])

    def _summarize(entry: dict) -> dict:
        user = entry.get("user") or {}
        schedule = entry.get("schedule") or {}
        policy = entry.get("escalation_policy") or {}
        return {
            "user_name": user.get("summary", ""),
            "user_id": user.get("id", ""),
            "schedule_name": schedule.get("summary", ""),
            "schedule_id": schedule.get("id", ""),
            "escalation_policy": policy.get("summary", ""),
            "escalation_level": entry.get("escalation_level", 0),
            "start": entry.get("start", ""),
            "end": entry.get("end", ""),
        }

    return {"count": len(entries), "oncalls": [_summarize(e) for e in entries]}
@mcp.tool()
def pagerduty_add_incident_note(
    incident_id: str,
    content: str,
) -> dict:
    """Add a note to a PagerDuty incident.

    Args:
        incident_id: The incident ID (required).
        content: Note content text (required).
    """
    # Notes are a write operation; request write-scoped headers.
    headers = _get_headers(write=True)
    if headers is None:
        return {
            "error": "PAGERDUTY_API_KEY is required",
            "help": "Set PAGERDUTY_API_KEY environment variable",
        }
    if not (incident_id and content):
        return {"error": "incident_id and content are required"}
    data = _post(
        f"/incidents/{incident_id}/notes",
        headers,
        {"note": {"content": content}},
    )
    if "error" in data:
        return data
    note = data.get("note", {})
    author = note.get("user") or {}
    return {
        "id": note.get("id", ""),
        "content": note.get("content", ""),
        "created_at": note.get("created_at", ""),
        "user": author.get("summary", ""),
        "status": "created",
    }
@mcp.tool()
def pagerduty_list_escalation_policies(
    query: str = "",
    limit: int = 25,
) -> dict:
    """List PagerDuty escalation policies.

    Args:
        query: Filter by name (optional).
        limit: Maximum results (default 25, max 100).
    """
    headers = _get_headers()
    if headers is None:
        return {
            "error": "PAGERDUTY_API_KEY is required",
            "help": "Set PAGERDUTY_API_KEY environment variable",
        }
    params: dict[str, Any] = {"limit": min(limit, 100)}
    if query:
        params["query"] = query
    data = _get("/escalation_policies", headers, params)
    if "error" in data:
        return data
    policies = data.get("escalation_policies", [])
    summaries = []
    for policy in policies:
        summaries.append(
            {
                "id": policy.get("id", ""),
                "name": policy.get("name", ""),
                "description": policy.get("description", ""),
                "num_loops": policy.get("num_loops", 0),
                "teams": [t.get("summary", "") for t in policy.get("teams", [])],
                "escalation_rules_count": len(policy.get("escalation_rules", [])),
            }
        )
    return {"count": len(policies), "escalation_policies": summaries}
@@ -566,3 +566,178 @@ def register_tools(
return {"error": data.get("error", "Failed to add note")}
return {"id": data.get("data", {}).get("id"), "status": "created"}
# ── Deal Updates ──────────────────────────────────────────────
@mcp.tool()
def pipedrive_update_deal(
    deal_id: int,
    title: str = "",
    value: float = 0,
    currency: str = "",
    status: str = "",
    stage_id: int = 0,
    expected_close_date: str = "",
    lost_reason: str = "",
) -> dict[str, Any]:
    """
    Update an existing Pipedrive deal.

    Args:
        deal_id: Deal ID (required)
        title: New deal title (optional)
        value: New deal value (optional)
        currency: Currency code e.g. "USD" (optional)
        status: New status: open, won, lost, deleted (optional)
        stage_id: Move to this pipeline stage ID (optional)
        expected_close_date: Expected close date YYYY-MM-DD (optional)
        lost_reason: Reason for loss when setting status to lost (optional)

    Returns:
        Dict with updated deal (id, title, status) or error
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not deal_id:
        return {"error": "deal_id is required"}
    # Only truthy fields are sent to the API.
    # NOTE(review): falsy sentinels mean value=0 or stage_id=0 cannot be
    # set through this tool — confirm whether that is acceptable.
    candidates: dict[str, Any] = {
        "title": title,
        "value": value,
        "currency": currency,
        "status": status,
        "stage_id": stage_id,
        "expected_close_date": expected_close_date,
        "lost_reason": lost_reason,
    }
    body: dict[str, Any] = {k: v for k, v in candidates.items() if v}
    if not body:
        return {"error": "At least one field to update is required"}
    data = _put(f"deals/{deal_id}", token, body)
    if "error" in data:
        return data
    if not data.get("success"):
        return {"error": data.get("error", "Failed to update deal")}
    deal = data.get("data", {})
    return {
        "id": deal.get("id"),
        "title": deal.get("title", ""),
        "status": deal.get("status", ""),
        "result": "updated",
    }
# ── Person Creation ───────────────────────────────────────────
@mcp.tool()
def pipedrive_create_person(
    name: str,
    email: str = "",
    phone: str = "",
    org_id: int = 0,
) -> dict[str, Any]:
    """
    Create a new person (contact) in Pipedrive.

    Args:
        name: Person's full name (required)
        email: Email address (optional)
        phone: Phone number (optional)
        org_id: Associated organization ID (optional)

    Returns:
        Dict with created person (id, name) or error
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not name:
        return {"error": "name is required"}
    body: dict[str, Any] = {"name": name}
    # Pipedrive models email/phone as lists of labeled values.
    if email:
        body["email"] = [{"value": email, "primary": True, "label": "work"}]
    if phone:
        body["phone"] = [{"value": phone, "primary": True, "label": "work"}]
    if org_id:
        body["org_id"] = org_id
    data = _post("persons", token, body)
    if "error" in data:
        return data
    if not data.get("success"):
        return {"error": data.get("error", "Failed to create person")}
    person = data.get("data", {})
    return {"id": person.get("id"), "name": person.get("name", ""), "status": "created"}
# ── Activity Creation ─────────────────────────────────────────
@mcp.tool()
def pipedrive_create_activity(
    subject: str,
    activity_type: str = "task",
    due_date: str = "",
    due_time: str = "",
    deal_id: int = 0,
    person_id: int = 0,
    org_id: int = 0,
    note: str = "",
) -> dict[str, Any]:
    """
    Create a new activity (call, meeting, task, etc.) in Pipedrive.

    Args:
        subject: Activity subject/title (required)
        activity_type: Type: call, meeting, task, deadline, email, lunch (default task)
        due_date: Due date YYYY-MM-DD (optional)
        due_time: Due time HH:MM (optional)
        deal_id: Associated deal ID (optional)
        person_id: Associated person ID (optional)
        org_id: Associated organization ID (optional)
        note: Activity note/description (optional)

    Returns:
        Dict with created activity (id, subject, type) or error
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not subject:
        return {"error": "subject is required"}
    body: dict[str, Any] = {"subject": subject, "type": activity_type}
    # Attach only the optional fields the caller supplied.
    optional_fields = {
        "due_date": due_date,
        "due_time": due_time,
        "deal_id": deal_id,
        "person_id": person_id,
        "org_id": org_id,
        "note": note,
    }
    body.update({k: v for k, v in optional_fields.items() if v})
    data = _post("activities", token, body)
    if "error" in data:
        return data
    if not data.get("success"):
        return {"error": data.get("error", "Failed to create activity")}
    activity = data.get("data", {})
    return {
        "id": activity.get("id"),
        "subject": activity.get("subject", ""),
        "type": activity.get("type", ""),
        "status": "created",
    }
@@ -532,3 +532,223 @@ def register_tools(
},
)
return _error_response("Failed to explain query")
@mcp.tool()
def pg_get_table_stats(schema: str = "public") -> dict:
    """
    Get row counts and size statistics for tables in a schema.

    Row counts come from pg_class.reltuples, which is an estimate
    (refreshed by ANALYZE/autovacuum), not an exact COUNT(*).

    Args:
        schema: Schema name (default 'public')

    Returns:
        dict with table stats: name, estimated_rows, total_size, index_size
    """
    database_url = _get_database_url(credentials)
    if not database_url:
        return _missing_credential_response()
    try:
        with _get_connection(database_url) as conn:
            with conn.cursor() as cur:
                # quote_ident guards against unusual table/schema names when
                # building the regclass argument for the size functions.
                # Schema filter is parameterized (%s), not interpolated.
                cur.execute(
                    """
                    SELECT
                        t.tablename AS table_name,
                        c.reltuples::bigint AS estimated_rows,
                        pg_size_pretty(pg_total_relation_size(
                            quote_ident(t.schemaname) || '.' || quote_ident(t.tablename)
                        )) AS total_size,
                        pg_size_pretty(pg_indexes_size(
                            quote_ident(t.schemaname) || '.' || quote_ident(t.tablename)
                        )) AS index_size,
                        pg_total_relation_size(
                            quote_ident(t.schemaname) || '.' || quote_ident(t.tablename)
                        ) AS total_bytes
                    FROM pg_tables t
                    JOIN pg_class c ON c.relname = t.tablename
                    JOIN pg_namespace n ON n.oid = c.relnamespace
                        AND n.nspname = t.schemaname
                    WHERE t.schemaname = %s
                    ORDER BY pg_total_relation_size(
                        quote_ident(t.schemaname) || '.' || quote_ident(t.tablename)
                    ) DESC
                    """,
                    (schema,),
                )
                rows = cur.fetchall()
                # Rows are ordered largest-first by on-disk size (see ORDER BY).
                result = [
                    {
                        "table": r[0],
                        "estimated_rows": r[1],
                        "total_size": r[2],
                        "index_size": r[3],
                        "total_bytes": r[4],
                    }
                    for r in rows
                ]
                return {"schema": schema, "result": result, "success": True}
    except psycopg.Error:
        # Generic failure response; details intentionally not leaked to callers.
        return _error_response("Failed to get table stats")
@mcp.tool()
def pg_list_indexes(schema: str, table: str) -> dict:
    """
    List indexes on a specific table.

    Args:
        schema: Schema name
        table: Table name

    Returns:
        dict with indexes: name, columns, unique, type, size
    """
    database_url = _get_database_url(credentials)
    if not database_url:
        return _missing_credential_response()
    try:
        with _get_connection(database_url) as conn:
            with conn.cursor() as cur:
                # unnest(...) WITH ORDINALITY expands the index's column-number
                # array while preserving column order (k.n), so array_agg with
                # ORDER BY k.n reassembles the key columns in index order.
                # Both filters are parameterized (%s) — no SQL interpolation.
                cur.execute(
                    """
                    SELECT
                        i.relname AS index_name,
                        array_to_string(array_agg(a.attname ORDER BY k.n), ', ') AS columns,
                        ix.indisunique AS is_unique,
                        ix.indisprimary AS is_primary,
                        am.amname AS index_type,
                        pg_size_pretty(pg_relation_size(i.oid)) AS index_size
                    FROM pg_index ix
                    JOIN pg_class t ON t.oid = ix.indrelid
                    JOIN pg_class i ON i.oid = ix.indexrelid
                    JOIN pg_namespace n ON n.oid = t.relnamespace
                    JOIN pg_am am ON am.oid = i.relam
                    CROSS JOIN LATERAL unnest(ix.indkey) WITH ORDINALITY AS k(attnum, n)
                    JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = k.attnum
                    WHERE n.nspname = %s AND t.relname = %s
                    GROUP BY i.relname, ix.indisunique, ix.indisprimary, am.amname, i.oid
                    ORDER BY i.relname
                    """,
                    (schema, table),
                )
                rows = cur.fetchall()
                result = [
                    {
                        "name": r[0],
                        "columns": r[1],
                        "unique": r[2],
                        "primary": r[3],
                        "type": r[4],
                        "size": r[5],
                    }
                    for r in rows
                ]
                return {"schema": schema, "table": table, "result": result, "success": True}
    except psycopg.Error:
        # Generic failure response; DB error details are not surfaced.
        return _error_response("Failed to list indexes")
@mcp.tool()
def pg_get_foreign_keys(schema: str, table: str) -> dict:
    """
    Get foreign key relationships for a table.

    Shows both outgoing (this table references) and incoming (other tables
    reference this table) foreign key constraints.

    Args:
        schema: Schema name
        table: Table name

    Returns:
        dict with outgoing and incoming foreign keys
    """
    database_url = _get_database_url(credentials)
    if not database_url:
        return _missing_credential_response()
    try:
        with _get_connection(database_url) as conn:
            with conn.cursor() as cur:
                # Outgoing foreign keys (this table references others).
                # The join to constraint_column_usage includes
                # constraint_schema: constraint names are only unique per
                # schema, so omitting it cross-matches identically named
                # constraints from other schemas.
                cur.execute(
                    """
                    SELECT
                        tc.constraint_name,
                        kcu.column_name,
                        ccu.table_schema AS ref_schema,
                        ccu.table_name AS ref_table,
                        ccu.column_name AS ref_column
                    FROM information_schema.table_constraints tc
                    JOIN information_schema.key_column_usage kcu
                        ON tc.constraint_name = kcu.constraint_name
                        AND tc.table_schema = kcu.table_schema
                    JOIN information_schema.constraint_column_usage ccu
                        ON ccu.constraint_name = tc.constraint_name
                        AND ccu.constraint_schema = tc.constraint_schema
                    WHERE tc.constraint_type = 'FOREIGN KEY'
                        AND tc.table_schema = %s
                        AND tc.table_name = %s
                    ORDER BY tc.constraint_name
                    """,
                    (schema, table),
                )
                outgoing = [
                    {
                        "constraint": r[0],
                        "column": r[1],
                        "references_schema": r[2],
                        "references_table": r[3],
                        "references_column": r[4],
                    }
                    for r in cur.fetchall()
                ]
                # Incoming foreign keys (other tables reference this table);
                # same constraint_schema qualification as above.
                cur.execute(
                    """
                    SELECT
                        tc.constraint_name,
                        tc.table_schema AS source_schema,
                        tc.table_name AS source_table,
                        kcu.column_name AS source_column,
                        ccu.column_name AS referenced_column
                    FROM information_schema.table_constraints tc
                    JOIN information_schema.key_column_usage kcu
                        ON tc.constraint_name = kcu.constraint_name
                        AND tc.table_schema = kcu.table_schema
                    JOIN information_schema.constraint_column_usage ccu
                        ON ccu.constraint_name = tc.constraint_name
                        AND ccu.constraint_schema = tc.constraint_schema
                    WHERE tc.constraint_type = 'FOREIGN KEY'
                        AND ccu.table_schema = %s
                        AND ccu.table_name = %s
                    ORDER BY tc.constraint_name
                    """,
                    (schema, table),
                )
                incoming = [
                    {
                        "constraint": r[0],
                        "source_schema": r[1],
                        "source_table": r[2],
                        "source_column": r[3],
                        "referenced_column": r[4],
                    }
                    for r in cur.fetchall()
                ]
                return {
                    "schema": schema,
                    "table": table,
                    "outgoing": outgoing,
                    "incoming": incoming,
                    "success": True,
                }
    except psycopg.Error:
        return _error_response("Failed to get foreign keys")
@@ -225,3 +225,145 @@ def register_tools(
}
except Exception as e:
return {"error": f"Receipt check failed: {e!s}"}
@mcp.tool()
def pushover_cancel_receipt(
    receipt: str,
) -> dict[str, Any]:
    """
    Cancel emergency-priority notification retries for a receipt.

    Stops Pushover from continuing to retry delivery of an emergency
    notification before it expires or is acknowledged.

    Args:
        receipt: Receipt ID from an emergency-priority pushover_send response

    Returns:
        Dict with cancellation status
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not receipt:
        return {"error": "receipt is required"}
    try:
        resp = httpx.post(
            f"{PUSHOVER_API}/receipts/{receipt}/cancel.json",
            data={"token": token},
            timeout=30.0,
        )
        # Pushover signals success with status == 1 in the JSON body.
        if resp.json().get("status") == 1:
            return {"status": "cancelled", "receipt": receipt}
        return {"error": f"Cancel failed: {resp.text[:300]}"}
    except httpx.TimeoutException:
        return {"error": "Cancel request timed out"}
    except Exception as exc:
        return {"error": f"Cancel failed: {exc!s}"}
@mcp.tool()
def pushover_send_glance(
    user_key: str,
    title: str = "",
    text: str = "",
    subtext: str = "",
    count: int | None = None,
    percent: int | None = None,
    device: str = "",
) -> dict[str, Any]:
    """
    Update Pushover Glance data on a user's device widget.

    Glances display small data updates on smartwatch/widget screens
    without triggering a full notification.

    Args:
        user_key: Pushover user key
        title: Glance title (max 100 chars)
        text: Primary glance text (max 100 chars)
        subtext: Secondary text line (max 100 chars)
        count: Numeric count to display (-999 to 999)
        percent: Percentage value (0-100)
        device: Target device name (optional)

    Returns:
        Dict with glance update status
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    if not user_key:
        return {"error": "user_key is required"}
    has_payload = bool(title or text or subtext) or count is not None or percent is not None
    if not has_payload:
        return {"error": "At least one of title, text, subtext, count, or percent is required"}
    form: dict[str, Any] = {"token": token, "user": user_key}
    # Text fields are truncated to the 100-char glance limit;
    # numeric fields are clamped into their documented ranges.
    if title:
        form["title"] = title[:100]
    if text:
        form["text"] = text[:100]
    if subtext:
        form["subtext"] = subtext[:100]
    if count is not None:
        form["count"] = max(-999, min(count, 999))
    if percent is not None:
        form["percent"] = max(0, min(percent, 100))
    if device:
        form["device"] = device
    try:
        resp = httpx.post(
            f"{PUSHOVER_API}/glances.json",
            data=form,
            timeout=30.0,
        )
        payload = resp.json()
        if payload.get("status") == 1:
            return {"status": "updated", "request": payload.get("request", "")}
        errors = payload.get("errors", [])
        detail = ", ".join(errors) if errors else resp.text[:300]
        return {"error": f"Glance error: {detail}"}
    except httpx.TimeoutException:
        return {"error": "Glance request timed out"}
    except Exception as exc:
        return {"error": f"Glance update failed: {exc!s}"}
@mcp.tool()
def pushover_get_limits() -> dict[str, Any]:
    """
    Get Pushover application message limits and usage.

    Returns the app's monthly message limit, number of messages sent
    this month, and the reset timestamp.

    Returns:
        Dict with limit, remaining, and reset timestamp
    """
    token = _get_token(credentials)
    if not token:
        return _auth_error()
    try:
        resp = httpx.get(
            f"{PUSHOVER_API}/apps/limits.json",
            params={"token": token},
            timeout=30.0,
        )
        payload = resp.json()
        # status == 1 marks success in Pushover responses.
        if payload.get("status") != 1:
            return {"error": f"Limits check failed: {resp.text[:300]}"}
        return {
            "limit": payload.get("limit", 0),
            "remaining": payload.get("remaining", 0),
            "reset": payload.get("reset", 0),
        }
    except httpx.TimeoutException:
        return {"error": "Limits request timed out"}
    except Exception as exc:
        return {"error": f"Limits check failed: {exc!s}"}
@@ -256,3 +256,156 @@ def register_tools(mcp: FastMCP, credentials: Any = None) -> None:
else None,
"fiscal_year_start": info.get("FiscalYearStartMonth"),
}
@mcp.tool()
def quickbooks_list_invoices(
    status: str = "",
    customer_id: str = "",
    max_results: int = 100,
) -> dict:
    """List invoices from QuickBooks with optional filters.

    Args:
        status: Filter by status: 'Unpaid', 'Paid', 'Overdue' (optional).
            Uses Balance > 0 for Unpaid, Balance = 0 for Paid,
            DueDate < today for Overdue.
        customer_id: Filter by customer ID (optional).
        max_results: Maximum results (default 100, clamped to 1-1000).
    """
    cfg = _get_config()
    # The config helper returns an error dict when credentials are missing.
    if isinstance(cfg, dict):
        return cfg
    base_url, token = cfg
    where_parts = []
    if status == "Unpaid":
        where_parts.append("Balance > '0'")
    elif status == "Paid":
        where_parts.append("Balance = '0'")
    elif status == "Overdue":
        import datetime

        today = datetime.date.today().isoformat()
        where_parts.append(f"DueDate < '{today}' AND Balance > '0'")
    if customer_id:
        # customer_id is interpolated into the QBO query language string;
        # escape backslashes and single quotes so a crafted value cannot
        # break out of the literal (query-injection hardening).
        safe_customer_id = customer_id.replace("\\", "\\\\").replace("'", "\\'")
        where_parts.append(f"CustomerRef = '{safe_customer_id}'")
    query = "SELECT * FROM Invoice"
    if where_parts:
        query += " WHERE " + " AND ".join(where_parts)
    # Clamp into QBO's accepted 1-1000 window (0/negative would be rejected).
    query += f" MAXRESULTS {max(1, min(max_results, 1000))}"
    url = f"{base_url}/query"
    data = _get(url, token, params={"query": query, "minorversion": "73"})
    if "error" in data:
        return data
    qr = data.get("QueryResponse", {})
    invoices = qr.get("Invoice", [])
    return {
        "count": len(invoices),
        "invoices": [
            {
                "id": inv.get("Id"),
                "doc_number": inv.get("DocNumber"),
                "customer_name": (inv.get("CustomerRef") or {}).get("name", ""),
                "customer_id": (inv.get("CustomerRef") or {}).get("value", ""),
                "total_amt": inv.get("TotalAmt"),
                "balance": inv.get("Balance"),
                "due_date": inv.get("DueDate"),
                "txn_date": inv.get("TxnDate"),
                "email_status": inv.get("EmailStatus"),
            }
            for inv in invoices
        ],
    }
@mcp.tool()
def quickbooks_get_customer(customer_id: str) -> dict:
    """Get detailed customer information from QuickBooks.

    Args:
        customer_id: Customer ID (required).
    """
    cfg = _get_config()
    if isinstance(cfg, dict):
        return cfg
    base_url, token = cfg
    if not customer_id:
        return {"error": "customer_id is required"}
    data = _get(
        f"{base_url}/customer/{customer_id}",
        token,
        params={"minorversion": "73"},
    )
    if "error" in data:
        return data
    customer = data.get("Customer", {})
    # Email/phone are nested objects when present; guard with isinstance.
    email_obj = customer.get("PrimaryEmailAddr")
    phone_obj = customer.get("PrimaryPhone")
    billing = customer.get("BillAddr") or {}
    return {
        "id": customer.get("Id"),
        "display_name": customer.get("DisplayName"),
        "company_name": customer.get("CompanyName"),
        "given_name": customer.get("GivenName"),
        "family_name": customer.get("FamilyName"),
        "email": email_obj.get("Address") if isinstance(email_obj, dict) else None,
        "phone": phone_obj.get("FreeFormNumber") if isinstance(phone_obj, dict) else None,
        "balance": customer.get("Balance"),
        "active": customer.get("Active"),
        "billing_address": {
            "line1": billing.get("Line1", ""),
            "city": billing.get("City", ""),
            "state": billing.get("CountrySubDivisionCode", ""),
            "postal_code": billing.get("PostalCode", ""),
            "country": billing.get("Country", ""),
        },
        # SyncToken is needed by callers that later update this customer.
        "sync_token": customer.get("SyncToken"),
    }
@mcp.tool()
def quickbooks_create_payment(
    customer_id: str,
    total_amt: float,
    invoice_id: str = "",
) -> dict:
    """Record a payment in QuickBooks.

    Args:
        customer_id: Customer ID who is paying (required).
        total_amt: Payment amount (required).
        invoice_id: Invoice ID to apply payment to (optional).
    """
    cfg = _get_config()
    if isinstance(cfg, dict):
        return cfg
    base_url, token = cfg
    if not customer_id or total_amt <= 0:
        return {"error": "customer_id and a positive total_amt are required"}
    body: dict[str, Any] = {
        "CustomerRef": {"value": customer_id},
        "TotalAmt": total_amt,
    }
    if invoice_id:
        # Link the payment line to the invoice so QBO applies it there.
        body["Line"] = [
            {
                "Amount": total_amt,
                "LinkedTxn": [{"TxnId": invoice_id, "TxnType": "Invoice"}],
            }
        ]
    data = _post(f"{base_url}/payment", token, body)
    if "error" in data:
        return data
    payment = data.get("Payment", {})
    customer_ref = payment.get("CustomerRef") or {}
    return {
        "result": "created",
        "id": payment.get("Id"),
        "total_amt": payment.get("TotalAmt"),
        "customer_id": customer_ref.get("value"),
        "txn_date": payment.get("TxnDate"),
        "sync_token": payment.get("SyncToken"),
    }
@@ -308,3 +308,133 @@ def register_tools(
"created_utc": d.get("created_utc", 0),
"is_gold": d.get("is_gold", False),
}
@mcp.tool()
def reddit_get_subreddit_info(subreddit: str) -> dict[str, Any]:
    """
    Get information about a subreddit.

    Args:
        subreddit: Subreddit name without r/ prefix (required)

    Returns:
        Dict with subreddit details (subscribers, description, rules, etc.)
    """
    client_id, client_secret = _get_credentials(credentials)
    if not client_id or not client_secret:
        return _auth_error()
    if not subreddit:
        return {"error": "subreddit is required"}
    token = _get_token(client_id, client_secret)
    if not token:
        return {"error": "Failed to acquire Reddit access token"}
    raw = _get(f"/r/{subreddit}/about", token)
    if isinstance(raw, dict) and "error" in raw:
        return raw
    about = (raw if isinstance(raw, dict) else {}).get("data", {})
    # Truncate the public description so responses stay compact.
    description = (about.get("public_description", "") or "")[:500]
    return {
        "name": about.get("display_name", ""),
        "title": about.get("title", ""),
        "description": description,
        "subscribers": about.get("subscribers", 0),
        "active_users": about.get("accounts_active", 0),
        "created_utc": about.get("created_utc", 0),
        "over18": about.get("over18", False),
        "subreddit_type": about.get("subreddit_type", ""),
        "submission_type": about.get("submission_type", ""),
    }
@mcp.tool()
def reddit_get_post_detail(post_id: str) -> dict[str, Any]:
    """
    Get full details for a single Reddit post by ID.

    Args:
        post_id: Post ID (e.g. "abc123", without t3_ prefix) (required)

    Returns:
        Dict with full post details including selftext, flair, awards
    """
    client_id, client_secret = _get_credentials(credentials)
    if not client_id or not client_secret:
        return _auth_error()
    if not post_id:
        return {"error": "post_id is required"}
    token = _get_token(client_id, client_secret)
    if not token:
        return {"error": "Failed to acquire Reddit access token"}
    # /by_id expects a "fullname", i.e. the t3_ prefix plus the post ID.
    raw = _get(f"/by_id/t3_{post_id}", token)
    if isinstance(raw, dict) and "error" in raw:
        return raw
    listing = raw if isinstance(raw, dict) else {}
    children = (listing.get("data") or {}).get("children", [])
    # kind "t3" is a link/post; anything else means the ID did not resolve.
    if not children or children[0].get("kind") != "t3":
        return {"error": "Post not found"}
    post = children[0].get("data", {})
    return {
        "id": post.get("id", ""),
        "title": post.get("title", ""),
        "author": post.get("author", ""),
        "subreddit": post.get("subreddit", ""),
        "score": post.get("score", 0),
        "upvote_ratio": post.get("upvote_ratio", 0),
        "num_comments": post.get("num_comments", 0),
        "url": post.get("url", ""),
        "permalink": post.get("permalink", ""),
        # Cap selftext length to keep the payload bounded.
        "selftext": (post.get("selftext", "") or "")[:2000],
        "link_flair_text": post.get("link_flair_text", ""),
        "created_utc": post.get("created_utc", 0),
        "is_self": post.get("is_self", False),
        "over_18": post.get("over_18", False),
        "locked": post.get("locked", False),
        "archived": post.get("archived", False),
    }
@mcp.tool()
def reddit_get_user_posts(
    username: str,
    sort: str = "new",
    time: str = "all",
    limit: int = 25,
) -> dict[str, Any]:
    """
    Get recent posts submitted by a Reddit user.

    Args:
        username: Reddit username (required)
        sort: Sort: hot, new, top, controversial (default new)
        time: Time filter for top/controversial: hour, day, week, month, year, all
        limit: Max results (1-100, default 25)

    Returns:
        Dict with user's submitted posts
    """
    client_id, client_secret = _get_credentials(credentials)
    if not client_id or not client_secret:
        return _auth_error()
    if not username:
        return {"error": "username is required"}
    token = _get_token(client_id, client_secret)
    if not token:
        return {"error": "Failed to acquire Reddit access token"}
    raw = _get(
        f"/user/{username}/submitted",
        token,
        {
            "sort": sort,
            "t": time,
            # Clamp to Reddit's supported 1-100 listing size.
            "limit": max(1, min(limit, 100)),
        },
    )
    if isinstance(raw, dict) and "error" in raw:
        return raw
    posts = _extract_posts(raw if isinstance(raw, dict) else {})
    return {"username": username, "posts": posts, "count": len(posts)}
@@ -338,3 +338,125 @@ def register_tools(
return {"error": "Request timed out"}
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
@mcp.tool()
def salesforce_delete_record(
    object_type: str,
    record_id: str,
) -> dict:
    """
    Delete a Salesforce record by its ID.

    Args:
        object_type: SObject type (e.g. "Lead", "Contact", "Account").
        record_id: The 15 or 18-character Salesforce record ID.

    Returns:
        Dict with success status or error.
    """
    creds = _get_creds(credentials)
    if isinstance(creds, dict):
        # Credential resolution failed; pass the error through.
        return creds
    token, instance_url = creds
    if not object_type or not record_id:
        return {"error": "object_type and record_id are required"}
    endpoint = (
        f"{instance_url}/services/data/{API_VERSION}"
        f"/sobjects/{object_type}/{record_id}"
    )
    try:
        response = httpx.delete(endpoint, headers=_headers(token), timeout=30.0)
        return _handle_response(response)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def salesforce_search_records(
    search_query: str,
) -> dict:
    """
    Full-text search across Salesforce records using SOSL.

    More flexible than SOQL for keyword searches across multiple objects.

    Args:
        search_query: SOSL search string.
            e.g. "FIND {John Smith} IN ALL FIELDS RETURNING Contact(Id, Name), Lead(Id, Name)"

    Returns:
        Dict with search results grouped by SObject type.
    """
    creds = _get_creds(credentials)
    if isinstance(creds, dict):
        return creds
    token, instance_url = creds
    if not search_query:
        return {"error": "search_query is required"}
    try:
        response = httpx.get(
            f"{instance_url}/services/data/{API_VERSION}/search/",
            headers=_headers(token),
            params={"q": search_query},
            timeout=30.0,
        )
        parsed = _handle_response(response)
        if "error" in parsed:
            return parsed
        # Older API shapes return a bare list; newer ones wrap it in
        # a dict under "searchRecords". Normalize both.
        if isinstance(parsed, list):
            return {"records": parsed, "count": len(parsed)}
        matches = parsed.get("searchRecords", [])
        return {"records": matches, "count": len(matches)}
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def salesforce_get_record_count(
    object_type: str,
) -> dict:
    """
    Get the total number of records for a Salesforce SObject type.

    Uses SELECT COUNT() for an efficient count without returning records.

    Args:
        object_type: SObject type (e.g. "Lead", "Contact", "Account", "Opportunity").

    Returns:
        Dict with total_size count or error.
    """
    creds = _get_creds(credentials)
    if isinstance(creds, dict):
        return creds
    token, instance_url = creds
    if not object_type:
        return {"error": "object_type is required"}
    # object_type is interpolated into the SOQL string below; restrict it
    # to identifier characters so arbitrary SOQL cannot be injected.
    if not object_type.replace("_", "").isalnum():
        return {"error": "object_type must contain only letters, digits, and underscores"}
    try:
        url = f"{instance_url}/services/data/{API_VERSION}/query/"
        resp = httpx.get(
            url,
            headers=_headers(token),
            params={"q": f"SELECT COUNT() FROM {object_type}"},
            timeout=30.0,
        )
        result = _handle_response(resp)
        if "error" in result:
            return result
        return {
            "object_type": object_type,
            # COUNT() queries return no records; totalSize carries the count.
            "total_size": result.get("totalSize", 0),
        }
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@@ -135,6 +135,38 @@ class _SerpAPIClient:
"""Get details for a specific patent by searching its ID."""
return self._request({"engine": "google_patents", "q": patent_id})
def scholar_cited_by(self, cites_id: str, num: int = 10, start: int = 0) -> dict[str, Any]:
    """Get papers that cite a given paper using its cites_id.

    Args:
        cites_id: The cites_id value from a prior scholar search result.
        num: Requested result count; clamped to the 1-20 range the tool
            layer documents.
        start: Pagination offset.

    Returns:
        Raw SerpAPI response dict.
    """
    return self._request(
        {
            "engine": "google_scholar",
            "cites": cites_id,
            # Clamp to [1, 20]: previously a non-positive num was passed
            # through to the API unchanged.
            "num": max(1, min(num, 20)),
            "start": start,
        }
    )
def scholar_profiles(self, query: str, num: int = 10) -> dict[str, Any]:
    """Search for Google Scholar author profiles.

    Args:
        query: Author name or affiliation (sent as ``mauthors``).
        num: Requested profile count; clamped to the 1-20 range the tool
            layer documents.

    Returns:
        Raw SerpAPI response dict.
    """
    return self._request(
        {
            "engine": "google_scholar_profiles",
            "mauthors": query,
            # Clamp to [1, 20]: previously a non-positive num leaked through.
            "num": max(1, min(num, 20)),
        }
    )
def google_search(self, query: str, num: int = 10, gl: str | None = None) -> dict[str, Any]:
"""Run a standard Google web search."""
params: dict[str, Any] = {
"engine": "google",
"q": query,
"num": min(num, 20),
}
if gl:
params["gl"] = gl
return self._request(params)
def register_tools(
mcp: FastMCP,
@@ -507,3 +539,200 @@ def register_tools(
return {"error": f"Network error: {e}"}
except Exception as e:
return {"error": f"Patent detail lookup failed: {e}"}
@mcp.tool()
def scholar_cited_by(
    cites_id: str,
    num_results: int = 10,
    start: int = 0,
) -> dict:
    """
    Get papers that cite a specific Google Scholar paper.

    Uses the cites_id from a scholar_search result to find all papers
    that reference the original paper.

    Args:
        cites_id: The cites_id from a scholar_search result's cited_by field
        num_results: Number of citing papers to return (1-20, default 10)
        start: Pagination offset (default 0)

    Returns:
        Dict with citing papers including titles, authors, and citation counts
    """
    if not cites_id:
        return {"error": "cites_id is required"}
    client = _get_client()
    if isinstance(client, dict):
        # _get_client returns an error dict on missing credentials.
        return client
    try:
        data = client.scholar_cited_by(cites_id=cites_id, num=num_results, start=start)
        if "error" in data:
            return data
        papers = []
        for item in data.get("organic_results", []):
            pub_info = item.get("publication_info", {})
            entry = {
                "title": item.get("title", ""),
                "link": item.get("link", ""),
                "snippet": item.get("snippet", ""),
                "result_id": item.get("result_id", ""),
                "publication_info": pub_info.get("summary", ""),
                "cited_by_count": (
                    item.get("inline_links", {}).get("cited_by", {}).get("total", 0)
                ),
            }
            author_list = pub_info.get("authors", [])
            if author_list:
                # Authors are only present on some results; include when known.
                entry["authors"] = [
                    {"name": a.get("name", ""), "author_id": a.get("author_id", "")}
                    for a in author_list
                ]
            papers.append(entry)
        return {
            "cites_id": cites_id,
            "results": papers,
            "count": len(papers),
        }
    except httpx.TimeoutException:
        return {"error": "Cited-by request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
    except Exception as e:
        return {"error": f"Cited-by lookup failed: {e}"}
@mcp.tool()
def scholar_search_profiles(
    query: str,
    num_results: int = 10,
) -> dict:
    """
    Search for Google Scholar author profiles by name or affiliation.

    Returns author profiles with names, affiliations, citation counts,
    and author IDs that can be used with scholar_get_author.

    Args:
        query: Author name or affiliation to search (e.g. "Geoffrey Hinton")
        num_results: Number of profiles to return (1-20, default 10)

    Returns:
        Dict with author profiles including name, affiliation, and cited_by count
    """
    if not query:
        return {"error": "query is required"}
    client = _get_client()
    if isinstance(client, dict):
        return client
    try:
        data = client.scholar_profiles(query=query, num=num_results)
        if "error" in data:
            return data
        # Flatten each raw profile into a stable, compact shape.
        profiles = [
            {
                "name": p.get("name", ""),
                "author_id": p.get("author_id", ""),
                "affiliations": p.get("affiliations", ""),
                "email": p.get("email", ""),
                "cited_by": p.get("cited_by", 0),
                "interests": [i.get("title", "") for i in p.get("interests", [])],
                "thumbnail": p.get("thumbnail", ""),
            }
            for p in data.get("profiles", [])
        ]
        return {
            "query": query,
            "profiles": profiles,
            "count": len(profiles),
        }
    except httpx.TimeoutException:
        return {"error": "Profile search timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
    except Exception as e:
        return {"error": f"Profile search failed: {e}"}
@mcp.tool()
def serpapi_google_search(
    query: str,
    num_results: int = 10,
    country: str | None = None,
) -> dict:
    """
    Search Google web results via SerpAPI.

    Returns structured Google search results with titles, snippets, links,
    and optional knowledge graph and answer box data.

    Args:
        query: Google search query (1-500 chars)
        num_results: Number of results (1-20, default 10)
        country: Country code for localized results (e.g. 'us', 'uk')

    Returns:
        Dict with organic results and optional answer_box/knowledge_graph
    """
    if not query or len(query) > 500:
        return {"error": "Query must be 1-500 characters"}
    client = _get_client()
    if isinstance(client, dict):
        return client
    try:
        data = client.google_search(query=query, num=num_results, gl=country)
        if "error" in data:
            return data
        organic = [
            {
                "title": item.get("title", ""),
                "link": item.get("link", ""),
                "snippet": item.get("snippet", ""),
                "displayed_link": item.get("displayed_link", ""),
                "position": item.get("position"),
            }
            for item in data.get("organic_results", [])
        ]
        output: dict = {
            "query": query,
            "results": organic,
            "count": len(organic),
        }
        # Attach the enrichment blocks only when Google returned them.
        box = data.get("answer_box")
        if box:
            output["answer_box"] = {
                "type": box.get("type", ""),
                "title": box.get("title", ""),
                # Some answer boxes carry "answer", others only "snippet".
                "answer": box.get("answer", box.get("snippet", "")),
            }
        kg = data.get("knowledge_graph")
        if kg:
            output["knowledge_graph"] = {
                "title": kg.get("title", ""),
                "type": kg.get("type", ""),
                "description": kg.get("description", ""),
            }
        return output
    except httpx.TimeoutException:
        return {"error": "Google search timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
    except Exception as e:
        return {"error": f"Google search failed: {e}"}
@@ -459,3 +459,223 @@ def register_tools(
return {"error": "Request timed out"}
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
@mcp.tool()
def shopify_update_product(
    product_id: str,
    title: str = "",
    body_html: str = "",
    vendor: str = "",
    product_type: str = "",
    tags: str = "",
    status: str = "",
) -> dict:
    """
    Update an existing Shopify product.

    Only the non-empty fields are sent; everything else is left untouched.

    Args:
        product_id: The numeric Shopify product ID (required).
        title: New product title (optional).
        body_html: New product description HTML (optional).
        vendor: New vendor name (optional).
        product_type: New product type (optional).
        tags: Comma-separated tags to replace existing tags (optional).
        status: New status - "active", "archived", or "draft" (optional).

    Returns:
        Dict with updated product details.
    """
    creds = _get_creds(credentials)
    if isinstance(creds, dict):
        return creds
    token, store = creds
    if not product_id:
        return {"error": "product_id is required"}
    # Validate the documented enum locally instead of round-tripping
    # a 422 from the Shopify API.
    if status and status not in ("active", "archived", "draft"):
        return {"error": "status must be one of: active, archived, draft"}
    product: dict[str, Any] = {}
    if title:
        product["title"] = title
    if body_html:
        product["body_html"] = body_html
    if vendor:
        product["vendor"] = vendor
    if product_type:
        product["product_type"] = product_type
    if tags:
        product["tags"] = tags
    if status:
        product["status"] = status
    if not product:
        return {"error": "At least one field to update is required"}
    try:
        resp = httpx.put(
            f"{_base_url(store)}/products/{product_id}.json",
            headers=_headers(token),
            json={"product": product},
            timeout=30.0,
        )
        result = _handle_response(resp)
        if "error" in result:
            return result
        p = result.get("product", {})
        return {
            "id": p.get("id"),
            "title": p.get("title"),
            "vendor": p.get("vendor"),
            "product_type": p.get("product_type"),
            "status": p.get("status"),
            "tags": p.get("tags"),
            "updated_at": p.get("updated_at"),
            "result": "updated",
        }
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def shopify_get_customer(customer_id: str) -> dict:
    """
    Get a single Shopify customer by ID.

    Args:
        customer_id: The numeric Shopify customer ID.

    Returns:
        Dict with full customer details including addresses and order stats.
    """
    creds = _get_creds(credentials)
    if isinstance(creds, dict):
        return creds
    token, store = creds
    if not customer_id:
        return {"error": "customer_id is required"}
    try:
        resp = httpx.get(
            f"{_base_url(store)}/customers/{customer_id}.json",
            headers=_headers(token),
            timeout=30.0,
        )
        result = _handle_response(resp)
        if "error" in result:
            return result
        cust = result.get("customer", {})
        # Flatten each address to the fields callers actually use.
        addresses = [
            {
                "id": a.get("id"),
                "address1": a.get("address1"),
                "city": a.get("city"),
                "province": a.get("province"),
                "country": a.get("country"),
                "zip": a.get("zip"),
                "default": a.get("default", False),
            }
            for a in cust.get("addresses", [])
        ]
        return {
            "id": cust.get("id"),
            "first_name": cust.get("first_name"),
            "last_name": cust.get("last_name"),
            "email": cust.get("email"),
            "phone": cust.get("phone"),
            "orders_count": cust.get("orders_count"),
            "total_spent": cust.get("total_spent"),
            "state": cust.get("state"),
            "tags": cust.get("tags"),
            "note": cust.get("note"),
            "verified_email": cust.get("verified_email"),
            "tax_exempt": cust.get("tax_exempt"),
            "created_at": cust.get("created_at"),
            "updated_at": cust.get("updated_at"),
            "addresses": addresses,
        }
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def shopify_create_draft_order(
    line_items_json: str,
    customer_id: str = "",
    note: str = "",
    tags: str = "",
) -> dict:
    """
    Create a draft order in Shopify.

    Args:
        line_items_json: JSON array of line items. Each item needs either
            "variant_id" and "quantity", or "title", "price", and "quantity".
            Example: '[{"variant_id": 123, "quantity": 2}]'
        customer_id: Existing customer ID to associate (optional).
        note: Order note (optional).
        tags: Comma-separated tags (optional).

    Returns:
        Dict with created draft order details including invoice URL.
    """
    import json as json_mod

    creds = _get_creds(credentials)
    if isinstance(creds, dict):
        return creds
    token, store = creds
    if not line_items_json:
        return {"error": "line_items_json is required"}
    try:
        line_items = json_mod.loads(line_items_json)
    except json_mod.JSONDecodeError:
        return {"error": "line_items_json must be valid JSON"}
    if not isinstance(line_items, list) or not line_items:
        return {"error": "line_items_json must be a non-empty JSON array"}
    draft_order: dict[str, Any] = {"line_items": line_items}
    if customer_id:
        # Guard the int() conversion: previously a non-numeric ID raised
        # an uncaught ValueError instead of returning an error dict.
        try:
            draft_order["customer"] = {"id": int(customer_id)}
        except ValueError:
            return {"error": "customer_id must be numeric"}
    if note:
        draft_order["note"] = note
    if tags:
        draft_order["tags"] = tags
    try:
        resp = httpx.post(
            f"{_base_url(store)}/draft_orders.json",
            headers=_headers(token),
            json={"draft_order": draft_order},
            timeout=30.0,
        )
        result = _handle_response(resp)
        if "error" in result:
            return result
        d = result.get("draft_order", {})
        return {
            "id": d.get("id"),
            "name": d.get("name"),
            "status": d.get("status"),
            "total_price": d.get("total_price"),
            "subtotal_price": d.get("subtotal_price"),
            "total_tax": d.get("total_tax"),
            "currency": d.get("currency"),
            "invoice_url": d.get("invoice_url"),
            "created_at": d.get("created_at"),
            "line_item_count": len(d.get("line_items", [])),
            "result": "created",
        }
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@@ -1141,6 +1141,117 @@ class _SlackClient:
"note": "For exact member count, paginate through users.list",
}
def get_channel_info(self, channel: str) -> dict[str, Any]:
    """Get detailed information about a channel.

    Calls conversations.info and flattens the topic/purpose sub-objects.
    """
    data = self._handle_response(
        httpx.get(
            f"{SLACK_API_BASE}/conversations.info",
            headers=self._headers,
            params={"channel": channel},
            timeout=30.0,
        )
    )
    if "error" in data:
        return data
    ch = data.get("channel", {})
    # topic/purpose are nested objects; surface only their text value.
    topic_text = (ch.get("topic") or {}).get("value", "")
    purpose_text = (ch.get("purpose") or {}).get("value", "")
    return {
        "id": ch.get("id"),
        "name": ch.get("name"),
        "is_channel": ch.get("is_channel"),
        "is_private": ch.get("is_private"),
        "is_archived": ch.get("is_archived"),
        "is_general": ch.get("is_general"),
        "topic": topic_text,
        "purpose": purpose_text,
        "num_members": ch.get("num_members"),
        "creator": ch.get("creator"),
        "created": ch.get("created"),
    }
def list_files(
    self,
    channel: str | None = None,
    user: str | None = None,
    types: str | None = None,
    count: int = 20,
    page: int = 1,
) -> dict[str, Any]:
    """List files shared in the workspace.

    Args:
        channel: Optional channel ID filter.
        user: Optional uploader user ID filter.
        types: Optional comma-separated file-type filter.
        count: Page size; clamped to the 1-100 range the tool layer documents.
        page: 1-based page number.

    Returns:
        Dict with a flattened file list and paging metadata.
    """
    params: dict[str, Any] = {
        # Clamp to [1, 100]: previously a non-positive count was sent as-is.
        "count": max(1, min(count, 100)),
        "page": page,
    }
    if channel:
        params["channel"] = channel
    if user:
        params["user"] = user
    if types:
        params["types"] = types
    response = httpx.get(
        f"{SLACK_API_BASE}/files.list",
        headers=self._headers,
        params=params,
        timeout=30.0,
    )
    data = self._handle_response(response)
    if "error" in data:
        return data
    files = []
    for f in data.get("files", []):
        files.append(
            {
                "id": f.get("id"),
                "name": f.get("name"),
                "title": f.get("title"),
                "mimetype": f.get("mimetype"),
                "filetype": f.get("filetype"),
                "size": f.get("size"),
                "user": f.get("user"),
                "created": f.get("created"),
                "permalink": f.get("permalink"),
            }
        )
    paging = data.get("paging", {})
    return {
        "files": files,
        "count": len(files),
        "total": paging.get("total", len(files)),
        "page": paging.get("page", 1),
        "pages": paging.get("pages", 1),
    }
def get_file_info(self, file_id: str) -> dict[str, Any]:
    """Get detailed information about a file.

    Calls files.info and flattens the result, including a capped list of
    public channels the file was shared into.
    """
    data = self._handle_response(
        httpx.get(
            f"{SLACK_API_BASE}/files.info",
            headers=self._headers,
            params={"file": file_id},
            timeout=30.0,
        )
    )
    if "error" in data:
        return data
    info = data.get("file", {})
    # Keep at most 10 public share targets to bound the payload.
    public_shares = list((info.get("shares") or {}).get("public", {}).keys())[:10]
    return {
        "id": info.get("id"),
        "name": info.get("name"),
        "title": info.get("title"),
        "mimetype": info.get("mimetype"),
        "filetype": info.get("filetype"),
        "size": info.get("size"),
        "user": info.get("user"),
        "created": info.get("created"),
        "permalink": info.get("permalink"),
        "url_private": info.get("url_private"),
        "channels": info.get("channels", []),
        "shares": public_shares,
        "comments_count": info.get("comments_count", 0),
    }
def register_tools(
mcp: FastMCP,
@@ -2908,3 +3019,97 @@ def register_tools(
return {"error": "Request timed out"}
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
@mcp.tool()
def slack_get_channel_info(
    channel: str,
    account: str = "",
) -> dict:
    """
    Get detailed information about a Slack channel.

    Args:
        channel: Channel ID (e.g., "C1234567890")
        account: Optional account alias for multi-workspace setups

    Returns:
        Dict with channel details including name, topic, purpose, member count
    """
    client = _get_client(account)
    if isinstance(client, dict):
        # Error dict when no client could be built for this account.
        return client
    if not channel:
        return {"error": "channel is required"}
    try:
        return client.get_channel_info(channel)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def slack_list_files(
    channel: str = "",
    user: str = "",
    types: str = "",
    count: int = 20,
    page: int = 1,
    account: str = "",
) -> dict:
    """
    List files shared in the Slack workspace.

    Args:
        channel: Filter by channel ID (optional)
        user: Filter by user ID (optional)
        types: Filter by file type - comma-separated: spaces, snippets,
            images, gdocs, zips, pdfs (optional)
        count: Number of files per page (1-100, default 20)
        page: Page number (default 1)
        account: Optional account alias for multi-workspace setups

    Returns:
        Dict with files list including name, type, size, and permalink
    """
    client = _get_client(account)
    if isinstance(client, dict):
        return client
    try:
        # Empty strings mean "no filter"; the client expects None for those.
        return client.list_files(
            channel=channel or None,
            user=user or None,
            types=types or None,
            count=count,
            page=page,
        )
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def slack_get_file_info(
    file_id: str,
    account: str = "",
) -> dict:
    """
    Get detailed information about a Slack file.

    Args:
        file_id: The file ID (e.g., "F1234567890")
        account: Optional account alias for multi-workspace setups

    Returns:
        Dict with file details including name, type, size, permalink, and sharing info
    """
    client = _get_client(account)
    if isinstance(client, dict):
        return client
    if not file_id:
        return {"error": "file_id is required"}
    try:
        return client.get_file_info(file_id)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@@ -956,6 +956,106 @@ class _StripeClient:
pm = self._stripe().payment_methods.detach(payment_method_id)
return self._format_payment_method(pm)
# --- Disputes ---
def list_disputes(
self,
limit: int = 10,
starting_after: str | None = None,
) -> dict[str, Any]:
params: dict[str, Any] = {"limit": min(limit, 100)}
if starting_after:
params["starting_after"] = starting_after
result = self._stripe().disputes.list(params)
return {
"has_more": result.has_more,
"disputes": [self._format_dispute(d) for d in result.data],
}
def _format_dispute(self, d: Any) -> dict[str, Any]:
return {
"id": d.id,
"amount": d.amount,
"currency": d.currency,
"charge": d.charge,
"payment_intent": d.payment_intent,
"reason": d.reason,
"status": d.status,
"created": d.created,
"evidence_due_by": (
getattr(d, "evidence_details", {}).get("due_by")
if hasattr(d, "evidence_details") and d.evidence_details
else None
),
}
# --- Events ---
def list_events(
self,
type_filter: str | None = None,
limit: int = 10,
starting_after: str | None = None,
) -> dict[str, Any]:
params: dict[str, Any] = {"limit": min(limit, 100)}
if type_filter:
params["type"] = type_filter
if starting_after:
params["starting_after"] = starting_after
result = self._stripe().events.list(params)
return {
"has_more": result.has_more,
"events": [
{
"id": e.id,
"type": e.type,
"created": e.created,
"object_id": (
e.data.object.get("id")
if hasattr(e.data, "object") and isinstance(e.data.object, dict)
else getattr(getattr(e.data, "object", None), "id", None)
),
}
for e in result.data
],
}
# --- Checkout Sessions ---
def create_checkout_session(
self,
line_items: list[dict[str, Any]],
mode: str = "payment",
success_url: str = "",
cancel_url: str = "",
customer_id: str | None = None,
metadata: dict[str, str] | None = None,
) -> dict[str, Any]:
params: dict[str, Any] = {
"line_items": line_items,
"mode": mode,
}
if success_url:
params["success_url"] = success_url
if cancel_url:
params["cancel_url"] = cancel_url
if customer_id:
params["customer"] = customer_id
if metadata:
params["metadata"] = metadata
session = self._stripe().checkout.sessions.create(params)
return {
"id": session.id,
"url": session.url,
"mode": session.mode,
"status": session.status,
"payment_status": session.payment_status,
"customer": session.customer,
"amount_total": session.amount_total,
"currency": session.currency,
"created": session.created,
}
def _format_payment_method(self, pm: Any) -> dict[str, Any]:
card = None
if pm.card:
@@ -2560,3 +2660,128 @@ def register_tools(
return client.detach_payment_method(payment_method_id)
except stripe.StripeError as e:
return _stripe_error(e)
# --- Dispute Tools ---
@mcp.tool()
def stripe_list_disputes(
    limit: int = 10,
    starting_after: str | None = None,
) -> dict:
    """
    List payment disputes (chargebacks).

    Args:
        limit: Number of disputes to fetch (1-100, default 10)
        starting_after: Cursor for pagination (dispute ID)

    Returns:
        Dict with disputes list including id, amount, reason, status

    Example:
        stripe_list_disputes(limit=20)
    """
    client = _get_client()
    if isinstance(client, dict):
        # Error dict when the Stripe client could not be created.
        return client
    try:
        return client.list_disputes(limit, starting_after)
    except stripe.StripeError as e:
        return _stripe_error(e)
# --- Event Tools ---
@mcp.tool()
def stripe_list_events(
    type_filter: str | None = None,
    limit: int = 10,
    starting_after: str | None = None,
) -> dict:
    """
    List recent API events (webhooks, state changes).

    Args:
        type_filter: Filter by event type (e.g. "charge.succeeded",
            "invoice.payment_failed", "customer.subscription.updated")
        limit: Number of events to fetch (1-100, default 10)
        starting_after: Cursor for pagination (event ID)

    Returns:
        Dict with events list including id, type, created, object_id

    Example:
        stripe_list_events(type_filter="charge.succeeded", limit=5)
    """
    client = _get_client()
    if isinstance(client, dict):
        return client
    try:
        return client.list_events(type_filter, limit, starting_after)
    except stripe.StripeError as e:
        return _stripe_error(e)
# --- Checkout Session Tools ---
@mcp.tool()
def stripe_create_checkout_session(
    line_items_json: str,
    mode: str = "payment",
    success_url: str = "",
    cancel_url: str = "",
    customer_id: str | None = None,
    metadata: dict[str, str] | None = None,
) -> dict:
    """
    Create a Stripe Checkout session for hosted payment.

    Args:
        line_items_json: JSON array of line items. Each needs "price" (price ID)
            and "quantity". Example: '[{"price": "price_abc", "quantity": 1}]'
        mode: Session mode - "payment" (one-time), "subscription", or "setup"
            (default "payment")
        success_url: URL to redirect to on success (optional)
        cancel_url: URL to redirect to on cancellation (optional)
        customer_id: Existing customer ID to associate (optional, starts with "cus_")
        metadata: Key-value metadata to attach (optional)

    Returns:
        Dict with checkout session details including URL

    Example:
        stripe_create_checkout_session('[{"price":"price_abc","quantity":1}]',
            success_url="https://example.com/thanks")
    """
    import json as json_mod

    client = _get_client()
    if isinstance(client, dict):
        return client
    if not line_items_json:
        return {"error": "line_items_json is required"}
    # Parse and validate the line-item payload before touching the API.
    try:
        line_items = json_mod.loads(line_items_json)
    except json_mod.JSONDecodeError:
        return {"error": "line_items_json must be valid JSON"}
    if not isinstance(line_items, list) or not line_items:
        return {"error": "line_items_json must be a non-empty JSON array"}
    if mode not in ("payment", "subscription", "setup"):
        return {"error": "mode must be one of: payment, subscription, setup"}
    if customer_id and not customer_id.startswith("cus_"):
        return {"error": "Invalid customer_id. Must start with: cus_"}
    try:
        return client.create_checkout_session(
            line_items=line_items,
            mode=mode,
            success_url=success_url,
            cancel_url=cancel_url,
            customer_id=customer_id,
            metadata=metadata,
        )
    except stripe.StripeError as e:
        return _stripe_error(e)
@@ -266,6 +266,64 @@ class _TelegramClient:
)
return self._handle_response(response)
def get_chat_member_count(self, chat_id: str) -> dict[str, Any]:
    """Get the number of members in a chat.

    API ref: https://core.telegram.org/bots/api#getchatmembercount
    """
    return self._handle_response(
        httpx.post(
            f"{self._base_url}/getChatMemberCount",
            json={"chat_id": chat_id},
            timeout=30.0,
        )
    )
def send_video(
    self,
    chat_id: str,
    video: str,
    caption: str | None = None,
    parse_mode: str | None = None,
    duration: int | None = None,
) -> dict[str, Any]:
    """Send a video to a chat via URL or file_id.

    API ref: https://core.telegram.org/bots/api#sendvideo
    """
    payload: dict[str, Any] = {"chat_id": chat_id, "video": video}
    # Optional string fields are only attached when truthy.
    for key, value in (("caption", caption), ("parse_mode", parse_mode)):
        if value:
            payload[key] = value
    # duration is attached for any non-None value (0 included).
    if duration is not None:
        payload["duration"] = duration
    # Video delivery can take longer than other calls, hence 60s.
    response = httpx.post(
        f"{self._base_url}/sendVideo",
        json=payload,
        timeout=60.0,
    )
    return self._handle_response(response)
def set_chat_description(
    self,
    chat_id: str,
    description: str,
) -> dict[str, Any]:
    """Change the description of a group, supergroup, or channel.

    API ref: https://core.telegram.org/bots/api#setchatdescription
    """
    payload = {"chat_id": chat_id, "description": description}
    return self._handle_response(
        httpx.post(
            f"{self._base_url}/setChatDescription",
            json=payload,
            timeout=30.0,
        )
    )
def register_tools(
mcp: FastMCP,
@@ -669,3 +727,112 @@ def register_tools(
return {"error": "Telegram request timed out"}
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
# --- Extended Tools ---
@mcp.tool()
def telegram_get_chat_member_count(
    chat_id: str,
) -> dict[str, Any]:
    """
    Get the number of members in a Telegram chat.

    Works for groups, supergroups, and channels.

    Args:
        chat_id: Chat ID (numeric) or @username for public channels

    Returns:
        Dict with member count on success, or error dict on failure.
    """
    client = _get_client()
    if isinstance(client, dict):
        return client
    try:
        raw = client.get_chat_member_count(chat_id=chat_id)
        if isinstance(raw, dict) and "error" in raw:
            return raw
        # Telegram returns {"ok": true, "result": <count>}
        members = raw.get("result", 0) if isinstance(raw, dict) else raw
        return {"chat_id": chat_id, "member_count": members}
    except httpx.TimeoutException:
        return {"error": "Telegram request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def telegram_send_video(
    chat_id: str,
    video: str,
    caption: str = "",
    parse_mode: str = "",
    duration: int = 0,
) -> dict[str, Any]:
    """
    Send a video to a Telegram chat.
    Use this to share video files, clips, or recordings.
    Args:
        chat_id: Target chat ID (numeric) or @username for public channels
        video: URL of the video to send, or file_id of existing video on Telegram.
            Supports MP4 format. Max 50 MB via URL.
        caption: Optional caption for the video (0-1024 characters)
        parse_mode: Optional format mode for caption - "HTML" or "Markdown"
        duration: Optional video duration in seconds (0 to omit)
    Returns:
        Dict with message info on success, or error dict on failure.
    """
    client = _get_client()
    if isinstance(client, dict):
        return client
    # Map the empty-string / zero sentinels to None so the client omits them.
    kwargs: dict[str, Any] = {
        "chat_id": chat_id,
        "video": video,
        "caption": caption or None,
        "parse_mode": parse_mode or None,
        "duration": duration if duration > 0 else None,
    }
    try:
        return client.send_video(**kwargs)
    except httpx.TimeoutException:
        return {"error": "Telegram request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def telegram_set_chat_description(
    chat_id: str,
    description: str,
) -> dict[str, Any]:
    """
    Change the description of a Telegram group, supergroup, or channel.
    The bot must have the appropriate admin rights in the chat.
    Args:
        chat_id: Chat ID of the group/supergroup/channel
        description: New description text (0-255 characters).
            Use empty string to remove the description.
    Returns:
        Raw Telegram API response or error dict on failure.
    """
    # Telegram rejects descriptions over 255 chars; fail fast locally.
    if len(description) > 255:
        return {"error": "Description cannot exceed 255 characters"}
    client = _get_client()
    if isinstance(client, dict):
        return client
    try:
        return client.set_chat_description(chat_id=chat_id, description=description)
    except httpx.TimeoutException:
        return {"error": "Telegram request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@@ -170,3 +170,60 @@ class TrelloClient:
) -> dict[str, Any]:
params = {"url": attachment_url, "name": name}
return self._request("POST", f"/cards/{card_id}/attachments", params=params)
def get_card(
    self,
    card_id: str,
    fields: list[str] | None = None,
) -> dict[str, Any]:
    """Fetch one card, expanded with members, checklists, and attachments.

    API ref: GET /1/cards/{id}
    """
    field_spec = ",".join(fields) if fields else "all"
    query: dict[str, Any] = {
        "fields": field_spec,
        "members": "true",
        "member_fields": "fullName,username",
        "checklists": "all",
        "checklist_fields": "name",
        "attachments": "true",
        "attachment_fields": "name,url",
    }
    return self._request("GET", f"/cards/{card_id}", params=query)
def create_list(
    self,
    board_id: str,
    name: str,
    pos: str | None = None,
) -> dict[str, Any]:
    """Create a new list on a board.

    Args:
        board_id: Board to add the list to.
        name: Display name for the new list.
        pos: Optional position: "top", "bottom", or a numeric string.
    API ref: POST /1/lists
    """
    params: dict[str, Any] = {
        "idBoard": board_id,
        "name": name,
    }
    # Only send "pos" when the caller supplied one; a literal None value
    # risks being serialized as the string "None" in the query string.
    if pos is not None:
        params["pos"] = pos
    return self._request("POST", "/lists", params=params)
def search(
    self,
    query: str,
    model_types: str = "cards",
    cards_limit: int = 10,
    board_id: str | None = None,
) -> dict[str, Any]:
    """Full-text search across Trello.

    Args:
        query: Search text.
        model_types: Comma-separated model types to search (default "cards").
        cards_limit: Max card results; clamped to Trello's 1-1000 range.
        board_id: Optional board id to scope the search.
    API ref: GET /1/search
    """
    params: dict[str, Any] = {
        "query": query,
        "modelTypes": model_types,
        # Clamp to the API's accepted range; previously only the upper
        # bound was enforced, letting 0/negative values through.
        "cards_limit": max(1, min(cards_limit, 1000)),
    }
    if board_id:
        params["idBoards"] = board_id
    return self._request("GET", "/search", params=params)
@@ -304,3 +304,79 @@ def register_tools(
attachment_url=attachment_url,
name=name,
)
@mcp.tool()
def trello_get_card(
    card_id: str,
    fields: list[str] | None = None,
) -> dict:
    """
    Get full details of a Trello card.
    Returns all card fields including members, checklists, and attachments.
    Args:
        card_id: Trello card id
        fields: Optional list of card fields to return (e.g., ["name", "desc",
            "url", "due", "labels"] or ["all"]). Defaults to all fields.
    """
    client_or_error = _get_client()
    if isinstance(client_or_error, dict):
        # A dict here is the credential/setup error from _get_client.
        return client_or_error
    return client_or_error.get_card(card_id=card_id, fields=fields)
@mcp.tool()
def trello_create_list(
    board_id: str,
    name: str,
    pos: str | None = None,
) -> dict:
    """
    Create a new list on a Trello board.
    Args:
        board_id: Trello board id to create the list in
        name: Name for the new list
        pos: Optional position ("top", "bottom", or numeric string)
    """
    # Validate input before touching credentials.
    if not name:
        return {"error": "List name is required"}
    client_or_error = _get_client()
    if isinstance(client_or_error, dict):
        return client_or_error
    return client_or_error.create_list(board_id=board_id, name=name, pos=pos)
@mcp.tool()
def trello_search_cards(
    query: str,
    board_id: str | None = None,
    limit: int = 10,
) -> dict:
    """
    Search for Trello cards by keyword.
    Full-text search across card names, descriptions, and comments.
    Args:
        query: Search query text
        board_id: Optional board id to restrict search scope
        limit: Max number of card results (1-1000, default 10)
    """
    if not query:
        return {"error": "Search query is required"}
    limit_problem = _validate_limit(limit)
    if limit_problem:
        return limit_problem
    client_or_error = _get_client()
    if isinstance(client_or_error, dict):
        return client_or_error
    outcome = client_or_error.search(
        query=query,
        model_types="cards",
        cards_limit=limit,
        board_id=board_id,
    )
    if isinstance(outcome, dict) and "error" in outcome:
        return outcome
    matches = outcome.get("cards", [])
    return {"cards": matches, "count": len(matches)}
@@ -231,3 +231,124 @@ def register_tools(
return data
return _extract_message(data)
@mcp.tool()
def twilio_list_phone_numbers() -> dict[str, Any]:
    """
    List phone numbers owned by the Twilio account.
    Returns:
        Dict with phone numbers list (sid, phone_number, friendly_name, capabilities)
    """
    sid, token = _get_credentials(credentials)
    if not sid or not token:
        return _auth_error()
    url = f"{_base_url(sid)}/IncomingPhoneNumbers.json"
    data = _request("get", url, sid, token, params={"PageSize": 100})
    if "error" in data:
        return data
    summaries: list[dict[str, Any]] = []
    for record in data.get("incoming_phone_numbers", []):
        capabilities = record.get("capabilities", {})
        summaries.append(
            {
                "sid": record.get("sid", ""),
                "phone_number": record.get("phone_number", ""),
                "friendly_name": record.get("friendly_name", ""),
                "sms_enabled": capabilities.get("sms", False),
                "voice_enabled": capabilities.get("voice", False),
                "mms_enabled": capabilities.get("mms", False),
                "date_created": record.get("date_created"),
            }
        )
    return {"phone_numbers": summaries, "count": len(summaries)}
@mcp.tool()
def twilio_list_calls(
    to: str = "",
    from_number: str = "",
    status: str = "",
    page_size: int = 20,
) -> dict[str, Any]:
    """
    List recent calls from your Twilio account.
    Args:
        to: Filter by recipient number (optional)
        from_number: Filter by caller number (optional)
        status: Filter by status: queued, ringing, in-progress, completed,
            busy, failed, no-answer, canceled (optional)
        page_size: Number of results (1-1000, default 20)
    Returns:
        Dict with calls list (sid, to, from, status, duration, price)
    """
    sid, token = _get_credentials(credentials)
    if not sid or not token:
        return _auth_error()
    url = f"{_base_url(sid)}/Calls.json"
    params: dict[str, Any] = {"PageSize": max(1, min(page_size, 1000))}
    # Attach only the filters the caller actually provided.
    optional_filters = {"To": to, "From": from_number, "Status": status}
    params.update({key: value for key, value in optional_filters.items() if value})
    data = _request("get", url, sid, token, params=params)
    if "error" in data:
        return data
    calls = [
        {
            "sid": record.get("sid", ""),
            "to": record.get("to", ""),
            "from": record.get("from", ""),
            "status": record.get("status", ""),
            "direction": record.get("direction", ""),
            "duration": record.get("duration"),
            "price": record.get("price"),
            "start_time": record.get("start_time"),
            "end_time": record.get("end_time"),
        }
        for record in data.get("calls", [])
    ]
    return {"calls": calls, "count": len(calls)}
@mcp.tool()
def twilio_delete_message(message_sid: str) -> dict[str, Any]:
    """
    Delete a message from Twilio.
    Args:
        message_sid: Message SID e.g. "SMxxxxxxxx" (required)
    Returns:
        Dict with success status or error
    """
    sid, token = _get_credentials(credentials)
    if not sid or not token:
        return _auth_error()
    if not message_sid:
        return {"error": "message_sid is required"}
    url = f"{_base_url(sid)}/Messages/{message_sid}.json"
    headers: dict[str, str] = {"Authorization": _auth_header(sid, token)}
    try:
        resp = httpx.delete(url, headers=headers, timeout=30.0)
    except httpx.TimeoutException:
        return {"error": "Request to Twilio timed out"}
    except Exception as e:
        return {"error": f"Twilio request failed: {e!s}"}
    # Twilio answers 204 No Content on a successful delete.
    if resp.status_code == 204:
        return {"sid": message_sid, "status": "deleted"}
    if resp.status_code == 401:
        return {"error": "Unauthorized. Check your Twilio credentials."}
    if resp.status_code == 404:
        return {"error": "Message not found."}
    return {"error": f"Twilio API error {resp.status_code}: {resp.text[:500]}"}
@@ -219,3 +219,139 @@ def register_tools(mcp: FastMCP, credentials: Any = None) -> None:
tweet["author_name"] = users[0].get("name")
tweet["author_username"] = users[0].get("username")
return tweet
@mcp.tool()
def twitter_get_user_followers(
    user_id: str,
    max_results: int = 25,
) -> dict:
    """Get followers of a Twitter/X user.
    Args:
        user_id: Twitter user ID (numeric string). Get from twitter_get_user.
        max_results: Number of results (1-100, default 25).
    """
    headers = _get_headers()
    if headers is None:
        return {
            "error": "X_BEARER_TOKEN is required",
            "help": "Set X_BEARER_TOKEN environment variable",
        }
    if not user_id:
        return {"error": "user_id is required"}
    # Keep the page size within the API's 1-100 window.
    clamped = max(1, min(max_results, 100))
    params: dict[str, Any] = {
        "max_results": clamped,
        "user.fields": USER_FIELDS,
    }
    data = _get(f"/users/{user_id}/followers", headers, params)
    if "error" in data:
        return data
    followers = []
    for profile in data.get("data", []):
        stats = profile.get("public_metrics", {})
        followers.append(
            {
                "id": profile.get("id"),
                "name": profile.get("name"),
                "username": profile.get("username"),
                "description": (profile.get("description") or "")[:200],
                "followers_count": stats.get("followers_count", 0),
                "following_count": stats.get("following_count", 0),
                "verified": profile.get("verified"),
            }
        )
    return {"count": len(followers), "followers": followers}
@mcp.tool()
def twitter_get_tweet_replies(
    tweet_id: str,
    max_results: int = 10,
) -> dict:
    """Get replies to a specific tweet using search.
    Args:
        tweet_id: Tweet ID to get replies for (numeric string).
        max_results: Number of results (10-100, default 10).
    """
    headers = _get_headers()
    if headers is None:
        return {
            "error": "X_BEARER_TOKEN is required",
            "help": "Set X_BEARER_TOKEN environment variable",
        }
    if not tweet_id:
        return {"error": "tweet_id is required"}
    params: dict[str, Any] = {
        # Recent-search trick: replies share the parent's conversation_id.
        "query": f"conversation_id:{tweet_id} is:reply",
        "max_results": max(10, min(max_results, 100)),
        "tweet.fields": TWEET_FIELDS,
        "expansions": "author_id",
        "user.fields": "name,username",
    }
    data = _get("/tweets/search/recent", headers, params)
    if "error" in data:
        return data
    users_map = {
        profile["id"]: {"name": profile.get("name"), "username": profile.get("username")}
        for profile in data.get("includes", {}).get("users", [])
    }
    replies = []
    for raw in data.get("data", []):
        entry = _extract_tweet(raw)
        who = users_map.get(raw.get("author_id"), {})
        entry["author_name"] = who.get("name")
        entry["author_username"] = who.get("username")
        replies.append(entry)
    return {"tweet_id": tweet_id, "count": len(replies), "replies": replies}
@mcp.tool()
def twitter_get_list_tweets(
    list_id: str,
    max_results: int = 10,
) -> dict:
    """Get recent tweets from a Twitter/X list.
    Args:
        list_id: Twitter list ID (numeric string).
        max_results: Number of results (1-100, default 10).
    """
    headers = _get_headers()
    if headers is None:
        return {
            "error": "X_BEARER_TOKEN is required",
            "help": "Set X_BEARER_TOKEN environment variable",
        }
    if not list_id:
        return {"error": "list_id is required"}
    params: dict[str, Any] = {
        "max_results": max(1, min(max_results, 100)),
        "tweet.fields": TWEET_FIELDS,
        "expansions": "author_id",
        "user.fields": "name,username",
    }
    data = _get(f"/lists/{list_id}/tweets", headers, params)
    if "error" in data:
        return data
    users_map = {
        profile["id"]: {"name": profile.get("name"), "username": profile.get("username")}
        for profile in data.get("includes", {}).get("users", [])
    }
    tweets = []
    for raw in data.get("data", []):
        entry = _extract_tweet(raw)
        who = users_map.get(raw.get("author_id"), {})
        entry["author_name"] = who.get("name")
        entry["author_username"] = who.get("username")
        tweets.append(entry)
    return {"list_id": list_id, "count": len(tweets), "tweets": tweets}
@@ -295,3 +295,126 @@ def register_tools(
}
)
return {"results": results, "count": data.get("count", len(results))}
@mcp.tool()
def zendesk_get_ticket_comments(
    ticket_id: int,
    page_size: int = 25,
) -> dict[str, Any]:
    """
    List comments on a Zendesk ticket (conversation history).
    Args:
        ticket_id: Zendesk ticket ID (required)
        page_size: Number of comments per page (1-100, default 25)
    Returns:
        Dict with comments list (id, body, author_id, public, created_at)
    """
    subdomain, email, token = _get_credentials(credentials)
    if not subdomain or not email or not token:
        return _auth_error()
    if not ticket_id:
        return {"error": "ticket_id is required"}
    url = f"{_base_url(subdomain)}/tickets/{ticket_id}/comments"
    params = {"page[size]": max(1, min(page_size, 100))}
    data = _request("get", url, email, token, params=params)
    if "error" in data:
        return data
    comments = [
        {
            "id": entry.get("id"),
            # Bodies are truncated to keep responses compact.
            "body": (entry.get("body") or "")[:500],
            "author_id": entry.get("author_id"),
            "public": entry.get("public", True),
            "created_at": entry.get("created_at", ""),
        }
        for entry in data.get("comments", [])
    ]
    return {"ticket_id": ticket_id, "comments": comments, "count": len(comments)}
@mcp.tool()
def zendesk_add_ticket_comment(
    ticket_id: int,
    body: str,
    public: bool = True,
) -> dict[str, Any]:
    """
    Add a comment to an existing Zendesk ticket.
    Args:
        ticket_id: Zendesk ticket ID (required)
        body: Comment text (required)
        public: Whether the comment is visible to the requester (default True).
            Set to False for an internal note.
    Returns:
        Dict with updated ticket info and confirmation
    """
    subdomain, email, token = _get_credentials(credentials)
    if not subdomain or not email or not token:
        return _auth_error()
    if not ticket_id or not body:
        return {"error": "ticket_id and body are required"}
    # Zendesk models "add a comment" as a ticket update carrying a comment.
    payload: dict[str, Any] = {"ticket": {"comment": {"body": body, "public": public}}}
    url = f"{_base_url(subdomain)}/tickets/{ticket_id}"
    data = _request("put", url, email, token, json=payload)
    if "error" in data:
        return data
    updated = data.get("ticket", {})
    return {
        "id": updated.get("id"),
        "subject": updated.get("subject", ""),
        "status": updated.get("status", ""),
        "result": "comment_added",
    }
@mcp.tool()
def zendesk_list_users(
    role: str = "",
    page_size: int = 25,
) -> dict[str, Any]:
    """
    List users in Zendesk.
    Args:
        role: Filter by role: end-user, agent, admin (optional)
        page_size: Number of users per page (1-100, default 25)
    Returns:
        Dict with users list (id, name, email, role, active)
    """
    subdomain, email, token = _get_credentials(credentials)
    if not subdomain or not email or not token:
        return _auth_error()
    url = f"{_base_url(subdomain)}/users"
    params: dict[str, Any] = {"page[size]": max(1, min(page_size, 100))}
    if role:
        params["role"] = role
    data = _request("get", url, email, token, params=params)
    if "error" in data:
        return data
    users = [
        {
            "id": entry.get("id"),
            "name": entry.get("name", ""),
            "email": entry.get("email", ""),
            "role": entry.get("role", ""),
            "active": entry.get("active", False),
            "created_at": entry.get("created_at", ""),
        }
        for entry in data.get("users", [])
    ]
    return {"users": users, "count": len(users)}
@@ -432,3 +432,201 @@ def register_tools(
return {"error": "Request timed out"}
except httpx.RequestError as e:
return {"error": f"Network error: {e}"}
@mcp.tool()
def zoom_update_meeting(
    meeting_id: str,
    topic: str = "",
    start_time: str = "",
    duration: int = 0,
    timezone: str = "",
    agenda: str = "",
) -> dict:
    """
    Update an existing Zoom meeting.
    Args:
        meeting_id: The Zoom meeting ID (required).
        topic: New meeting topic/title (optional).
        start_time: New start time in ISO 8601 format (optional).
        duration: New duration in minutes (optional, 0 to skip).
        timezone: New timezone e.g. "America/New_York" (optional).
        agenda: New meeting description/agenda (optional).
    Returns:
        Dict with success status or error.
    """
    token = _get_token(credentials)
    if isinstance(token, dict):
        # _get_token yields an error dict when authentication fails.
        return token
    if not meeting_id:
        return {"error": "meeting_id is required"}
    # Collect only the fields the caller actually wants changed.
    string_fields = {
        "topic": topic,
        "start_time": start_time,
        "timezone": timezone,
        "agenda": agenda,
    }
    body: dict[str, Any] = {key: value for key, value in string_fields.items() if value}
    if duration > 0:
        body["duration"] = duration
    if not body:
        return {"error": "At least one field to update is required"}
    try:
        resp = httpx.patch(
            f"{ZOOM_API_BASE}/meetings/{meeting_id}",
            headers=_headers(token),
            json=body,
            timeout=30.0,
        )
        # Zoom returns 204 on successful update
        return _handle_response(resp)
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def zoom_list_meeting_participants(
    meeting_id: str,
    page_size: int = 30,
    next_page_token: str = "",
) -> dict:
    """
    List participants from a past Zoom meeting.
    Args:
        meeting_id: The Zoom meeting ID or UUID (required).
            For past meetings, use the UUID (double-encode if starts with /).
        page_size: Number of results per page (1-300, default 30).
        next_page_token: Pagination token from a previous response.
    Returns:
        Dict with participants list and pagination info.
    """
    token = _get_token(credentials)
    if isinstance(token, dict):
        # _get_token yields an error dict when authentication fails.
        return token
    if not meeting_id:
        return {"error": "meeting_id is required"}
    try:
        # Clamp to Zoom's accepted 1-300 range; previously only the upper
        # bound was enforced, letting 0/negative values reach the API.
        params: dict[str, Any] = {"page_size": max(1, min(page_size, 300))}
        if next_page_token:
            params["next_page_token"] = next_page_token
        resp = httpx.get(
            f"{ZOOM_API_BASE}/past_meetings/{meeting_id}/participants",
            headers=_headers(token),
            params=params,
            timeout=30.0,
        )
        result = _handle_response(resp)
        if "error" in result:
            return result
        participants = [
            {
                "id": p.get("id"),
                "name": p.get("name"),
                "user_email": p.get("user_email"),
                "join_time": p.get("join_time"),
                "leave_time": p.get("leave_time"),
                "duration": p.get("duration"),
            }
            for p in result.get("participants", [])
        ]
        output: dict[str, Any] = {
            "total_records": result.get("total_records", 0),
            "count": len(participants),
            "participants": participants,
        }
        npt = result.get("next_page_token", "")
        if npt:
            output["next_page_token"] = npt
        return output
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}
@mcp.tool()
def zoom_list_meeting_registrants(
    meeting_id: str,
    status: str = "approved",
    page_size: int = 30,
    next_page_token: str = "",
) -> dict:
    """
    List registrants for a Zoom meeting (requires registration-enabled meeting).
    Args:
        meeting_id: The Zoom meeting ID (required).
        status: Filter by status: "pending", "approved", or "denied" (default "approved").
        page_size: Number of results per page (1-300, default 30).
        next_page_token: Pagination token from a previous response.
    Returns:
        Dict with registrants list and pagination info.
    """
    token = _get_token(credentials)
    if isinstance(token, dict):
        # _get_token yields an error dict when authentication fails.
        return token
    if not meeting_id:
        return {"error": "meeting_id is required"}
    try:
        params: dict[str, Any] = {
            "status": status,
            # Clamp to Zoom's accepted 1-300 range; previously only the upper
            # bound was enforced, letting 0/negative values reach the API.
            "page_size": max(1, min(page_size, 300)),
        }
        if next_page_token:
            params["next_page_token"] = next_page_token
        resp = httpx.get(
            f"{ZOOM_API_BASE}/meetings/{meeting_id}/registrants",
            headers=_headers(token),
            params=params,
            timeout=30.0,
        )
        result = _handle_response(resp)
        if "error" in result:
            return result
        registrants = [
            {
                "id": r.get("id"),
                "email": r.get("email"),
                "first_name": r.get("first_name"),
                "last_name": r.get("last_name"),
                "status": r.get("status"),
                "create_time": r.get("create_time"),
                "join_url": r.get("join_url"),
            }
            for r in result.get("registrants", [])
        ]
        output: dict[str, Any] = {
            "total_records": result.get("total_records", 0),
            "count": len(registrants),
            "registrants": registrants,
        }
        npt = result.get("next_page_token", "")
        if npt:
            output["next_page_token"] = npt
        return output
    except httpx.TimeoutException:
        return {"error": "Request timed out"}
    except httpx.RequestError as e:
        return {"error": f"Network error: {e}"}