Skip to content
126 changes: 0 additions & 126 deletions scripts/mcp-list.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,119 +96,6 @@ def generate_json_output(tools: List[Dict[str, Any]], output_file: str):
with open(output_file, 'w', encoding='utf-8') as f:
json.dump(output, f, indent=2, ensure_ascii=False)

def identify_overlapping_tools(_tools: List[Dict[str, Any]]) -> Dict[str, List[str]]:
    """Identify tools with overlapping functionality that should be consolidated.

    Args:
        _tools: Tool metadata records (currently unused; the overlap map is
            hand-curated rather than derived from the metadata).

    Returns:
        Mapping of overlap-category name to the list of tool names involved.
    """
    # Hand-curated overlap map: category -> tools covering the same ground.
    return {
        # Package creation tools - NOW CONSOLIDATED
        "Package Creation": [
            "create_package",  # unified_package module - primary interface
            "package_create_from_s3",  # s3_package module - specialized S3 bulk processing
        ],
        # Catalog/URL generation tools - REDUNDANT (both live in the auth module)
        "Catalog URLs": [
            "catalog_url",
            "catalog_uri",
        ],
        # Metadata template tools - PARTIAL OVERLAP
        "Metadata Templates": [
            "get_metadata_template",  # metadata_templates module
            "create_metadata_from_template",  # metadata_examples module
        ],
        # Search tools - CONSOLIDATION NEEDED
        "Search Functions": [
            "packages_search",  # packages module - package-specific
            "bucket_objects_search",  # buckets module - S3-specific
            "unified_search",  # search module - unified interface
        ],
        # Tabulator admin overlap - DUPLICATE FUNCTIONALITY
        "Tabulator Admin": [
            "tabulator_open_query_status",  # tabulator module
            "tabulator_open_query_toggle",  # tabulator module
            "admin_tabulator_open_query_get",  # governance module
            "admin_tabulator_open_query_set",  # governance module
        ],
    }

def generate_consolidation_report(_tools: List[Dict[str, Any]], output_file: str):
    """Generate detailed consolidation recommendations.

    Args:
        _tools: Tool metadata records (unused; the plan is hand-curated).
        output_file: Path of the JSON file the report is written to.
    """
    # The whole report is a static, hand-curated plan; assemble it as one
    # literal so the structure is visible at a glance.
    consolidation_plan = {
        # Package Creation Consolidation - COMPLETED
        "package_creation": {
            "action": "COMPLETED",
            "keep": ["create_package", "package_create_from_s3"],
            "removed": ["package_create", "package_update", "package_update_metadata"],
            "rationale": "create_package is now the unified primary interface with all functionality",
            "current_api": {
                "create_package": "Primary interface - handles all package creation scenarios",
                "package_create_from_s3": "Specialized tool for S3 bulk processing with organization",
            },
        },
        # Search Consolidation
        "search": {
            "action": "BREAK_COMPATIBILITY",
            "keep": "unified_search",
            "deprecate": ["packages_search", "bucket_objects_search"],
            "rationale": "unified_search handles all search scenarios with backend selection",
            "migration": {
                "packages_search": "Replace with unified_search(scope='catalog')",
                "bucket_objects_search": "Replace with unified_search(scope='bucket', target=bucket)",
            },
        },
        # URL Generation Consolidation
        "url_generation": {
            "action": "BREAK_COMPATIBILITY",
            "keep": "catalog_url",
            "deprecate": ["catalog_uri"],
            "rationale": "catalog_url covers all URL generation needs",
            "migration": {
                "catalog_uri": "Replace with catalog_url() - URIs are legacy",
            },
        },
        # Tabulator Admin Consolidation
        "tabulator_admin": {
            "action": "BREAK_COMPATIBILITY",
            "keep": ["admin_tabulator_open_query_get", "admin_tabulator_open_query_set"],
            "deprecate": ["tabulator_open_query_status", "tabulator_open_query_toggle"],
            "rationale": "Admin tools provide proper permissions model",
            "migration": {
                "tabulator_open_query_status": "Replace with admin_tabulator_open_query_get",
                "tabulator_open_query_toggle": "Replace with admin_tabulator_open_query_set",
            },
        },
    }

    report = {
        "breaking_changes_required": True,
        "backward_compatibility": "DEPRECATED - Will break existing clients",
        "consolidation_plan": consolidation_plan,
        # Documentation Cleanup
        "documentation_cleanup": {
            "action": "REGENERATE_FROM_CODE",
            "current_issues": [
                "docs/api/TOOLS.md manually maintained - causes drift",
                "CSV file manually updated - inconsistent with code",
                "Tool descriptions in docs don't match actual docstrings",
            ],
            "solution": "Auto-generate all documentation from server introspection",
        },
    }

    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(report, f, indent=2, ensure_ascii=False)

async def main():
"""Generate all canonical tool listings."""
Expand All @@ -230,23 +117,10 @@ async def main():
print("📋 Generating JSON metadata...")
generate_json_output(tools, str(output_dir / "build" / "tools_metadata.json"))

print("⚠️ Generating consolidation report...")
generate_consolidation_report(tools, str(output_dir / "build" / "consolidation_report.json"))

# Print summary
overlaps = identify_overlapping_tools(tools)
print("\n🚨 OVERLAPPING TOOLS IDENTIFIED:")
for category, tool_list in overlaps.items():
print(f" {category}: {len(tool_list)} tools")
for tool in tool_list:
print(f" - {tool}")
print()

print("✅ Canonical tool listings generated!")
print("📂 Files created:")
print(" - tests/fixtures/mcp-list.csv")
print(" - build/tools_metadata.json")
print(" - build/consolidation_report.json")

if __name__ == "__main__":
import asyncio
Expand Down
2 changes: 0 additions & 2 deletions src/quilt_mcp/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@
package_browse,
package_contents_search,
package_diff,
packages_list,
)
from .tools.tabulator import (
tabulator_table_create,
Expand Down Expand Up @@ -124,7 +123,6 @@
"package_delete",
"package_diff",
"package_validate",
"packages_list",
"quick_start",
"tabulator_table_create",
"tabulator_table_delete",
Expand Down
57 changes: 50 additions & 7 deletions src/quilt_mcp/resources/package.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,16 +16,59 @@ def __init__(self):
super().__init__("package://tools")

async def list_items(self, **params) -> Dict[str, Any]:
    """List package tools with usage guidance.

    Returns a static catalog of package-related tools grouped by role
    (primary / specialized / utility), plus step-by-step workflow guides.

    Returns:
        Package tools data with categories and usage examples.
    """
    # NOTE(review): the pasted diff interleaved the old implementation
    # (delegating to package_tools_list) with this new static catalog;
    # this is the reconstructed post-change implementation.
    return {
        "primary_tools": {
            "package_create": {
                "description": "Main package creation tool with templates and dry-run",
                "use_when": "Creating new packages with smart defaults",
                "example": 'package_create("team/dataset", ["s3://bucket/file.csv"])',
            },
            "package_browse": {
                "description": "Browse package contents with file tree view",
                "use_when": "Exploring package structure and files",
                "example": 'package_browse("team/dataset", recursive=True)',
            },
            "package_validate": {
                "description": "Validate package integrity and accessibility",
                "use_when": "Checking package health and file accessibility",
                "example": 'package_validate("team/dataset")',
            },
        },
        "specialized_tools": {
            "package_create_from_s3": {
                "description": "Advanced S3-to-package creation with organization",
                "use_when": "Creating packages from entire S3 buckets/prefixes",
                "example": 'package_create_from_s3("bucket-name", "team/dataset")',
            },
        },
        "utility_tools": {
            "list_metadata_templates": {
                "description": "Show available metadata templates",
                "example": "list_metadata_templates()",
            },
            "catalog_search": {
                "description": "Search packages by content",
                "example": 'catalog_search("genomics")',
            },
        },
        "workflow_guide": {
            "new_package": [
                "1. package_create() - Create with template",
                "2. package_browse() - Verify contents",
                "3. package_validate() - Check integrity",
                "4. catalog_url() - Get sharing URL",
            ],
            "explore_existing": [
                "1. package_browse() - Explore structure",
                "2. package_contents_search() - Find specific files",
            ],
        },
    }

def _extract_items(self, raw_data: Dict[str, Any]) -> List[Any]:
"""Extract tools list from package tools data."""
Expand Down
28 changes: 21 additions & 7 deletions src/quilt_mcp/resources/tabulator.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,19 +16,33 @@ def __init__(self):
super().__init__("tabulator://{bucket}/tables")

async def list_items(self, bucket_name: str, **params) -> Dict[str, Any]:
    """List all tabulator tables configured for a bucket.

    Tabulator tables enable SQL querying across multiple Quilt packages,
    aggregating data based on configurable patterns and schemas.

    Args:
        bucket_name: Name of the bucket to list tables for

    Returns:
        Dict containing:
            - success: Whether the operation succeeded
            - tables: List of tabulator tables with their configurations
            - bucket_name: The bucket name that was queried
            - count: Number of tables found
        On failure, a formatted error response instead.
    """
    # NOTE(review): the pasted diff interleaved the old delegation to
    # tabulator_tables_list with this service-based version; this is the
    # reconstructed post-change implementation.
    # Import here to avoid circular imports at module load time.
    from ..tools.tabulator import get_tabulator_service
    from ..utils import format_error_response
    import logging

    logger = logging.getLogger(__name__)

    try:
        service = get_tabulator_service()
        return service.list_tables(bucket_name)
    except Exception as e:
        # Log with context, then return a structured error rather than raise,
        # so resource consumers always get a dict-shaped response.
        logger.error(f"Error in tabulator list_items: {e}")
        return format_error_response(f"Failed to list tabulator tables: {str(e)}")

def _extract_items(self, raw_data: Dict[str, Any]) -> List[Any]:
"""Extract tables list from tabulator tables data."""
Expand Down
52 changes: 46 additions & 6 deletions src/quilt_mcp/resources/workflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,11 @@
"""

from typing import Dict, Any, List
import logging
from .base import MCPResource

logger = logging.getLogger(__name__)


class WorkflowResource(MCPResource):
"""MCP resource for workflow listing."""
Expand All @@ -15,16 +18,53 @@ def __init__(self):
super().__init__("workflow://workflows")

async def list_items(self, **params) -> Dict[str, Any]:
    """List all workflows with their current status.

    Reads the in-memory workflow registry and returns a per-workflow
    summary (id, name, status, progress, timestamps) sorted by most
    recently updated first.

    Returns:
        Workflow data with summary information for all workflows, plus
        total/active/completed counts. On failure, a formatted error
        response instead.
    """
    # NOTE(review): the pasted diff interleaved the old delegation to
    # workflow_list() with this direct registry walk; this is the
    # reconstructed post-change implementation.
    # Import here to avoid circular imports at module load time.
    from ..tools.workflow_orchestration import _workflows
    from ..utils import format_error_response

    try:
        workflows_summary = []

        for workflow_id, workflow in _workflows.items():
            summary = {
                "id": workflow_id,
                "name": workflow["name"],
                "status": workflow["status"],
                "progress": {
                    "completed_steps": workflow["completed_steps"],
                    "total_steps": workflow["total_steps"],
                    # Guard against division by zero for workflows with no steps.
                    "percentage": (
                        round(
                            (workflow["completed_steps"] / workflow["total_steps"]) * 100,
                            1,
                        )
                        if workflow["total_steps"] > 0
                        else 0
                    ),
                },
                "created_at": workflow["created_at"],
                "updated_at": workflow["updated_at"],
            }
            workflows_summary.append(summary)

        # Sort by updated_at (most recent first)
        workflows_summary.sort(key=lambda x: x["updated_at"], reverse=True)

        return {
            "success": True,
            "workflows": workflows_summary,
            "total_workflows": len(workflows_summary),
            "active_workflows": sum(1 for w in workflows_summary if w["status"] in ["created", "in_progress"]),
            "completed_workflows": sum(1 for w in workflows_summary if w["status"] == "completed"),
        }

    except Exception as e:
        # Uses the module-level logger; return a structured error rather than
        # raise so resource consumers always get a dict-shaped response.
        logger.error(f"Failed to list workflows: {e}")
        return format_error_response(f"Failed to list workflows: {str(e)}")

def _extract_items(self, raw_data: Dict[str, Any]) -> List[Any]:
"""Extract workflows list from workflow data."""
Expand Down
2 changes: 1 addition & 1 deletion src/quilt_mcp/tools/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
objects = buckets.bucket_objects_list("my-bucket")

# Use package tools
pkg_list = packages.packages_list()
pkg_info = packages.package_browse("team/dataset")
"""

from . import (
Expand Down
Loading
Loading