Client Overview
ShotGrid MCP Server can be accessed by any client that implements the Model Context Protocol (MCP). This page provides an overview of the available client options and how to connect to your server.
Available Clients
Official MCP Clients
The official MCP Python SDK includes a client that can connect to any MCP server, including ShotGrid MCP Server:
```python
from mcp.client import Client

async def main():
    # Connect to your ShotGrid MCP Server
    client = Client("http://localhost:8000")

    # List available tools
    tools = await client.list_tools()
    print(f"Available tools: {[tool.name for tool in tools]}")

    # Call a tool
    result = await client.call_tool("find_projects", {"status": "Active"})
    print(f"Active projects: {result}")

if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
```
LLM Integrations
Many LLM platforms and frameworks support MCP natively or through plugins:
- Anthropic Claude: Supports MCP natively through the API
- OpenAI: Supports function calling which can be bridged to MCP
- LangChain: Provides MCP tool integration
- LlamaIndex: Supports MCP through its tool framework
Custom Clients
You can also write a custom client that implements the MCP protocol. MCP messages are JSON-RPC 2.0 requests and responses exchanged over a transport such as stdio or HTTP, so the protocol is relatively simple to implement.
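For illustration, the sketch below shows the JSON-RPC request bodies a custom client would send for the two most common operations: listing tools and calling a tool. The endpoint path (`/mcp`), the use of `httpx`, and the omission of the initialize handshake are simplifying assumptions for this sketch, not part of any official client.

```python
# Sketch of the JSON-RPC messages a custom client exchanges with an MCP server.
# Assumptions: the server exposes an HTTP transport at /mcp; the initialize
# handshake and session handling are omitted for brevity.
import httpx

list_tools_request = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/list",
}

call_tool_request = {
    "jsonrpc": "2.0",
    "id": 2,
    "method": "tools/call",
    "params": {
        "name": "find_projects",
        "arguments": {"status": "Active"},
    },
}

with httpx.Client(base_url="http://localhost:8000") as http:
    # A production client would first send an initialize request and reuse
    # the resulting session for subsequent calls.
    print(http.post("/mcp", json=list_tools_request).json())
    print(http.post("/mcp", json=call_tool_request).json())
```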
Connecting to ShotGrid MCP Server
Basic Connection
To connect to a ShotGrid MCP Server, you need the server’s URL:
```python
from mcp.client import Client

# Connect to a local server
client = Client("http://localhost:8000")

# Connect to a remote server
client = Client("https://shotgrid-mcp.example.com")
```
Authentication
If your server requires authentication, you can provide credentials:
```python
from mcp.client import Client

# Connect with basic authentication
client = Client(
    "https://shotgrid-mcp.example.com",
    auth=("username", "password")
)

# Connect with token authentication
client = Client(
    "https://shotgrid-mcp.example.com",
    headers={"Authorization": "Bearer your-token"}
)
```
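In practice, avoid hardcoding credentials in scripts. Below is a minimal sketch that reads a token from an environment variable before passing it to the client; the variable name `SHOTGRID_MCP_TOKEN` is only an example.

```python
import os

from mcp.client import Client

# Read the token from the environment instead of hardcoding it.
# SHOTGRID_MCP_TOKEN is an example name; use whatever your deployment defines.
token = os.environ["SHOTGRID_MCP_TOKEN"]

client = Client(
    "https://shotgrid-mcp.example.com",
    headers={"Authorization": f"Bearer {token}"}
)
```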
Connection Options
The MCP client supports various connection options:
```python
from mcp.client import Client

# Connect with timeout settings
client = Client(
    "https://shotgrid-mcp.example.com",
    timeout=30.0  # 30 second timeout
)

# Connect with custom headers
client = Client(
    "https://shotgrid-mcp.example.com",
    headers={
        "User-Agent": "ShotGrid MCP Client/1.0",
        "X-Custom-Header": "Value"
    }
)
```
Using the Client
Discovering Tools
To discover what tools are available on the server:
```python
from mcp.client import Client

async def list_tools():
    client = Client("http://localhost:8000")
    tools = await client.list_tools()

    print(f"Found {len(tools)} tools:")
    for tool in tools:
        print(f"- {tool.name}: {tool.description}")
        print(f"  Parameters: {tool.parameters}")

if __name__ == "__main__":
    import asyncio
    asyncio.run(list_tools())
```
Calling Tools
To call a tool on the server:
```python
from mcp.client import Client

async def find_shots():
    client = Client("http://localhost:8000")

    # Call the find_shots tool with parameters
    result = await client.call_tool(
        "find_shots",
        {
            "project_id": 123,
            "status": "ip"
        }
    )

    print(f"Found {len(result)} shots:")
    for shot in result:
        print(f"- {shot['code']}: {shot['sg_status_list']}")

if __name__ == "__main__":
    import asyncio
    asyncio.run(find_shots())
```
Reading Resources
To read a resource from the server:
```python
from mcp.client import Client

async def read_thumbnail():
    client = Client("http://localhost:8000")

    # Get a thumbnail URL
    thumbnail_info = await client.call_tool(
        "get_thumbnail_url",
        {
            "entity_type": "Asset",
            "entity_id": 456
        }
    )

    # Read the thumbnail resource
    resource = await client.read_resource(thumbnail_info["url"])

    # Save the thumbnail to a file
    with open("thumbnail.jpg", "wb") as f:
        f.write(resource[0].content)
    print("Saved thumbnail to thumbnail.jpg")

if __name__ == "__main__":
    import asyncio
    asyncio.run(read_thumbnail())
```
Error Handling
Handle errors from the server:
```python
from mcp.client import Client
from mcp.errors import MCPError, ToolExecutionError

async def handle_errors():
    client = Client("http://localhost:8000")

    try:
        # Call a tool with invalid parameters
        result = await client.call_tool(
            "update_shot",
            {
                "shot_id": 999999,  # Non-existent shot
                "status": "invalid_status"
            }
        )
    except ToolExecutionError as e:
        print(f"Tool execution failed: {e.message}")
        print(f"Error details: {e.details}")
    except MCPError as e:
        print(f"MCP protocol error: {e}")
    except Exception as e:
        print(f"Unexpected error: {e}")

if __name__ == "__main__":
    import asyncio
    asyncio.run(handle_errors())
```
Integrating with LLMs
Anthropic Claude
The example below converts the server's tools to the Messages API tool-use format, lets Claude request tool calls, executes them against the MCP server, and returns the results to Claude for a final answer:
```python
import anthropic
from mcp.client import Client

async def claude_with_mcp():
    # Connect to ShotGrid MCP Server
    mcp_client = Client("http://localhost:8000")

    # List available tools and convert them to Anthropic's tool format
    mcp_tools = await mcp_client.list_tools()
    anthropic_tools = [
        {
            "name": tool.name,
            "description": tool.description,
            "input_schema": tool.parameters
        }
        for tool in mcp_tools
    ]

    # Create Claude client
    claude = anthropic.Anthropic(api_key="your-api-key")

    user_message = {
        "role": "user",
        "content": "Find all in-progress shots in the Awesome Film project."
    }

    # Ask Claude, making the ShotGrid tools available
    message = claude.messages.create(
        model="claude-3-opus-20240229",
        max_tokens=1000,
        system="You are a helpful assistant with access to ShotGrid data.",
        messages=[user_message],
        tools=anthropic_tools
    )

    # Execute any tool calls Claude requested
    tool_results = []
    for block in message.content:
        if block.type == "tool_use":
            result = await mcp_client.call_tool(block.name, block.input)
            tool_results.append({
                "type": "tool_result",
                "tool_use_id": block.id,
                "content": str(result)
            })

    # Send the tool results back to Claude for a final answer
    if tool_results:
        message = claude.messages.create(
            model="claude-3-opus-20240229",
            max_tokens=1000,
            system="You are a helpful assistant with access to ShotGrid data.",
            messages=[
                user_message,
                {"role": "assistant", "content": message.content},
                {"role": "user", "content": tool_results}
            ],
            tools=anthropic_tools
        )

    print(message.content)

if __name__ == "__main__":
    import asyncio
    asyncio.run(claude_with_mcp())
```
OpenAI
The example below converts the server's tools to OpenAI's function-calling format, executes the tool calls the model requests, and sends the results back for a final answer:
```python
import json

from openai import OpenAI
from mcp.client import Client

async def openai_with_mcp():
    # Connect to ShotGrid MCP Server
    mcp_client = Client("http://localhost:8000")

    # List available tools and convert them to OpenAI's function-calling format
    mcp_tools = await mcp_client.list_tools()
    openai_tools = [
        {
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                "parameters": tool.parameters
            }
        }
        for tool in mcp_tools
    ]

    # Create OpenAI client
    openai_client = OpenAI(api_key="your-api-key")

    messages = [
        {"role": "system", "content": "You are a helpful assistant with access to ShotGrid data."},
        {"role": "user", "content": "Find all in-progress shots in the Awesome Film project."}
    ]

    # Ask the model, making the ShotGrid tools available
    response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=messages,
        tools=openai_tools
    )

    # Execute any tool calls the model requested
    message = response.choices[0].message
    if message.tool_calls:
        messages.append(message)
        for tool_call in message.tool_calls:
            tool_name = tool_call.function.name
            tool_params = json.loads(tool_call.function.arguments)

            # Call the MCP tool and attach its result as a tool message
            tool_result = await mcp_client.call_tool(tool_name, tool_params)
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": json.dumps(tool_result, default=str)
            })

        # Ask the model for a final answer that uses the tool results
        response = openai_client.chat.completions.create(
            model="gpt-4",
            messages=messages,
            tools=openai_tools
        )

    print(response.choices[0].message.content)

if __name__ == "__main__":
    import asyncio
    asyncio.run(openai_with_mcp())
```
Next Steps
Now that you understand how to connect to ShotGrid MCP Server, you can: