|
| 1 | +""" |
| 2 | +Pydantic MCP Example using Model Factory Pattern |
| 3 | +
|
| 4 | +This example demonstrates how to use the model factory to create agents |
| 5 | +with different providers while maintaining the same MCP server setup. |
| 6 | +
|
| 7 | +Supported model formats: |
| 8 | +- Standard: "gemini-2.5-pro", "gpt-4", "claude-3-opus" |
| 9 | +- Provider-prefixed: "openai:gpt-4", "deepseek:deepseek-chat", "bedrock:us.amazon.nova-pro-v1:0" |
| 10 | +- Local models: "ollama:llama3.2", "ollama:qwen2.5-coder:7b" |
| 11 | +""" |
| 12 | + |
| 13 | +import asyncio |
| 14 | +import argparse |
| 15 | +from typing import Optional |
| 16 | + |
| 17 | +import logfire |
| 18 | +from dotenv import load_dotenv |
| 19 | +from pydantic_ai.mcp import MCPServerStdio |
| 20 | + |
| 21 | +from agents_mcp_usage.utils import get_mcp_server_path |
| 22 | +from agents_mcp_usage.factory.model_factory import create_agent |
| 23 | + |
# Load environment variables (API keys, LOGFIRE_TOKEN, ...) from a .env file.
load_dotenv()

# Configure logging to logfire if LOGFIRE_TOKEN is set in environment;
# "if-token-present" makes this a no-op when no token is configured.
logfire.configure(send_to_logfire="if-token-present", service_name="pydantic-basic-mcp-factory")
# Instrument MCP protocol traffic and pydantic-ai agent runs so they appear as spans.
logfire.instrument_mcp()
logfire.instrument_pydantic_ai()
| 30 | + |
# Preset model aliases demonstrating different provider formats.
# Keys are short CLI-friendly names; values are the full model strings
# understood by the model factory (see create_agent).
MODEL_ALIASES: dict[str, str] = {
    # Standard providers (auto-detected from the model name alone)
    "gemini": "gemini-2.5-pro-preview-06-05",
    "gemini-flash": "gemini-2.5-flash",
    "gpt4": "gpt-4",
    "claude": "claude-3-opus-20240229",

    # Explicit "provider:model" prefix form
    "openai-o4": "openai:o4-mini",
    "deepseek": "deepseek:deepseek-chat",
    "deepseek-reasoner": "deepseek:deepseek-reasoner",

    # AWS Bedrock models (note: Bedrock IDs themselves contain colons)
    "bedrock-nova": "bedrock:us.amazon.nova-pro-v1:0",
    "bedrock-claude": "bedrock:us.anthropic.claude-3-5-sonnet-20240620-v1:0",

    # GitHub Models (free tier)
    "github-grok": "github:xai/grok-3-mini",

    # Local models served via Ollama
    "ollama-llama": "ollama:llama3.2",
    "ollama-qwen": "ollama:qwen2.5-coder:7b",

    # Other providers
    "perplexity": "perplexity:sonar-pro",
    "grok": "grok:grok-2-1212",
}
| 59 | + |
| 60 | + |
def get_mcp_server() -> MCPServerStdio:
    """Build the stdio-transport MCP server for this example.

    Returns:
        An MCPServerStdio that launches ``example_server.py`` through
        ``uv run`` in stdio mode.
    """
    server_script = str(get_mcp_server_path("example_server.py"))
    return MCPServerStdio(command="uv", args=["run", server_script, "stdio"])
| 71 | + |
| 72 | + |
async def main(
    model: str = "gemini-2.5-pro-preview-06-05",
    query: str = "Greet Andrew and give him the current time",
    provider_kwargs: Optional[dict] = None
) -> None:
    """Run the Pydantic agent for one query using the model factory.

    The factory resolves ``model`` to a provider-specific agent while the
    MCP server setup stays identical regardless of provider.

    Args:
        model: Model identifier (e.g., "gemini-2.5-pro", "deepseek:deepseek-chat").
        query: The query to run the agent with.
        provider_kwargs: Optional provider-specific configuration (e.g., for Azure).
    """
    mcp_server = get_mcp_server()

    # Factory hides provider selection behind a single model string.
    print(f"Creating agent with model: {model}")
    agent = create_agent(
        model=model,
        mcp_servers=[mcp_server],
        provider_kwargs=provider_kwargs,
    )

    try:
        # The MCP server subprocess lives only for the duration of this context.
        async with agent.run_mcp_servers():
            result = await agent.run(query)
            print(f"\nResult from {model}:")
            print(result.output)

            # Report token consumption when the provider returns usage data.
            usage = result.usage()
            if usage:
                print(
                    f"\nToken usage: {usage.total_tokens} total "
                    f"({usage.request_tokens} in, {usage.response_tokens} out)"
                )
    except Exception as exc:
        # Broad catch is deliberate: this is a demo entry point, so we
        # surface the error plus setup hints instead of a traceback.
        print(f"Error running agent: {exc}")
        print("\nMake sure you have the required API keys set:")
        for hint in (
            "- GEMINI_API_KEY for Gemini models",
            "- OPENAI_API_KEY for OpenAI models",
            "- DEEPSEEK_API_KEY for DeepSeek models",
            "- AWS credentials for Bedrock models",
            "- Other provider-specific keys as needed",
        ):
            print(hint)
| 120 | + |
| 121 | + |
def parse_arguments():
    """Parse command-line options for the factory example.

    Returns:
        An argparse.Namespace with ``model``, ``query``, ``list_models``,
        ``azure_endpoint``, ``azure_api_version`` and ``ollama_base_url``.
    """
    arg_parser = argparse.ArgumentParser(
        description="Run Pydantic MCP agent with different model providers",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Use default Gemini model
  python pydantic_mcp_factory.py

  # Use a specific model by alias
  python pydantic_mcp_factory.py --model deepseek

  # Use a model by full name
  python pydantic_mcp_factory.py --model "openai:gpt-4"

  # Use a custom query
  python pydantic_mcp_factory.py --query "What's the weather like?"

  # List available model aliases
  python pydantic_mcp_factory.py --list-models
    """
    )

    # (flags, options) pairs, registered in help-display order.
    argument_specs = [
        (("--model", "-m"),
         dict(default="gemini",
              help="Model to use (alias or full model string)")),
        (("--query", "-q"),
         dict(default="Greet Andrew and give him the current time",
              help="Query to send to the agent")),
        (("--list-models", "-l"),
         dict(action="store_true",
              help="List available model aliases and exit")),
        # Provider-specific options
        (("--azure-endpoint",),
         dict(help="Azure endpoint for Azure AI Foundry models")),
        (("--azure-api-version",),
         dict(help="Azure API version for Azure AI Foundry models")),
        (("--ollama-base-url",),
         dict(default="http://localhost:11434/v1",
              help="Base URL for Ollama server")),
    ]
    for flags, options in argument_specs:
        arg_parser.add_argument(*flags, **options)

    return arg_parser.parse_args()
| 182 | + |
| 183 | + |
if __name__ == "__main__":
    args = parse_arguments()

    # --list-models: print the alias table and quit without running anything.
    if args.list_models:
        print("Available model aliases:")
        print("-" * 50)
        for alias, model_string in MODEL_ALIASES.items():
            print(f"{alias:<20} -> {model_string}")
        print("\nYou can also use any model string directly, e.g.:")
        print("  openrouter:google/gemini-2.5-pro-preview")
        print("  fireworks:accounts/fireworks/models/qwq-32b")
        print("  together:meta-llama/Llama-3.3-70B-Instruct-Turbo-Free")
        exit(0)

    # Unknown aliases fall through unchanged and are treated as full model strings.
    resolved_model = MODEL_ALIASES.get(args.model, args.model)

    # Collect provider-specific configuration from the relevant CLI flags.
    extra_kwargs = {}
    if args.azure_endpoint:
        extra_kwargs["azure_endpoint"] = args.azure_endpoint
    if args.azure_api_version:
        extra_kwargs["api_version"] = args.azure_api_version
    if args.ollama_base_url and resolved_model.startswith("ollama:"):
        extra_kwargs["base_url"] = args.ollama_base_url

    asyncio.run(main(
        model=resolved_model,
        query=args.query,
        provider_kwargs=extra_kwargs or None,
    ))
0 commit comments