From 97fa6781cfd3aae0106f546e1233cf9eda0f366b Mon Sep 17 00:00:00 2001
From: Fahad
Date: Mon, 16 Jun 2025 13:14:53 +0400
Subject: [PATCH] Vision support: images/PDFs can be passed to other models
 as part of analysis, additional context, etc.

Image processing pipeline added
OpenAI GPT-4.1 support
Chat tool prompt enhancements
Lint and code quality improvements
---
 CLAUDE.md                                 |   5 +
 README.md                                 |  16 +-
 conf/custom_models.json                   |  68 +++-
 config.py                                 |   2 +-
 docker-compose.yml                        |   4 +-
 docs/advanced-usage.md                    |  60 +++-
 providers/base.py                         |   2 +
 providers/gemini.py                       |  83 ++++-
 providers/openai.py                       |  21 +-
 providers/openai_compatible.py            |  92 ++++-
 providers/openrouter_registry.py          |   7 +-
 simulator_tests/__init__.py               |   3 +
 simulator_tests/test_vision_capability.py | 163 +++++++++
 tests/test_image_support_integration.py   | 388 ++++++++++++++++++++++
 tests/triangle.png                        | Bin 0 -> 52561 bytes
 tools/analyze.py                          |   8 +-
 tools/base.py                             | 160 +++++++++
 tools/chat.py                             |  58 +++-
 tools/codereview.py                       |   9 +
 tools/debug.py                            |   9 +
 tools/precommit.py                        |   9 +
 tools/thinkdeep.py                        |  17 +-
 tools/tracer.py                           |  49 ++-
 utils/conversation_memory.py              |  78 +++++
 utils/file_types.py                       |  32 +-
 utils/file_utils.py                       |  37 ++-
 26 files changed, 1328 insertions(+), 52 deletions(-)
 create mode 100644 simulator_tests/test_vision_capability.py
 create mode 100644 tests/test_image_support_integration.py
 create mode 100644 tests/triangle.png

diff --git a/CLAUDE.md b/CLAUDE.md
index f7691c2..5de275c 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -112,6 +112,11 @@ docker logs zen-mcp-redis

 ### Testing

+Simulation tests are available to exercise the MCP server in a 'live' scenario, using your configured
+API keys to verify that the models are working and that the server can communicate back and forth.
+IMPORTANT: Any time code is changed or updated, you MUST first run ./run-server.sh to restart the server,
+OR pass `--rebuild` to the script below the first time, so that it restarts with the latest code.
+ #### Run All Simulator Tests ```bash # Run the complete test suite diff --git a/README.md b/README.md index 9406847..fb1f101 100644 --- a/README.md +++ b/README.md @@ -80,6 +80,7 @@ Claude is brilliant, but sometimes you need: - **Local model support** - Run models like Llama 3.2 locally via Ollama, vLLM, or LM Studio for privacy and cost control - **Dynamic collaboration** - Models can request additional context and follow-up replies from Claude mid-analysis - **Smart file handling** - Automatically expands directories, manages token limits based on model capacity +- **Vision support** - Analyze images, diagrams, screenshots, and visual content with vision-capable models - **[Bypass MCP's token limits](docs/advanced-usage.md#working-with-large-prompts)** - Work around MCP's 25K limit automatically - **[Context revival across sessions](docs/context-revival.md)** - Continue conversations even after Claude's context resets, with other models maintaining full history @@ -314,6 +315,7 @@ and then debate with the other models to give me a final verdict - Technology comparisons and best practices - Architecture and design discussions - Can reference files for context: `"Use gemini to explain this algorithm with context from algorithm.py"` +- **Image support**: Include screenshots, diagrams, UI mockups for visual analysis: `"Chat with gemini about this error dialog screenshot to understand the user experience issue"` - **Dynamic collaboration**: Gemini can request additional files or context during the conversation if needed for a more thorough response - **Web search capability**: Analyzes when web searches would be helpful and recommends specific searches for Claude to perform, ensuring access to current documentation and best practices @@ -337,6 +339,7 @@ with the best architecture for my project - Offers alternative perspectives and approaches - Validates architectural decisions and design patterns - Can reference specific files for context: `"Use gemini to think deeper about my API design with reference to api/routes.py"` +- **Image support**: Analyze architectural diagrams, flowcharts, design mockups: `"Think deeper about this system architecture diagram with gemini pro using max thinking mode"` - **Enhanced Critical Evaluation (v2.10.0)**: After Gemini's analysis, Claude is prompted to critically evaluate the suggestions, consider context and constraints, identify risks, and synthesize a final recommendation - ensuring a balanced, well-considered solution - **Web search capability**: When enabled (default: true), identifies areas where current documentation or community solutions would strengthen the analysis and suggests specific searches for Claude @@ -362,6 +365,7 @@ I need an actionable plan but break it down into smaller quick-wins that we can - Supports specialized reviews: security, performance, quick - Can enforce coding standards: `"Use gemini to review src/ against PEP8 standards"` - Filters by severity: `"Get gemini to review auth/ - only report critical vulnerabilities"` +- **Image support**: Review code from screenshots, error dialogs, or visual bug reports: `"Review this error screenshot and the related auth.py file for potential security issues"` ### 4. 
`precommit` - Pre-Commit Validation **Comprehensive review of staged/unstaged git changes across multiple repositories** @@ -408,6 +412,7 @@ Use zen and perform a thorough precommit ensuring there aren't any new regressio - `review_type`: full|security|performance|quick - `severity_filter`: Filter by issue severity - `max_depth`: How deep to search for nested repos +- `images`: Screenshots of requirements, design mockups, or error states for validation context ### 5. `debug` - Expert Debugging Assistant **Root cause analysis for complex problems** @@ -428,6 +433,7 @@ Use zen and perform a thorough precommit ensuring there aren't any new regressio - Supports runtime info and previous attempts - Provides structured root cause analysis with validation steps - Can request additional context when needed for thorough analysis +- **Image support**: Include error screenshots, stack traces, console output: `"Debug this error using gemini with the stack trace screenshot and the failing test.py"` - **Web search capability**: When enabled (default: true), identifies when searching for error messages, known issues, or documentation would help solve the problem and recommends specific searches for Claude ### 6. `analyze` - Smart File Analysis **General-purpose code understanding and exploration** @@ -447,6 +453,7 @@ Use zen and perform a thorough precommit ensuring there aren't any new regressio - Supports specialized analysis types: architecture, performance, security, quality - Uses file paths (not content) for clean terminal output - Can identify patterns, anti-patterns, and refactoring opportunities +- **Image support**: Analyze architecture diagrams, UML charts, flowcharts: `"Analyze this system diagram with gemini to understand the data flow and identify bottlenecks"` - **Web search capability**: When enabled with `use_websearch` (default: true), the model can request Claude to perform web searches and share results back to enhance analysis with current documentation, design patterns, and best practices ### 7. `refactor` - Intelligent Code Refactoring @@ -489,6 +496,7 @@ did *not* discover. 
- **Conservative approach** - Careful dependency analysis to prevent breaking changes - **Multi-file analysis** - Understands cross-file relationships and dependencies - **Priority sequencing** - Recommends implementation order for refactoring changes +- **Image support**: Analyze code architecture diagrams, legacy system charts: `"Refactor this legacy module using gemini pro with the current architecture diagram"` **Refactor Types (Progressive Priority System):** @@ -529,7 +537,8 @@ Claude can use to efficiently trace execution flows and map dependencies within - Creates structured instructions for call-flow graph generation - Provides detailed formatting requirements for consistent output - Supports any programming language with automatic convention detection -- Output can be used as an input into another tool, such as `chat` along with related code files to perform a logical call-flow analysis +- Output can be used as an input into another tool, such as `chat` along with related code files to perform a logical call-flow analysis +- **Image support**: Analyze visual call flow diagrams, sequence diagrams: `"Generate tracer analysis for this payment flow using the sequence diagram"` #### Example Prompts: ``` @@ -564,6 +573,7 @@ suites that cover realistic failure scenarios and integration points that shorte - Prioritizes smallest test files for pattern detection - Can reference existing test files: `"Generate tests following patterns from tests/unit/"` - Specific code coverage - target specific functions/classes rather than testing everything +- **Image support**: Test UI components, analyze visual requirements: `"Generate tests for this login form using the UI mockup screenshot"` ### 10. `version` - Server Information ``` @@ -626,6 +636,7 @@ This server enables **true AI collaboration** between Claude and multiple AI mod - **Automatic 25K limit bypass**: Each exchange sends only incremental context, allowing unlimited total conversation size - Up to 10 exchanges per conversation (configurable via `MAX_CONVERSATION_TURNS`) with 3-hour expiry (configurable via `CONVERSATION_TIMEOUT_HOURS`) - Thread-safe with Redis persistence across all tools +- **Image context preservation** - Images and visual references are maintained across conversation turns and tool switches **Cross-tool & Cross-Model Continuation Example:** ``` @@ -659,7 +670,7 @@ DEFAULT_MODEL=auto # Claude picks the best model automatically # API Keys (at least one required) GEMINI_API_KEY=your-gemini-key # Enables Gemini Pro & Flash -OPENAI_API_KEY=your-openai-key # Enables O3, O3mini, O4-mini, O4-mini-high +OPENAI_API_KEY=your-openai-key # Enables O3, O3mini, O4-mini, O4-mini-high, GPT-4.1 ``` **Available Models:** @@ -669,6 +680,7 @@ OPENAI_API_KEY=your-openai-key # Enables O3, O3mini, O4-mini, O4-mini-high - **`o3mini`**: Balanced speed/quality - **`o4-mini`**: Latest reasoning model, optimized for shorter contexts - **`o4-mini-high`**: Enhanced O4 with higher reasoning effort +- **`gpt4.1`**: GPT-4.1 with 1M context window - **Custom models**: via OpenRouter or local APIs (Ollama, vLLM, etc.) For detailed configuration options, see the [Advanced Usage Guide](docs/advanced-usage.md). 
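The image context preservation called out above is implemented by `get_conversation_image_list` in `utils/conversation_memory.py` (added later in this patch). Below is a minimal sketch of the newest-first, duplicate-dropping walk that the integration tests assert; the names and types here are illustrative, not the literal implementation:

```python
# Illustrative sketch of the newest-first image collection performed by
# get_conversation_image_list in utils/conversation_memory.py. The real
# function walks ConversationTurn objects; this mirrors the behavior the
# integration tests in this patch assert.
def collect_images_newest_first(turns: list) -> list[str]:
    """Walk turns newest-to-oldest, keeping the first sighting of each image."""
    seen: set[str] = set()
    ordered: list[str] = []
    for turn in reversed(turns):  # newest turn wins on duplicates
        for image in getattr(turn, "images", None) or []:
            if image not in seen:
                seen.add(image)
                ordered.append(image)
    return ordered
```

With turns carrying `["old_diagram.png", "shared.png"]`, `["middle.png"]`, and `["shared.png", "new_diagram.png"]`, this yields `["shared.png", "new_diagram.png", "middle.png", "old_diagram.png"]`, matching the test expectations below.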
diff --git a/conf/custom_models.json b/conf/custom_models.json index 8c32dd8..fb69e0c 100644 --- a/conf/custom_models.json +++ b/conf/custom_models.json @@ -25,6 +25,8 @@ "supports_extended_thinking": "Whether the model supports extended reasoning tokens (currently none do via OpenRouter or custom APIs)", "supports_json_mode": "Whether the model can guarantee valid JSON output", "supports_function_calling": "Whether the model supports function/tool calling", + "supports_images": "Whether the model can process images/visual input", + "max_image_size_mb": "Maximum total size in MB for all images combined (capped at 40MB max for custom models)", "is_custom": "Set to true for models that should ONLY be used with custom API endpoints (Ollama, vLLM, etc.). False or omitted for OpenRouter/cloud models.", "description": "Human-readable description of the model" }, @@ -35,6 +37,8 @@ "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, + "supports_images": true, + "max_image_size_mb": 10.0, "is_custom": true, "description": "Example custom/local model for Ollama, vLLM, etc." } @@ -47,7 +51,9 @@ "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, - "description": "Claude 3 Opus - Most capable Claude model" + "supports_images": true, + "max_image_size_mb": 5.0, + "description": "Claude 3 Opus - Most capable Claude model with vision" }, { "model_name": "anthropic/claude-3-sonnet", @@ -56,7 +62,9 @@ "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, - "description": "Claude 3 Sonnet - Balanced performance" + "supports_images": true, + "max_image_size_mb": 5.0, + "description": "Claude 3 Sonnet - Balanced performance with vision" }, { "model_name": "anthropic/claude-3-haiku", @@ -65,7 +73,9 @@ "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, - "description": "Claude 3 Haiku - Fast and efficient" + "supports_images": true, + "max_image_size_mb": 5.0, + "description": "Claude 3 Haiku - Fast and efficient with vision" }, { "model_name": "google/gemini-2.5-pro-preview", @@ -74,7 +84,9 @@ "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": false, - "description": "Google's Gemini 2.5 Pro via OpenRouter" + "supports_images": true, + "max_image_size_mb": 20.0, + "description": "Google's Gemini 2.5 Pro via OpenRouter with vision" }, { "model_name": "google/gemini-2.5-flash-preview-05-20", @@ -83,7 +95,9 @@ "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": false, - "description": "Google's Gemini 2.5 Flash via OpenRouter" + "supports_images": true, + "max_image_size_mb": 15.0, + "description": "Google's Gemini 2.5 Flash via OpenRouter with vision" }, { "model_name": "mistral/mistral-large", @@ -92,7 +106,9 @@ "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, - "description": "Mistral's largest model" + "supports_images": false, + "max_image_size_mb": 0.0, + "description": "Mistral's largest model (text-only)" }, { "model_name": "meta-llama/llama-3-70b", @@ -101,7 +117,9 @@ "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, - "description": "Meta's Llama 3 70B model" + "supports_images": false, + "max_image_size_mb": 0.0, + "description": "Meta's Llama 3 70B model (text-only)" }, { "model_name": "deepseek/deepseek-r1-0528", @@ -110,7 
+128,9 @@ "supports_extended_thinking": true, "supports_json_mode": true, "supports_function_calling": false, - "description": "DeepSeek R1 with thinking mode - advanced reasoning capabilities" + "supports_images": false, + "max_image_size_mb": 0.0, + "description": "DeepSeek R1 with thinking mode - advanced reasoning capabilities (text-only)" }, { "model_name": "perplexity/llama-3-sonar-large-32k-online", @@ -119,7 +139,9 @@ "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, - "description": "Perplexity's online model with web search" + "supports_images": false, + "max_image_size_mb": 0.0, + "description": "Perplexity's online model with web search (text-only)" }, { "model_name": "openai/o3", @@ -128,7 +150,9 @@ "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, - "description": "OpenAI's o3 model - well-rounded and powerful across domains" + "supports_images": true, + "max_image_size_mb": 20.0, + "description": "OpenAI's o3 model - well-rounded and powerful across domains with vision" }, { "model_name": "openai/o3-mini", @@ -137,7 +161,9 @@ "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, - "description": "OpenAI's o3-mini model - balanced performance and speed" + "supports_images": true, + "max_image_size_mb": 20.0, + "description": "OpenAI's o3-mini model - balanced performance and speed with vision" }, { "model_name": "openai/o3-mini-high", @@ -146,7 +172,9 @@ "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, - "description": "OpenAI's o3-mini with high reasoning effort - optimized for complex problems" + "supports_images": true, + "max_image_size_mb": 20.0, + "description": "OpenAI's o3-mini with high reasoning effort - optimized for complex problems with vision" }, { "model_name": "openai/o3-pro", @@ -155,7 +183,9 @@ "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, - "description": "OpenAI's o3-pro model - professional-grade reasoning and analysis" + "supports_images": true, + "max_image_size_mb": 20.0, + "description": "OpenAI's o3-pro model - professional-grade reasoning and analysis with vision" }, { "model_name": "openai/o4-mini", @@ -164,7 +194,9 @@ "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, - "description": "OpenAI's o4-mini model - optimized for shorter contexts with rapid reasoning" + "supports_images": true, + "max_image_size_mb": 20.0, + "description": "OpenAI's o4-mini model - optimized for shorter contexts with rapid reasoning and vision" }, { "model_name": "openai/o4-mini-high", @@ -173,7 +205,9 @@ "supports_extended_thinking": false, "supports_json_mode": true, "supports_function_calling": true, - "description": "OpenAI's o4-mini with high reasoning effort - enhanced for complex tasks" + "supports_images": true, + "max_image_size_mb": 20.0, + "description": "OpenAI's o4-mini with high reasoning effort - enhanced for complex tasks with vision" }, { "model_name": "llama3.2", @@ -182,8 +216,10 @@ "supports_extended_thinking": false, "supports_json_mode": false, "supports_function_calling": false, + "supports_images": false, + "max_image_size_mb": 0.0, "is_custom": true, - "description": "Local Llama 3.2 model via custom endpoint (Ollama/vLLM) - 128K context window" + "description": "Local Llama 3.2 model via custom endpoint (Ollama/vLLM) - 128K context window 
(text-only)" } ] } diff --git a/config.py b/config.py index b958bc3..dba3893 100644 --- a/config.py +++ b/config.py @@ -14,7 +14,7 @@ import os # These values are used in server responses and for tracking releases # IMPORTANT: This is the single source of truth for version and author info # Semantic versioning: MAJOR.MINOR.PATCH -__version__ = "4.7.4" +__version__ = "4.8.0" # Last update date in ISO format __updated__ = "2025-06-16" # Primary maintainer diff --git a/docker-compose.yml b/docker-compose.yml index af9eacd..71dcce1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,13 +8,13 @@ services: - "6379:6379" volumes: - redis_data:/data - command: redis-server --save 60 1 --loglevel warning --maxmemory 64mb --maxmemory-policy allkeys-lru + command: redis-server --save 60 1 --loglevel warning --maxmemory 512mb --maxmemory-policy allkeys-lru deploy: resources: limits: memory: 1G reservations: - memory: 256M + memory: 128M zen-mcp: build: . diff --git a/docs/advanced-usage.md b/docs/advanced-usage.md index 0888d1d..7b50a6b 100644 --- a/docs/advanced-usage.md +++ b/docs/advanced-usage.md @@ -11,6 +11,7 @@ This guide covers advanced features, configuration options, and workflows for po - [Context Revival: AI Memory Beyond Context Limits](#context-revival-ai-memory-beyond-context-limits) - [Collaborative Workflows](#collaborative-workflows) - [Working with Large Prompts](#working-with-large-prompts) +- [Vision Support](#vision-support) - [Web Search Integration](#web-search-integration) - [System Prompts](#system-prompts) @@ -25,7 +26,7 @@ DEFAULT_MODEL=auto # Claude picks the best model automatically # API Keys (at least one required) GEMINI_API_KEY=your-gemini-key # Enables Gemini Pro & Flash -OPENAI_API_KEY=your-openai-key # Enables O3, O3-mini, O4-mini, O4-mini-high +OPENAI_API_KEY=your-openai-key # Enables O3, O3-mini, O4-mini, O4-mini-high, GPT-4.1 ``` **How Auto Mode Works:** @@ -43,6 +44,7 @@ OPENAI_API_KEY=your-openai-key # Enables O3, O3-mini, O4-mini, O4-mini-high | **`o3-mini`** | OpenAI | 200K tokens | Balanced speed/quality | Moderate complexity tasks | | **`o4-mini`** | OpenAI | 200K tokens | Latest reasoning model | Optimized for shorter contexts | | **`o4-mini-high`** | OpenAI | 200K tokens | Enhanced reasoning | Complex tasks requiring deeper analysis | +| **`gpt4.1`** | OpenAI | 1M tokens | Latest GPT-4 with extended context | Large codebase analysis, comprehensive reviews | | **`llama`** (Llama 3.2) | Custom/Local | 128K tokens | Local inference, privacy | On-device analysis, cost-free processing | | **Any model** | OpenRouter | Varies | Access to GPT-4, Claude, Llama, etc. | User-specified or based on task requirements | @@ -57,6 +59,7 @@ You can specify a default model instead of auto mode: DEFAULT_MODEL=gemini-2.5-pro-preview-06-05 # Always use Gemini Pro DEFAULT_MODEL=flash # Always use Flash DEFAULT_MODEL=o3 # Always use O3 +DEFAULT_MODEL=gpt4.1 # Always use GPT-4.1 ``` **Important:** After changing any configuration in `.env` (including `DEFAULT_MODEL`, API keys, or other settings), restart the server with `./run-server.sh` to apply the changes. 
@@ -67,10 +70,12 @@ Regardless of your default setting, you can specify models per request: - "Use **flash** to quickly format this code" - "Use **o3** to debug this logic error" - "Review with **o4-mini** for balanced analysis" +- "Use **gpt4.1** for comprehensive codebase analysis" **Model Capabilities:** - **Gemini Models**: Support thinking modes (minimal to max), web search, 1M context - **O3 Models**: Excellent reasoning, systematic analysis, 200K context +- **GPT-4.1**: Extended context window (1M tokens), general capabilities ## Model Usage Restrictions @@ -186,7 +191,7 @@ All tools that work with files support **both individual files and entire direct **`analyze`** - Analyze files or directories - `files`: List of file paths or directories (required) - `question`: What to analyze (required) -- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high (default: server default) +- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high|gpt4.1 (default: server default) - `analysis_type`: architecture|performance|security|quality|general - `output_format`: summary|detailed|actionable - `thinking_mode`: minimal|low|medium|high|max (default: medium, Gemini only) @@ -201,7 +206,7 @@ All tools that work with files support **both individual files and entire direct **`codereview`** - Review code files or directories - `files`: List of file paths or directories (required) -- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high (default: server default) +- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high|gpt4.1 (default: server default) - `review_type`: full|security|performance|quick - `focus_on`: Specific aspects to focus on - `standards`: Coding standards to enforce @@ -217,7 +222,7 @@ All tools that work with files support **both individual files and entire direct **`debug`** - Debug with file context - `error_description`: Description of the issue (required) -- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high (default: server default) +- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high|gpt4.1 (default: server default) - `error_context`: Stack trace or logs - `files`: Files or directories related to the issue - `runtime_info`: Environment details @@ -233,7 +238,7 @@ All tools that work with files support **both individual files and entire direct **`thinkdeep`** - Extended analysis with file context - `current_analysis`: Your current thinking (required) -- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high (default: server default) +- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high|gpt4.1 (default: server default) - `problem_context`: Additional context - `focus_areas`: Specific aspects to focus on - `files`: Files or directories for context @@ -249,7 +254,7 @@ All tools that work with files support **both individual files and entire direct **`testgen`** - Comprehensive test generation with edge case coverage - `files`: Code files or directories to generate tests for (required) - `prompt`: Description of what to test, testing objectives, and scope (required) -- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high (default: server default) +- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high|gpt4.1 (default: server default) - `test_examples`: Optional existing test files as style/pattern reference - `thinking_mode`: minimal|low|medium|high|max (default: medium, Gemini only) @@ -264,7 +269,7 @@ All tools that work with files support **both individual files and entire direct - `files`: Code files or directories to analyze for refactoring 
opportunities (required)
 - `prompt`: Description of refactoring goals, context, and specific areas of focus (required)
 - `refactor_type`: codesmells|decompose|modernize|organization (required)
-- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high (default: server default)
+- `model`: auto|pro|flash|o3|o3-mini|o4-mini|o4-mini-high|gpt4.1 (default: server default)
 - `focus_areas`: Specific areas to focus on (e.g., 'performance', 'readability', 'maintainability', 'security')
 - `style_guide_examples`: Optional existing code files to use as style/pattern reference
 - `thinking_mode`: minimal|low|medium|high|max (default: medium, Gemini only)
@@ -357,6 +362,47 @@ To help choose the right tool for your needs:
 - `refactor` vs `codereview`: refactor suggests structural improvements, codereview finds bugs/issues
 - `refactor` vs `analyze`: refactor provides actionable refactoring steps, analyze provides understanding
 
+## Vision Support
+
+The Zen MCP server supports vision-capable models for analyzing images, diagrams, screenshots, and other visual content. Vision support works seamlessly with all tools and conversation threading.
+
+**Supported Models:**
+- **Gemini 2.5 Pro & Flash**: Excellent for diagrams, architecture analysis, UI mockups (up to 20MB total)
+- **OpenAI O3/O4 series**: Strong for visual debugging, error screenshots (up to 20MB total)
+- **Claude models via OpenRouter**: Good for code screenshots, visual analysis (up to 5MB total)
+- **Custom models**: Support varies by model, with a 40MB hard cap enforced to prevent abuse
+
+**Usage Examples:**
+```bash
+# Debug with error screenshots
+"Use zen to debug this error with the stack trace screenshot and error.py"
+
+# Architecture analysis with diagrams
+"Analyze this system architecture diagram with gemini pro for bottlenecks"
+
+# UI review with mockups
+"Chat with flash about this UI mockup - is the layout intuitive?"
+
+# Code review with visual context
+"Review this authentication code along with the error dialog screenshot"
+```
+
+**Image Formats Supported:**
+- **Images**: JPG, PNG, GIF, WebP, BMP, SVG, TIFF
+- **Documents**: PDF (where supported by the model)
+- **Data URLs**: Base64-encoded images from Claude
+
+**Key Features:**
+- **Automatic validation**: File type, magic bytes, and size validation
+- **Conversation context**: Images persist across tool switches and continuations
+- **Budget management**: Older images are dropped automatically when limits are exceeded
+- **Model capability-aware**: Images are sent only to vision-capable models
+
+**Best Practices:**
+- Describe images when including them: "screenshot of login error", "system architecture diagram"
+- Use appropriate models: Gemini for complex diagrams, O3 for debugging visuals
+- Consider image sizes: larger images consume more of the model's context capacity
+
 ## Working with Large Prompts
 
 The MCP protocol has a combined request+response limit of approximately 25K tokens.
This server intelligently works around this limitation by automatically handling large prompts as files: diff --git a/providers/base.py b/providers/base.py index 580f39f..c3688b9 100644 --- a/providers/base.py +++ b/providers/base.py @@ -112,6 +112,8 @@ class ModelCapabilities: supports_system_prompts: bool = True supports_streaming: bool = True supports_function_calling: bool = False + supports_images: bool = False # Whether model can process images + max_image_size_mb: float = 0.0 # Maximum total size for all images in MB # Temperature constraint object - preferred way to define temperature limits temperature_constraint: TemperatureConstraint = field( diff --git a/providers/gemini.py b/providers/gemini.py index 43b5d56..0972e89 100644 --- a/providers/gemini.py +++ b/providers/gemini.py @@ -1,6 +1,8 @@ """Gemini model provider implementation.""" +import base64 import logging +import os import time from typing import Optional @@ -21,11 +23,15 @@ class GeminiModelProvider(ModelProvider): "context_window": 1_048_576, # 1M tokens "supports_extended_thinking": True, "max_thinking_tokens": 24576, # Flash 2.5 thinking budget limit + "supports_images": True, # Vision capability + "max_image_size_mb": 20.0, # Conservative 20MB limit for reliability }, "gemini-2.5-pro-preview-06-05": { "context_window": 1_048_576, # 1M tokens "supports_extended_thinking": True, "max_thinking_tokens": 32768, # Pro 2.5 thinking budget limit + "supports_images": True, # Vision capability + "max_image_size_mb": 32.0, # Higher limit for Pro model }, # Shorthands "flash": "gemini-2.5-flash-preview-05-20", @@ -84,6 +90,8 @@ class GeminiModelProvider(ModelProvider): supports_system_prompts=True, supports_streaming=True, supports_function_calling=True, + supports_images=config.get("supports_images", False), + max_image_size_mb=config.get("max_image_size_mb", 0.0), temperature_constraint=temp_constraint, ) @@ -95,6 +103,7 @@ class GeminiModelProvider(ModelProvider): temperature: float = 0.7, max_output_tokens: Optional[int] = None, thinking_mode: str = "medium", + images: Optional[list[str]] = None, **kwargs, ) -> ModelResponse: """Generate content using Gemini model.""" @@ -102,12 +111,34 @@ class GeminiModelProvider(ModelProvider): resolved_name = self._resolve_model_name(model_name) self.validate_parameters(resolved_name, temperature) - # Combine system prompt with user prompt if provided + # Prepare content parts (text and potentially images) + parts = [] + + # Add system and user prompts as text if system_prompt: full_prompt = f"{system_prompt}\n\n{prompt}" else: full_prompt = prompt + parts.append({"text": full_prompt}) + + # Add images if provided and model supports vision + if images and self._supports_vision(resolved_name): + for image_path in images: + try: + image_part = self._process_image(image_path) + if image_part: + parts.append(image_part) + except Exception as e: + logger.warning(f"Failed to process image {image_path}: {e}") + # Continue with other images and text + continue + elif images and not self._supports_vision(resolved_name): + logger.warning(f"Model {resolved_name} does not support images, ignoring {len(images)} image(s)") + + # Create contents structure + contents = [{"parts": parts}] + # Prepare generation config generation_config = types.GenerateContentConfig( temperature=temperature, @@ -139,7 +170,7 @@ class GeminiModelProvider(ModelProvider): # Generate content response = self.client.models.generate_content( model=resolved_name, - contents=full_prompt, + contents=contents, 
config=generation_config, ) @@ -274,3 +305,51 @@ class GeminiModelProvider(ModelProvider): usage["total_tokens"] = usage["input_tokens"] + usage["output_tokens"] return usage + + def _supports_vision(self, model_name: str) -> bool: + """Check if the model supports vision (image processing).""" + # Gemini 2.5 models support vision + vision_models = { + "gemini-2.5-flash-preview-05-20", + "gemini-2.5-pro-preview-06-05", + "gemini-2.0-flash", + "gemini-1.5-pro", + "gemini-1.5-flash", + } + return model_name in vision_models + + def _process_image(self, image_path: str) -> Optional[dict]: + """Process an image for Gemini API.""" + try: + if image_path.startswith("data:image/"): + # Handle data URL: data:image/png;base64,iVBORw0... + header, data = image_path.split(",", 1) + mime_type = header.split(";")[0].split(":")[1] + return {"inline_data": {"mime_type": mime_type, "data": data}} + else: + # Handle file path - translate for Docker environment + from utils.file_types import get_image_mime_type + from utils.file_utils import translate_path_for_environment + + translated_path = translate_path_for_environment(image_path) + logger.debug(f"Translated image path from '{image_path}' to '{translated_path}'") + + if not os.path.exists(translated_path): + logger.warning(f"Image file not found: {translated_path} (original: {image_path})") + return None + + # Use translated path for all subsequent operations + image_path = translated_path + + # Detect MIME type from file extension using centralized mappings + ext = os.path.splitext(image_path)[1].lower() + mime_type = get_image_mime_type(ext) + + # Read and encode the image + with open(image_path, "rb") as f: + image_data = base64.b64encode(f.read()).decode() + + return {"inline_data": {"mime_type": mime_type, "data": image_data}} + except Exception as e: + logger.error(f"Error processing image {image_path}: {e}") + return None diff --git a/providers/openai.py b/providers/openai.py index b920af8..181bef9 100644 --- a/providers/openai.py +++ b/providers/openai.py @@ -23,22 +23,38 @@ class OpenAIModelProvider(OpenAICompatibleProvider): "o3": { "context_window": 200_000, # 200K tokens "supports_extended_thinking": False, + "supports_images": True, # O3 models support vision + "max_image_size_mb": 20.0, # 20MB per OpenAI docs }, "o3-mini": { "context_window": 200_000, # 200K tokens "supports_extended_thinking": False, + "supports_images": True, # O3 models support vision + "max_image_size_mb": 20.0, # 20MB per OpenAI docs }, "o3-pro": { "context_window": 200_000, # 200K tokens "supports_extended_thinking": False, + "supports_images": True, # O3 models support vision + "max_image_size_mb": 20.0, # 20MB per OpenAI docs }, "o4-mini": { "context_window": 200_000, # 200K tokens "supports_extended_thinking": False, + "supports_images": True, # O4 models support vision + "max_image_size_mb": 20.0, # 20MB per OpenAI docs }, "o4-mini-high": { "context_window": 200_000, # 200K tokens "supports_extended_thinking": False, + "supports_images": True, # O4 models support vision + "max_image_size_mb": 20.0, # 20MB per OpenAI docs + }, + "gpt-4.1-2025-04-14": { + "context_window": 1_000_000, # 1M tokens + "supports_extended_thinking": False, + "supports_images": True, # GPT-4.1 supports vision + "max_image_size_mb": 20.0, # 20MB per OpenAI docs }, # Shorthands "mini": "o4-mini", # Default 'mini' to latest mini model @@ -46,6 +62,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider): "o4mini": "o4-mini", "o4minihigh": "o4-mini-high", "o4minihi": "o4-mini-high", + 
"gpt4.1": "gpt-4.1-2025-04-14", } def __init__(self, api_key: str, **kwargs): @@ -76,7 +93,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider): # O3 and O4 reasoning models only support temperature=1.0 temp_constraint = FixedTemperatureConstraint(1.0) else: - # Other OpenAI models support 0.0-2.0 range + # Other OpenAI models (including GPT-4.1) support 0.0-2.0 range temp_constraint = RangeTemperatureConstraint(0.0, 2.0, 0.7) return ModelCapabilities( @@ -88,6 +105,8 @@ class OpenAIModelProvider(OpenAICompatibleProvider): supports_system_prompts=True, supports_streaming=True, supports_function_calling=True, + supports_images=config.get("supports_images", False), + max_image_size_mb=config.get("max_image_size_mb", 0.0), temperature_constraint=temp_constraint, ) diff --git a/providers/openai_compatible.py b/providers/openai_compatible.py index 91d4c0c..9e0b02b 100644 --- a/providers/openai_compatible.py +++ b/providers/openai_compatible.py @@ -1,5 +1,6 @@ """Base class for OpenAI-compatible API providers.""" +import base64 import ipaddress import logging import os @@ -229,6 +230,7 @@ class OpenAICompatibleProvider(ModelProvider): system_prompt: Optional[str] = None, temperature: float = 0.7, max_output_tokens: Optional[int] = None, + images: Optional[list[str]] = None, **kwargs, ) -> ModelResponse: """Generate content using the OpenAI-compatible API. @@ -255,7 +257,32 @@ class OpenAICompatibleProvider(ModelProvider): messages = [] if system_prompt: messages.append({"role": "system", "content": system_prompt}) - messages.append({"role": "user", "content": prompt}) + + # Prepare user message with text and potentially images + user_content = [] + user_content.append({"type": "text", "text": prompt}) + + # Add images if provided and model supports vision + if images and self._supports_vision(model_name): + for image_path in images: + try: + image_content = self._process_image(image_path) + if image_content: + user_content.append(image_content) + except Exception as e: + logging.warning(f"Failed to process image {image_path}: {e}") + # Continue with other images and text + continue + elif images and not self._supports_vision(model_name): + logging.warning(f"Model {model_name} does not support images, ignoring {len(images)} image(s)") + + # Add user message + if len(user_content) == 1: + # Only text content, use simple string format for compatibility + messages.append({"role": "user", "content": prompt}) + else: + # Text + images, use content array format + messages.append({"role": "user", "content": user_content}) # Prepare completion parameters completion_params = { @@ -424,3 +451,66 @@ class OpenAICompatibleProvider(ModelProvider): Default is False for OpenAI-compatible providers. """ return False + + def _supports_vision(self, model_name: str) -> bool: + """Check if the model supports vision (image processing). + + Default implementation for OpenAI-compatible providers. + Subclasses should override with specific model support. 
+ """ + # Common vision-capable models - only include models that actually support images + vision_models = { + "gpt-4o", + "gpt-4o-mini", + "gpt-4-turbo", + "gpt-4-vision-preview", + "gpt-4.1-2025-04-14", # GPT-4.1 supports vision + "o3", + "o3-mini", + "o3-pro", + "o4-mini", + "o4-mini-high", + # Note: Claude models would be handled by a separate provider + } + supports = model_name.lower() in vision_models + logging.debug(f"Model '{model_name}' vision support: {supports}") + return supports + + def _process_image(self, image_path: str) -> Optional[dict]: + """Process an image for OpenAI-compatible API.""" + try: + if image_path.startswith("data:image/"): + # Handle data URL: data:image/png;base64,iVBORw0... + return {"type": "image_url", "image_url": {"url": image_path}} + else: + # Handle file path - translate for Docker environment + from utils.file_utils import translate_path_for_environment + + translated_path = translate_path_for_environment(image_path) + logging.debug(f"Translated image path from '{image_path}' to '{translated_path}'") + + if not os.path.exists(translated_path): + logging.warning(f"Image file not found: {translated_path} (original: {image_path})") + return None + + # Use translated path for all subsequent operations + image_path = translated_path + + # Detect MIME type from file extension using centralized mappings + from utils.file_types import get_image_mime_type + + ext = os.path.splitext(image_path)[1].lower() + mime_type = get_image_mime_type(ext) + logging.debug(f"Processing image '{image_path}' with extension '{ext}' as MIME type '{mime_type}'") + + # Read and encode the image + with open(image_path, "rb") as f: + image_data = base64.b64encode(f.read()).decode() + + # Create data URL for OpenAI API + data_url = f"data:{mime_type};base64,{image_data}" + + return {"type": "image_url", "image_url": {"url": data_url}} + except Exception as e: + logging.error(f"Error processing image {image_path}: {e}") + return None diff --git a/providers/openrouter_registry.py b/providers/openrouter_registry.py index 647b7db..ce84bc2 100644 --- a/providers/openrouter_registry.py +++ b/providers/openrouter_registry.py @@ -23,6 +23,8 @@ class OpenRouterModelConfig: supports_streaming: bool = True supports_function_calling: bool = False supports_json_mode: bool = False + supports_images: bool = False # Whether model can process images + max_image_size_mb: float = 0.0 # Maximum total size for all images in MB is_custom: bool = False # True for models that should only be used with custom endpoints description: str = "" @@ -37,6 +39,8 @@ class OpenRouterModelConfig: supports_system_prompts=self.supports_system_prompts, supports_streaming=self.supports_streaming, supports_function_calling=self.supports_function_calling, + supports_images=self.supports_images, + max_image_size_mb=self.max_image_size_mb, temperature_constraint=RangeTemperatureConstraint(0.0, 2.0, 1.0), ) @@ -66,7 +70,8 @@ class OpenRouterModelRegistry: translated_path = translate_path_for_environment(env_path) self.config_path = Path(translated_path) else: - # Default to conf/custom_models.json (already in container) + # Default to conf/custom_models.json - use relative path from this file + # This works both in development and container environments self.config_path = Path(__file__).parent.parent / "conf" / "custom_models.json" # Load configuration diff --git a/simulator_tests/__init__.py b/simulator_tests/__init__.py index 64ede47..956ba66 100644 --- a/simulator_tests/__init__.py +++ b/simulator_tests/__init__.py @@ 
-24,6 +24,7 @@ from .test_redis_validation import RedisValidationTest from .test_refactor_validation import RefactorValidationTest from .test_testgen_validation import TestGenValidationTest from .test_token_allocation_validation import TokenAllocationValidationTest +from .test_vision_capability import VisionCapabilityTest from .test_xai_models import XAIModelsTest # Test registry for dynamic loading @@ -45,6 +46,7 @@ TEST_REGISTRY = { "testgen_validation": TestGenValidationTest, "refactor_validation": RefactorValidationTest, "conversation_chain_validation": ConversationChainValidationTest, + "vision_capability": VisionCapabilityTest, "xai_models": XAIModelsTest, # "o3_pro_expensive": O3ProExpensiveTest, # COMMENTED OUT - too expensive to run by default } @@ -69,6 +71,7 @@ __all__ = [ "TestGenValidationTest", "RefactorValidationTest", "ConversationChainValidationTest", + "VisionCapabilityTest", "XAIModelsTest", "TEST_REGISTRY", ] diff --git a/simulator_tests/test_vision_capability.py b/simulator_tests/test_vision_capability.py new file mode 100644 index 0000000..e75b3c9 --- /dev/null +++ b/simulator_tests/test_vision_capability.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python3 +""" +Vision Capability Test + +Tests vision capability with the chat tool using O3 model: +- Test file path image (PNG triangle) +- Test base64 data URL image +- Use chat tool with O3 model to analyze the images +- Verify the model correctly identifies shapes +""" + +import base64 +import os + +from .base_test import BaseSimulatorTest + + +class VisionCapabilityTest(BaseSimulatorTest): + """Test vision capability with chat tool and O3 model""" + + @property + def test_name(self) -> str: + return "vision_capability" + + @property + def test_description(self) -> str: + return "Vision capability test with chat tool and O3 model" + + def get_triangle_png_path(self) -> str: + """Get the path to the triangle.png file in tests directory""" + # Get the project root and find the triangle.png in tests/ + current_dir = os.getcwd() + triangle_path = os.path.join(current_dir, "tests", "triangle.png") + + if not os.path.exists(triangle_path): + raise FileNotFoundError(f"triangle.png not found at {triangle_path}") + + abs_path = os.path.abspath(triangle_path) + self.logger.debug(f"Using triangle PNG at host path: {abs_path}") + return abs_path + + def create_base64_triangle_data_url(self) -> str: + """Create a base64 data URL from the triangle.png file""" + triangle_path = self.get_triangle_png_path() + + with open(triangle_path, "rb") as f: + image_data = base64.b64encode(f.read()).decode() + + data_url = f"data:image/png;base64,{image_data}" + self.logger.debug(f"Created base64 data URL with {len(image_data)} characters") + return data_url + + def run_test(self) -> bool: + """Test vision capability with O3 model""" + try: + self.logger.info("Test: Vision capability with O3 model") + + # Test 1: File path image + self.logger.info(" 1.1: Testing file path image (PNG triangle)") + triangle_path = self.get_triangle_png_path() + self.logger.info(f" ✅ Using triangle PNG at: {triangle_path}") + + response1, continuation_id = self.call_mcp_tool( + "chat", + { + "prompt": "What shape do you see in this image? 
Please be specific and only mention the shape name.", + "images": [triangle_path], + "model": "o3", + }, + ) + + if not response1: + self.logger.error("Failed to get response from O3 model for file path test") + return False + + # Check for error indicators first + response1_lower = response1.lower() + if any( + error_phrase in response1_lower + for error_phrase in [ + "don't have access", + "cannot see", + "no image", + "clarification_required", + "image you're referring to", + "supply the image", + "error", + ] + ): + self.logger.error(f" ❌ O3 model cannot access file path image. Response: {response1[:300]}...") + return False + + if "triangle" not in response1_lower: + self.logger.error( + f" ❌ O3 did not identify triangle in file path test. Response: {response1[:200]}..." + ) + return False + + self.logger.info(" ✅ O3 correctly identified file path image as triangle") + + # Test 2: Base64 data URL image + self.logger.info(" 1.2: Testing base64 data URL image") + data_url = self.create_base64_triangle_data_url() + + response2, _ = self.call_mcp_tool( + "chat", + { + "prompt": "What shape do you see in this image? Please be specific and only mention the shape name.", + "images": [data_url], + "model": "o3", + }, + ) + + if not response2: + self.logger.error("Failed to get response from O3 model for base64 test") + return False + + response2_lower = response2.lower() + if any( + error_phrase in response2_lower + for error_phrase in [ + "don't have access", + "cannot see", + "no image", + "clarification_required", + "image you're referring to", + "supply the image", + "error", + ] + ): + self.logger.error(f" ❌ O3 model cannot access base64 image. Response: {response2[:300]}...") + return False + + if "triangle" not in response2_lower: + self.logger.error(f" ❌ O3 did not identify triangle in base64 test. Response: {response2[:200]}...") + return False + + self.logger.info(" ✅ O3 correctly identified base64 image as triangle") + + # Optional: Test continuation with same image + if continuation_id: + self.logger.info(" 1.3: Testing continuation with same image") + response3, _ = self.call_mcp_tool( + "chat", + { + "prompt": "What color is this triangle?", + "images": [triangle_path], # Same image should be deduplicated + "continuation_id": continuation_id, + "model": "o3", + }, + ) + + if response3: + self.logger.info(" ✅ Continuation also working correctly") + else: + self.logger.warning(" ⚠️ Continuation response not received") + + self.logger.info(" ✅ Vision capability test completed successfully") + return True + + except Exception as e: + self.logger.error(f"Vision capability test failed: {e}") + return False diff --git a/tests/test_image_support_integration.py b/tests/test_image_support_integration.py new file mode 100644 index 0000000..538ddd9 --- /dev/null +++ b/tests/test_image_support_integration.py @@ -0,0 +1,388 @@ +""" +Integration tests for native image support feature. 
+ +Tests the complete image support pipeline: +- Conversation memory integration with images +- Tool request validation and schema support +- Provider image processing capabilities +- Cross-tool image context preservation +""" + +import json +import os +import tempfile +import uuid + +import pytest + +from tools.chat import ChatTool +from tools.debug import DebugIssueTool +from utils.conversation_memory import ( + ConversationTurn, + ThreadContext, + add_turn, + create_thread, + get_conversation_image_list, + get_thread, +) + + +class TestImageSupportIntegration: + """Integration tests for the complete image support feature.""" + + def test_conversation_turn_includes_images(self): + """Test that ConversationTurn can store and track images.""" + turn = ConversationTurn( + role="user", + content="Please analyze this diagram", + timestamp="2025-01-01T00:00:00Z", + files=["code.py"], + images=["diagram.png", "flowchart.jpg"], + tool_name="chat", + ) + + assert turn.images == ["diagram.png", "flowchart.jpg"] + assert turn.files == ["code.py"] + assert turn.content == "Please analyze this diagram" + + def test_get_conversation_image_list_newest_first(self): + """Test that image list prioritizes newest references.""" + # Create thread context with multiple turns + context = ThreadContext( + thread_id=str(uuid.uuid4()), + created_at="2025-01-01T00:00:00Z", + last_updated_at="2025-01-01T00:00:00Z", + tool_name="chat", + turns=[ + ConversationTurn( + role="user", + content="Turn 1", + timestamp="2025-01-01T00:00:00Z", + images=["old_diagram.png", "shared.png"], + ), + ConversationTurn( + role="assistant", content="Turn 2", timestamp="2025-01-01T01:00:00Z", images=["middle.png"] + ), + ConversationTurn( + role="user", + content="Turn 3", + timestamp="2025-01-01T02:00:00Z", + images=["shared.png", "new_diagram.png"], # shared.png appears again + ), + ], + initial_context={}, + ) + + image_list = get_conversation_image_list(context) + + # Should prioritize newest first, with duplicates removed (newest wins) + expected = ["shared.png", "new_diagram.png", "middle.png", "old_diagram.png"] + assert image_list == expected + + def test_add_turn_with_images(self): + """Test adding a conversation turn with images.""" + thread_id = create_thread("test_tool", {"initial": "context"}) + + success = add_turn( + thread_id=thread_id, + role="user", + content="Analyze these screenshots", + files=["app.py"], + images=["screenshot1.png", "screenshot2.png"], + tool_name="debug", + ) + + assert success + + # Retrieve and verify the thread + context = get_thread(thread_id) + assert context is not None + assert len(context.turns) == 1 + + turn = context.turns[0] + assert turn.images == ["screenshot1.png", "screenshot2.png"] + assert turn.files == ["app.py"] + assert turn.content == "Analyze these screenshots" + + def test_chat_tool_schema_includes_images(self): + """Test that ChatTool schema includes images field.""" + tool = ChatTool() + schema = tool.get_input_schema() + + assert "images" in schema["properties"] + images_field = schema["properties"]["images"] + assert images_field["type"] == "array" + assert images_field["items"]["type"] == "string" + assert "visual context" in images_field["description"].lower() + + def test_debug_tool_schema_includes_images(self): + """Test that DebugIssueTool schema includes images field.""" + tool = DebugIssueTool() + schema = tool.get_input_schema() + + assert "images" in schema["properties"] + images_field = schema["properties"]["images"] + assert images_field["type"] == "array" + 
assert images_field["items"]["type"] == "string" + assert "error screens" in images_field["description"].lower() + + def test_tool_image_validation_limits(self): + """Test that tools validate image size limits at MCP boundary using real capabilities.""" + tool = ChatTool() + + # Create small test images (each 0.5MB, total 1MB) + small_images = [] + for _ in range(2): + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file: + # Write 0.5MB of data + temp_file.write(b"\x00" * (512 * 1024)) + small_images.append(temp_file.name) + + try: + # Test with vision-capable model (should pass for small images) + result = tool._validate_image_limits(small_images, "gemini-2.5-flash-preview-05-20") + assert result is None # No error + + # Test with non-vision model (should fail) + result = tool._validate_image_limits(small_images, "mistral-large") + assert result is not None + assert result["status"] == "error" + assert "does not support image processing" in result["content"] + assert result["metadata"]["supports_images"] is False + + finally: + # Clean up temp files + for img_path in small_images: + if os.path.exists(img_path): + os.unlink(img_path) + + def test_image_validation_model_specific_limits(self): + """Test that different models have appropriate size limits using real capabilities.""" + tool = ChatTool() + + # Test OpenAI O3 model (20MB limit) - Create 15MB image (should pass) + small_image_path = None + large_image_path = None + + try: + # Create 15MB image (under 20MB O3 limit) + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file: + temp_file.write(b"\x00" * (15 * 1024 * 1024)) # 15MB + small_image_path = temp_file.name + + result = tool._validate_image_limits([small_image_path], "o3") + assert result is None # Should pass (15MB < 20MB limit) + + # Create 25MB image (over 20MB O3 limit) + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file: + temp_file.write(b"\x00" * (25 * 1024 * 1024)) # 25MB + large_image_path = temp_file.name + + result = tool._validate_image_limits([large_image_path], "o3") + assert result is not None # Should fail (25MB > 20MB limit) + assert result["status"] == "error" + assert "Image size limit exceeded" in result["content"] + assert "20.0MB" in result["content"] # O3 limit + assert "25.0MB" in result["content"] # Provided size + + finally: + # Clean up temp files + if small_image_path and os.path.exists(small_image_path): + os.unlink(small_image_path) + if large_image_path and os.path.exists(large_image_path): + os.unlink(large_image_path) + + @pytest.mark.asyncio + async def test_chat_tool_execution_with_images(self): + """Test that ChatTool can execute with images parameter using real provider resolution.""" + import importlib + + # Create a temporary image file for testing + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file: + # Write a simple PNG header (minimal valid PNG) + png_header = b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\rIDATx\x9cc\x00\x01\x00\x00\x05\x00\x01\r\n-\xdb\x00\x00\x00\x00IEND\xaeB`\x82" + temp_file.write(png_header) + temp_image_path = temp_file.name + + # Save original environment + original_env = { + "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY"), + "DEFAULT_MODEL": os.environ.get("DEFAULT_MODEL"), + } + + try: + # Set up environment for real provider resolution + os.environ["OPENAI_API_KEY"] = "sk-test-key-images-test-not-real" + os.environ["DEFAULT_MODEL"] = "gpt-4o" 
+ + # Clear other provider keys to isolate to OpenAI + for key in ["GEMINI_API_KEY", "XAI_API_KEY", "OPENROUTER_API_KEY"]: + os.environ.pop(key, None) + + # Reload config and clear registry + import config + + importlib.reload(config) + from providers.registry import ModelProviderRegistry + + ModelProviderRegistry._instance = None + + tool = ChatTool() + + # Test with real provider resolution + try: + result = await tool.execute( + {"prompt": "What do you see in this image?", "images": [temp_image_path], "model": "gpt-4o"} + ) + + # If we get here, check the response format + assert len(result) == 1 + # Should be a valid JSON response + output = json.loads(result[0].text) + assert "status" in output + # Test passed - provider accepted images parameter + + except Exception as e: + # Expected: API call will fail with fake key + error_msg = str(e) + # Should NOT be a mock-related error + assert "MagicMock" not in error_msg + assert "'<' not supported between instances" not in error_msg + + # Should be a real provider error (API key or network) + assert any( + phrase in error_msg + for phrase in ["API", "key", "authentication", "provider", "network", "connection", "401", "403"] + ) + # Test passed - provider processed images parameter before failing on auth + + finally: + # Clean up temp file + os.unlink(temp_image_path) + + # Restore environment + for key, value in original_env.items(): + if value is not None: + os.environ[key] = value + else: + os.environ.pop(key, None) + + # Reload config and clear registry + importlib.reload(config) + ModelProviderRegistry._instance = None + + def test_cross_tool_image_context_preservation(self): + """Test that images are preserved across different tools in conversation.""" + # Create initial thread with chat tool + thread_id = create_thread("chat", {"initial": "context"}) + + # Add turn with images from chat tool + add_turn( + thread_id=thread_id, + role="user", + content="Here's my UI design", + images=["design.png", "mockup.jpg"], + tool_name="chat", + ) + + add_turn( + thread_id=thread_id, role="assistant", content="I can see your design. 
It looks good!", tool_name="chat" + ) + + # Add turn with different images from debug tool + add_turn( + thread_id=thread_id, + role="user", + content="Now I'm getting this error", + images=["error_screen.png"], + files=["error.log"], + tool_name="debug", + ) + + # Retrieve thread and check image preservation + context = get_thread(thread_id) + assert context is not None + + # Get conversation image list (should prioritize newest first) + image_list = get_conversation_image_list(context) + expected = ["error_screen.png", "design.png", "mockup.jpg"] + assert image_list == expected + + # Verify each turn has correct images + assert context.turns[0].images == ["design.png", "mockup.jpg"] + assert context.turns[1].images is None # Assistant turn without images + assert context.turns[2].images == ["error_screen.png"] + + def test_tool_request_base_class_has_images(self): + """Test that base ToolRequest class includes images field.""" + from tools.base import ToolRequest + + # Create request with images + request = ToolRequest(images=["test.png", "test2.jpg"]) + assert request.images == ["test.png", "test2.jpg"] + + # Test default value + request_no_images = ToolRequest() + assert request_no_images.images is None + + def test_data_url_image_format_support(self): + """Test that tools can handle data URL format images.""" + tool = ChatTool() + + # Test with data URL (base64 encoded 1x1 transparent PNG) + data_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==" + images = [data_url] + + # Use a model that should be available - o3 from OpenAI + result = tool._validate_image_limits(images, "o3") + assert result is None # Small data URL should pass validation + + # Also test with a non-vision model to ensure validation works + result = tool._validate_image_limits(images, "mistral-large") + # This should fail because mistral doesn't support images + assert result is not None + assert result["status"] == "error" + assert "does not support image processing" in result["content"] + + def test_empty_images_handling(self): + """Test that tools handle empty images lists gracefully.""" + tool = ChatTool() + + # Empty list should not fail validation + result = tool._validate_image_limits([], "test_model") + assert result is None + + # None should not fail validation + result = tool._validate_image_limits(None, "test_model") + assert result is None + + def test_conversation_memory_thread_chaining_with_images(self): + """Test that images work correctly with conversation thread chaining.""" + # Create parent thread with images + parent_thread_id = create_thread("chat", {"parent": "context"}) + add_turn( + thread_id=parent_thread_id, + role="user", + content="Parent thread with images", + images=["parent1.png", "shared.png"], + tool_name="chat", + ) + + # Create child thread linked to parent + child_thread_id = create_thread("debug", {"child": "context"}, parent_thread_id=parent_thread_id) + add_turn( + thread_id=child_thread_id, + role="user", + content="Child thread with more images", + images=["child1.png", "shared.png"], # shared.png appears again (should prioritize newer) + tool_name="debug", + ) + + # Get child thread and verify image collection works across chain + child_context = get_thread(child_thread_id) + assert child_context is not None + assert child_context.parent_thread_id == parent_thread_id + + # Test image collection for child thread only + child_images = get_conversation_image_list(child_context) + assert child_images == 
["child1.png", "shared.png"] diff --git a/tests/triangle.png b/tests/triangle.png new file mode 100644 index 0000000000000000000000000000000000000000..3009ec26caa9228a225859b0de89ce03b6eb007b GIT binary patch literal 52561 zcmeGEgG)@-Pa;o^N}Jx4kZo*0>M{SlGBDjpzEmr;85^R ze{TaH_#c{=wxSHAVuX4d{NiA1sBEXM4q*eY;Sd-aAp{+D2>2I*CIf-}^BQ~~P4<7U zUpoHhHAD)04uPQOqW$k%xzPW73SG}d|KDrWfw=JULU7&&M_og2Lv=MV>nE;UmNrkU zY`Oeg-BAZXB>cp{OIKTOOIkly7dJ04KS}z3jt~Q{QEzk8)Bbaax3eU@p}Hom>=RF0 zS|KhTE*^R*99mjh2~Qh4F>N`8e}5hPCQ0w$?d>kc&F$;!%jL_@^~BSjn^#m+l$(c- zn~#qZ9Kq@3@8)gk$LZ$9@UN5n=R9(@Ue=zD?%s}1+-OnfwX}NT<1I-~kGjx*|M}NA zy&diTcO^Hkf8Q3kL2lG1+`L>o-2Xi{_^JfzT`>($M_X`Z)cK`&CH^_`|MA}cuJfc>aC-|K9umI$F)v&Hn#&_`l!$_wf?ks9XF0yVZZq<3I0$iIl>T z;QsGJk;0)%g585akPu}#Xpl#PZE;hd)|;N#uL<(D*6jG1ec45dTSc!PtZLK8_~2*kIlXF=bVy8P2T--_~2exK(5 zV86K#G`u}&`se8%ooUmh7wp%YKi>u2+;yd=h5q{|K^J{cA$z|J)C4bw1z!Zq5Np$R0g%kLl6BFD-|(lDGZOIOJ%@VT}5O z*^(-6|8uXX8~@KZqWhqh4+7J{Qr_0$fWB)uD?Ec@$21f(?KjtmOC9%d8v|+8%nQ9 z4Q(4ZC1f4h@F4-A=1G6lc6-^@N8`JgDd^n$it6ve(hX_hiTUjxbCyJxLyG7W&%wa& zv2fz};LH%5KK)er+*NoSwJ2c*pY34s_XW$vWw?!$7^mQ#&~*S|N?%vX(gZ*kB}(sc=@@2TPE2BOMbaIgeB8z%VH zICieCd+yb)`K{H5Cre*`ZGLs0ty?;3!4E?S|RR<=OZb@>`Uz6w;6g|Xfh+-G$ zNxAORo81HX_N?~T4{y#|magw)OnmnL^|9f{MJ)H1cE)G6)1`WoMe5Gqn|0h+;Ho|C?`S&-Zh~?l?^qkB4I59@Qkk7dcN43zU=Q8=h$VBw3He zw|=CaFTq|SPzNSI;T8DyT>n-hJPJt*T?+jB9sD$mD^LagnKp>JYl9<2Vl{y$_b$cl zdov<9Xj~vCTA<7Kda#Xd6|JAZxlr`}f;~TD2`V}dO3fC3jq%rUPn|uZ-7N^!rfc7sF0&ua`JOKyEATT$g8iIl_cJ`IT?{W^dB^V;>CP%v=00?mM3{aApiGxQ(pO6M{l$jh3czMJQEq;YyP*BSjOYuN1y|~=39MQYB_JsMY zKDs{Yy8CDM+dHX1F=xIt=Qm~^Hgyb8dV+B9z(O^($-yAZ;T%Ra1s-d?zw>)OByQ`U zGc8vBbs(`K=g&#ekU9_VWe_j1%Qcss2m;rgCIaF7j7y9SEOSmYyAOfuZxkI?OLRWm zG1VqupT-=LP_lSXw{ez0C+l8kgE@vkExdjOWKN2UjWa~ zQ16S^53igqJkgMggd9n50|EP<>@RqE!< zI)Lua&a|x4vi`u+Hh&U4PU*Uvs@@gS$1cZzzL}4{6|FU@wVRPi{#q0tivqPEouQVi z5jHIYx!v$#u5=i@+k|!IF_UxnTjnvzdRl%=NBK^^?>=+(Llhd!|5~VExMooATl$1B zNLLU6%ahN?m_v~Ls;)a~SbcA(50a=q@Kx5V#fx*9eLhPuFcUpn4&xu)i9)3a5wK1~ zi02V#2qorrU6#0|ro%6nN1^4$pJINn<*i5Yzto5byzKcP!%uK)m2rLWy@lKS%VNY- z{6G1H5abt|d{JBPs@!K$=={J?X)qN^2uqsMjF}le*LJ$@SAb|+Q({) z3{x^vt7;2^k1|cKcFRE;>>&g(YgKC|%mwv_FW*{F&?c*dVd96fq%3Zla2qG%?1aA( zqW#RF_NkM5Bx`ZpM<2v#B6feR_CQnRbFvT<*wIq~*Lm*+*9O}Xo8Pb#dV{S}FaNG| zB%RC7oivM%Tnc_Wp16#g&;FJ9kbgx~PKwHq$M;J3I-Nym*b>$~zuRHIZJoH`nMY`RxWeNU#w3=s-5euqkkpg;~*AP=@|s^w(rR&3>N0NRGZu~a7$%hq0b4T^1VpkVn!o;_RIE=JJwYghFXEMl znD3$o)?nCxQ|$8oGNJRg3iNk1_gzUL6v@O z-J+2%Y4^Qp{<*45A+BDy*T$!BX4d5V*tBj4E=slt91ak)VX&BE%F>R7$wClTG6*F7 zSaxazrV&*};ASYpGxzTV5;n%A336Gk^%Ub2PrD_We0FD;AXt;3+vea_2N}Vw+HSTS z7{f}LExJ<9zX$yK+QpW#!-**#qh@I;YvrPI0!*Tc6s`|C{jxGd zJs2y(wKE)>M$FqHMo0lYkA!DP`o6vLY&dTu#ZR1H$NUer3e(K2_5_Gp3n&Lt$tN-> z)hP z-n)iYA8h}v=R_@OCN-p0VK~2rGFeC!jIRev3LA#g*B4JCZaYY?(8f%bpn+-cfWVzH} zf7hRNT6`x>FZ|tmQqXy3I7|3-`1NT+o4UHIU3??efJ7l`mD(avx6m*5JroXON-!=F z=f<6|Ia=;WpY>aNHRszGM#+g3k(*Yaat*hn>K%^%gkMzAvluZM|w57YdyG{qAe79oxc`WjOFu z;!uwmq{&=HI9P>*a|LoS$SYX!vKEA+RKlVi^7N>L$4-P!C89-MXxtxT!1HIeZ=%ps z5eF#y3(^`EP#s+Wx*Zs~U-$@yqtNb+Jy|LFYnZ=LFO}QetV&g$SM8fICd->DvNEBp z3;BD=EG(d;tGoj>QRU%XA1r!;9LN`(?m+|dK%aIrtbpI+isdkyq}`VdzYod%q|`J- zaNk5kG-(j97bN7c@u5`k@xF8#;XcTg$zrHTlEg}RfryfURLhVh zykStx*ClsbE74Vw&eV=hEc)Z2=5L7OSXh(8{#xP$-Zza`cQ5fK25E~J>oPYw|JXu; zrfAU-ioyA~-ljw5V)`NqGD(_2y4ToZGE3u?pIbazzv!uI7nC9Bte^BiohQpt^q11? 
z5Gw#RNK^t#Twm0hm!o|FozaVsMxtZgDTu&eO(@~_G$*k|mQV2r%v~M$wynY2d`g8b zg$31S11n@YXOHHVZS#I{(}ZM9&fJOoRCI(2NOceli6c@>X0T2~ZjoYnF6WlP^P>)| z$v~3V80wsP<+Kd5YQG5(E1hjy(Z-*gct0);Ke=)T$=eYsbAHx{dKPVMrW(HJNaNPmK}>7QEui-`vS+0jy=vH5`Hyq@#?pu;+( zg}z*ycL)xD>LS8x)e|1ensT5=m59LV&7DxENjf2~aF z3Vj50sAMcuW@NbULq?zQ4w4A{j#R&0PtwTo9AsU1(supf-3v--<$`8j4l$v*bV!;V zrAuP+e*MB7U-$v3apsw@pvq0ST{x{+G;_x=LQ!LZe^i2>b1vtUpQ{+PS_kMsHmf<2 zEkT5IB9I!(`j%2Ds@$!~^VxQk@p3KHfe94Y?z>m7Am?MN#XrNA0&9QvOjf4wW?EQ9As_Yu)R; zHP%rWJyO+2?S&_4kifx$NA?3Fsw(WjlKhwbyFeiTx3hhJWF)T(B?==Cw!{FUNgx0~ zfsv6R84tlCg4XyHO9G&lbC9&^NAlU;Skw zlWMAA%BY8if&hdUS1QKlbZ_o!rl4%G#TZxLVc-3rI0k!AxlL>B@Ws&m*Zt*Ve-vvm z*ws-9yS*KFPtB*4Lo#}$=Z#Q;gBT%9M5siJ((#JT}n-m-;!oD3YuDy8;pK9%E!Fy&HXa`&PX1s1rA%X2vDc z=mjSdWc%&mSTjotey3iYN<;5%oEfvA>puYJyL0CV;l4%Z18k1~9mu8Rw1eSr##9K2 zDAjkzQOUn1US$co-`2XB=hV&=ei~hL&*LWvIo#%|(Nw1jn zsy5+yA7C2f_TDo__TvS%CoH}k*veU)*K^F8g#xz!+Q*5c~q4$<%R=Dt==92C4FJO67 z5Jatf(!(V7#{CEM;(GlYu}$V~HF0&DYwSirG6Ht)&qtkfFR4Q{m8NkP@6>L;Crrmqi(VGljQc<#P}teg`u9f*7)mkI#Y*qQ z5R9Z#Uj6sju$;zY-;Z?yTPV1~hvI1zHu-MSuo^vrRFGOI4{SMq- zXy8_1ic-f7)7Z#+c~555rEw*)TEt+_j67b>V5#l)`mmgCd8wHgEh-t4~6cMz$ zg!G!|u;L-bSCsPh2!%Tf6MfZ;&;}fpLcshK@A)%gs`IXYiu+PBxBm`WzH`CDDV6NP9pG=w|M;b}+4@#WHO&YX4a zA6AbL^aCDfl-8gK|CWJjEv`zB(IdTkC6^<7Zg+KifO4_58Tums17}!RI_n5VRJ|;v z`?O2*b`hT44WO${8&hQ;4isxE4MA#EM$J5-r?ewMNIEtxl)NHNBsZF|0w=4UimGC< zM2A@z3SY~3{7E=$A?yX){`@EL+pAw%Y--ZZzee?`M5a3Kzc>r2^B4lPFRqYXt+7%2+QDl);*8-&=g2OqZra2ea`W9lq0DE9rB6_=iK4$}0TL zXk5^v6!8*EnSC$p&dK>6&c6rWFTprtdt8bISWtH$jiM)-pZU0;f=G+sTjR_*e)PYl z`HF72?L(Nlo0hjgRBTeZ>q@Mt746o@FFFLug|gnt5NoSPGGE5I!&V<-3WNr^M4U?UR`U5%@Z4N|Y;{Pa)pg0@F=>l}yD(~g<~+X_baf1qZjmEr z-1{A4y50y_>2^WVmTlqDcJuY_vB9&iCk$M&;PD^S_zWpkRDL|t|sIbvl_c298Wk9kYUi>|L@=59k#@qYV zERc&Y0awR;WM+POZQ3i5N(% z=0t0GgV}C`U!AN*4dw5l*6KS*%5_=gmzW$27qfjUeg+l4G zl9dqsD(#ER-mJD$Tp?QxAtD0Xu2S3zYZNF{04$}OPi=rCL;x!E!yZ7oa#**Y zg&hSQ1goS?Qdj}9QqWNgP}YZKE9I7o(Kg26eE#;cy2(e@f_BdqXt@`U;k}$(ZHwYw z^L#-xI+v52kqj;fJhm7R;+~*!$jHo{Oo)QdFJZBNu8v;^6{r-YVSX1RAibI9kIxYE z*|B&ly0=|TRcWZ)sRYq46>mC=6AzWH;(qEJpTZ5 z98fg12UrVrKsF~Aav@KVRA*BY2>l#Y*ns0k@}OOEGu`?IV3S;39D}7v?z64;uciJ> z=^U@}Brxx)+dT~7t6k5Kv%V37)9RB`z+Op*VP^bdZ9&UMDI;QNOOg8pOYbI_p&O5a zD=LSBM$2n-PwPt4wewC+y(`9)Bcw`oK0LC0M&e@ib<(0ffQRY$-!+bl&wH~+lz7LO zi^un?-kJRSU?@d+Fgk7-qKOGw!z9}W%0!|XK;n!)<0l>fHG`Jnpyg1i)c#`HSo3tw zByaMJkLkGvglxBX3E zMZY%aagX+Eoqz}KD@wuq_juO+0hDgb$R{KP*y`3I26%UJ9Y$k3>K8>0&FjI+JVCJR ziL)$C`rx((S-Fy#}4P$z4Se0;4 zJj0NpbKoM>RWjHGZDBY)d?dav@Z`s5GV_4rD2DjuDDkYX7AAdK#sb&N(7e{o;`i~O z^d&7=NpPb1Kj9IS}(k4OZLH{ji&ctb3 zhPKdFdxlqwV<5WHxBEL81>%lJ5N031Ga*z3!PIT{itqt+8%97dBXR=bi7y~>Ct$9| zRv;Iq%eK8)dZS2hL4@nASZL14zXY&js{-alKIa$_W0>QjalK3QWK>Q)oG-p5`NKg= zSd(v~bcWI=YDdbij_hI-{G!megy=w-qe`1 zbk6ITW$Jl%fIPU)q{&}d@OeNR0%lGCs$1dG4;M?O2Kv|UGFwTSn;zu^QogCSE%X3> zL!rNTvUcg_@j<*+^DN1NqwQQ2u{d^V+{K?Ca0 zqTlfo9+yE95WMZ64CE(*^oI;qnRn8E7H#JH-KSs{b5)_@v#F$dwt9npb9O#n@*W?Jasws&i<&t`7H=uY%hkeg(kV zxaO`5{jzGX%3nZm0&lPXW^BdP$Mx$qVnbE&FLl=e$b7h-APcY%U(|!(fJ4{70KzV= z3gU^dTl7POwZDtSuM1kO8&4e6Dx;%tpMkWRR$>I0N7sJCTJuQ8-Nu~3+fIuRGSA;Yzb8xR{5z( zj49ZlpCu|?H6?3#v78s`T`7Ww(rIo4y%2Bl zOWa5(1Ks(@vQox7IW*%0?M1CTtz+!|%3G!9RR9U!yS|64Q`(`*1Mx_{(KoiobW-r8 z<#6Sf3n$Rjbgy7sudohf=BL+;gsmH~cBxHsrVyb8Ky(ZJblh71=pW!#29JKXjQP6r zoXyeL@aS=s8$TtB%=g2j#6=HfMzls>QnpTvPS_P=3ar*h4mV*zY18Q7P1ITO^1!1? 
zv)1%MCN1Hd3_BW7ErNkRB_;%zVjwxc=GCJW!saq6@k>o1%!|&~1l57UW&}2%b~UX;xn(lw z8DWm!Dt0g0_y<_^s`1?hBN;s z&MRp@9Zj~k^vV@OuEpz?W5b!XPN2h!+_Yd}_(wSx0OANS{wFR*@H7lvRsN2K-4TmI z@ZxdaK{T-y>-CZj?f5wJ<~_By5||oIX4N@e^wQ{7sk@MJd6pmX5}k^b`AuV}JKVKy zT!FfJMPgR1_3Lt4d#HRt;Q(pnJz=-S5UhLTi=n8l7;zGylsyF>ouiSOP~F0SUHOZLGu zlX{W!F|$cYV@thvzCpamD%?c9(J*~7 z^)Y`(MpRW50PCqdV+XJt5gO&JVTyZ(wo!Hm^gMZQMBVJw)m@OZ87e!x)&_7jLoi)}c@_D?PVeO>_-`uCc zMrp5?v)4sEgV&WM)bv)V)r;hBfgC;}emIP%U-bB=4U~>b0S{bFRbeMY7RQv(75zLm zTYW#_O{SpXI06wyO~$E~J5pf-oZT@#f80CxCKK8j3NjW}%)kC9^Hs^Z51CoR12=@agwqGc;~K!bPsjUV9*uT({h{-cY2{W zB+tsm{uVy}dm!L7J2#=twupf()PQBG288XdZhpUDPH2FA0@Qj5=pu;3~|#J^~%4e4f_EA4xp)b^lc$* z721sA^qRG>DLmcUD<2<(p<-|^ymT0#vO17whR0%pB7#Jtyn5~v)V7a1oXk5{esW!y zaax+%nOk~+Z;aF%qe!i%!o*s!J$}NsW=2lh9d*pzc9p1?;Sg8W(eh85jT!_CHO_t% z5}$xaUgjmCQ0i^rKqRlY1M$`UHz_#A-RtmcuVb<0=Z$+cLt85y`x$JdA(tHOOig6# zWVo1gJ??eg8yM28`DE1!Wf>Yz(xHphYB9(SRDaa-mzi%44EjL7n35uX2#+2pucG&$ zJ4&N|9way;6Tt7y5&sBgwD5Wkzp+)xCFF5)}JbvbTkAZlqg9EANh?*9_Zrl zPRh|xt_phA&5`V^XM^_l?a}o_gn+?B{j?-dOD2k>EH03PnVIVgy|Z-Ky`=Bs-(hoP zP3Obk$#Q26S29Fb9kRr4&enz$)|;M#Ms+eNm}3?8w&Zy}#`bkP12can%Ft5YiiIvp z8t$_m;b84NcD!tbA+?hB7;&ot-d}wtAoE|TW~HgKtwkf6(5q9}4f4aAy|-oh>@~t- zxS*hoh_vb<%FT}rCPUkiC%r7}8mwP%b5_4=wmG9%qz>Em0?I25kneY2r*!>hJ20Dg zYM6HX`PazfF9D)uyqC|VXMA3spWD}jsadLx;c|aYcq;mjZ@>oXgvrL|Zu$uJLO!^; z%62H#0k^rMSFi!+2Sy~4zX1oAxHj_6H*3BbbJI`Mw4-8c37jaGaHv8bULB#YS#SG=e2!8rfjHHE6}kkXC;8;i z5xq@J+XuRpqj4r47hrrE&5@+&9cELyX>gHz-cMVDxh*U7(X>Tqg&IgbEY@KCCVOn| z%p|G?U!-uo^^-bX3cAI_rv#-SAw$M$oSn+03hi2L+juD5jDwj>I)JszsqUO2$D7NN zoIl$qf$T=omDf2VG|Opwq7WD`EGj6QZ@SamjR5t7s{lNhTR{8>wn}f2!ftJK2pBOX zB>`!`m&x~&Up5)cd-C}sBjyZ1_*!oIymBdBr}sS6EX0rUn6I0Mpqb+;(l5b1s+Efp z1j)ONfy+lCE0vyE0Rc$MKgQo!JaQAO#20+6%R$|vLuM%|) z)J#Y6-X~F4O}7%Xqvm`{_p;|F;Yid|KpH(kgfL-IXA=TRcI-F_leV`5u*NTHwvpPF zOtd>Pw<83)Wm83sdPnSgKyV;0e``mZro1RPayNhubc-zA2}V*tuuAu$+DHU+Lse>R zrbXYvPzJG49y@Wj-Z}fe7YrscS}4%~rA2bnV-SRp!_%qrRAjSx?xbG4rV&zGU>$WH zD+xug|DF{r3>DmYB&1PWNJ0~cX}PGYW;)sM#C$VKw%tEQ2+tU^S~Fj?-LMB+x!a?D zV)gx0400Ql^SpmOV&&$ADq)pMMzY`c=tew#(kj?`|83~8E{S(QhjN%=$c{hICj^{R zj8a0A@u4W++T{EyhUJ|Xk-yHycycx&&Wnw_uCAcs^)#kDRLr7zSC(w(sP8GC+lWFA zTu)3thGH)(TAsWH=T0#%)!1!{j_vsn$-p)Fg%=L500t{Ym5A3bP6J(1y-7XRT;RCR zj=T?l(0m?_vr2lOUK-`@OU_7239qeVukO;8+p|b`Drr9xDqmH}0zkMwC}ivM1rSst zs8pa-t@jI`6-5J%wN=T_Dph>N4YG4+dXJaI5M;c1YERem<3Q^IW{m6|(E<=fao94A zywhqSSDDVtC!dJ5KtWFeU2(pQttdEgIK))|?^e1- zbA^{U%K7NK#`RAIeI6NMZMzWAOruU(v6`b6!e|Pa;B^XAaS#6tG(~`StVc-1dNb>f zny~qBvk2_i9`?d4lN`6S@e@;E%ffmc1(;)~-Vh2E%dHg|qLAI6MTXZBoylw*xWn9U zN~W(;a8j5kw}4D*?}wPte7KURSCnTtls$ehQ}6mX(?yd+EuL_wm0nIWJZ`;{lBRIZ z*KTEW^JW@FcjHj?2T`OJjns$Y6}bX;X6v&*hD7PeDW`ydHHgy3AA6Gz|0oH0ot}@( zHT&#N4D9NXo_g>3 zo@K1@V>eIv3ii>MyEZ)}A!^`GZ1~0zh^SYPPn1+XHiti2wFvQo3Ln&lU3pTJO>6Jg zPcdX%a^Z-_sY#dMQ3@0*Cp{=3wNedarGiF%s@VrZcNo@PyE@{|k^Y~JoN0oN;u%^F zm{#*7sq{4gjE_0vSW||N#OOh70c4ou{xh)R&#}keq>iL>7|PlP_e%3*$A0R)hY`YP|Vu?-hUntRn zTOSam#&kFFS#?JdJre1_v_!nAogq)7WSY!HiOS)qhBu1|pfh>cW0xr_gSw_OB{N?> zOFHwhsVcBe8vKnHKS0?;N@ykVBfpe&D?*}Nz1j0Jih%pFm{gZ$Oyr)>Il)sZk2eq2 z8LZyq)Hf`8;@)r)vCQBp_E(*vR2G0Lqf-_Gy!Bz9;hJSD`j)oR&pP>9W9CjuhRgG9 ztw_XP<-EAqzT9sxubcn2TcN_t*GNFXg53)Hv&$+&5fg?gAJI<3MW7ScB@(3T%B`h%kJxy>2p%jUVYTLJW+qrDo&B6 zdiW?PYr_@otbQEdKh5-M-hL!k!yF?99r^?<13YdN^G_G${A}HH`o2h;3z|AB%6Vi4 zV&GrvGP~XSwWPwK@eY3`o>fU}F>ML)HXHCX-(^jqrO;ubn%TxA#RIlfvQj8_T98=+ zg4{Cv&AH>2<4{|X&($leChiDCNDOMe>X%`~6|?Ry#vlUEPhB?8E$Bre*(dw> zQ=KPedwQ3&w`)|)IfU3d;G&_z5uzs)$oN>?*GfBT=kXB#pQpbL(#7&9;{DIl_aiNb zQ9PPdWF%F%844aLj2T?QT_1-P8UokU8b?c|7(hY)JJu!?)qCd8jU-jtj0um=)?VvK 
zW2(G@eq;_dL!)G99l+h)Lf3xZETy#72B(!91rG0iw*b@MR>uP5Vi+;NT`3KE1bin( zzDC|q@ECT0lpezX)YqQtU7k|IRJ44wEg)Xn({fGQj31VSb8mIitD5=l&Aw5~OX9=& ze(dm9q23D3oJ`V{qD#%*>Gns2Y?nGWkKnei<7uk5zLY%aFn_ii)eW zwkumuKs|I4MfO0<#R9ME)_C+Rh{VQS?l|5cAeG5k9p@IT-sZPd}wB@G$%mHq0PZ6QnmH z*B)7>B~3ADeJ05AHq%=??`&B|I+sU`rZqHI?nAx9IP^X_8IG-D2*4~gpaITAML|Or z;tnhRx#c|Y@_E~f>nP<(h~jLgQ3;oTLV9TsXe!5jcAc%V^pW=FU_m1y0P?z%dijfX z5%;(3Ii;a^olP)cTjB5fbK~MNeqobLZC&eC^JIXU8!_l@qii~ z>+@I=jxjw99RPXkYjm3LX%Z7+V;4ini;u2Q}@m z@=_{bI0e|tv84J3Th6zFQtR5o7c+k(4;t5s5t6QKVxky&FWwLXxm947J@90Jc^L6i zqlm5Lh23|RX7*Q9!ts{FbvPblOEnx|trSxz*&<;9*oG0^N(9ejayg6P6wSlF^)z?& zOf$BsVMcNVcfaD)=~wj<^zrUyne=GT@mTlb*-_YWSzuxnZX|EW&vzsjZ?|wACP@9R zkS|H_$ZX-JNxC5mQG)$mjSW|SBe(nwcH}Psn%UHAW%J1~*>}yRujhX>-;OciQwmrZ zU+-47-7AzaWhqQlW}Qc*aRYzd_WUPynWR5i6Kc{~UzHQ;YBO%dvg#nllM`ckve*h) z7IqdW%@HmS0JCNzu_0^%TT@=^-EqiBGf#O!U!TBjjnVW5Th(s|qZ{V@Y&!Z@LE(7^ zh9XbJ%GsDNM)ce6-cWB$(7ebY!F)N%liVCmbZbRMK}koXB`Oq*n>GtjU_3@z?f7JA zI=yBGD7+=h!{0t$BHVq*1s8!MoToQETXSi3uHv<=lZsu)`+GfndL^aJF6H$18}`EZ zyUK%yHNEU=q-n*%`iX`9J~^V{AbOKZ`{MHNp+<@Os$e4`0_?_6Vgi|p4Idc2HKF>` zT2xvK>X`!39;IsVNV4!89J@~F>=VuRyL|-&vDIwp^B`y_V5*ba?zlJ<36KPJ*`pz` z;h?Lv`+cwPzc$Nz>4L;<3LE$8*^J5(@mRgNJiH$p^EFUDo8*6%qmRL`@|?Bz6b&kX z=V);XF{hjIl`AN#vXD@@kz9zO;9B9TBj?w~8$syEg7gnMd~x4{&tMk`^4@DDj;O7s zPVM3Jd<{G+r3`XhJ&F=T(5ylMe-Ccxl;n)Z35pV*Z=^Yvc$lx}OcSQCcn$K3Y`pav z*&fRaGhibTy^rQlODE>>0z$1P{g6&LF++#EYAGOfVsy>Z8YaFgyW;nJC4wxULRq-< zRz6o7rSNNenf;}~3XzW`6?B{k|MMKdQa69}Dt=nGIm8?(cLcU7$GE%eCkm@jva{|g{Uc4 z0KDRT)qsv4@c1+23g@bX!sQr|`+q>YR6C>k-o4m^d|i=F((%O$hq!0*C1a4OMi1*t z>V~C1KmQ10LQ{28jjl4O9ltjV@{dB7)ZH5mmve!w=aYa94khvE{%Qq~gu+@RwVVKX^alVxm9L?f_ilzw!zk zI%UJL5SzF>nIXn1H@qiZvXYLTRv*UvE@0wfdj3PeDY+OCKU%?0;J9~B%p)Gq_YA*= zKO6te#D?V9P`pFNo}OV+_7wr)Nn6@#a_4h@F9|OPn&)Xd`du4}A`D#8sZzZm&yX<7eM7(Q zm<{f1i-f#7SnF{w6`X8$f%t2YRW#BTIP6P?1HPFmG9#xW9v1>%?e8-A63yeYDM@{V zcA-}F_+Q|a1*+uOPzRf6bjsE_vEYMi%gW{*HcS{VN55x?L`*_Ys&BXaF+sM6LLHW= z*H=L}*_|4B_1x8HBopfV-6r28F9~v2{r9T-fB>niif`4k!+$@lyXS$3_GnY$L5F$S> z0%SuaGgxdk)%%q7Sf0ruh4ha)kmt|#iU#vA2Tga=_xiI`q`tU%Uf+FqRRwnVOu17J zxQbS)WAsbY`hG#)TT!CA|Anh)rQsg4X!(XzgrH|qz07ApxQPPQ*9`ub(%XYei;DnG zYNL#?DzcaK39-S`?%sMU&d-z7g*vM5s|JQ`S^C&2#NyqPNH=<{M%ZH|`t9qM$x~>Q z{Ab!Y)eo(&u`jgA@CNUdT=1_yp!(Ub`80v!YQkUMYhHg;RZ*T67l=nS!Q+;Qep;mZ zFL39mhWnIF9m4&GGEP|npA}53o!H&a;b`Xb&Bg8>R!pr_f@=E(`o*q%`^g*g--DnV zjcU5rxOirZ`P4LxK{s1$leS5yg@&0#{7=?T_ZJu<-N*e44d2M;@}b-)6zAnFabXa_ zNy)#9*4IqcWU-nS0UY>9L%R^Vr>jauE@y=0Bs#ix!V}zA6T)S;mSsaea^~|9{>#1EF^I}P2x*) z_HR9US6<#fm=n&w@ZNPGhnCE#Sl@T%&-UNU@Ea#TkDwStl3P!|EIb;^$Yd%+wGoXW z*40MWDc-uU!6?8d2U+-i*drL*Pq>C%Rn)t_@%*@)GJZ9rQ0D0XUH+AQFRffr3);ijPR2uHp~ZG)KtAM@R{zV|Kk+|*3t*@3v};Cqi}1^b45N34dtf#1 zf&Dgy7OI~p)-k&x?^@|t_QTUYt`k>qKQLLL{U{{#w$IX+E=;xyo)M4n8|O#nT&=xM zU8tgg5~5fQdXZR$TIDYpcz_k2UQ%I z6zlH)E0hPH)@TO!UT=MJt7H`zjApk;fkd*D*`#c$2UCebIS*4zRa`u<4O{tqcBZaR z7hdZJN8plqhKofWIxd)e>}|_SR6r-7F+T;H4S;`SCXzi%8{4^$iFRt5M1&hH4G3{C zoO_>u-79c!w%TR}c$!%;?K&`qoOuL;bqa?oW%H~$a)H&W@Lz=gzrD7z2#6j6U7eyr zEeW6B)sUn|dI8?>LinvV9X;-t#j6p5I7}ILC9f-JtHNJCeqzfG>6+*YdB#^XS;0%p z8RU#&ZDj_Pb|xsJz+82hlpToyY%m>QKa^ScYZ~McaU}cNm69nY{LP9{<=Ed1Fv3<~ zKF%QJ_;@$`NeszDH)t|i0i$R<)gF)-`Vcag*+1pWuPjpiT@dVRy|1!(EJI;s%zEp?*a;uPc(P(gEZMb*kayz77e`*HKH5pjx69bm7Iq<&sB9)9Qj>YRHO zz=lz7YB@r{t~Qtg9_mJBZ8Ntyl;c&)ZbeHzrzWSjVf7bW@;~S4b~qx<_5l9IbD($A z(i$i8WbM#kJ4*;w9OART7#S9v!n3yP$VGVvu!jQ9XI`=LiF#EKWLpjc%0uCWzS^Xf zqp(>LWs7nn@r{^#H}-}`VO~t1w8mmDf@Vmx+wsGjFlnYZrVc*ypWcrLblIeONgBpr zYKr-6U_YEth5W*+HiKbD9T>0B&mz2HSNH82RUSY#LZxD4cZJU&G68TJ~@$WTD>6-h**Ge8kqW!T+YVz zJYMr@h@nn|bcU)GBwUT&UcF{#qjN35s!U|xeX(Vs`jA|qcJkxGog%F1TNKD;)Mpd< 
z<+I!NVA%}!4-$OdY;IzD#Or> zowguD!P*7bH1M~#U5X{kDwd6WL_xf#Daxwf1GNR41NRv-AWcsOYH3t3_&2I72x<#> z&K56(;ftcgeU_xz0KHm6R&Zee2h;&dZG>$*Vw5hOw zy)CeKsaBAZU`P9Geu9a#fr2wK$Dl}8)0kke9*2Zod%wEx-c4)`JVXDPOJ#eK^c`j~%10`^$uoqV%oeviC zJs5rj1VAWxKT&r#0mfZs>WC`e!yj^0Vg6P-b#Pq>5S?)PZN6JUpe%`~o!6lJRX?C2 z19Po;toaH>4N-q^1!YlJFK-eXHI)%(QVcGV{A??tE9Sj*1Vn)d%VDtJfCekj4QAS@ zaGX1e(^{1CY^MZ>K9|G~-*vN8z!_hGBAA%UprY}WpuKSPH*;Lita}&!HkU;XN!des z=BPt!VSY!@WFIc-RHT-B?Y`*x4>hG}y% z0^|Rk`%v55Z4;uKF#x*|i=!<3iKY_4J#Uzd^9gsFJe!G(+ibD*xvy}I?5B7GeI`~u zqv!>63iE)3PdIlqR1qZjr>U ze+1ivWBJdB@3Avp(FYp-$iQB8P>_@L>aNXTMQcxnA)+3EY_hdSTq^A)Y&w&T8GlOXW+k4qi^-dTHnUod)gRKd zBjMc)Hu=}t!6i8180s&C7v0fMx9dAM*GxK;^e9Y(`L=-9+5*K+dWJgVKVZgOpW14| zVo+{dhhm>R7{a|dJ;%1aeZ4Q&f+i2_GChu>AZ0NmZuBa{Yo>bR#VS7 zMo8EnZjsFR=YSn{z~07M=E0ih`Ss3sgL4!gy*knLO^ywY#=T+A%slaDo7eClYFx`n@3l^wOd=)V* zIdb>OEn!-v)3g5{QD4CjWwfmgMBNQ!_6ch9-s`R)&JM&{k`UTZzIIJu4L*CPQ|4?4@3B;yd9$|_VFOd{U{ky*4s1BVuPjd4AH3Ro?NTa}rC1 z1{|Tn1h7fF4Tz{{8Y%@Cjo4~m{vghB$JH}L)k{gko2s|ivD)d>%!mTZH)!-s?DmFc zo)~umcs$D1pPpK!v&V650}-;juyXBr+*5ty|N90C^ml}tXJhYw0HfT%sQ0moV5Mu?02wJ4{%KLA|=Py&nuyD!l11<3B0Nms)- zZ-FPj95eMQ)5@<8`a#P~}UH1;1;g zJ1+Am9Oi|UM7Fu29I<|T>dF<*0>7jxQ4Ez_GMjp;CZSztoc2Tx^O!W(|1RF6*bbm5 zPc;t0jz~8p$1fS<8BD+nFfs5UV#k4S?)}HL!-sCZ-}x(`=_$BEhVcTcvUMMQ&@kHS zib4(~x!vq1t>=WUhhfd8tC&aNYGp#%X+;d#@%y9h_XmASa^Xq{{+KQ_&Zzrpc=#9k zJ9nmrIG$Eh!d3|0&ygvnvgs^D*J!0EN|?c8PobVhTFh&?aPTJc z=&Uy~!z(S{L{9g%0^u^L@Ct*^hl@_*155J4`|c7UR6_|bUNL@>)*J&Gh!Hk0RNhGF z;J(CJG%fg?fuiU$(XDxdn}$7GwH9iui)b5F!W$}q-+e}&;w16mXcEK_fi z1-`0kH>!NOXEA;3}IA*#3JqYvc= z?;Zf9%RIry1gSTg2{!?^dUgy~putNmR8%RZKqq10z8C3$EHfqKy68xDolnPA*b^Cf zzHRE>UIySYv%xvnt#fwkRGgv&(^l7|Dnq_Zh7}+L0P=AYz~>}2-lR!j69HoB0AZ@%_do{EbN88s-O+d_@$Czgot z3UvngqW{X+s@_81axVH;JUtm8G0Ud1pG#MHJ8$_(IDYWlY+hDV@;9jpk|__@PCbk( z48a9cyZ;2J0 z*&z7K@`_odhu_*|Z=JN<3a6`x(_UH7Fhevxom5_(DYtD?T42pOh0bsz1H2bLpIdK9 zc?NLA?8Ir`lgXghqN4=0q}*R&J1@Syd;v=6l#4pL9EcBf+O;Upbf5O$S+y~4`~ThI%OFGcJpU{^C~m3e&Wa;j_Jq25CBP;Mn->bj;Y#MuG- z_J`M4(nn)sl)fIs+}U@t>zhOUV@V2QrPwqJ64F4X&yEjuy;y{%e&GwT8K*zRB;&wB z&;@$IfHIvFE(F3mQHzxW_N>FlxYdrMtM@{Q#MsOiG86u;_r02S6;5 z@?!8(LAX`FHaqEFe-?nmyboyg!TLfqk2`*&@Hj#(V87;lw%s>Y~I0X7l)Ba|&9%}~V&1_3Y1MJI5OwE~DuOb|0ZosCo<(x36;ofk+s z`t|YdR!@*-gha7I9xyMCPvlNZUVzyp==N&&yR}u4$X&QC`CbN+?&SZ$nKy2?VjI_* zQ0D@GG)(M<=@5#C1^sG|<_xPd>K6I8yT18Z^RW$+24DLYv4NqVMXfvAh3Tu_QznE% z((G{dBWi?^W?U$TDorsWfSj3IYnkKhfPU`TzRnf@{3CsCaGAwTT%UG&>J4V|+Y|t( zEB81DBk9HayO*1aR*%T9KchTzpc$e}7_1KMvn}4jrdO%{;Q(?F%K5RU=zbC2*{gg< zcq6x?%|ouX8*DxWzAH?|ypOm%G>3d1E3Ngw(_qYO@XkuQ$E#fs4r`iCuC7``GLKpTF~!xcJK0ZSv2IxV z3YnoQ89q((HvgZm-B7Qb%YRXmxW~sFfur`SP6uP z`Ix@8Y5w|z;66sOpPFzrk9lYIm0^joF8BY10Vp^^I@6b8gk2Y5Ay=Gs2z~*iVW^|& zK2tVb!QCDlQF;6CuVAZExQ!(3KOSs!l}FUuLk~Sl_jHb3HNFlTmb6(%TLk9Xkc(d+ z7W;|8NEQd5KuOQ+ul9e=;2bO1zm^OV=Bg>uI}SP{iftd}vs*jTd+12YiNC_hmiHXO zRRg2KR8*@wb`hT;smW%1g+yE4?8D;=2J{QNms*xc^n!Y&`{vUZNe^t7mk*3i7#;llkd5c z4V?)l1gx}qq93wnV>7=3Uk4GCSAG`-e}}svKmPYoSl?(k&3oD5Y;8rchquAVpuRc}wyqYP%}sg&2iRwE)ja!aeJ5jkQ5ugr*o}Z6^?K z0`($Y)1ptg&Yx9jAkpTAR{I#Em&^VEyf+~i5Oa7B-OF@4IQAjT-bFqt75+$5icHsd zhW|JaFt@=&RPE=EiqJSs`U1n?lero$g9qqqovxkUtCv={&+09u$1-`GRx5OCy3$4W zMj~^5Q%S45SOpft8BFw(+9$6K^6QWMBIh23Hm&!2O?%PQu* z*_frKNwIhi7pnMPmoAeW>&J>^4pF~8!l2{r;cl$f_AnD5W64I6E8;~29)!=t6=poA z1?t$`>44zbQe|!>Tap_^L8I&sS^7l7D20QWj}NaX^Z+^bgz;x25y`mWnCeQZgBa@z zo!Cixt8tgjKJPrH7_yKlVB1K?#awb|g6V7D{rhh1qzJWUgEI(jtDoG0h{!l=`%gYi zQ!VwecHGls`TDge^g54=MvnL@TOZ8Ktl+A%o?U2RBwinM=J|t-4cKWWw)@9IyJvhA zebmp`WEb#=Hm#}H?vJfq?9JGKNX8Z>qVXv8sJ?h$I7%a>DZPB`v6hDsVWbj15+Y(< z!APOXX7K#;G>gm4Lnf}_AD{ZA5rUFJvj22Vmt3K(GN^F|q}Q`y 
zj*^>Yc!A^KthILQ3H|Kb0jLjf%%HnfnjsZAhR(;*u}M)Wa~Ke=rUs(*)kHQLZ~uB@ z8NbOqQ6O+4vk?!t^Wt2$TlkE&fA4J6z9|NqVu5>$bd6e<(~r|6gAm4#%L)<2Hd^Yx ziq7^2#3p-*PkY&MlPtGkg16DIggp0nBe1H3{BKH)X}u$+AYU;P3XNhg|AV0(oe&Qf zhP=h<<_ORQ917jHw#R5QbD2--=4h5ScchXq-l1Q2(*WJB+6k~E>&nw;y0MMA$vMc= zn?uTJ*}taC%3THQ=0_`3JTDPszhD(-v`E>LPyr-~5fJD%JI(nQxh^o)k}Z{kYx%#K z5OO|{PE+-idO*FofgpZNl4`-=s7=nbXIE%WmCVhz-Yj_EB2 zlf9I}NItQ`iGgfMJ^Ebk* z)E>>pAaNBorMo`dS%Ok2)X%50utl6> zQZ2KYeW4Yakjo7Ku=i0Y%}lWJH9lgqsw~Uhjd_)AE!#5P9EZ<{mTY?Po*qQ$p-6q} zUUnu7hMuZ4Lry8OwJ!-=_J#S5?qpd~3CHEF@);mQ%FoZS?}bBvMbPq#a`6AWg{r}| zwFD9qGg=IfBflH&{w03hIV!43Jx@!TSZ_@h zgSH$l5ZhXGHVU|zQRi`P0{6kkhjBD&;;DxgaUECM)BE@~Az(T%8p>nxgPC#zd0;L| zRH~lx#kY!WeXaFiTDC0o?$#&5?m^X0*=PypqZezx2R?wPIc?5^bwEdy+n4Ie$mk!3 zlhg|+f87ScjqyRFZrlL3!N;d90p!PX^bz*+0haTguzRR!opY9H{hqETo+3cTpS#+LvZJrHUo88A zCMp}hh6YF%y;Y9|?mM$Ph;%bGOa(T*%>)B_T79rq$CR!rT;rY?2jnrRmz;;vpiO9f zY_^PTI|bF340w7Lvr%KUIsm*p)>?ssY41uX{`-fBKrV*1vYs>^HVJxe4uC|rO@mnZ zYRgwy;_)@>>i=;r#ZU8=&i+KpGHXE>C0$mgN4FgSr{Eu$FpAp&%BzkWU>-c{G~;A% z$NW^2#+2J!t&o|c7F$mh{>Zgzup|PAUmzI8JrAhE^MOr*|0cRla&5AexluD*Ev4R) zA#Lms6lhdlTtsI3tNogAPu_f1v*6mdfS#Rpd5L~MJNHt7XSZ)WN2g1>g2th=+#{x3fN*}QMG=r(^Cpta`;Vm z0Zh@(O+96tWOyX(HMq=>$BLG#M)~jT7;C#OOy-4wOJU%Ta>|c~G@MFwu8oi^$p|T7 zK*C$26vYQhb}&FGqzr6rkBuApFb_2K35*fBmB|T~h5zcU>3eRsj1U>YTi()Efgi@f zM+n!gta7jo7==QMz2COw*S`qI>~ou|x003M0+xkUpr!VQpC?2PzCA{wVt+RlN4>Bi zowiHkTz97>Ura^h*4Q0Sv9%DWi9E=beeJ-% zoCr^Xx|$@?*=kUWm9ekrqw$m;M+ZnbCFXj7^m9%{1n~%%%2dwA={ zN_w2S_DpPB zCXlxV@^3r+p$9F)d`xWM;SB>x$M+TcB#kN3Y#pl%d;4jbYs~n?GV>^i~BUg4Mu3S#UB9BT9?2Gd_hwhx2jawu&?jG&?@WuD?NUsr+h4vC=Yf zuQ~HH{;?D#h%A_7`r#x9vK;OM2P5znWJHt_8g=^f7q~TJG)Wmbm`vm^wKt)jQ{|2bhl62BLbztn^oa`36W~KUYi)(;%&WTHJQc_ zeQ69+Do6@M>57!3GhN^wgL%t16K$4GFZG1-f(=Dk-^UAHjZsOiM~v(02*#I#WQOZR zz}cM?it`EbFtsTq(mH-uXwI zi4L0;g7^pMG-%nPq|LjX(N3XNd$El00Q)!o;F8S=Ak8TfIPrB{rd2d6 zgG3Bz&|T@K#fH_ugL_GPaPs)3xu^P#S|cclfao6*&!bLqHlS1mIo==%J!R&8r}xXX zR`M@-i@+#lGeObOxu|j(GXU8VMIB%@j{OJ*$ob2PKm* zm%&ePxml{uUYphSsf+U+$hZT8B*-EO1b|)j@h2}TK+CVg4hn@?AF4$g$KDz(jw%^J zP8^Tx4fXv&!WX694IrOSI6?M`{lEr|dZpzw15?Mrx%&2+hJ9y-|49c9DBCa3UfAU^ zPZC6_44n$wQqgw{{CGE8p-C6TYeRLlxzs_r>o%5&>Puoz@p#)1pX-d)U*)*j{BZ1R ziZwTGu*T@mez6wd@cSPpd||{E^+G`VO5sTQo`J?rBW>c~2er>WWU3CFu13K(GDZbF z4hHO+2;cL)W+Y@n?@X0Y!M7P=<#{Ked$aHHja9PGAH?$3jJa_WRHC4@SwO?-nIy4o zh6LP8O#&cN+SL;(2C6JgjMspfu(|NeL|?YcdUlyLr5^Gb9}0ELfFnd#*l!9$t~nB= zJ;{RXNi4N|zIDGuk!eOyEf(9zU;KW6Z;g9YiNC{!*o~-b1|bQOKQcF6j-?K{IO3LIQ#tIp>P6$^xBN8nUc!jllP<_ zOR?9@Y?o|lp`=kQ_pjLK%4UJ8^@~O5OK^)w7~EpoNkjWkCOmF_WO5jqyR$8BQ!%}O zf7K?$D&GD}pWhBuMBmzCQzS15=VOV`3)Dz!>fviS{7l>AxSEfR+|WQV5LL zusB!4)TGL{^shGhn$r-3P4V)cKcoJ6J?cEDs%_ZN_jI@zZH!IbJ&t3v`fleuwj>xh zcm3J=n~xg)NCplOI|n5Ao*CeXl*maXG^0b8`NL%~A|&eD+!mAn7YFr1i6TV<^3%oh zpLm#qsTp8!L%tG>mwE?-yJy#LR-*%|DA5O;S?$w4WEd%{WO6-{XG0XU0U>P;HyzPb zXMBShod?jS{^-%r{0hR}hTW)uWE)2bOq7TsQf^_$9?l#~=K!41`@3^oL$ z+oSkEBkwKX#QfuNuP?T*<)N_auf^^mU6?5@ArHvdV+0{I&<4n_y3OS{#I{`(9n zM(rq@cG(BsrdF68p2DK_O|H+#9I!mopxYNnZpJWW2WR3?{urTEy*7r$uqD|S0em6F z-Ql8b(Aro*jvkev>eVw-nT=DAPxyu|2QRN|29{_speLWv6El1N)efzBew)s(&NGel zIF>KtLwPAXj7p4y|6i zJl1!%ddC??nqmCU(z(ZWJ9#KmyK=J~LPP3m3qY49X|KA(enS$U-q|)laq2HbrXcx- zl%ekj$xYWCW4BrN7^A$?rDuT6%N3QjFKj0V^#TddEKlh26Qj}iCMpU7<2*iqjFTcg z*w1sQHA~;)SFGbRvFBFNzqKyy1XsUvE5fSS8MQQ^*`qO-^t6O|ToNy15{!O5a2anu z1w9qWX#d{KD<2eTqa2%{_W(@(qa*vdow_?8KZ1q^mcBq1fd#9cpB~}kaSFP3)6mt}dneyYFWY^C3Hku+#k zZh@pj-RgOCfeLctX~KvtwjIzp1Td*qrt25aObjR@D(><3+PXLQm%xm^!+u9Focj9L z8gu7M&t%(~-wpc#-8O_+3+pTD?O;UDo5D9N0Luot^o@+5`eEk$cUXUm6LQ%b*qaBA zfP||$N{P<2$^K(8K4(3)9t6E3n_f$c^O3k;FyQ(E{HCd_3Z?FC7nptRCH>^IW@qwM 
zg}ySVS{fXXdHS2vk3t<=ba$6nDj4r^beHBv-mny0cwvh8FY5s^J_xe#Cf+yP z-}boic!|)s(sEQX>UsaizAq460l?gJ*(GZ~F=_(WO{MXzU5IZcx-;mcB(1LA$+k_q zA>T9eiLAYtG0*Dc$`K34*av|(;{e}1>x!vtrf|#={CpS-SAs}*O1MkB9QXh(5fB!%T8E2@U)2 ztPM&y$TkH?)AWz{{U;uOIbLi2Vz-yh5ss}#j801;9E6FI^Y|_L$1uX`*}epJfe@}{ z{u#=Fh*-=xOD%0w#8T{kE+ratbLZyU6;NKp59(zjJ=$C;`gg~l+hwMK$gc0y(_Bk3 zKfyi_1k-v!I&j^?LZZVYs)uN5DtO8X*^LxVEYU~8zomcb1VT9A&-OF~IS2!)aBCLx zg2oFENP>>NlYvw_C680|fjD=gCI(Y*S3?rDuqMbkbC~cO0Lh5Jt0{-6NtY;{aN!^< zAJ~0%AVQQTUYpM%wh?Qm+3;^ncO*TT`}g3-eDN3dql(L5tA{Qn%7|o*A`~CUA-<5u z=kb4FNg`x3ICw_I9^xD3KXR%)Oa-vC2WgLj*sL>aIw-|h!LAED@J-41Q!~O}T~P>) z#xaetVx|RT`)fbs0U5T|_vR5%3=%#NlGI>ksxS&t zBaA`b0m-CQ=K#K8Z7RwBd1TjMN!J+VSdaf0YLAD$hAX+q!&k<&_`Vj5gyQJ5lEQj; zntK=&W<3-bAwJCbpJw7m5j0!h!z!tlsQ9fTCvl<#=Z(A0S|I{yH6UETXe3?gH)UY(7SM1}r+Ur4+1rpE02S#{>?c?l6P5k=f<`SU%=FdpY!M&I)M( z%CpwUoTJ-jgoUtqR%^dY`lGed%eUgbKL=Jg@l9bf{U>pds@G&;GKBRGa|PwzQ<5)$ zpttpB=5EU?>BRvN0=}*nIUc!RfRaR>0_6cMu_h!yFVE?F)F&`pt~dU6Bz}dW9(I!p z_Y@cY8O_%Ow%BCrY^0~FxMzbjaCDqw_J$?F-+w;de~dc4QFDrW#DV|DYuK0iFnF}4*g3Iht>oyP#E>}DH8ISwwG>>_k60`F28$RRCNP;HZ8+e06 zl+a#Wd};Vy2$b`3kPEejSp$$MXVf3xp0aZGuy^x%*MGIsTW$9vp#A-h)_{23UriJ# z{WsCl7-UO;#eqHXXdZM1hO|H_{>@fs={WT?fUdnw#EDXG&o(9m zox3k_wUwrX`$;O4ndgyHfeaCV znjeQA8#G4z(%YJ!R;7r(?~(?4LqX%|odX8y!z&tS})7}e+ag2Z7U^8V2 zrNEPkPNrgA-zDq4ej`o%1_l;Y^@yqGALb>+!&{)$no#!529JG>t3Yr1A7FQZQgMIggr3>6Y zyb98Yu%L*Q8_!u14&>7P; z97AVN*`~T2lt?s2(+;NWLyVu3&WisD&+_+Q^3Ti(JPF6+tq%>L4ZsxL@%+I#!t6P<%q%5dEvY0xrAi^o>% zDG)Dk(ZdiVf)(UwrWnyGq~oS3B+-bb@?LGsmQFKj$pds#pAhUvNU87e0+1pQqiTLJ zF>Qk7#U@N&YCCdQcz+{Or0K23Z*loDiT$5NWJs2tH-|nrx5d1|SOh<)XWV6w z=E=%&wl@y`WP#*gaIbWIFt9pOSr|hCBz-$IO};Z3q~xPq^{j5)UDoZ~cxX#w@f@b5 zde+jMOpJt?@Z7+*(^0Mzkc3n|B`$7NQ--{|JM4PVrvJI6?d8$g&95$N3e^K5j1Pz| zOY462A@_v9L!MSrbs_4(@Ypy>XF}2jsJ!(2Vne*;7nk@3L%n07#Db^2T zg$x3rLAvIo$lm~A%mZ4d0XMFJ_=oNe<*WQSQFe*{{{B(SUs`e=9knVpjYo4!8ya>W z2hfCN7H|0qxm}&*^}3lA4?|GgM(4;;t{czsntBEJP%-)M1o}{z7Ah?jFET<#h@vDL zbEr_Pmn{C_NP&PmainI;MHxIip*;8E9~y^^iM_P#F5BFlf%vnaw$ zhU|QPHDI-zsiPR(6@kuce4@B0{%qDF4ZMk_jht$CKdYCikEblMXJ`;C$E^`$X@-oN zk+OEzn+1SPM}TJeK+P$rAgEFlge`{^;nI*008&%_0@A(S5Vu`OnlQUFT!le{S2YsX z*xpE~mvj~Zegx{{;`87X%ChpaNET0EM9~B$ku?JM44IwY;%p;mq+tNqFb8OZQIG3$ z32s&TunT$wrllr+c-^#@_Jb>r+?}aFxG+BOl8pjp?5)gjax;>p&8Hnn=RRcGuT#u7 z7@a~rmA^Bf0E=hrrhhy8K#t3 z`r`mCKAOW*&fDsVeX}%s zUShu{Y9iebu&C5(U(OchE-d0#a805;ugaNCVpMp^w&sx znbl!we9aN{n;*uW2M0{kk6*Ff_Et+xKD|>;jy}2>G;u6O0x|Uc1yWdDt4?mF3d{Xu zmg#lqo;|P<@J1?@d{6PtN?5|(Da4k;fpH~80DQUVEq(j_q?ye!mZFB#Oe51$hwiUn z$%C3jG_B_CLj{rzN&7S+xkOOgW`2zpyC-JcL}t*3xij@x@(NCc{Q2}z-A?^ip z=81wN<&Qve)p7dzZ9cYbwe4Z#+pb?^8T?j#k$^=maJs7I+}gowU|X~(n4AO6l=rnQ zB|O%TOU^$?lHtezavzS!S0oVKBE(H)E88gRr>) zg!qyIy}X5=cEEDlsUm+!PN?<%yI9tldyhlaH5BH_uLY~E?7bcu;E~+qBp04Lr>4_3 z^Po#|x&7Vy*ma_U>|=fFFZKLTz%_3!6JO~ zB#b_eM|}xGJT&>K_>%hU-6uzwTd}RU~z_;5KEi;ZF|iruj~!EdXUw zCD*z9W3Iwmp!z;OUXpG`;d!iR;{eE(&qJ9J_-i9yZ3Q#JY71`m!=TL5mrxne`uAT% zck^V2gI5}6ehOyM++#Uy^yaM11?2J>SeZD&J&{nO2)^cbfm4TgUm*RxOxy@5{JA25 z#b*g38F~1JY3f)hTzMTRpE+39gfK-~{Yc7G>1fO~0dEyPqa!HVW%BuNeDGp%^$V7RiXtPXNc-1SOsJ4%yVyul8Yhy>_nOE z5jYbF9pUDyL)XRmo}MSedsD#TOG25q_V^IcF#{|r5T$jOonKWjw;A5HEb zgRugU%6PvP=mN&!TA!-O3W^_8=@Qv*8%v7;G zk|5;=Do~f{A?2IVF9+_gCZZzWx#MQ4g0TN)0bBv$ChesbFwq$gTWV#CpJ%P}14=O4 z=^@kngu?JCxhyz1lt~jT7!%>VMcX85+n}3#Bo%xK;;cY$77xiX!_*;cZ&dnE@4vPJ24*L8P&e5oH7v!%z&_P^B8NxcT}H*Q!%rL!6T z$6yvA3h)$m0?h~0g-yge(9$+b(=;~GR5AYPw2)|(>8!WyRM>=yR7ID17$37h_36RP5=@h2V<(TRb_WA7eSsiD4VA zfhwkxy^}NO+H^OqiA9Qd7a|qA7$g7L-s%_g#!zQJhA*gf&J$cS}RZXK_W9;Ls z|NLYy?&I=f(Y>OGk}L1LChu@_8A$)=c{p~Jr01qwx9CWPalO%G2`CJt@{OWf)5A_T 
z!REj-3YZUGuoa6PB7~QzQgD$B|5fNo7u>40YT;7TvZ>|`Z%79xjRZC6Gcb zjr#G%vb>%I>n`fOuxZ#d^`aPbJ`{lROP^nY9_DDTRJ+O6#)TJ0=n;DJD6oww@3C&G z3B>Xwm!+`8;3XkvMsQmGqP8+5-c_*=!Vo(TeZDx|zUdD}+{hOBvn_2i9;MFD9?CXl zp9_zl{Gk}a4VeegjOsZ8NcpJo&x$XfONCssT0IMuJ%afs5KFTT;l7Ks{5t+M`6eSS z_WPD@mJbQ&y3{q$wx5I7+`78{p|?gABPpp^BriGtWA%WLeVykSL}p=A z_N?F!ciL9rT`;&Jk3%g@nKkq@?BNL3J~6A~?1-{Vlb^uv^b~BL0U=n)?DJU8ED>r= zN^l5|EW&;KXH{Lu5S2p_4W;K~xdSn^Rk~&Mdheiq|Ha#by`=FxYmzHkAt~^t4}~gV zq{DK>zNzUK_u5zuFt>|)ewXu*T`<_Q0Y+0T@4FTSPP-$LAbkX<8YpNRXJV;UHw z;CLi(!|BJ2CFl)V)1r=15~B>H58ATFG~Z?bC85}4sg5;|+J|iqb(^wnx|8b1AD-ygN&C~q{rv}f<>h-XW zhCvR$d|&D7S-WzNP<-E4!Jp9kl@qvXx-OOvprpcanWOKAB~syEN#Gl>xlG9j>q}O) z#h^foC@6vj+)Ath}?UUEOout=J|cmGC})wGjWv~VW3j+ z>7z z@@(Gm^Z45?fOTd|ObrN?z{@jF)iGBDX`()dLpBjd{RB@XJe!ZXIKRK%y3Ig<(avQ+ zbm6Ed>5|#XP~c7M1Iqp-s7ldCId04jO;4SFX*tf{9p;UkZXD9zN1_aCGdYfnphU{k z8gIBHdhC2AE^-=(Ap)D##QlZ>5lw69xQoq(#E0NEv63Bzk z)=&_m2aGA+Y)+Cq=r~H2SzuGm2ejXD6JT zP96;%8(6ilLXs^;?|};a<|(=0^Rsw;nS0=(bn1!Lu}wkc`w#Hzvy}AMktZhhu{8>6 z15r)uSHQ30X+Oz{w^aa80ta>xyNn@fBj1N#9b6yyn_j^tx454Q6LLiy*ICvPPGC62 z=8keK9V9A%KYXS@iGRCzb=DDdf*Ln;2jxU>gmdJl8g;vuqVBQX_rm5?e9GDc7X-hn zn>_ta2fugHqk*=Qn}?Vz=XtT_OCzMDi=(c{$Ly(h_NYmyA@Yp^)@h~ zij4aTgmxu{QH`T~#yJ1|pIU8S0pxpNQQJh+=kIn_P%*R+FARUmkc|Aht_} z{!O1;*qb9_qBM40_IL@TO$g0c2H0Y<3J50F#T_C$bMVuyVi$!|mP`BAYZ5-hw6fv` zHIb-`gAG@$5QOZf7qbjy-`?y#-TmS#I%abr0A7l8>6U1_k_SL$-7h26K|y0Eyejf* z^Fvd~99FK>%WJXy>a%{jTi7gpY12Rfc%7N2Q{Vy>wxHTMNML8kB(%w_G~>fI-bs_H z;#&W0N;3STrb_}&!_QBKZ;mP1t2f2>tD6=vbJ+hr)+v38#O)ay9r(N0EQ()rZavO` zj?*k2mhJTQ!;7Oi4e|SSNO9&n0efyTIsUm!WnWaZ2vEtnjb|)WIe=<*q4q;R59DX9 zke%t>0%(rZE69<&{39wq8>QA{#S4^vuLAaPGKL8=BiumF5O@f`0MjpGc)YyxqLx05 z-~zWc7scHyH#@mux3+i~+mw|R@YxT5*M8idN{x#F*KE^g!aGgPH2wf=%yT`D+Z0b$ zL%p(Ys%RK}SogL;xa%kNc!#pL+i?j{BF;a+lokRs`z2LT7;~+8rUyfh4NP{!W<|aQ z9Z6c7kUS{#9piVvIAfW5RHBs)jcz8^ZH<+nN#VSYjr1XTfOOF?fBG6qr^y9f3 zl{D0mM$E^(?R`l#uJvNTSf^o)KLP^Uz?DAao##t&`P8^wQ8WCCMb38%U;WAM;mr5q zCYdLoVR~TwL%;@3BaWG#_HKz~dIvhq;K_k*vnyhGHKE1EBK`mG=blO?;j$OE@&^ z;aXJ@;wYgBA`#eXv4fLE@yNiD5$L1A=*v|AIm~*5oc*VVz851XI+)uv)Vmo&6!(ly z1eQlU8>%~N>IE{`@TZi6{y^x6+7@CKlXk_zRqOz2(mD4Xq*mi~sVAsNZHXH48P^%^E_iq9cBV%dUPCU*dBzxJAnc!IV4~|xh2FcLnlxh zX_&2EP@$F+-p^X0R%pmX2u^Wi#j7fOO2UMBLUg3xq+ZKFfzks3rHbCU`5*f?;6?fd z(1sMa%M=q#mTH_4oMvJ4@&V5X5ksILkgJHf#CBCS8s>RZM|oc{IUkTrB8DQ<<{VI0 z0^Au?XE|t=8`2NRSwWB{MrL3HMhIlHG9w*KGm!=P^zYIXdzpGywqhA6xYacvBN7>f zH(PA}G1`w4q;?Ih&#UXTQR&0YWIF>zqpxx-4)W~&d~|17w={ph$@|^Vn019dn1k7?$r!0 zDPC8Xw-);ATGAl#YJ}KPnA=vL=*;r{T>&Q_d11h z2PSN|W zr-uh!@BSL8ec$a*!;doUy3RqiXuYW&BXV4N4M z{P5$3r9irm90L?ZElMX4wJa}(f?-wX*`qX!`wDd2u30hr{{WTmbqe@f zc*?tPIwuDw1JqoSx7a$Qh=oBfRD1{0PQ3b)YD&{ssgL#ZW@6*Qq?k;VthYB{cQ<=J z+71R?hb^jNQqmlZ=lQ|dIqa;{pCg{+KEDOy@i#!X*)3*3wRLI68zo9CTVqT<+ag>$t}b36A%mh z3T^`VpZ31`E6VSS8W50_Mi516kd~6}9vY;(LqbX+3^Q=+i)jf^OdGkCoENdG2l>MpWx zh<97ToT|u28o3?eWQEVdEqUxvRLTZuvnRIMDRFq z^g-iBZ@Yya$6&N?(yM&wKlW%*dBYC_4r2k6aMzJKEO8_O6F^Q!I0flv1-rm&$7c%u zfTZq^=-2aG_D3cp)-a|zUYy;?W-Japr5Q)sp-oOC2qEP@$9M_qT>$$O0DiC_iwzrW zsBkR)Mky!z9SjkG^|l^?B%QoBJR+;{@78;bV{kr5%jD5!!{!Pjy$`x?9{ZmqA8%Lo z8Jq)q5Ca?FH$~KnXr(-h5G}2ra!V+xkm{2Y0b=gxB`}-ohdfz-3l1Z*OKhCUJPYDa z-5(Yw3~GJlEZ5m*fAyj5nm~rbshEyf)&_$SsL{`;JJllAuLFr*m4TcVnHC5lYNGRfG zF-fqd5*LjpCX>px5P9~9UZ|H=ILOx}oObQ3V->uBam25h>*?1bqR`68DpBYoPI zW}NTXrx$y*S8N7T^FR^GG(m^iLuq+%#~;h~Etp(<-{S*4Vv&1VfR$cB)dMA+`-Jds zwA&0vo}EX%C`Wpi{bSbQwE_C&cNOk={cm8ea3?Ro_>QE?nzkoE-3PmaXW#Kxcv{Oq zK*&+Z;vSs@(qZZxX3rL%d3|H4pgjs7X4u4wNpiq{E6Z(uCKrInP?mmny2?PEyo1+( zI0r+?Y_}dYF)*tX^C!@Rf$bJ579|`b&E7K)AyAHSeU-LK&*JlUFH47s4WVqM*dJse zRjr#@&%7nRb)@26pLr!TWdF_Fxz4~V;2#N$IO8~RCleh;GA5Hv-lNm 
zRs5J7v9ipd&8I=qRZY6+3>+_t2oMg6gA?+Aw_e#y;Q-p{%2u$K7`%)=n7 z3&QkEMDs&bTARH9)Sm4Ob|l`*x@|ugqcn9B%=gfYZ&7_7DN7`tI^mgvJGu}mxLaHa zVu23&&Uv6?Fl2|ekr8-#?le|8swpS;9%!v-T$)J0Mp)(v;>F>Kg`z+nEa)~S9|mkF zLK^C;{$e#pyY-Ni`4(8SIHe8%fC|qU3~4kQ;NBXe7~xBAa-e<7P|tP%=VckmEE*&( zoED(Zx=-p>$!)yR@n@e#%*U;U;kxFKN+6Yb#q*>WZ2}CEtkc(Y<**+3&=tQ@dZunH zg`m*muzr04`V#`~$sPdJseo%XK|oehy1>g^zz5IZR){8BMlBy(wDNvE)2TqE62PL; zB0h%}L;OyCrvf&;9khuPYJKqaZ2>;0|?w)roC=NuI!WLH_k_(eFfj&qxv& zcIcTMwdaQ+?~j{cG{b;0wGQ8Kkp{QpR1t70;AM#QZ?wK0e43)nKZ>M`@(jP>JxBLJ zxnd7um6#?zV9tL=tgc{4kA(Xs>A{(w4>nPt$jKj)n7Fp>Fc*#=KDKj=HXw=tVgYUP zA&m7p<;+}p_p-Woym3sW_E1gT zt6CeDdyAwO@mOO&;Em!{Spi8Q zo?@0f-FNyl_O&4bYnVNL_Z+<{{^N1)MaNPAU~!c;alEmig_2DenN;K90_bL%Po#eK z=AZ}v_-Tt%#LLh~nk{9Nx%3z;-GtQhvSEl{KN!=b&4dnxD0DCdjVNEdms}15xzChE zr8Nihq$?Dn6jP~>Fw^}3nE%npiF46-peIueILy^@s~ z=wtZSRgpqO{~YdkIAPIHCy9%`N((g2poDg=@l^xiL7d%|Y+pG5@e`FJ>4GPfsav>)O#ENac>6hI60KhM2_l!uR12d!M6sEWKbQDhWA<~gO!?tVqa`1 zoR&EN6E8CbQX@ZhB@-<1-=V_vB&HF`5MmTQqthoxM;qW zKb*?3{<=OX?fr~!&Bc&4v|QDS%aT{elD`(69`47E&TLn|`m6n!F-A}V**ZUyII9qr zviul!ki9+|$lmC0!U1-Ym2XLk!fPw$kARhlhfC+9ZQ-3)8p(@9U!6V`;*Ktr;iD1u zumR0t({_UIq0q3&5Hu8o2NRnGFNHE#+vCaP`f2o^AbUUHBLOoo8Ivc9RwZfxVwRzS z>I{>nGF&odZi2oG(obyX;S7ns%~VpDm7jrK+UFZAlG-rvo7JSjcTPJ4(@|M+IxQN! zdVQR(aTLXBtqI_oI2>K^ZdDs8J(xf3O6ivwEbR$5_y67uyjsWoCD{Bg5UFP6r!&$S%XYd$?DIX z=L4b$Lj~q7LN!0<7LQkTjmS@vPW`Mp>9X!qFRhtQod3cXAM3%*TT7{83sQMOGZ!K; z{fDgJYUU3H(B)mP3297E094Z3O6&7AHG!(u?Afd?$Lt=-ETP_RKkfF3uDI{0;#nz2^h+ss&A zj;NR6PhjMfvGm(E3<9NXu1W!MW-M{Y{Q#L6kGs(Xfsm8035~ferrwH>#*Hoo%jdR* z7&(C1A>e%W%hXpKT7@|2ARj_!s=iMKEnLxB**P0MBHd=)S1$ooQx}Xo6~i0qL0MJZ z;8b;jTDt`48o%vuDoEuZp-3v-IJah#0&}`08u0WMJCKU^->hk`xV|J9i3Kp(8LS2Q zPr1Q&Kj3bqpp;t1df@lK)5@S_D16ljjzKic3?iVw%AF&ga}Y<&lMHt)OXOy2x86m( zU9gjIrNGu3L{P5@NIfF)g^2H)7!F!hWCZw>Mji+W)HFpE6D7%36o0q$hHt{VNU)l} z-164gfYEvHy&goh4VpuI{^QHuZI0e1V0mx~&StO2hRmlt)>sRV>j#&i^Lx1$-2!?j z>S7`ZL#bfnhC()0Cmc-^GMKp#U9Q#mWHCo++V1&|hmn=|H&GOR$^8Rh+!HIG{(n%nKzh z5d5HOJj2+=LlAq>Ml!3`*9zwM zCSs}{v;no?yjSYnhX;X9dlC$0aOmTNF*PG_%$~mgshI#&a|N*mAD(f)%xLEY>XRH0 z(os!umE;VD^rir--pSIi?Bv}J<&k*yO{WFjYC&B+C4@4hx)+Vu>G%u+nh{c2iD?0r zM5@x@lT*l|#+JZHJ8AR%>Nj7k=w;{y~_sjrPyx9czInLvzqqn-rX$stqcTmv3fkL!S z+%ISA-M8s`Z?+ym7a%`(Ft{8De(|Aq>~K-)sOwE<0+o8y+&jy$q-V*@DWP81P8 zy9A_~D6xZp_-)9C_>m~vb1Q=)Hca0ks@VK|aBQE=8Ai^9ge4lnDmI5I_%GR^pg@4( zP{G?*f`s#!`K*M43OqlPU8q$wtzs>+)hr}$3lx{5+Kj2(?|33He||S$hw-)f z@iokR2W8cOZj=U$^2d7lL{fSk0;MMmOzS~~rz9*yy|v4kvIG76rkm(qjBRg#_kgapDC};fAsOrpMNfScyloe@{BfK0JJo ziaO&V(t|gZtU8wW!RlSVGhj?{52T9(1mzy=-IP96_Qg^ekK}mvJ?AYWEiISLUy<57 zkRoM<;uAO0%`a;M|L$&2zOL8tz^fwP2SO0K;54S7#r4q4kVQFPu0|BEyrX)Nv~y{$ zw^gYu*JSd^o%IraJ>?g~^PfS#>x1^a=N-rQzVT06SbMb$$}>rx((*&Z0^7_Uo3(9AJ|!o{c%doT7WxDWb)0biEf=s7z`LTtRPZ#mwzr*s+^!vS~> zLiP06U>9*F1{{J2=Ab+ku}EwWLm=6f7zon+YUC7bxSD6Qt5Ts1%Mg>CSqs?}U$~i7 zU`;s=ZXi+zI7XR?4g*&rAdqPC1+7m$*|7{YQ7RNp4JpmA%Z!uE1OnHryvp@ zpNharL&;vg)b>+kE@N4(^QT^v=UAU@j|jYDw^P6{7gS(N1~n#uX4lJ>h-So3 zS5-f72c!U8Y09dCL$b^^m!PD&FRpFnK%W-6F5}CT{U#Q2nOuc`$>ox#%5}bWQnGO<+xile^N=kSUVjEX$4)w z@UG4zi?P^0<_Z|MZrEaeG;9Hxf52LtpdE>1ux^H<3UO*p+-~b4k5bg~Z!jZDNK{kK zv*N2wdFS~vZ^eJSr7|Yz-=P)>j#~`vLfJhEs!OeG>tQ!4nt6GELWOo5k7SFgYD*p( z7lWW!vw-z^&~6(J;kw1|sj^#=sdtpWeNi^5W6tJfo14cdd++LG|8ur)bLUzA!wmgc zDG8;uckxxsP^m{3djLAvG(?b=Z|96}PDSRjhu zrZ8fAG+{6PDAFZEH|pF@4gRV@0ze3Wn)yXdSbS=2Wvw?&8=`LSQs5*SvBOGG&g+!> z_f1C4NGHyos?O+OBAdG!Zr=5wvBK1BC$SEp%ErvFIAfN1?vZV}yUupYvR0?u7PI4Y z(FsCfDUF?9z1NoKIPUb*<9(gY$oMncJ>NQB7<(;CIU!opvBaj4s%%4rqaHI2@137j zIh}93%5MmPL&vpY2vQ#ylTvBKg?xxL8|CWp_+ZF3W|j2{u`>|OC%!z$^&<7}wPnlA zd;+lPa&*`%YlSE+`M1Boi~JlIYZE&}eP-b1D0^pFE2patOX=g!0LXac&u5r>l7PKT 
z9DguYN@42?vMir@TFWG$jI(7<{%ECn+icaL6PTD* za((N}#1$e)rT%=Ex_tZP?~~|Y4ZZJ=KRRMtmUx-|5aqL_lD9&`c}lp1=F)M#DW`&w5hs?%NocZ1HVUUK5 z4Qu{%6Uaceo>b{fDZRWKA-qrne3RJ(BhG=kCj5q*x9+&X$D{}sb#kFkY(P;Z`8w|w zT5$lk9oXYc+=3YM#18s<6fIT?3%rw<{WOiLyxF%|0*{WK6hy*RmuQ~0C+$1gP20OZ zpHS%BH5~d`XKlM^etC3zRj{#(N;Jzap-<#9ThX3Dbon*`TK#1lyczPx0XkCBW=Xt7 zo|hj56I+L#an6}T=n_z-s^Sr(rknXifBs}Z+s(Tv=Tb8J2AyeWOx0M7eQ$F>Pp-PV z;^q&Tab>PE9rr!b(@-fuW5mvtl|;XTl{b{v1gl1*%#)v-Ol#QNnidx)E5}DepR8n2 zgLz={x(NuaB!S(hRUZjVtaMJTfiHgiQ2w?l=yMqIx2)OVkyZoRzb97ZznJonAkGB2 z1X-m^S1{_P5ZNie7}q5pVw;|D#wm@O?DZ96M@>~U_*6{kZ&v%7Am7{F(SWWT2V;zA zZJ4Ieu}a3n4;+-J*o^jMLj0_|pJOpGS1YI`1H?S7lFK6(Y+8J|#M#T)rL-G|hI;Zr z??o0pT(1L`rmTdrgK-B9QxVsb13&IyS>9(WDCiNSFdT8hY6j{GJcYdX40_hdV)#le zyLZuomE!epXo-UkvawA54c_l}0VuH9r8N#L`V&bjH-K5Xl~Qsr92G01@qPVqLQa;W z1HF!`K=nMR9D4<7l_`&QuWE+(0gnn^xu!f_s(|bPz}~yDNO@e~DTU3wlz$4wB}^mEx6<5AKu@5 z00Zg{y;;hYNe!JxB|0*69UeXx>Kj&uy%4R#rqKI(_S`1B%;*nr2}Aj@;Yh;1MSL@|cE)*OuB&VY4a+Q%+B_sj0+A$M z<)lGHXeJ`?Vaslj`s0Vwb9)C>iNNpHOs><`B;~sZZfS;|dfF2Azku>WmX3$REw~^I zw<|%ks4QG;@B0Oiq&JT9jbu<$tsals{x!ah?mHV^&PGy`3fw;=v4a@ul+~9)6dLBO zVtE`dsv5_7{u;pI%u_UU;Xnur{c=3|pw<3}u5;;!CjefyaVP$=U-*K4k2-QIB8a?Q zVqz~bKa0+|V#S|~3wo9{p*21|{RA-}Op+ z(|9a|jazjD=y_-GR2-SI>cwTlYhz445C(j`EVNA#RQ{PtPBxqo( zNPEOi_-!+bTR%+j?Lxvp+Jo6|a?1*JL=<>-W2da#fXzK|rl;okc^ZP6tqRSm_!AgC z7aLZm(?+>b`|nuMqMx`q>;bSE>iDxbi{P@Q-BPyt#4VU4%tIv(%nZ0{FF08Z$CYCS zA@m1QUZX1eJ7rvUQJ32?R@$~H{0Ib4Vw8e{n$6Np)}m-f)khIGn&ny;qC|HinCpa7 zOGIl-?wPd_eQ zR3HU`FzEp6u%|NSO8#N%#nEGD@Fu zdr}%$>3r)~1nYI2E0Dl&uxl&no9=1!0L+|qIR|K5aM(e`WFhENcFAbdeX#&>`l!T3 zT*I2{@0p)m(~B(V-s`@z+CpL`V5hJ3(!J8AvUWzN%JGTV2=c!F9?x7qeM*W2;zy0| zTuO-(?j?kANns!{+w?XaOq=*)>H3Ad$-!*mASg}*S)v$yLR(2cT3R35-# zrPZXnhM6O414s`bc+{-Ss34`_H=Gqn&&j<|eP#kAywv(&{>D6TV9Ye5#oaA0`B}8H z-f%4?pbK8a)U19a34an`YKzp(AXNeVPBppIf;qKYWLCbf&Ri)$t zLZXu@ub&g{`P}GFMmXQt)VhJI5L;z?BdWpN@J(B?_94xqJl2g2QJ*U+>>i~pg3iI` z@;!GVe-@P|jXwSt&ZDIY*R?w5$#{H8uxLMjjAuwWxffUgy0kMS-adiMh&KBUJfdxK zha9)wfJ2$3yKVl!u*MsB{_~RI{Shpg)_&82xnITm4Cd;MdSg&~1&WcRvZ$X=IcLV6 zH8~YsuV4WaM)a!uiuG&m7m}`@gg&kK44w;K$@^9xcyucWgIOBXM8l^>L*|H*L`}5) zlDFf(@!`*+?}Ta{0TA>PXpgIh@sbvrPen z+jq1@*N%EJ%)n0ZDGI;h7MY$>mBAuKQfZc@t39`0{4Qps;( z`7a@QRSIJRn6Qir;+@yL+h<4>u77~zYFwPZRWa_AM*Li~lB>?hXQvK9ogm6}k;Pic zrO&5UnK#p@{nw`4<!AkI`=r;&j$IBwg-7CbBKX5WA<^-XMu3)>#`F1ZjDHGgaun+u+3 zEm->;sI7v-Rl=ktiV9*h@%i-Sn21a7Kl}=%eiPbOQ*E7OQ^9-Z?w$5cp$|VFmy}KVBn^?0x?;o z@E-C``IeaObb+BwlKkcAo6dUU6~+Xyo)<^McOOk})&54NA3$OdpwWPJ@!FlLGHZVV z)K&i8fXUlR0Ovp0cp1J||jdIZk6a4yj z<)deg>XLs?*i?#tFE(C1-TG!g7IjT_^OK{S-DL61VOJ#rRn?NoK!vCb&Sz_NTEIaY z$Yg#M(zP<)G>=0H)53nP0hH(^I5BRO7FDQ2=`FW2m2Va-{gN zQPi!$S3Neyp9DaffUEI>-YT~3kT&(T{f6-w&>!>r)794X;LWr}9)S^^DCKNM8e%z1FTNbeMskb;nfPh!=R+D}&ud8x zSa;vyu7DxVPf5lHQ_FNcFi%2#iB)i_Xmz!c!{-!yT$Fc1fTi2w>Q-s2)~j z@R1k=N5a|3x7l5QbeaP%pyvR{IM|#eS6R*45(n%HSo1hlwC{1SZhIi}u)IlOw@9gL z6yh=YaRbQ@WGJBDtCAen4&P{cS5In(i9Z0%PkTb^j@!FMDIkZf!Z0L5E);(_2VT#x zrLNNyrByvtaCu3ii6T8^7q4RJog%K2!Rv(scpa7HR0BuX#?5pJR{IyL<7ClB$F~5;W(?S~ zbfYJ%W-uhWJYRas?LirSnO24cjch1~r$!EY zc?DKsnJSft!JGjbm$Di|asgkk zFPeuj(LYK;Q#!l8?;3U=+3N3u(mem~z4;{iO$99?D@v6_wGCg!hdb0QeRC_legdk_ z>Lm?6N0M3)3ka|^K{BbW*WYjnBR#`+>f25577`LECc!m)w~-v&n`4}BJ|$;AS$eMg z$4{?Jb-tn#SdaT&gbBuH!lH|1n%88Tmmg~sC)m7LYnJ`c7fn{LQ}}x>k;N-yMaKFC7DTX??&AwOinWs#VXd zLcg*CHP7P)@dpu!QTp$IFF>Qc9lhPN7OM=E>u}hXiT3c8$a_e6^?cs)CU)q2XGpN_!kcm#%liyU{|dR&UR z2N~Sq@i?CD6F?Hb)dsjzeII}KVgad~xY8e-A(bE-4&>L1O~JlBMJ0NPuBLu&h{nAZ z7f})10hZL3`Ud{0Zc8%CJ@~P|$n7S;t{av17tvpwYz%>Q-{DJRw=Q3$r#idxLLCq`pUbVvUPi3$|O~^xhX;+vK>BM{&VU zfLl*<3}WqQIPtvRk96;oD$QrWj0KKd_1(H({;G{GGWsE)j@|--zh39yPjmHQcB6Eb 
zDYPq&Ntd4yd*b>qnt+yksIilzeN}ZVdo{e@3(Kz`^7C+1GR|ZTuK6woWXO;o6N;vgiS;nAf+zx9&- zCe1d)GmS$7pTe3>eMg#d?Ze&sRJd9(k3;A}#r9AXII&?wq-E?J`fC7a>Cui(`TjWa zt-0j&F4Keusp*e_v14Q0p0HjZkSkoJ+QM5=IGl!BDTIzhnH|3Kx!6}(q(ofZu!ikL zi|JvV;_dE$;_34ZBC$jYDGvQxyEoq`JAI`20ldvX%xGig_BmJv_Zp44QmxthqfT<4 zR$|xqS*Zl^)A=HJ3WJEkN#;8)1*eNeu3jlIeFE)OG~OM@U@Xcmxj0M7JTC$3aDem(vZeUYDS=jg& zs;MKS7tAcxerYcDqj!ltamH^!FRZbs(tPTplgWwnLCv=Uivl{WWrN~01GX8OKJN>D z)6u%F>jg=Sky0ETzvT{G@I)TsZ_=(muhFC`SvV4rrE{M$yxT|3l~s|bmi3lXj9pB{ z8AtC07=_>fu){UK^1WJ?Yx2rKG7>}Qy7C)hBJ+C#uzH1955s6a1m@>x+ z_e9f%*xOm7I+L%A5Cj%~=`wNK0n4LyckA}jGU;MZV-FuBoC6n@U`wqHFUL4lW#O>j zvg)X!E|LS@i$}L<-DrKRCiuDUt+vOPYdtmiXc+pZ^FMoyMA?71J%iv4NJ(4K0eGnB z6}Xh!LFb-t^Q1194T9C!vh9I>EDgJ5DtEO_Wp#@mSQF}I%kTb*Dp+%laY&%ZNQ)s| z@?KlOzimFEpT1w7Y9y$zm}HK0yNd`m&UJek1F5lpeC}SmC<^&b#%K* z43|@aAPlz$jo6ifgTiWLHKfw9yl7SB?{cVdcv1o2lJ;1iKF{Co>TN!->rMz`um^X362Rq z!PQ9B7_68uEH1AyK(;c7pW;mkZOWRi18vl`4j8;sGeQy$$uAN^POx$I8}#i#z+}J; z-Q8m+?)WJ~>(7igZ|%b)2QlZ%U|ObUV^>12BUY4GWKE-e-_7(~BpcU8_$prS>m0ON z0tt?L;j&t_xR_Hkr75UiK;1Kp;b^8HkNdQ5>ru8Pet@OOfiJ))>h%GscEgV#)(1^& zEIxlMPi-*Ooq6i97HMWK{Gk0hW`d>xGx7F_RjfY2{PpJKNL(2PQ7|G58YHqUa6Gs6 zjP@(%soB#7OS(KtyFI`Y_qT+rc8%Q?dr(e%&xt}ec!ABNY)#`@Y6(IxCb}7=JMqKR z^{-+tNVSA?qT)tMqJg$yEkj>>Ue-5E(Bey4oW)2eG$n2xJ8EMXG&^ElQEE%F-KXbm z)?o|w`3G{C5-&f}naO5J>fLV-o3dyo`62p6xqesWwyncdeoSDvAP!C%J6TiQpaO9! zz|TY)b-Np%CWXb&0~4ep{pN&^kA zR<%=fQ$`bwA*M12BbkkZ1Gmb93*`zH!Xq3`NTUF7aQfi&S%pZbV<-|$TG<#Wi|?m5 z|LZyx0e%lYKB~r+1T5pd(~ZfId7v8$WN~!$Y!h;oduS- zAbMm&nQvcZzjpbHG-@fQU7H{~%Ip2AgtPvF5auBws`SMRlxs)XPdkPrxc9iHt$0?A zMjJyM;m&s{U3sLlwv4QVIF2A7We$b1{wLY+nx6K1MV&A%h>r5*J5t6@nEBj zO9o*3zJi|RJ&3;A58H0OUKwDoqeQnyuS#YXNX35P4}!y>R}?LWZg9FQ);L}Zlg2ib zP6K_?VbFV!&7wV__H5{itY*1hn(Y7#OL*!_(=UF9tJarzuU5bW%~Jptytw-F)!sOx z;o)b(qwFt@GO*>|f^lH%DG9RAIzw+k&0|vNQ2`Pb!O&Hicw|vK5&Xb;kRg_X7pUfC ziUf4%`EHlA#vy$KGUA+tg#At&3cDPuY-;eX(@EfQz|39w<8nOEs+O1#*bSdjNBAi`R`IpoI2R?O+r&G?%Lh8>m?6>v2Q&ZTAu;yG_a;}rSJ43?h`dMagQn_Z zm*sLS5#jBs{^i~E2_jdg6FGnj=4G}3N3qrShG*eziWVtIQB=~I9mXWMxf_AL1O=)F z=eiuRt%? zS0sP#QF67?U<%aNPZ4^x13we!Kf%^fFEC8!65XC&(;dXPesGX`L`tQM8kUFjG$AYa zhB}S-aPj`Ta0zjB#*Pq6Prul*jEWP?gv(F0ClnDHr@r|YkiyBzUdRjl5v>QWqUU}! z#`S3jug!rUV^qYxcv8sdTHFb39A!u$OKQ-6eaAvv4gn-e6k>z!t?O--41oAlOuhPl ze?`Y7|K~@dd`jd-p7--D5|L<5c-Zb1PFX|6AF9>6ul ztdxOHzkfUFe?Q@WFXDfD;eRLMe{lh-75=|ih2X3EHlAnsIuq4;Xy6ZAUR|z6#v Optional[dict]: + """ + Validate image size against model capabilities at MCP boundary. + + This performs strict validation to ensure we don't exceed model-specific + image size limits. Uses capability-based validation with actual model + configuration rather than hard-coded limits. 
+ + Args: + images: List of image paths/data URLs to validate + model_name: Name of the model to check limits against + + Returns: + Optional[dict]: Error response if validation fails, None if valid + """ + if not images: + return None + + # Get model capabilities to check image support and size limits + try: + provider = self.get_model_provider(model_name) + capabilities = provider.get_capabilities(model_name) + except Exception as e: + logger.warning(f"Failed to get capabilities for model {model_name}: {e}") + # Fall back to checking custom models configuration + capabilities = None + + # Check if model supports images at all + supports_images = False + max_size_mb = 0.0 + + if capabilities: + supports_images = capabilities.supports_images + max_size_mb = capabilities.max_image_size_mb + else: + # Fall back to custom models configuration + try: + import json + from pathlib import Path + + custom_models_path = Path(__file__).parent.parent / "conf" / "custom_models.json" + if custom_models_path.exists(): + with open(custom_models_path) as f: + custom_config = json.load(f) + + # Check if model is in custom models list + for model_config in custom_config.get("models", []): + if model_config.get("model_name") == model_name or model_name in model_config.get( + "aliases", [] + ): + supports_images = model_config.get("supports_images", False) + max_size_mb = model_config.get("max_image_size_mb", 0.0) + break + except Exception as e: + logger.warning(f"Failed to load custom models config: {e}") + + # If model doesn't support images, reject + if not supports_images: + return { + "status": "error", + "content": ( + f"Image support not available: Model '{model_name}' does not support image processing. " + f"Please use a vision-capable model such as 'gemini-2.5-flash-preview-05-20', 'o3', " + f"or 'claude-3-opus' for image analysis tasks." + ), + "content_type": "text", + "metadata": { + "error_type": "validation_error", + "model_name": model_name, + "supports_images": False, + "image_count": len(images), + }, + } + + # Calculate total size of all images + total_size_mb = 0.0 + for image_path in images: + try: + if image_path.startswith("data:image/"): + # Handle data URL: data:image/png;base64,iVBORw0... 
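+                    # The payload after the first comma is base64; decoding it
+                    # yields the true byte size, since base64 encoding inflates
+                    # data by roughly a third.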
+ _, data = image_path.split(",", 1) + # Base64 encoding increases size by ~33%, so decode to get actual size + import base64 + + actual_size = len(base64.b64decode(data)) + total_size_mb += actual_size / (1024 * 1024) + else: + # Handle file path + if os.path.exists(image_path): + file_size = os.path.getsize(image_path) + total_size_mb += file_size / (1024 * 1024) + else: + logger.warning(f"Image file not found: {image_path}") + # Assume a reasonable size for missing files to avoid breaking validation + total_size_mb += 1.0 # 1MB assumption + except Exception as e: + logger.warning(f"Failed to get size for image {image_path}: {e}") + # Assume a reasonable size for problematic files + total_size_mb += 1.0 # 1MB assumption + + # Apply 40MB cap for custom models as requested + effective_limit_mb = max_size_mb + if hasattr(capabilities, "provider") and capabilities.provider == ProviderType.CUSTOM: + effective_limit_mb = min(max_size_mb, 40.0) + elif not capabilities: # Fallback case for custom models + effective_limit_mb = min(max_size_mb, 40.0) + + # Validate against size limit + if total_size_mb > effective_limit_mb: + return { + "status": "error", + "content": ( + f"Image size limit exceeded: Model '{model_name}' supports maximum {effective_limit_mb:.1f}MB " + f"for all images combined, but {total_size_mb:.1f}MB was provided. " + f"Please reduce image sizes or count and try again." + ), + "content_type": "text", + "metadata": { + "error_type": "validation_error", + "model_name": model_name, + "total_size_mb": round(total_size_mb, 2), + "limit_mb": round(effective_limit_mb, 2), + "image_count": len(images), + "supports_images": supports_images, + }, + } + + # All validations passed + logger.debug(f"Image validation passed: {len(images)} images") + return None + def estimate_tokens_smart(self, file_path: str) -> int: """ Estimate tokens for a file using file-type aware ratios. 
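# A minimal sketch (not part of the patch; the helper name and numbers are
# assumed) of the sizing rules the method above implements: data URLs are
# measured by their decoded base64 payload, file paths by on-disk size, and
# custom models are capped at 40MB regardless of the limit they advertise.
import base64
import os

def sketch_image_size_mb(image: str) -> float:
    if image.startswith("data:image/"):
        _, payload = image.split(",", 1)
        return len(base64.b64decode(payload)) / (1024 * 1024)
    return os.path.getsize(image) / (1024 * 1024)

effective_limit_mb = min(100.0, 40.0)  # a custom model advertising 100MB is clamped to 40.0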
@@ -1131,6 +1276,9 @@ When recommending searches, be specific about what information you need and why ) return [TextContent(type="text", text=error_output.model_dump_json())] + # Extract and validate images from request + images = getattr(request, "images", None) or [] + # Check if we have continuation_id - if so, conversation history is already embedded continuation_id = getattr(request, "continuation_id", None) @@ -1215,6 +1363,12 @@ When recommending searches, be specific about what information you need and why # Only set this after auto mode validation to prevent "auto" being used as a model name self._current_model_name = model_name + # Validate images at MCP boundary if any were provided + if images: + image_validation_error = self._validate_image_limits(images, model_name, continuation_id) + if image_validation_error: + return [TextContent(type="text", text=json.dumps(image_validation_error))] + temperature = getattr(request, "temperature", None) if temperature is None: temperature = self.get_default_temperature() @@ -1247,6 +1401,7 @@ When recommending searches, be specific about what information you need and why system_prompt=system_prompt, temperature=temperature, thinking_mode=thinking_mode if provider.supports_thinking_mode(model_name) else None, + images=images if images else None, # Pass images via kwargs ) logger.info(f"Received response from {provider.get_provider_type().value} API for {self.name}") @@ -1298,6 +1453,7 @@ When recommending searches, be specific about what information you need and why system_prompt=system_prompt, temperature=temperature, thinking_mode=thinking_mode if provider.supports_thinking_mode(model_name) else None, + images=images if images else None, # Pass images via kwargs in retry too ) if retry_response.content: @@ -1398,6 +1554,7 @@ When recommending searches, be specific about what information you need and why continuation_id = getattr(request, "continuation_id", None) if continuation_id: request_files = getattr(request, "files", []) or [] + request_images = getattr(request, "images", []) or [] # Extract model metadata for conversation tracking model_provider = None model_name = None @@ -1417,6 +1574,7 @@ When recommending searches, be specific about what information you need and why "assistant", formatted_content, files=request_files, + images=request_images, tool_name=self.name, model_provider=model_provider, model_name=model_name, @@ -1519,6 +1677,7 @@ When recommending searches, be specific about what information you need and why # Use actually processed files from file preparation instead of original request files # This ensures directories are tracked as their individual expanded files request_files = getattr(self, "_actually_processed_files", []) or getattr(request, "files", []) or [] + request_images = getattr(request, "images", []) or [] # Extract model metadata model_provider = None model_name = None @@ -1538,6 +1697,7 @@ When recommending searches, be specific about what information you need and why "assistant", content, files=request_files, + images=request_images, tool_name=self.name, model_provider=model_provider, model_name=model_name, diff --git a/tools/chat.py b/tools/chat.py index 08c5486..72cbdcc 100644 --- a/tools/chat.py +++ b/tools/chat.py @@ -20,12 +20,25 @@ class ChatRequest(ToolRequest): prompt: str = Field( ..., - description="Your question, topic, or current thinking to discuss", + description=( + "Your thorough, expressive question with as much context as possible. 
Remember: you're talking to " + "another Claude assistant who has deep expertise and can provide nuanced insights. Include your " + "current thinking, specific challenges, background context, what you've already tried, and what " + "kind of response would be most helpful. The more context and detail you provide, the more " + "valuable and targeted the response will be." + ), ) files: Optional[list[str]] = Field( default_factory=list, description="Optional files for context (must be absolute paths)", ) + images: Optional[list[str]] = Field( + default_factory=list, + description=( + "Optional images for visual context. Useful for UI discussions, diagrams, visual problems, " + "error screens, or architectural mockups." + ), + ) class ChatTool(BaseTool): @@ -42,7 +55,8 @@ class ChatTool(BaseTool): "Also great for: explanations, comparisons, general development questions. " "Use this when you want to ask questions, brainstorm ideas, get opinions, discuss topics, " "share your thinking, or need explanations about concepts and approaches. " - "Note: If you're not currently using a top-tier model such as Opus 4 or above, these tools can provide enhanced capabilities." + "Note: If you're not currently using a top-tier model such as Opus 4 or above, these tools can " + "provide enhanced capabilities." ) def get_input_schema(self) -> dict[str, Any]: @@ -51,13 +65,27 @@ class ChatTool(BaseTool): "properties": { "prompt": { "type": "string", - "description": "Your question, topic, or current thinking to discuss", + "description": ( + "Your thorough, expressive question with as much context as possible. Remember: you're " + "talking to another Claude assistant who has deep expertise and can provide nuanced " + "insights. Include your current thinking, specific challenges, background context, what " + "you've already tried, and what kind of response would be most helpful. The more context " + "and detail you provide, the more valuable and targeted the response will be." + ), }, "files": { "type": "array", "items": {"type": "string"}, "description": "Optional files for context (must be absolute paths)", }, + "images": { + "type": "array", + "items": {"type": "string"}, + "description": ( + "Optional images for visual context. Useful for UI discussions, diagrams, visual " + "problems, error screens, or architectural mockups." + ), + }, "model": self.get_model_field_schema(), "temperature": { "type": "number", @@ -68,16 +96,29 @@ class ChatTool(BaseTool): "thinking_mode": { "type": "string", "enum": ["minimal", "low", "medium", "high", "max"], - "description": "Thinking depth: minimal (0.5% of model max), low (8%), medium (33%), high (67%), max (100% of model max)", + "description": ( + "Thinking depth: minimal (0.5% of model max), low (8%), medium (33%), high (67%), " + "max (100% of model max)" + ), }, "use_websearch": { "type": "boolean", - "description": "Enable web search for documentation, best practices, and current information. Particularly useful for: brainstorming sessions, architectural design discussions, exploring industry best practices, working with specific frameworks/technologies, researching solutions to complex problems, or when current documentation and community insights would enhance the analysis.", + "description": ( + "Enable web search for documentation, best practices, and current information. 
" + "Particularly useful for: brainstorming sessions, architectural design discussions, " + "exploring industry best practices, working with specific frameworks/technologies, " + "researching solutions to complex problems, or when current documentation and " + "community insights would enhance the analysis." + ), "default": True, }, "continuation_id": { "type": "string", - "description": "Thread continuation ID for multi-turn conversations. Can be used to continue conversations across different tools. Only provide this if continuing a previous conversation thread.", + "description": ( + "Thread continuation ID for multi-turn conversations. Can be used to continue " + "conversations across different tools. Only provide this if continuing a previous " + "conversation thread." + ), }, }, "required": ["prompt"] + (["model"] if self.is_effective_auto_mode() else []), @@ -157,4 +198,7 @@ Please provide a thoughtful, comprehensive response:""" def format_response(self, response: str, request: ChatRequest, model_info: Optional[dict] = None) -> str: """Format the chat response""" - return f"{response}\n\n---\n\n**Claude's Turn:** Evaluate this perspective alongside your analysis to form a comprehensive solution and continue with the user's request and task at hand." + return ( + f"{response}\n\n---\n\n**Claude's Turn:** Evaluate this perspective alongside your analysis to " + "form a comprehensive solution and continue with the user's request and task at hand." + ) diff --git a/tools/codereview.py b/tools/codereview.py index 78f735a..c2b3c11 100644 --- a/tools/codereview.py +++ b/tools/codereview.py @@ -41,6 +41,10 @@ class CodeReviewRequest(ToolRequest): ..., description="User's summary of what the code does, expected behavior, constraints, and review objectives", ) + images: Optional[list[str]] = Field( + None, + description="Optional images of architecture diagrams, UI mockups, design documents, or visual references for code review context", + ) review_type: str = Field("full", description="Type of review: full|security|performance|quick") focus_on: Optional[str] = Field( None, @@ -94,6 +98,11 @@ class CodeReviewTool(BaseTool): "type": "string", "description": "User's summary of what the code does, expected behavior, constraints, and review objectives", }, + "images": { + "type": "array", + "items": {"type": "string"}, + "description": "Optional images of architecture diagrams, UI mockups, design documents, or visual references for code review context", + }, "review_type": { "type": "string", "enum": ["full", "security", "performance", "quick"], diff --git a/tools/debug.py b/tools/debug.py index 1b95b3a..af21272 100644 --- a/tools/debug.py +++ b/tools/debug.py @@ -24,6 +24,10 @@ class DebugIssueRequest(ToolRequest): None, description="Files or directories that might be related to the issue (must be absolute paths)", ) + images: Optional[list[str]] = Field( + None, + description="Optional images showing error screens, UI issues, logs displays, or visual debugging information", + ) runtime_info: Optional[str] = Field(None, description="Environment, versions, or runtime information") previous_attempts: Optional[str] = Field(None, description="What has been tried already") @@ -69,6 +73,11 @@ class DebugIssueTool(BaseTool): "items": {"type": "string"}, "description": "Files or directories that might be related to the issue (must be absolute paths)", }, + "images": { + "type": "array", + "items": {"type": "string"}, + "description": "Optional images showing error screens, UI issues, logs displays, or 
visual debugging information", + }, "runtime_info": { "type": "string", "description": "Environment, versions, or runtime information", diff --git a/tools/precommit.py b/tools/precommit.py index 0d5a909..4d1668c 100644 --- a/tools/precommit.py +++ b/tools/precommit.py @@ -78,6 +78,10 @@ class PrecommitRequest(ToolRequest): None, description="Optional files or directories to provide as context (must be absolute paths). These files are not part of the changes but provide helpful context like configs, docs, or related code.", ) + images: Optional[list[str]] = Field( + None, + description="Optional images showing expected UI changes, design requirements, or visual references for the changes being validated", + ) class Precommit(BaseTool): @@ -170,6 +174,11 @@ class Precommit(BaseTool): "items": {"type": "string"}, "description": "Optional files or directories to provide as context (must be absolute paths). These files are not part of the changes but provide helpful context like configs, docs, or related code.", }, + "images": { + "type": "array", + "items": {"type": "string"}, + "description": "Optional images showing expected UI changes, design requirements, or visual references for the changes being validated", + }, "use_websearch": { "type": "boolean", "description": "Enable web search for documentation, best practices, and current information. Particularly useful for: brainstorming sessions, architectural design discussions, exploring industry best practices, working with specific frameworks/technologies, researching solutions to complex problems, or when current documentation and community insights would enhance the analysis.", diff --git a/tools/thinkdeep.py b/tools/thinkdeep.py index 088c07f..fcf65a5 100644 --- a/tools/thinkdeep.py +++ b/tools/thinkdeep.py @@ -33,6 +33,10 @@ class ThinkDeepRequest(ToolRequest): None, description="Optional file paths or directories for additional context (must be absolute paths)", ) + images: Optional[list[str]] = Field( + None, + description="Optional images for visual analysis - diagrams, charts, system architectures, or any visual information to analyze", + ) class ThinkDeepTool(BaseTool): @@ -60,7 +64,13 @@ class ThinkDeepTool(BaseTool): "properties": { "prompt": { "type": "string", - "description": "Your current thinking/analysis to extend and validate. IMPORTANT: Before using this tool, Claude MUST first think deeply and establish a deep understanding of the topic and question by thinking through all relevant details, context, constraints, and implications. Share these extended thoughts and ideas in the prompt so the model has comprehensive information to work with for the best analysis.", + "description": ( + "Your current thinking/analysis to extend and validate. IMPORTANT: Before using this tool, " + "Claude MUST first think deeply and establish a deep understanding of the topic and question " + "by thinking through all relevant details, context, constraints, and implications. Share " + "these extended thoughts and ideas in the prompt so the model has comprehensive information " + "to work with for the best analysis." 
+ ), }, "model": self.get_model_field_schema(), "problem_context": { @@ -77,6 +87,11 @@ class ThinkDeepTool(BaseTool): "items": {"type": "string"}, "description": "Optional file paths or directories for additional context (must be absolute paths)", }, + "images": { + "type": "array", + "items": {"type": "string"}, + "description": "Optional images for visual analysis - diagrams, charts, system architectures, or any visual information to analyze", + }, "temperature": { "type": "number", "description": "Temperature for creative thinking (0-1, default 0.7)", diff --git a/tools/tracer.py b/tools/tracer.py index 491dbe8..3439053 100644 --- a/tools/tracer.py +++ b/tools/tracer.py @@ -22,11 +22,29 @@ class TracerRequest(ToolRequest): prompt: str = Field( ..., - description="Detailed description of what to trace and WHY you need this analysis. Include context about what you're trying to understand, debug, or analyze. For precision mode: describe the specific method/function and what aspect of its execution flow you need to understand. For dependencies mode: describe the class/module and what relationships you need to map. Example: 'I need to understand how BookingManager.finalizeInvoice method is called throughout the system and what side effects it has, as I'm debugging payment processing issues' rather than just 'BookingManager finalizeInvoice method'", + description=( + "Detailed description of what to trace and WHY you need this analysis. Include context about what " + "you're trying to understand, debug, or analyze. For precision mode: describe the specific " + "method/function and what aspect of its execution flow you need to understand. For dependencies " + "mode: describe the class/module and what relationships you need to map. Example: 'I need to " + "understand how BookingManager.finalizeInvoice method is called throughout the system and what " + "side effects it has, as I'm debugging payment processing issues' rather than just " + "'BookingManager finalizeInvoice method'" + ), ) trace_mode: Literal["precision", "dependencies"] = Field( ..., - description="Trace mode: 'precision' (for methods/functions - shows execution flow and usage patterns) or 'dependencies' (for classes/modules/protocols - shows structural relationships)", + description=( + "Trace mode: 'precision' (for methods/functions - shows execution flow and usage patterns) or " + "'dependencies' (for classes/modules/protocols - shows structural relationships)" + ), + ) + images: list[str] = Field( + default_factory=list, + description=( + "Optional images of system architecture diagrams, flow charts, or visual references to help " + "understand the tracing context" + ), ) @@ -44,11 +62,15 @@ class TracerTool(BaseTool): def get_description(self) -> str: return ( "ANALYSIS PROMPT GENERATOR - Creates structured prompts for static code analysis. " - "Helps generate detailed analysis requests with specific method/function names, file paths, and component context. " - "Type 'precision': For methods/functions - traces execution flow, call chains, call stacks, and shows when/how they are used. " - "Type 'dependencies': For classes/modules/protocols - maps structural relationships and bidirectional dependencies. " + "Helps generate detailed analysis requests with specific method/function names, file paths, and " + "component context. " + "Type 'precision': For methods/functions - traces execution flow, call chains, call stacks, and " + "shows when/how they are used. 
" + "Type 'dependencies': For classes/modules/protocols - maps structural relationships and " + "bidirectional dependencies. " "Returns detailed instructions on how to perform the analysis and format the results. " - "Use this to create focused analysis requests that can be fed back to Claude with the appropriate code files. " + "Use this to create focused analysis requests that can be fed back to Claude with the appropriate " + "code files. " ) def get_input_schema(self) -> dict[str, Any]: @@ -57,13 +79,26 @@ class TracerTool(BaseTool): "properties": { "prompt": { "type": "string", - "description": "Detailed description of what to trace and WHY you need this analysis. Include context about what you're trying to understand, debug, or analyze. For precision mode: describe the specific method/function and what aspect of its execution flow you need to understand. For dependencies mode: describe the class/module and what relationships you need to map. Example: 'I need to understand how BookingManager.finalizeInvoice method is called throughout the system and what side effects it has, as I'm debugging payment processing issues' rather than just 'BookingManager finalizeInvoice method'", + "description": ( + "Detailed description of what to trace and WHY you need this analysis. Include context " + "about what you're trying to understand, debug, or analyze. For precision mode: describe " + "the specific method/function and what aspect of its execution flow you need to understand. " + "For dependencies mode: describe the class/module and what relationships you need to map. " + "Example: 'I need to understand how BookingManager.finalizeInvoice method is called " + "throughout the system and what side effects it has, as I'm debugging payment processing " + "issues' rather than just 'BookingManager finalizeInvoice method'" + ), }, "trace_mode": { "type": "string", "enum": ["precision", "dependencies"], "description": "Trace mode: 'precision' (for methods/functions - shows execution flow and usage patterns) or 'dependencies' (for classes/modules/protocols - shows structural relationships)", }, + "images": { + "type": "array", + "items": {"type": "string"}, + "description": "Optional images of system architecture diagrams, flow charts, or visual references to help understand the tracing context", + }, }, "required": ["prompt", "trace_mode"], } diff --git a/utils/conversation_memory.py b/utils/conversation_memory.py index 1eee435..c93fdfc 100644 --- a/utils/conversation_memory.py +++ b/utils/conversation_memory.py @@ -142,6 +142,7 @@ class ConversationTurn(BaseModel): content: The actual message content/response timestamp: ISO timestamp when this turn was created files: List of file paths referenced in this specific turn + images: List of image paths referenced in this specific turn tool_name: Which tool generated this turn (for cross-tool tracking) model_provider: Provider used (e.g., "google", "openai") model_name: Specific model used (e.g., "gemini-2.5-flash-preview-05-20", "o3-mini") @@ -152,6 +153,7 @@ class ConversationTurn(BaseModel): content: str timestamp: str files: Optional[list[str]] = None # Files referenced in this turn + images: Optional[list[str]] = None # Images referenced in this turn tool_name: Optional[str] = None # Tool used for this turn model_provider: Optional[str] = None # Model provider (google, openai, etc) model_name: Optional[str] = None # Specific model used @@ -300,6 +302,7 @@ def add_turn( role: str, content: str, files: Optional[list[str]] = None, + images: 
     tool_name: Optional[str] = None,
     model_provider: Optional[str] = None,
     model_name: Optional[str] = None,
@@ -318,6 +321,7 @@ def add_turn(
         role: "user" (Claude) or "assistant" (Gemini/O3/etc)
         content: The actual message/response content
         files: Optional list of files referenced in this turn
+        images: Optional list of images referenced in this turn
         tool_name: Name of the tool adding this turn (for attribution)
         model_provider: Provider used (e.g., "google", "openai")
         model_name: Specific model used (e.g., "gemini-2.5-flash-preview-05-20", "o3-mini")
@@ -335,6 +339,7 @@ def add_turn(
         - Refreshes thread TTL to configured timeout on successful update
         - Turn limits prevent runaway conversations
         - File references are preserved for cross-tool access with atomic ordering
+        - Image references are preserved for cross-tool visual context
         - Model information enables cross-provider conversations
     """
     logger.debug(f"[FLOW] Adding {role} turn to {thread_id} ({tool_name})")
@@ -355,6 +360,7 @@ def add_turn(
         content=content,
         timestamp=datetime.now(timezone.utc).isoformat(),
         files=files,  # Preserved for cross-tool file context
+        images=images,  # Preserved for cross-tool visual context
         tool_name=tool_name,  # Track which tool generated this turn
         model_provider=model_provider,  # Track model provider
         model_name=model_name,  # Track specific model
@@ -489,6 +495,78 @@ def get_conversation_file_list(context: ThreadContext) -> list[str]:
     return file_list
 
 
+def get_conversation_image_list(context: ThreadContext) -> list[str]:
+    """
+    Extract all unique images from conversation turns with newest-first prioritization.
+
+    This function implements the same prioritization logic as get_conversation_file_list()
+    to ensure consistency in how images are handled across conversation turns. It walks
+    backwards through conversation turns (from newest to oldest) and collects unique image
+    references, ensuring that when the same image appears in multiple turns, the reference
+    from the NEWEST turn takes precedence.
+
+    PRIORITIZATION ALGORITHM:
+    1. Iterate through turns in REVERSE order (index len-1 down to 0)
+    2. For each turn, process images in the order they appear in turn.images
+    3. Add image to result list only if not already seen (newest reference wins)
+    4. Skip duplicate images that were already added from newer turns
+
+    This ensures that:
+    - Images from newer conversation turns appear first in the result
+    - When the same image is referenced multiple times, only the newest reference is kept
+    - The order reflects the most recent conversation context
+
+    Example:
+        Turn 1: images = ["diagram.png", "flow.jpg"]
+        Turn 2: images = ["error.png"]
+        Turn 3: images = ["diagram.png", "updated.png"]  # diagram.png appears again
+
+        Result: ["diagram.png", "updated.png", "error.png", "flow.jpg"]
+        (diagram.png from Turn 3 takes precedence over Turn 1)
+
+    Args:
+        context: ThreadContext containing all conversation turns to process
+
+    Returns:
+        list[str]: Unique image paths ordered by newest reference first.
+                   Empty list if no turns exist or no images are referenced.
+
+    Performance:
+        - Time Complexity: O(n*m) where n=turns, m=avg images per turn
+        - Space Complexity: O(i) where i=total unique images
+        - Uses set for O(1) duplicate detection
+    """
+    if not context.turns:
+        logger.debug("[IMAGES] No turns found, returning empty image list")
+        return []
+
+    # Collect images by walking backwards (newest to oldest turns)
+    seen_images = set()
+    image_list = []
+
+    logger.debug(f"[IMAGES] Collecting images from {len(context.turns)} turns (newest first)")
+
+    # Process turns in reverse order (newest first) - this is the CORE of newest-first prioritization
+    # By iterating from len-1 down to 0, we encounter newer turns before older turns
+    # When we find a duplicate image, we skip it because the newer version is already in our list
+    for i in range(len(context.turns) - 1, -1, -1):  # REVERSE: newest turn first
+        turn = context.turns[i]
+        if turn.images:
+            logger.debug(f"[IMAGES] Turn {i + 1} has {len(turn.images)} images: {turn.images}")
+            for image_path in turn.images:
+                if image_path not in seen_images:
+                    # First time seeing this image - add it (this is the NEWEST reference)
+                    seen_images.add(image_path)
+                    image_list.append(image_path)
+                    logger.debug(f"[IMAGES] Added new image: {image_path} (from turn {i + 1})")
+                else:
+                    # Image already seen from a NEWER turn - skip this older reference
+                    logger.debug(f"[IMAGES] Skipping duplicate image: {image_path} (newer version already included)")
+
+    logger.debug(f"[IMAGES] Final image list ({len(image_list)}): {image_list}")
+    return image_list
+
+
 def _plan_file_inclusion_by_size(all_files: list[str], max_file_tokens: int) -> tuple[list[str], list[str], int]:
     """
     Plan which files to include based on size constraints.
diff --git a/utils/file_types.py b/utils/file_types.py
index fd91bdd..f820722 100644
--- a/utils/file_types.py
+++ b/utils/file_types.py
@@ -88,8 +88,9 @@ TEXT_DATA = {
     ".lock",  # Lock files
 }
 
-# Image file extensions
-IMAGES = {".jpg", ".jpeg", ".png", ".gif", ".bmp", ".svg", ".webp", ".ico", ".tiff", ".tif"}
+# Image file extensions - limited to what AI models actually support
+# Based on OpenAI and Gemini supported formats: PNG, JPEG, GIF, WebP
+IMAGES = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
 
 # Binary executable and library extensions
 BINARIES = {
@@ -240,3 +241,30 @@ def get_token_estimation_ratio(file_path: str) -> float:
     extension = Path(file_path).suffix.lower()
     return TOKEN_ESTIMATION_RATIOS.get(extension, 3.5)  # Conservative default
+
+
+# MIME type mappings for image files - limited to what AI models actually support
+# Based on OpenAI and Gemini supported formats: PNG, JPEG, GIF, WebP
+IMAGE_MIME_TYPES = {
+    ".jpg": "image/jpeg",
+    ".jpeg": "image/jpeg",
+    ".png": "image/png",
+    ".gif": "image/gif",
+    ".webp": "image/webp",
+}
+
+
+def get_image_mime_type(extension: str) -> str:
+    """
+    Get the MIME type for an image file extension.
+
+    Args:
+        extension: File extension (with or without leading dot)
+
+    Returns:
+        MIME type string (default: image/jpeg for unknown extensions)
+    """
+    if not extension.startswith("."):
+        extension = "." + extension
+    extension = extension.lower()
+    return IMAGE_MIME_TYPES.get(extension, "image/jpeg")
diff --git a/utils/file_utils.py b/utils/file_utils.py
index f6d8033..f16eb57 100644
--- a/utils/file_utils.py
+++ b/utils/file_utils.py
@@ -48,6 +48,36 @@ from .file_types import BINARY_EXTENSIONS, CODE_EXTENSIONS, IMAGE_EXTENSIONS, TE
 from .security_config import CONTAINER_WORKSPACE, EXCLUDED_DIRS, MCP_SIGNATURE_FILES, SECURITY_ROOT, WORKSPACE_ROOT
 from .token_utils import DEFAULT_CONTEXT_WINDOW, estimate_tokens
 
+
+def _is_builtin_custom_models_config(path_str: str) -> bool:
+    """
+    Check if path points to the server's built-in custom_models.json config file.
+
+    This only matches the server's internal config, not a user-specified CUSTOM_MODELS_CONFIG_PATH.
+    We identify the built-in config by checking whether the path resolves to the custom_models.json
+    file inside the server's conf directory.
+
+    Args:
+        path_str: Path to check
+
+    Returns:
+        True if this is the server's built-in custom_models.json config file
+    """
+    try:
+        path = Path(path_str)
+
+        # Get the server root by going up from this file: utils/file_utils.py -> server_root
+        server_root = Path(__file__).parent.parent
+        builtin_config = server_root / "conf" / "custom_models.json"
+
+        # Check if the path resolves to the same file as our built-in config
+        # This handles both relative and absolute paths to the same file
+        return path.resolve() == builtin_config.resolve()
+
+    except Exception:
+        # If path resolution fails, it's not our built-in config
+        return False
+
+
 logger = logging.getLogger(__name__)
@@ -271,7 +301,8 @@ def translate_path_for_environment(path_str: str) -> str:
     tools and utilities throughout the codebase. It handles:
     1. Docker host-to-container path translation (host paths -> /workspace/...)
     2. Direct mode (no translation needed)
-    3. Security validation and error handling
+    3. Internal server files (conf/custom_models.json)
+    4. Security validation and error handling
 
     Docker Path Translation Logic:
     - Input: /Users/john/project/src/file.py (host path from Claude)
@@ -284,6 +315,10 @@ def translate_path_for_environment(path_str: str) -> str:
     Returns:
         Translated path appropriate for the current environment
     """
+    # Handle built-in server config file - no translation needed
+    if _is_builtin_custom_models_config(path_str):
+        return path_str
+
     if not WORKSPACE_ROOT or not WORKSPACE_ROOT.strip() or not CONTAINER_WORKSPACE.exists():
         # Not in the configured Docker environment, no translation needed
        return path_str
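For reviewers, a minimal sketch of what a tool call can now carry. The prompt and paths below are illustrative only; the `images` key is the only new field:

```python
# Illustrative arguments for any tool that gained the optional "images"
# field (chat, thinkdeep, codereview, debug, precommit, tracer).
# Paths must be absolute; accepted formats are PNG/JPEG/GIF/WebP,
# matching the IMAGES set in utils/file_types.py.
request_arguments = {
    "prompt": "Extend my analysis of the queue design against this diagram",
    "files": ["/abs/path/to/api/routes.py"],
    "images": ["/abs/path/to/architecture.png"],
}
```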
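Recording a turn with visual context is then a matter of passing the same list through to conversation memory. This is a sketch only: it assumes `add_turn` takes the thread UUID as its first parameter (that parameter sits above the hunk shown), and the UUID, content, and paths are hypothetical:

```python
from utils.conversation_memory import add_turn

# Hypothetical turn carrying an image reference; the image path is stored
# on the turn so later tools in the same thread can re-read it.
add_turn(
    "6f1d2e3c-0a9b-4c8d-b7e6-5f4a3b2c1d0e",  # hypothetical thread UUID
    role="assistant",
    content="The diagram shows a race between the two queue consumers.",
    images=["/abs/path/to/architecture.png"],
    tool_name="thinkdeep",
    model_provider="google",
    model_name="gemini-2.5-flash-preview-05-20",
)
```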
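The newest-first ordering of `get_conversation_image_list` can be exercised in isolation. This sketch uses a bare stand-in class instead of the real `ConversationTurn` and reproduces the docstring's worked example:

```python
# Stand-in for ConversationTurn carrying only the images attribute.
class Turn:
    def __init__(self, images):
        self.images = images

turns = [
    Turn(["diagram.png", "flow.jpg"]),     # Turn 1 (oldest)
    Turn(["error.png"]),                   # Turn 2
    Turn(["diagram.png", "updated.png"]),  # Turn 3 (newest)
]

# Walk newest-to-oldest; the first occurrence of an image wins.
seen, image_list = set(), []
for turn in reversed(turns):
    for image_path in turn.images:
        if image_path not in seen:
            seen.add(image_path)
            image_list.append(image_path)

assert image_list == ["diagram.png", "updated.png", "error.png", "flow.jpg"]
```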
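The MIME normalization is small enough to verify inline. The snippet below restates `get_image_mime_type` together with the mapping from the hunk above, plus assertions covering the three documented behaviours (leading dot optional, case-insensitive, JPEG fallback):

```python
IMAGE_MIME_TYPES = {
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg",
    ".png": "image/png",
    ".gif": "image/gif",
    ".webp": "image/webp",
}

def get_image_mime_type(extension: str) -> str:
    if not extension.startswith("."):
        extension = "." + extension
    return IMAGE_MIME_TYPES.get(extension.lower(), "image/jpeg")

assert get_image_mime_type("PNG") == "image/png"      # dot optional, case folded
assert get_image_mime_type(".webp") == "image/webp"
assert get_image_mime_type(".bmp") == "image/jpeg"    # unsupported -> JPEG default
```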
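Finally, the built-in-config check relies on `Path.resolve()` collapsing different spellings of the same file. A quick property check, assuming the working directory is the server root and no symlinks are involved:

```python
from pathlib import Path

# Relative and ".."-laden spellings of the same file compare equal once
# resolved, which is why any reference to the bundled config short-circuits
# Docker path translation.
a = Path("conf/custom_models.json")
b = Path("conf/../conf/custom_models.json")
assert a.resolve() == b.resolve()
```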