From 493082405237e66a2f033481a5f8bf8293b0d553 Mon Sep 17 00:00:00 2001
From: David Knedlik
Date: Thu, 21 Aug 2025 14:27:00 -0500
Subject: [PATCH] feat: Add comprehensive GPT-5 series model support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Add GPT-5, GPT-5-mini, and GPT-5-nano models to unified configuration
- Implement proper thinking mode support via dynamic capability checking
- Add OpenAI provider model enumeration methods for registry integration
- Update tests to cover all GPT-5 models and their aliases
- Fix critical bug where thinking mode was hardcoded instead of using model capabilities

Breaking Changes:
- None (backward compatible)

New Models Available:
- gpt-5 (400K context, 128K output, reasoning support)
- gpt-5-mini (400K context, 128K output, efficient variant)
- gpt-5-nano (400K context, fastest/cheapest variant)

Aliases:
- gpt5, gpt5-mini, gpt5mini, mini, gpt5-nano, gpt5nano, nano

All models support:
- Extended thinking mode (reasoning tokens)
- Vision capabilities
- JSON mode
- Function calling

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude
---
 conf/custom_models.json       | 42 +++++++++++++++++++++++++++++++++++
 providers/openai_provider.py  | 26 +++++++++++++++++-----
 tests/test_openai_provider.py | 18 ++++++++++----
 3 files changed, 76 insertions(+), 10 deletions(-)

diff --git a/conf/custom_models.json b/conf/custom_models.json
index 8d83e00..2bda899 100644
--- a/conf/custom_models.json
+++ b/conf/custom_models.json
@@ -228,6 +228,48 @@
       "temperature_constraint": "fixed",
       "description": "OpenAI's o4-mini model - optimized for shorter contexts with rapid reasoning and vision"
     },
+    {
+      "model_name": "gpt-5",
+      "aliases": ["gpt5", "gpt-5"],
+      "context_window": 400000,
+      "max_output_tokens": 128000,
+      "supports_extended_thinking": true,
+      "supports_json_mode": true,
+      "supports_function_calling": true,
+      "supports_images": true,
+      "max_image_size_mb": 20.0,
+      "supports_temperature": true,
+      "temperature_constraint": "fixed",
+      "description": "GPT-5 (400K context, 128K output) - Advanced model with reasoning support"
+    },
+    {
+      "model_name": "gpt-5-mini",
+      "aliases": ["gpt5-mini", "gpt5mini", "mini"],
+      "context_window": 400000,
+      "max_output_tokens": 128000,
+      "supports_extended_thinking": true,
+      "supports_json_mode": true,
+      "supports_function_calling": true,
+      "supports_images": true,
+      "max_image_size_mb": 20.0,
+      "supports_temperature": true,
+      "temperature_constraint": "fixed",
+      "description": "GPT-5-mini (400K context, 128K output) - Efficient variant with reasoning support"
+    },
+    {
+      "model_name": "gpt-5-nano",
+      "aliases": ["gpt5nano", "gpt5-nano", "nano"],
+      "context_window": 400000,
+      "max_output_tokens": 128000,
+      "supports_extended_thinking": true,
+      "supports_json_mode": true,
+      "supports_function_calling": true,
+      "supports_images": true,
+      "max_image_size_mb": 20.0,
+      "supports_temperature": true,
+      "temperature_constraint": "fixed",
+      "description": "GPT-5-nano (400K context, 128K output) - Fastest, cheapest version of GPT-5 for summarization and classification tasks"
+    },
     {
       "model_name": "llama3.2",
       "aliases": ["local-llama", "local", "llama3.2", "ollama-llama"],
diff --git a/providers/openai_provider.py b/providers/openai_provider.py
index 2d3c0cd..a7f5898 100644
--- a/providers/openai_provider.py
+++ b/providers/openai_provider.py
@@ -259,12 +259,10 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
 
     def supports_thinking_mode(self, model_name: str) -> bool:
         """Check if the model supports extended thinking mode."""
-        # GPT-5 models support reasoning tokens (extended thinking)
-        resolved_name = self._resolve_model_name(model_name)
-        if resolved_name in ["gpt-5", "gpt-5-mini"]:
-            return True
-        # O3 models don't support extended thinking yet
-        return False
+        try:
+            return self.get_capabilities(model_name).supports_extended_thinking
+        except ValueError:
+            return False
 
     def get_preferred_model(self, category: "ToolModelCategory", allowed_models: list[str]) -> Optional[str]:
         """Get OpenAI's preferred model for a given category from allowed models.
@@ -303,3 +301,19 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
         # Prefer balanced performance/cost models
         preferred = find_first(["gpt-5", "gpt-5-mini", "o4-mini", "o3-mini"])
         return preferred if preferred else allowed_models[0]
+
+    def get_model_configurations(self) -> dict[str, ModelCapabilities]:
+        """Get model configurations supported by this provider.
+
+        Returns:
+            Dict mapping model names to their ModelCapabilities
+        """
+        return self.SUPPORTED_MODELS.copy()
+
+    def get_all_model_aliases(self) -> dict[str, list[str]]:
+        """Get all model aliases supported by this provider.
+
+        Returns:
+            Dict mapping model names to their alias lists
+        """
+        return {model_name: caps.aliases for model_name, caps in self.SUPPORTED_MODELS.items()}
diff --git a/tests/test_openai_provider.py b/tests/test_openai_provider.py
index 3a00faa..5278ff5 100644
--- a/tests/test_openai_provider.py
+++ b/tests/test_openai_provider.py
@@ -253,14 +253,21 @@
         assert call_kwargs["model"] == "o3-mini"  # Should be unchanged
 
     def test_supports_thinking_mode(self):
-        """Test thinking mode support."""
+        """Test thinking mode support based on model capabilities."""
         provider = OpenAIModelProvider("test-key")
 
-        # GPT-5 models support thinking mode (reasoning tokens)
+        # GPT-5 models support thinking mode (reasoning tokens) - all variants
         assert provider.supports_thinking_mode("gpt-5") is True
         assert provider.supports_thinking_mode("gpt-5-mini") is True
-        assert provider.supports_thinking_mode("gpt5") is True  # Test with alias
-        assert provider.supports_thinking_mode("gpt5mini") is True  # Test with alias
+        assert provider.supports_thinking_mode("gpt-5-nano") is True  # Now included
+
+        # Test GPT-5 aliases
+        assert provider.supports_thinking_mode("gpt5") is True
+        assert provider.supports_thinking_mode("gpt5-mini") is True
+        assert provider.supports_thinking_mode("gpt5mini") is True
+        assert provider.supports_thinking_mode("gpt5-nano") is True
+        assert provider.supports_thinking_mode("gpt5nano") is True
+        assert provider.supports_thinking_mode("nano") is True  # New alias for gpt-5-nano
 
         # O3/O4 models don't support thinking mode
         assert provider.supports_thinking_mode("o3") is False
@@ -270,6 +277,9 @@
         assert (
             provider.supports_thinking_mode("mini") is True
         )  # "mini" now resolves to gpt-5-mini which supports thinking
+        # Test invalid model name
+        assert provider.supports_thinking_mode("invalid-model") is False
+
     @patch("providers.openai_compatible.OpenAI")
     def test_o3_pro_routes_to_responses_endpoint(self, mock_openai_class):
         """Test that o3-pro model routes to the /v1/responses endpoint (mock test)."""
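
For reviewers, a minimal usage sketch of the capability-driven API this patch
introduces. It is not part of the patch itself; the import path, class name,
and dummy-key constructor are taken from the test file above, and the print
loop is illustrative only:

    # Sketch, assuming this patch is applied in a checkout of the repo.
    from providers.openai_provider import OpenAIModelProvider

    provider = OpenAIModelProvider("test-key")  # dummy key, as in the tests

    # supports_thinking_mode() now defers to get_capabilities(), so aliases
    # resolve consistently and unknown models return False instead of raising.
    assert provider.supports_thinking_mode("nano") is True            # gpt-5-nano alias
    assert provider.supports_thinking_mode("invalid-model") is False  # no exception

    # The new enumeration hooks expose the same data to the model registry.
    for model_name, aliases in provider.get_all_model_aliases().items():
        print(model_name, "->", aliases)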