diff --git a/conf/custom_models.json b/conf/custom_models.json
index 8d83e00..04dce42 100644
--- a/conf/custom_models.json
+++ b/conf/custom_models.json
@@ -52,7 +52,7 @@
   "models": [
     {
       "model_name": "anthropic/claude-opus-4.1",
-      "aliases": ["opus", "claude-opus", "claude-opus-4.1", "claude-4.1-opus"],
+      "aliases": ["opus", "claude-opus"],
       "context_window": 200000,
       "max_output_tokens": 64000,
       "supports_extended_thinking": false,
@@ -64,7 +64,7 @@
     },
     {
       "model_name": "anthropic/claude-sonnet-4.1",
-      "aliases": ["sonnet", "claude-sonnet", "claude-sonnet-4.1", "claude-4.1-sonnet", "claude"],
+      "aliases": ["claude", "sonnet", "sonnet4.1", "claude-sonnet", "claude-4.1-sonnet", "claude-sonnet-4.1"],
       "context_window": 200000,
       "max_output_tokens": 64000,
       "supports_extended_thinking": false,
@@ -76,7 +76,7 @@
     },
     {
       "model_name": "anthropic/claude-3.5-haiku",
-      "aliases": ["haiku", "claude-haiku", "claude3-haiku", "claude-3-haiku"],
+      "aliases": ["haiku"],
       "context_window": 200000,
       "max_output_tokens": 64000,
       "supports_extended_thinking": false,
@@ -100,7 +100,7 @@
     },
     {
       "model_name": "google/gemini-2.5-flash",
-      "aliases": ["flash","gemini-flash", "flash-openrouter", "flash-2.5"],
+      "aliases": ["flash","gemini-flash"],
       "context_window": 1048576,
       "max_output_tokens": 65536,
       "supports_extended_thinking": false,
@@ -202,7 +202,7 @@
     },
     {
       "model_name": "openai/o3-pro",
-      "aliases": ["o3-pro", "o3pro"],
+      "aliases": ["o3pro"],
       "context_window": 200000,
       "max_output_tokens": 100000,
       "supports_extended_thinking": false,
@@ -228,9 +228,51 @@
       "temperature_constraint": "fixed",
       "description": "OpenAI's o4-mini model - optimized for shorter contexts with rapid reasoning and vision"
     },
+    {
+      "model_name": "openai/gpt-5",
+      "aliases": ["gpt5"],
+      "context_window": 400000,
+      "max_output_tokens": 128000,
+      "supports_extended_thinking": true,
+      "supports_json_mode": true,
+      "supports_function_calling": true,
+      "supports_images": true,
+      "max_image_size_mb": 20.0,
+      "supports_temperature": true,
+      "temperature_constraint": "range",
+      "description": "GPT-5 (400K context, 128K output) - Advanced model with reasoning support"
+    },
+    {
+      "model_name": "openai/gpt-5-mini",
+      "aliases": ["gpt5mini"],
+      "context_window": 400000,
+      "max_output_tokens": 128000,
+      "supports_extended_thinking": true,
+      "supports_json_mode": true,
+      "supports_function_calling": true,
+      "supports_images": true,
+      "max_image_size_mb": 20.0,
+      "supports_temperature": true,
+      "temperature_constraint": "fixed",
+      "description": "GPT-5-mini (400K context, 128K output) - Efficient variant with reasoning support"
+    },
+    {
+      "model_name": "openai/gpt-5-nano",
+      "aliases": ["gpt5nano"],
+      "context_window": 400000,
+      "max_output_tokens": 128000,
+      "supports_extended_thinking": true,
+      "supports_json_mode": true,
+      "supports_function_calling": true,
+      "supports_images": true,
+      "max_image_size_mb": 20.0,
+      "supports_temperature": true,
+      "temperature_constraint": "fixed",
+      "description": "GPT-5 nano (400K context, 128K output) - Fastest, cheapest version of GPT-5 for summarization and classification tasks"
+    },
     {
       "model_name": "llama3.2",
-      "aliases": ["local-llama", "local", "llama3.2", "ollama-llama"],
+      "aliases": ["local-llama", "ollama-llama"],
       "context_window": 128000,
       "max_output_tokens": 64000,
       "supports_extended_thinking": false,
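Note on the config change: the three new GPT-5 entries set `"supports_extended_thinking": true`, which is exactly the flag the provider change below starts reading instead of a hardcoded model list. As a minimal sketch of how an entry like these could be loaded and alias-resolved (the `load_alias_map` helper and file-layout assumptions here are illustrative, not the project's actual API):

```python
import json

def load_alias_map(path: str = "conf/custom_models.json") -> dict[str, str]:
    """Map every alias (and each canonical name) to its canonical model_name."""
    with open(path) as f:
        models = json.load(f)["models"]
    alias_map: dict[str, str] = {}
    for entry in models:
        canonical = entry["model_name"]
        alias_map[canonical] = canonical
        for alias in entry.get("aliases", []):
            alias_map[alias.lower()] = canonical
    return alias_map

# With this diff applied: load_alias_map()["gpt5nano"] == "openai/gpt-5-nano"
```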
diff --git a/providers/openai_provider.py b/providers/openai_provider.py
index 7143f71..81bb067 100644
--- a/providers/openai_provider.py
+++ b/providers/openai_provider.py
@@ -300,12 +300,10 @@ class OpenAIModelProvider(OpenAICompatibleProvider):

     def supports_thinking_mode(self, model_name: str) -> bool:
         """Check if the model supports extended thinking mode."""
-        # GPT-5 models support reasoning tokens (extended thinking)
-        resolved_name = self._resolve_model_name(model_name)
-        if resolved_name in ["gpt-5", "gpt-5-mini"]:
-            return True
-        # O3 models don't support extended thinking yet
-        return False
+        try:
+            return self.get_capabilities(model_name).supports_extended_thinking
+        except ValueError:
+            return False

     def get_preferred_model(self, category: "ToolModelCategory", allowed_models: list[str]) -> Optional[str]:
         """Get OpenAI's preferred model for a given category from allowed models.
diff --git a/tests/test_openai_provider.py b/tests/test_openai_provider.py
index 3a00faa..5278ff5 100644
--- a/tests/test_openai_provider.py
+++ b/tests/test_openai_provider.py
@@ -253,14 +253,21 @@ class TestOpenAIProvider:
         assert call_kwargs["model"] == "o3-mini"  # Should be unchanged

     def test_supports_thinking_mode(self):
-        """Test thinking mode support."""
+        """Test thinking mode support based on model capabilities."""
         provider = OpenAIModelProvider("test-key")

-        # GPT-5 models support thinking mode (reasoning tokens)
+        # GPT-5 models support thinking mode (reasoning tokens) - all variants
         assert provider.supports_thinking_mode("gpt-5") is True
         assert provider.supports_thinking_mode("gpt-5-mini") is True
-        assert provider.supports_thinking_mode("gpt5") is True  # Test with alias
-        assert provider.supports_thinking_mode("gpt5mini") is True  # Test with alias
+        assert provider.supports_thinking_mode("gpt-5-nano") is True  # Now included
+
+        # Test GPT-5 aliases
+        assert provider.supports_thinking_mode("gpt5") is True
+        assert provider.supports_thinking_mode("gpt5-mini") is True
+        assert provider.supports_thinking_mode("gpt5mini") is True
+        assert provider.supports_thinking_mode("gpt5-nano") is True
+        assert provider.supports_thinking_mode("gpt5nano") is True
+        assert provider.supports_thinking_mode("nano") is True  # New alias for gpt-5-nano

         # O3/O4 models don't support thinking mode
         assert provider.supports_thinking_mode("o3") is False
@@ -270,6 +277,9 @@ class TestOpenAIProvider:
             provider.supports_thinking_mode("mini") is True
         )  # "mini" now resolves to gpt-5-mini which supports thinking

+        # Test invalid model name
+        assert provider.supports_thinking_mode("invalid-model") is False
+
    @patch("providers.openai_compatible.OpenAI")
     def test_o3_pro_routes_to_responses_endpoint(self, mock_openai_class):
         """Test that o3-pro model routes to the /v1/responses endpoint (mock test)."""
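For reference, the shape of the capability-driven lookup the provider now uses, reduced to a self-contained sketch. The `ModelCapabilities` dataclass and registry/alias plumbing below are assumptions for illustration; what the diff does confirm is the contract that `get_capabilities()` raises `ValueError` for unknown models and that `supports_thinking_mode()` converts that to `False`:

```python
from dataclasses import dataclass

@dataclass
class ModelCapabilities:
    model_name: str
    supports_extended_thinking: bool

class CapabilityDrivenProvider:
    def __init__(self, registry: dict[str, ModelCapabilities], aliases: dict[str, str]):
        self._registry = registry      # canonical name -> capabilities
        self._aliases = aliases        # alias -> canonical name

    def get_capabilities(self, model_name: str) -> ModelCapabilities:
        canonical = self._aliases.get(model_name, model_name)
        try:
            return self._registry[canonical]
        except KeyError:
            raise ValueError(f"Unsupported model: {model_name}") from None

    def supports_thinking_mode(self, model_name: str) -> bool:
        # Mirrors the patched method: unknown models report False instead of raising.
        try:
            return self.get_capabilities(model_name).supports_extended_thinking
        except ValueError:
            return False

# A registry built from conf/custom_models.json would now make
# supports_thinking_mode("gpt5nano") return True with no provider code change.
```

The design payoff, which the test changes exercise, is that thinking-mode support becomes pure data: adding gpt-5-nano (or any future variant) to the JSON config is sufficient, and no hardcoded `["gpt-5", "gpt-5-mini"]` list can drift out of date.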