From fcb0fe3ef2f4c6e691ed8c0eef7206a7afb3c1d0 Mon Sep 17 00:00:00 2001
From: Fahad
Date: Fri, 8 Aug 2025 10:52:23 +0500
Subject: [PATCH] Fix o3-pro model resolution to use o3-pro consistently
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Use o3-pro throughout the codebase instead of o3-pro-2025-06-10
- Update test expectations to match o3-pro model name
- Update cassette to use o3-pro for consistency
- Ensure responses endpoint routing works correctly with o3-pro

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude
---
 providers/openai_compatible.py                | 2 +-
 tests/openai_cassettes/o3_pro_basic_math.json | 2 +-
 tests/test_o3_pro_output_text_fix.py          | 2 +-
 tests/test_openai_provider.py                 | 6 +++---
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/providers/openai_compatible.py b/providers/openai_compatible.py
index be6911f..75b5af2 100644
--- a/providers/openai_compatible.py
+++ b/providers/openai_compatible.py
@@ -541,7 +541,7 @@ class OpenAICompatibleProvider(ModelProvider):
                 completion_params[key] = value
 
         # Check if this is o3-pro and needs the responses endpoint
-        if resolved_model == "o3-pro-2025-06-10":
+        if resolved_model == "o3-pro":
             # This model requires the /v1/responses endpoint
             # If it fails, we should not fall back to chat/completions
             return self._generate_with_responses_endpoint(
diff --git a/tests/openai_cassettes/o3_pro_basic_math.json b/tests/openai_cassettes/o3_pro_basic_math.json
index 855aa31..4ccd4df 100644
--- a/tests/openai_cassettes/o3_pro_basic_math.json
+++ b/tests/openai_cassettes/o3_pro_basic_math.json
@@ -23,7 +23,7 @@
                     "role": "user"
                 }
             ],
-            "model": "o3-pro-2025-06-10",
+            "model": "o3-pro",
             "reasoning": {
                 "effort": "medium"
             },
diff --git a/tests/test_o3_pro_output_text_fix.py b/tests/test_o3_pro_output_text_fix.py
index 43115fd..1461d83 100644
--- a/tests/test_o3_pro_output_text_fix.py
+++ b/tests/test_o3_pro_output_text_fix.py
@@ -55,7 +55,7 @@ class TestO3ProOutputTextFix:
         ModelProviderRegistry.reset_for_testing()
 
     @pytest.mark.no_mock_provider  # Disable provider mocking for this test
-    @patch.dict(os.environ, {"OPENAI_ALLOWED_MODELS": "o3-pro,o3-pro-2025-06-10", "LOCALE": ""})
+    @patch.dict(os.environ, {"OPENAI_ALLOWED_MODELS": "o3-pro", "LOCALE": ""})
     async def test_o3_pro_uses_output_text_field(self, monkeypatch):
         """Test that o3-pro parsing uses the output_text convenience field via ChatTool."""
         cassette_path = cassette_dir / "o3_pro_basic_math.json"
diff --git a/tests/test_openai_provider.py b/tests/test_openai_provider.py
index 2bb3c06..3a00faa 100644
--- a/tests/test_openai_provider.py
+++ b/tests/test_openai_provider.py
@@ -280,7 +280,7 @@ class TestOpenAIProvider:
         mock_response = MagicMock()
         # New o3-pro format: direct output_text field
         mock_response.output_text = "4"
-        mock_response.model = "o3-pro-2025-06-10"
+        mock_response.model = "o3-pro"
         mock_response.id = "test-id"
         mock_response.created_at = 1234567890
         mock_response.usage = MagicMock()
@@ -298,13 +298,13 @@ class TestOpenAIProvider:
         # Verify responses.create was called
         mock_client.responses.create.assert_called_once()
         call_args = mock_client.responses.create.call_args[1]
-        assert call_args["model"] == "o3-pro-2025-06-10"
+        assert call_args["model"] == "o3-pro"
         assert call_args["input"][0]["role"] == "user"
         assert "What is 2 + 2?" in call_args["input"][0]["content"][0]["text"]
 
         # Verify the response
         assert result.content == "4"
-        assert result.model_name == "o3-pro-2025-06-10"
+        assert result.model_name == "o3-pro"
         assert result.metadata["endpoint"] == "responses"
 
     @patch("providers.openai_compatible.OpenAI")