Use the new flash model
Updated tests
@@ -72,7 +72,7 @@ class TestClaudeContinuationOffers:
         mock_provider.generate_content.return_value = Mock(
             content="Analysis complete.",
             usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
-            model_name="gemini-2.0-flash-exp",
+            model_name="gemini-2.0-flash",
             metadata={"finish_reason": "STOP"},
         )
         mock_get_provider.return_value = mock_provider
@@ -129,7 +129,7 @@ class TestClaudeContinuationOffers:
         mock_provider.generate_content.return_value = Mock(
             content="Continued analysis.",
             usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
-            model_name="gemini-2.0-flash-exp",
+            model_name="gemini-2.0-flash",
             metadata={"finish_reason": "STOP"},
         )
         mock_get_provider.return_value = mock_provider
@@ -162,7 +162,7 @@ class TestClaudeContinuationOffers:
         mock_provider.generate_content.return_value = Mock(
             content="Analysis complete. The code looks good.",
             usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
-            model_name="gemini-2.0-flash-exp",
+            model_name="gemini-2.0-flash",
             metadata={"finish_reason": "STOP"},
         )
         mock_get_provider.return_value = mock_provider
@@ -208,7 +208,7 @@ I'd be happy to examine the error handling patterns in more detail if that would
         mock_provider.generate_content.return_value = Mock(
             content=content_with_followup,
             usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
-            model_name="gemini-2.0-flash-exp",
+            model_name="gemini-2.0-flash",
             metadata={"finish_reason": "STOP"},
         )
         mock_get_provider.return_value = mock_provider
@@ -253,7 +253,7 @@ I'd be happy to examine the error handling patterns in more detail if that would
         mock_provider.generate_content.return_value = Mock(
            content="Continued analysis complete.",
            usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
-           model_name="gemini-2.0-flash-exp",
+           model_name="gemini-2.0-flash",
            metadata={"finish_reason": "STOP"},
         )
         mock_get_provider.return_value = mock_provider
@@ -309,7 +309,7 @@ I'd be happy to examine the error handling patterns in more detail if that would
         mock_provider.generate_content.return_value = Mock(
             content="Final response.",
             usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
-            model_name="gemini-2.0-flash-exp",
+            model_name="gemini-2.0-flash",
             metadata={"finish_reason": "STOP"},
         )
         mock_get_provider.return_value = mock_provider
@@ -358,7 +358,7 @@ class TestContinuationIntegration:
         mock_provider.generate_content.return_value = Mock(
             content="Analysis result",
             usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
-            model_name="gemini-2.0-flash-exp",
+            model_name="gemini-2.0-flash",
             metadata={"finish_reason": "STOP"},
         )
         mock_get_provider.return_value = mock_provider
@@ -411,7 +411,7 @@ class TestContinuationIntegration:
         mock_provider.generate_content.return_value = Mock(
             content="Structure analysis done.",
             usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
-            model_name="gemini-2.0-flash-exp",
+            model_name="gemini-2.0-flash",
             metadata={"finish_reason": "STOP"},
        )
         mock_get_provider.return_value = mock_provider
@@ -448,7 +448,7 @@ class TestContinuationIntegration:
         mock_provider.generate_content.return_value = Mock(
             content="Performance analysis done.",
             usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
-            model_name="gemini-2.0-flash-exp",
+            model_name="gemini-2.0-flash",
             metadata={"finish_reason": "STOP"},
         )
         mock_get_provider.return_value = mock_provider
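Every hunk updates the same stubbed provider response, swapping the experimental model name for the new flash model. For reference, a minimal sketch of that pattern as it looks after this change; the pytest patching that produces mock_get_provider and the surrounding test methods are assumed here and are not part of this diff:

from unittest.mock import Mock

# Stand-in for the patched provider factory (normally supplied by a
# @patch decorator in the tests; assumed here for a self-contained sketch).
mock_get_provider = Mock()

mock_provider = Mock()
mock_provider.generate_content.return_value = Mock(
    content="Analysis complete.",
    usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
    model_name="gemini-2.0-flash",  # was "gemini-2.0-flash-exp" before this commit
    metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider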