refactor: moved temperature method from base provider to model capabilities

refactor: model listing cleanup, moved logic to model_capabilities.py
docs: added AGENTS.md for onboarding Codex
Fahad
2025-10-02 10:25:41 +04:00
parent f461cb4519
commit 6d237d0970
14 changed files with 460 additions and 512 deletions
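
On the first message: the temperature handling presumably now lives on the per-model capability object rather than the provider base class. A minimal sketch of the idea, assuming a dataclass in model_capabilities.py; the names and defaults here are illustrative, not the repo's actual code:

from dataclasses import dataclass

@dataclass
class ModelCapabilities:
    """Per-model metadata; hypothetical shape of model_capabilities.py."""
    model_name: str
    supports_temperature: bool = True
    temperature_range: tuple = (0.0, 2.0)

    def clamp_temperature(self, requested: float) -> float:
        # Models that ignore temperature pass the request through unchanged;
        # otherwise clamp into this model's supported range.
        if not self.supports_temperature:
            return requested
        low, high = self.temperature_range
        return min(max(requested, low), high)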

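The diff below replaces every list_all_known_models() call with a single parameterized list_models(). A sketch of the consolidated signature the tests now exercise, assuming MODEL_CAPABILITIES maps each model name either to a capabilities object or, for an alias, to its target's name as a plain string (the _is_allowed helper is hypothetical):

class ModelProvider:
    MODEL_CAPABILITIES: dict = {}

    def list_models(self, respect_restrictions=True, include_aliases=True,
                    lowercase=False, unique=False):
        models = []
        for name, config in self.MODEL_CAPABILITIES.items():
            if isinstance(config, str):
                # A string value marks `name` as an alias for `config`.
                if include_aliases:
                    models.append(name)
                models.append(config)  # the target is always a known model
            else:
                models.append(name)
        if respect_restrictions:
            models = [m for m in models if self._is_allowed(m)]
        if lowercase:
            models = [m.lower() for m in models]
        if unique:
            models = list(dict.fromkeys(models))  # order-preserving dedup
        return models

    def _is_allowed(self, name):
        # Stub; a real provider would consult its restriction service.
        return True

Collapsing the special-purpose method into keyword flags keeps a single listing code path; the tests pin the exact flag combination so the alias-plus-target behavior cannot silently regress.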

@@ -22,7 +22,7 @@ class TestAliasTargetRestrictions:
         provider = OpenAIModelProvider(api_key="test-key")
         # Get all known models including aliases and targets
-        all_known = provider.list_all_known_models()
+        all_known = provider.list_models(respect_restrictions=False, include_aliases=True, lowercase=True, unique=True)
         # Should include both aliases and their targets
         assert "mini" in all_known  # alias
@@ -35,7 +35,7 @@ class TestAliasTargetRestrictions:
         provider = GeminiModelProvider(api_key="test-key")
         # Get all known models including aliases and targets
-        all_known = provider.list_all_known_models()
+        all_known = provider.list_models(respect_restrictions=False, include_aliases=True, lowercase=True, unique=True)
         # Should include both aliases and their targets
         assert "flash" in all_known  # alias
@@ -162,7 +162,9 @@ class TestAliasTargetRestrictions:
         """
         # Test OpenAI provider
         openai_provider = OpenAIModelProvider(api_key="test-key")
-        openai_all_known = openai_provider.list_all_known_models()
+        openai_all_known = openai_provider.list_models(
+            respect_restrictions=False, include_aliases=True, lowercase=True, unique=True
+        )
         # Verify that for each alias, its target is also included
         for model_name, config in openai_provider.MODEL_CAPABILITIES.items():
@@ -175,7 +177,9 @@ class TestAliasTargetRestrictions:
         # Test Gemini provider
         gemini_provider = GeminiModelProvider(api_key="test-key")
-        gemini_all_known = gemini_provider.list_all_known_models()
+        gemini_all_known = gemini_provider.list_models(
+            respect_restrictions=False, include_aliases=True, lowercase=True, unique=True
+        )
         # Verify that for each alias, its target is also included
         for model_name, config in gemini_provider.MODEL_CAPABILITIES.items():
@@ -186,8 +190,8 @@ class TestAliasTargetRestrictions:
                 config.lower() in gemini_all_known
             ), f"Target '{config}' for alias '{model_name}' not in known models"
-    def test_no_duplicate_models_in_list_all_known_models(self):
-        """Test that list_all_known_models doesn't return duplicates."""
+    def test_no_duplicate_models_in_alias_aware_listing(self):
+        """Test that alias-aware list_models variant doesn't return duplicates."""
         # Test all providers
         providers = [
             OpenAIModelProvider(api_key="test-key"),
@@ -195,7 +199,9 @@ class TestAliasTargetRestrictions:
         ]
         for provider in providers:
-            all_known = provider.list_all_known_models()
+            all_known = provider.list_models(
+                respect_restrictions=False, include_aliases=True, lowercase=True, unique=True
+            )
             # Should not have duplicates
             assert len(all_known) == len(set(all_known)), f"{provider.__class__.__name__} returns duplicate models"
@@ -207,7 +213,7 @@ class TestAliasTargetRestrictions:
         from unittest.mock import MagicMock
         mock_provider = MagicMock()
-        mock_provider.list_all_known_models.return_value = ["model1", "model2", "target-model"]
+        mock_provider.list_models.return_value = ["model1", "model2", "target-model"]
         # Set up a restriction that should trigger validation
         service.restrictions = {ProviderType.OPENAI: {"invalid-model"}}
@@ -218,7 +224,12 @@ class TestAliasTargetRestrictions:
         service.validate_against_known_models(provider_instances)
         # Verify the polymorphic method was called
-        mock_provider.list_all_known_models.assert_called_once()
+        mock_provider.list_models.assert_called_once_with(
+            respect_restrictions=False,
+            include_aliases=True,
+            lowercase=True,
+            unique=True,
+        )
     @patch.dict(os.environ, {"OPENAI_ALLOWED_MODELS": "o4-mini"})  # Restrict to specific model
     def test_complex_alias_chains_handled_correctly(self):
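
For reference, the service-side call that the MagicMock above stands in for might look roughly like this; a sketch only, with the restrictions shape inferred from the test setup:

import logging

def validate_against_known_models(restrictions, provider_instances):
    # restrictions: {ProviderType: set of allowed model names}
    # provider_instances: {ProviderType: provider object}
    for provider_type, provider in provider_instances.items():
        allowed = restrictions.get(provider_type, set())
        if not allowed:
            continue
        known = provider.list_models(
            respect_restrictions=False,  # validate against the full catalog
            include_aliases=True,        # aliases are legal restriction names
            lowercase=True,
            unique=True,
        )
        for name in allowed:
            if name.lower() not in known:
                logging.warning(
                    "Restriction '%s' does not match any known %s model",
                    name, provider_type,
                )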
@@ -250,7 +261,7 @@ class TestAliasTargetRestrictions:
         - A restriction on "o4-mini" (target) would not be recognized as valid
         After the fix:
-        - list_all_known_models() returns ["mini", "o3mini", "o4-mini", "o3-mini"] (aliases + targets)
+        - list_models(respect_restrictions=False, include_aliases=True, lowercase=True, unique=True) returns ["mini", "o3mini", "o4-mini", "o3-mini"] (aliases + targets)
         - validate_against_known_models() checks against all names
         - A restriction on "o4-mini" is recognized as valid
         """
@@ -262,7 +273,7 @@ class TestAliasTargetRestrictions:
         provider_instances = {ProviderType.OPENAI: provider}
         # Get all known models - should include BOTH aliases AND targets
-        all_known = provider.list_all_known_models()
+        all_known = provider.list_models(respect_restrictions=False, include_aliases=True, lowercase=True, unique=True)
         # Critical check: should contain both aliases and their targets
         assert "mini" in all_known  # alias
@@ -310,7 +321,7 @@ class TestAliasTargetRestrictions:
         the restriction is properly enforced and the target is recognized as a valid
         model to restrict.
-        The bug: If list_all_known_models() doesn't include targets, then validation
+        The bug: If list_models(respect_restrictions=False, include_aliases=True, lowercase=True, unique=True) doesn't include targets, then validation
         would incorrectly warn that target model names are "not recognized", making
         it appear that target-based restrictions don't work.
         """
@@ -325,7 +336,9 @@ class TestAliasTargetRestrictions:
         provider = OpenAIModelProvider(api_key="test-key")
         # These specific target models should be recognized as valid
-        all_known = provider.list_all_known_models()
+        all_known = provider.list_models(
+            respect_restrictions=False, include_aliases=True, lowercase=True, unique=True
+        )
         assert "o4-mini" in all_known, "Target model o4-mini should be known"
         assert "o3-mini" in all_known, "Target model o3-mini should be known"