WIP
- OpenRouter model configuration registry
- Model definition file that users can edit to control model capabilities
- Updated instructions
conf/openrouter_models.json (new file, 141 lines)
@@ -0,0 +1,141 @@
{
  "_README": {
    "description": "OpenRouter model configuration for Zen MCP Server",
    "documentation": "https://github.com/BeehiveInnovations/zen-mcp-server/blob/main/docs/openrouter.md",
    "instructions": [
      "Add new models by copying an existing entry and modifying it",
      "Aliases are case-insensitive and must be unique across all models",
      "context_window is the model's total context window size in tokens (input + output)",
      "Set supports_* flags based on the model's actual capabilities",
      "Models not listed here will use generic defaults (32K context window, basic features)"
    ],
    "field_descriptions": {
      "model_name": "The official OpenRouter model identifier (e.g., 'anthropic/claude-3-opus')",
      "aliases": "Array of short names users can type instead of the full model name",
      "context_window": "Total number of tokens the model can process (input + output combined)",
      "supports_extended_thinking": "Whether the model supports extended reasoning tokens (currently none do via OpenRouter)",
      "supports_json_mode": "Whether the model can guarantee valid JSON output",
      "supports_function_calling": "Whether the model supports function/tool calling",
      "description": "Human-readable description of the model"
    },
    "example_custom_model": {
      "model_name": "vendor/model-name-version",
      "aliases": ["shortname", "nickname", "abbrev"],
      "context_window": 128000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "description": "Brief description of the model"
    }
  },
  "models": [
    {
      "model_name": "openai/gpt-4o",
      "aliases": ["gpt4o", "4o", "gpt-4o"],
      "context_window": 128000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
"description": "OpenAI's most capable model, GPT-4 Optimized"
|
||||
    },
    {
      "model_name": "openai/gpt-4o-mini",
      "aliases": ["gpt4o-mini", "4o-mini", "gpt-4o-mini"],
      "context_window": 128000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "description": "Smaller, faster version of GPT-4o"
    },
    {
      "model_name": "anthropic/claude-3-opus",
      "aliases": ["opus", "claude-opus", "claude3-opus", "claude-3-opus"],
      "context_window": 200000,
      "supports_extended_thinking": false,
      "supports_json_mode": false,
      "supports_function_calling": false,
      "description": "Claude 3 Opus - Most capable Claude model"
    },
    {
      "model_name": "anthropic/claude-3-sonnet",
      "aliases": ["sonnet", "claude-sonnet", "claude3-sonnet", "claude-3-sonnet", "claude"],
      "context_window": 200000,
      "supports_extended_thinking": false,
      "supports_json_mode": false,
      "supports_function_calling": false,
      "description": "Claude 3 Sonnet - Balanced performance"
    },
    {
      "model_name": "anthropic/claude-3-haiku",
      "aliases": ["haiku", "claude-haiku", "claude3-haiku", "claude-3-haiku"],
      "context_window": 200000,
      "supports_extended_thinking": false,
      "supports_json_mode": false,
      "supports_function_calling": false,
      "description": "Claude 3 Haiku - Fast and efficient"
    },
    {
      "model_name": "google/gemini-pro-1.5",
      "aliases": ["gemini-pro", "gemini", "pro-openrouter"],
      "context_window": 1048576,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": false,
      "description": "Google's Gemini Pro 1.5 via OpenRouter"
    },
    {
      "model_name": "google/gemini-flash-1.5-8b",
      "aliases": ["gemini-flash", "flash-openrouter", "flash-8b"],
      "context_window": 1048576,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": false,
      "description": "Google's Gemini Flash 1.5 8B via OpenRouter"
    },
    {
      "model_name": "mistral/mistral-large",
      "aliases": ["mistral-large", "mistral"],
      "context_window": 128000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "description": "Mistral's largest model"
    },
    {
      "model_name": "meta-llama/llama-3-70b",
      "aliases": ["llama3-70b", "llama-70b", "llama3"],
      "context_window": 8192,
      "supports_extended_thinking": false,
      "supports_json_mode": false,
      "supports_function_calling": false,
      "description": "Meta's Llama 3 70B model"
    },
    {
      "model_name": "cohere/command-r-plus",
      "aliases": ["command-r-plus", "command-r", "cohere"],
      "context_window": 128000,
      "supports_extended_thinking": false,
      "supports_json_mode": false,
      "supports_function_calling": true,
      "description": "Cohere's Command R Plus model"
    },
    {
      "model_name": "deepseek/deepseek-coder",
      "aliases": ["deepseek-coder", "deepseek", "coder"],
      "context_window": 16384,
      "supports_extended_thinking": false,
      "supports_json_mode": false,
      "supports_function_calling": false,
      "description": "DeepSeek's coding-focused model"
    },
    {
      "model_name": "perplexity/llama-3-sonar-large-32k-online",
      "aliases": ["perplexity", "sonar", "perplexity-online"],
      "context_window": 32768,
      "supports_extended_thinking": false,
      "supports_json_mode": false,
      "supports_function_calling": false,
      "description": "Perplexity's online model with web search"
    }
  ]
}
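
For illustration, here is a minimal sketch of how an entry from this file maps onto the registry dataclass defined in providers/openrouter_registry.py below. The field names are exactly those documented in _README; the vendor/model-name-version identifier is the hypothetical placeholder from the example_custom_model entry, and any field omitted falls back to the dataclass defaults.

from providers.openrouter_registry import OpenRouterModelConfig

# Mirrors the example_custom_model entry above; any field omitted here
# takes the dataclass default (e.g., context_window=32768, flags False).
config = OpenRouterModelConfig(
    model_name="vendor/model-name-version",  # hypothetical placeholder, as in the example entry
    aliases=["shortname", "nickname", "abbrev"],
    context_window=128000,
    supports_json_mode=True,
    supports_function_calling=True,
    description="Brief description of the model",
)
print(config.supports_extended_thinking)  # False: the default applies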
providers/openrouter_registry.py (new file, 178 lines)
@@ -0,0 +1,178 @@
"""OpenRouter model registry for managing model configurations and aliases."""

import json
import logging
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional

from .base import ModelCapabilities, ProviderType, RangeTemperatureConstraint


@dataclass
class OpenRouterModelConfig:
    """Configuration for an OpenRouter model."""

    model_name: str
    aliases: List[str] = field(default_factory=list)
    context_window: int = 32768  # Total context window size in tokens
    supports_extended_thinking: bool = False
    supports_system_prompts: bool = True
    supports_streaming: bool = True
    supports_function_calling: bool = False
    supports_json_mode: bool = False
    description: str = ""

    def to_capabilities(self) -> ModelCapabilities:
        """Convert to a ModelCapabilities object."""
        return ModelCapabilities(
            provider=ProviderType.OPENROUTER,
            model_name=self.model_name,
            friendly_name="OpenRouter",
            max_tokens=self.context_window,  # ModelCapabilities still uses max_tokens
            supports_extended_thinking=self.supports_extended_thinking,
            supports_system_prompts=self.supports_system_prompts,
            supports_streaming=self.supports_streaming,
            supports_function_calling=self.supports_function_calling,
            temperature_constraint=RangeTemperatureConstraint(0.0, 2.0, 1.0),
        )


class OpenRouterModelRegistry:
    """Registry for managing OpenRouter model configurations and aliases."""

    def __init__(self, config_path: Optional[str] = None):
        """Initialize the registry.

        Args:
            config_path: Path to the config file. If None, falls back to the
                OPENROUTER_MODELS_PATH environment variable, then the bundled default.
        """
        self.alias_map: Dict[str, str] = {}  # alias -> model_name
        self.model_map: Dict[str, OpenRouterModelConfig] = {}  # model_name -> config

        # Determine config path
        if config_path:
            self.config_path = Path(config_path)
        else:
            # Check environment variable first
            env_path = os.getenv("OPENROUTER_MODELS_PATH")
            if env_path:
                self.config_path = Path(env_path)
            else:
                # Default to conf/openrouter_models.json
                self.config_path = Path(__file__).parent.parent / "conf" / "openrouter_models.json"

        # Load configuration
        self.reload()

    def reload(self) -> None:
        """Reload configuration from disk."""
        try:
            configs = self._read_config()
        except Exception as e:
            # Unreadable or invalid files degrade gracefully to an empty registry
            logging.error(f"Failed to load OpenRouter model configuration: {e}")
            self.alias_map = {}
            self.model_map = {}
            return

        # Duplicate aliases are a configuration error; let the ValueError
        # from _build_maps propagate to the caller rather than swallowing it
        self._build_maps(configs)
        logging.info(f"Loaded {len(self.model_map)} OpenRouter models with {len(self.alias_map)} aliases")

    def _read_config(self) -> List[OpenRouterModelConfig]:
        """Read configuration from file.

        Returns:
            List of model configurations
        """
        if not self.config_path.exists():
            logging.warning(f"OpenRouter model config not found at {self.config_path}")
            return []

        try:
            with open(self.config_path, "r") as f:
                data = json.load(f)

            # Parse models
            configs = []
            for model_data in data.get("models", []):
                # Backwards compatibility: rename max_tokens to context_window
                if "max_tokens" in model_data and "context_window" not in model_data:
                    model_data["context_window"] = model_data.pop("max_tokens")

                config = OpenRouterModelConfig(**model_data)
                configs.append(config)

            return configs
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid JSON in {self.config_path}: {e}")
        except Exception as e:
            raise ValueError(f"Error reading config from {self.config_path}: {e}")

    def _build_maps(self, configs: List[OpenRouterModelConfig]) -> None:
        """Build alias and model maps from configurations.

        Args:
            configs: List of model configurations
        """
        alias_map = {}
        model_map = {}

        for config in configs:
            # Add to model map
            model_map[config.model_name] = config

            # Add aliases (stored lowercase for case-insensitive lookup)
            for alias in config.aliases:
                alias_lower = alias.lower()
                if alias_lower in alias_map:
                    existing_model = alias_map[alias_lower]
                    raise ValueError(
                        f"Duplicate alias '{alias}' found for models "
                        f"'{existing_model}' and '{config.model_name}'"
                    )
                alias_map[alias_lower] = config.model_name

        # Atomic update: swap in the fully built maps only after validation
        self.alias_map = alias_map
        self.model_map = model_map

    def resolve(self, name_or_alias: str) -> Optional[OpenRouterModelConfig]:
        """Resolve a model name or alias to a configuration.

        Args:
            name_or_alias: Model name or alias to resolve

        Returns:
            Model configuration if found, None otherwise
        """
        # Try alias first (case-insensitive)
        alias_lower = name_or_alias.lower()
        if alias_lower in self.alias_map:
            model_name = self.alias_map[alias_lower]
            return self.model_map.get(model_name)

        # Fall back to a direct model-name lookup
        return self.model_map.get(name_or_alias)

    def get_capabilities(self, name_or_alias: str) -> Optional[ModelCapabilities]:
        """Get model capabilities for a name or alias.

        Args:
            name_or_alias: Model name or alias

        Returns:
            ModelCapabilities if found, None otherwise
        """
        config = self.resolve(name_or_alias)
        if config:
            return config.to_capabilities()
        return None

    def list_models(self) -> List[str]:
        """List all available model names."""
        return list(self.model_map.keys())

    def list_aliases(self) -> List[str]:
        """List all available aliases."""
        return list(self.alias_map.keys())
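
A quick usage sketch of the registry above, assuming it is run from the repository root with the default conf/openrouter_models.json in place; the custom path shown for the environment override is hypothetical.

import os

from providers.openrouter_registry import OpenRouterModelRegistry

# Default lookup: conf/openrouter_models.json relative to the package.
registry = OpenRouterModelRegistry()

caps = registry.get_capabilities("sonnet")
if caps is not None:
    print(caps.model_name, caps.max_tokens)  # anthropic/claude-3-sonnet 200000

# The config location can also be overridden via the environment;
# the override is read when the registry is constructed.
os.environ["OPENROUTER_MODELS_PATH"] = "/path/to/custom_models.json"  # hypothetical path
registry = OpenRouterModelRegistry()  # now loads (or warns about) the custom file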
test_mapping.py (new file, 38 lines)
@@ -0,0 +1,38 @@
#!/usr/bin/env python3
"""
Test OpenRouter model mapping
"""

import sys

# NOTE: hardcoded local development path
sys.path.append('/Users/fahad/Developer/gemini-mcp-server')

from simulator_tests.base_test import BaseSimulatorTest


class MappingTest(BaseSimulatorTest):
    def test_mapping(self):
        """Test model alias mapping"""

        # Test with the 'flash' alias - should map to google/gemini-flash-1.5-8b
        print("\nTesting 'flash' alias mapping...")

        response, continuation_id = self.call_mcp_tool(
            "chat",
            {
                "prompt": "Say 'Hello from Flash model!'",
                "model": "flash",  # Should be mapped to google/gemini-flash-1.5-8b
                "temperature": 0.1,
            },
        )

        if response:
            print("✅ Flash alias worked!")
            print(f"Response: {response[:200]}...")
            return True
        else:
            print("❌ Flash alias failed")
            return False


if __name__ == "__main__":
    test = MappingTest(verbose=False)
    success = test.test_mapping()
    print(f"\nTest result: {'Success' if success else 'Failed'}")
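
A lighter-weight check of the same mapping can be made against the registry directly, without going through the simulator; a sketch, assuming the registry and the default config above are importable. Note that per the config in this commit, the OpenRouter-facing Flash aliases are gemini-flash, flash-openrouter, and flash-8b; the bare 'flash' alias used above is assumed to be handled by the server's own model routing.

from providers.openrouter_registry import OpenRouterModelRegistry

registry = OpenRouterModelRegistry()

# Resolve one of the Flash aliases defined in conf/openrouter_models.json.
config = registry.resolve("flash-openrouter")
assert config is not None
assert config.model_name == "google/gemini-flash-1.5-8b"
print("alias resolved:", config.model_name)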
tests/test_openrouter_registry.py (new file, 243 lines)
@@ -0,0 +1,243 @@
"""Tests for OpenRouter model registry functionality."""

import json
import os
import tempfile

import pytest

from providers.base import ProviderType
from providers.openrouter_registry import OpenRouterModelConfig, OpenRouterModelRegistry


class TestOpenRouterModelRegistry:
    """Test cases for the OpenRouter model registry."""

    def test_registry_initialization(self):
        """Test registry initializes with the default config."""
        registry = OpenRouterModelRegistry()

        # Should load models from the default location
        assert len(registry.list_models()) > 0
        assert len(registry.list_aliases()) > 0

    def test_custom_config_path(self):
        """Test registry with a custom config path."""
        # Create temporary config
        config_data = {
            "models": [
                {
                    "model_name": "test/model-1",
                    "aliases": ["test1", "t1"],
                    "context_window": 4096,
                }
            ]
        }

        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
            json.dump(config_data, f)
            temp_path = f.name

        try:
            registry = OpenRouterModelRegistry(config_path=temp_path)
            assert len(registry.list_models()) == 1
            assert "test/model-1" in registry.list_models()
            assert "test1" in registry.list_aliases()
            assert "t1" in registry.list_aliases()
        finally:
            os.unlink(temp_path)

    def test_environment_variable_override(self):
        """Test the OPENROUTER_MODELS_PATH environment variable."""
        # Create custom config
        config_data = {
            "models": [
                {
                    "model_name": "env/model",
                    "aliases": ["envtest"],
                    "context_window": 8192,
                }
            ]
        }

        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
            json.dump(config_data, f)
            temp_path = f.name

        try:
            # Set environment variable
            original_env = os.environ.get("OPENROUTER_MODELS_PATH")
            os.environ["OPENROUTER_MODELS_PATH"] = temp_path

            # Create registry without an explicit path
            registry = OpenRouterModelRegistry()

            # Should load from the environment path
            assert "env/model" in registry.list_models()
            assert "envtest" in registry.list_aliases()

        finally:
            # Restore environment
            if original_env is not None:
                os.environ["OPENROUTER_MODELS_PATH"] = original_env
            else:
                del os.environ["OPENROUTER_MODELS_PATH"]
            os.unlink(temp_path)

    def test_alias_resolution(self):
        """Test alias resolution functionality."""
        registry = OpenRouterModelRegistry()

        # Test various aliases
        test_cases = [
            ("opus", "anthropic/claude-3-opus"),
            ("OPUS", "anthropic/claude-3-opus"),  # Case-insensitive
            ("claude", "anthropic/claude-3-sonnet"),
            ("gpt4o", "openai/gpt-4o"),
            ("4o", "openai/gpt-4o"),
            ("mistral", "mistral/mistral-large"),
        ]

        for alias, expected_model in test_cases:
            config = registry.resolve(alias)
            assert config is not None, f"Failed to resolve alias '{alias}'"
            assert config.model_name == expected_model

    def test_direct_model_name_lookup(self):
        """Test looking up models by their full name."""
        registry = OpenRouterModelRegistry()

        # Should be able to look up by full model name
        config = registry.resolve("anthropic/claude-3-opus")
        assert config is not None
        assert config.model_name == "anthropic/claude-3-opus"

        config = registry.resolve("openai/gpt-4o")
        assert config is not None
        assert config.model_name == "openai/gpt-4o"

    def test_unknown_model_resolution(self):
        """Test resolution of unknown models."""
        registry = OpenRouterModelRegistry()

        # Unknown aliases should return None
        assert registry.resolve("unknown-alias") is None
        assert registry.resolve("") is None
        assert registry.resolve("non-existent") is None

    def test_model_capabilities_conversion(self):
        """Test conversion to ModelCapabilities."""
        registry = OpenRouterModelRegistry()

        config = registry.resolve("opus")
        assert config is not None

        caps = config.to_capabilities()
        assert caps.provider == ProviderType.OPENROUTER
        assert caps.model_name == "anthropic/claude-3-opus"
        assert caps.friendly_name == "OpenRouter"
        assert caps.max_tokens == 200000
        assert not caps.supports_extended_thinking

    def test_duplicate_alias_detection(self):
        """Test that duplicate aliases are detected."""
        config_data = {
            "models": [
                {
                    "model_name": "test/model-1",
                    "aliases": ["dupe"],
                    "context_window": 4096,
                },
                {
                    "model_name": "test/model-2",
                    "aliases": ["DUPE"],  # Same alias, different case
                    "context_window": 8192,
                },
            ]
        }

        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
            json.dump(config_data, f)
            temp_path = f.name

        try:
            with pytest.raises(ValueError, match="Duplicate alias"):
                OpenRouterModelRegistry(config_path=temp_path)
        finally:
            os.unlink(temp_path)

    def test_backwards_compatibility_max_tokens(self):
        """Test backwards compatibility with the old max_tokens field."""
        config_data = {
            "models": [
                {
                    "model_name": "test/old-model",
                    "aliases": ["old"],
                    "max_tokens": 16384,  # Old field name
                    "supports_extended_thinking": False,
                }
            ]
        }

        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
            json.dump(config_data, f)
            temp_path = f.name

        try:
            registry = OpenRouterModelRegistry(config_path=temp_path)
            config = registry.resolve("old")

            assert config is not None
            assert config.context_window == 16384  # Should be converted

            # Check capabilities still work
            caps = config.to_capabilities()
            assert caps.max_tokens == 16384
        finally:
            os.unlink(temp_path)

    def test_missing_config_file(self):
        """Test behavior with a missing config file."""
        # Use a non-existent path
        registry = OpenRouterModelRegistry(config_path="/non/existent/path.json")

        # Should initialize with empty maps
        assert len(registry.list_models()) == 0
        assert len(registry.list_aliases()) == 0
        assert registry.resolve("anything") is None

    def test_invalid_json_config(self):
        """Test handling of invalid JSON."""
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
            f.write("{ invalid json }")
            temp_path = f.name

        try:
            registry = OpenRouterModelRegistry(config_path=temp_path)
            # Should handle gracefully and initialize empty
            assert len(registry.list_models()) == 0
            assert len(registry.list_aliases()) == 0
        finally:
            os.unlink(temp_path)

    def test_model_with_all_capabilities(self):
        """Test a model with all capability flags set."""
        config = OpenRouterModelConfig(
            model_name="test/full-featured",
            aliases=["full"],
            context_window=128000,
            supports_extended_thinking=True,
            supports_system_prompts=True,
            supports_streaming=True,
            supports_function_calling=True,
            supports_json_mode=True,
            description="Fully featured test model",
        )

        caps = config.to_capabilities()
        assert caps.max_tokens == 128000
        assert caps.supports_extended_thinking
        assert caps.supports_system_prompts
        assert caps.supports_streaming
        assert caps.supports_function_calling
        # Note: supports_json_mode is not in ModelCapabilities yet