refactor: moved registries into a separate module and cleaned up code

fix: refactored dial provider to follow the same pattern
Author: Fahad
Date: 2025-10-07 12:59:09 +04:00
Parent: c27e81d6d2
Commit: 7c36b9255a
54 changed files with 325 additions and 282 deletions

conf/dial_models.json (new file, 169 lines added)

@@ -0,0 +1,169 @@
{
"_README": {
"description": "Model metadata for the DIAL (Data & AI Layer) aggregation provider.",
"documentation": "https://github.com/BeehiveInnovations/zen-mcp-server/blob/main/docs/configuration.md",
"usage": "Models listed here are exposed through the DIAL provider. Aliases are case-insensitive.",
"field_notes": "Matches providers/shared/model_capabilities.py.",
"field_descriptions": {
"model_name": "The model identifier as exposed by DIAL (typically deployment name)",
"aliases": "Array of shorthand names users can type instead of the full model name",
"context_window": "Total number of tokens the model can process (input + output combined)",
"max_output_tokens": "Maximum number of tokens the model can generate in a single response",
"supports_extended_thinking": "Whether the model supports extended reasoning tokens",
"supports_json_mode": "Whether the model can guarantee valid JSON output",
"supports_function_calling": "Whether the model supports function/tool calling",
"supports_images": "Whether the model can process images/visual input",
"max_image_size_mb": "Maximum total size in MB for all images combined",
"supports_temperature": "Whether the model accepts the temperature parameter",
"temperature_constraint": "Temperature constraint hint: 'fixed', 'range', or 'discrete'",
"description": "Human-readable description of the model",
"intelligence_score": "1-20 human rating used as the primary signal for auto-mode ordering"
}
},
"models": [
{
"model_name": "o3-2025-04-16",
"friendly_name": "DIAL (O3)",
"aliases": ["o3"],
"intelligence_score": 14,
"description": "OpenAI O3 via DIAL - Strong reasoning model",
"context_window": 200000,
"max_output_tokens": 100000,
"supports_extended_thinking": false,
"supports_function_calling": false,
"supports_json_mode": true,
"supports_images": true,
"max_image_size_mb": 20.0,
"supports_temperature": false,
"temperature_constraint": "fixed"
},
{
"model_name": "o4-mini-2025-04-16",
"friendly_name": "DIAL (O4-mini)",
"aliases": ["o4-mini"],
"intelligence_score": 11,
"description": "OpenAI O4-mini via DIAL - Fast reasoning model",
"context_window": 200000,
"max_output_tokens": 100000,
"supports_extended_thinking": false,
"supports_function_calling": false,
"supports_json_mode": true,
"supports_images": true,
"max_image_size_mb": 20.0,
"supports_temperature": false,
"temperature_constraint": "fixed"
},
{
"model_name": "anthropic.claude-sonnet-4.1-20250805-v1:0",
"friendly_name": "DIAL (Sonnet 4.1)",
"aliases": ["sonnet-4.1", "sonnet-4"],
"intelligence_score": 10,
"description": "Claude Sonnet 4.1 via DIAL - Balanced performance",
"context_window": 200000,
"max_output_tokens": 64000,
"supports_extended_thinking": false,
"supports_function_calling": false,
"supports_json_mode": true,
"supports_images": true,
"max_image_size_mb": 5.0,
"supports_temperature": true,
"temperature_constraint": "range"
},
{
"model_name": "anthropic.claude-sonnet-4.1-20250805-v1:0-with-thinking",
"friendly_name": "DIAL (Sonnet 4.1 Thinking)",
"aliases": ["sonnet-4.1-thinking", "sonnet-4-thinking"],
"intelligence_score": 11,
"description": "Claude Sonnet 4.1 with thinking mode via DIAL",
"context_window": 200000,
"max_output_tokens": 64000,
"supports_extended_thinking": true,
"supports_function_calling": false,
"supports_json_mode": true,
"supports_images": true,
"max_image_size_mb": 5.0,
"supports_temperature": true,
"temperature_constraint": "range"
},
{
"model_name": "anthropic.claude-opus-4.1-20250805-v1:0",
"friendly_name": "DIAL (Opus 4.1)",
"aliases": ["opus-4.1", "opus-4"],
"intelligence_score": 14,
"description": "Claude Opus 4.1 via DIAL - Most capable Claude model",
"context_window": 200000,
"max_output_tokens": 64000,
"supports_extended_thinking": false,
"supports_function_calling": false,
"supports_json_mode": true,
"supports_images": true,
"max_image_size_mb": 5.0,
"supports_temperature": true,
"temperature_constraint": "range"
},
{
"model_name": "anthropic.claude-opus-4.1-20250805-v1:0-with-thinking",
"friendly_name": "DIAL (Opus 4.1 Thinking)",
"aliases": ["opus-4.1-thinking", "opus-4-thinking"],
"intelligence_score": 15,
"description": "Claude Opus 4.1 with thinking mode via DIAL",
"context_window": 200000,
"max_output_tokens": 64000,
"supports_extended_thinking": true,
"supports_function_calling": false,
"supports_json_mode": true,
"supports_images": true,
"max_image_size_mb": 5.0,
"supports_temperature": true,
"temperature_constraint": "range"
},
{
"model_name": "gemini-2.5-pro-preview-03-25-google-search",
"friendly_name": "DIAL (Gemini 2.5 Pro Search)",
"aliases": ["gemini-2.5-pro-search"],
"intelligence_score": 17,
"description": "Gemini 2.5 Pro with Google Search via DIAL",
"context_window": 1000000,
"max_output_tokens": 65536,
"supports_extended_thinking": false,
"supports_function_calling": false,
"supports_json_mode": true,
"supports_images": true,
"max_image_size_mb": 20.0,
"supports_temperature": true,
"temperature_constraint": "range"
},
{
"model_name": "gemini-2.5-pro-preview-05-06",
"friendly_name": "DIAL (Gemini 2.5 Pro)",
"aliases": ["gemini-2.5-pro"],
"intelligence_score": 18,
"description": "Gemini 2.5 Pro via DIAL - Deep reasoning",
"context_window": 1000000,
"max_output_tokens": 65536,
"supports_extended_thinking": false,
"supports_function_calling": false,
"supports_json_mode": true,
"supports_images": true,
"max_image_size_mb": 20.0,
"supports_temperature": true,
"temperature_constraint": "range"
},
{
"model_name": "gemini-2.5-flash-preview-05-20",
"friendly_name": "DIAL (Gemini Flash 2.5)",
"aliases": ["gemini-2.5-flash"],
"intelligence_score": 10,
"description": "Gemini 2.5 Flash via DIAL - Ultra-fast",
"context_window": 1000000,
"max_output_tokens": 65536,
"supports_extended_thinking": false,
"supports_function_calling": false,
"supports_json_mode": true,
"supports_images": true,
"max_image_size_mb": 20.0,
"supports_temperature": true,
"temperature_constraint": "range"
}
]
}
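The _README block above mirrors the capability fields defined in providers/shared/model_capabilities.py and notes that aliases are case-insensitive. As a rough illustration only, the following minimal Python sketch shows how a registry file in this shape could be loaded and indexed by alias; the DialModelCapabilities dataclass and load_dial_registry helper are hypothetical names for this example, not the project's actual classes or API.

import json
from dataclasses import dataclass, field
from pathlib import Path


@dataclass
class DialModelCapabilities:
    """Illustrative subset of the fields described in conf/dial_models.json."""

    model_name: str
    friendly_name: str
    context_window: int
    max_output_tokens: int
    intelligence_score: int = 10
    aliases: list[str] = field(default_factory=list)
    supports_extended_thinking: bool = False
    supports_images: bool = False


def load_dial_registry(path: Path) -> dict[str, DialModelCapabilities]:
    """Map every model name and alias (lower-cased) to its capabilities entry."""
    raw = json.loads(path.read_text())
    registry: dict[str, DialModelCapabilities] = {}
    for entry in raw["models"]:
        caps = DialModelCapabilities(
            model_name=entry["model_name"],
            friendly_name=entry["friendly_name"],
            context_window=entry["context_window"],
            max_output_tokens=entry["max_output_tokens"],
            intelligence_score=entry.get("intelligence_score", 10),
            aliases=entry.get("aliases", []),
            supports_extended_thinking=entry.get("supports_extended_thinking", False),
            supports_images=entry.get("supports_images", False),
        )
        # Index both the canonical model name and each alias in lower case,
        # which gives the case-insensitive lookup the _README describes.
        registry[caps.model_name.lower()] = caps
        for alias in caps.aliases:
            registry[alias.lower()] = caps
    return registry


if __name__ == "__main__":
    models = load_dial_registry(Path("conf/dial_models.json"))
    print(models["o3"].friendly_name)  # alias lookup -> "DIAL (O3)"

Lower-casing keys at load time keeps lookups a plain dictionary access instead of a per-request scan over aliases.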


@@ -53,7 +53,7 @@
"gpt5-pro"
],
"intelligence_score": 18,
"description": "GPT-5 Pro (400K context, 272K output) - Advanced model with reasoning support",
"description": "GPT-5 Pro (400K context, 272K output) - Very advanced, reasoning model",
"context_window": 400000,
"max_output_tokens": 272000,
"supports_extended_thinking": true,
@@ -156,7 +156,7 @@
"o3pro"
],
"intelligence_score": 15,
"description": "Professional-grade reasoning (200K context) - EXTREMELY EXPENSIVE: Only for the most complex problems requiring universe-scale complexity analysis OR when the user explicitly asks for this model. Use sparingly for critical architectural decisions or exceptionally complex debugging that other models cannot handle.",
"description": "Professional-grade reasoning (200K context)",
"context_window": 200000,
"max_output_tokens": 65536,
"supports_extended_thinking": false,