refactor: remove subclass overrides now that the base class resolves the model name

refactor: always disable "stream"
Fahad
2025-10-04 10:35:32 +04:00
parent d184024820
commit 06d7701cc3
17 changed files with 210 additions and 260 deletions
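
In sketch form, the refactor moves alias resolution into the base class so subclasses no longer need a pass-through override, and pins "stream" to False in one place. Class and helper names below, other than _resolve_model_name and generate_content, are illustrative assumptions rather than the repository's actual code:

class BaseProvider:
    # Hypothetical alias table for illustration only.
    ALIASES = {"flash": "gemini-2.5-flash", "gemini": "google/gemini-2.5-pro"}

    def _resolve_model_name(self, model_name: str) -> str:
        # Resolve an alias to its canonical model name; now a base-class duty.
        return self.ALIASES.get(model_name, model_name)

    def generate_content(self, prompt: str, model_name: str, **kwargs) -> dict:
        resolved_model_name = self._resolve_model_name(model_name)
        # Streaming is always disabled: MCP never consumes streamed responses.
        return {"model": resolved_model_name, "prompt": prompt, "stream": False}

Before this commit, subclasses such as CustomProvider, OpenAIModelProvider, OpenRouterProvider, and XAIModelProvider each duplicated the resolution step in their own generate_content before delegating to super(); after it, those overrides are deleted and the base class is the single place where names are resolved and "stream" is forced off.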

View File

@@ -60,16 +60,19 @@ class ModelProvider(ABC):
customise. Subclasses usually only override ``_lookup_capabilities`` to
integrate a registry or dynamic source, or ``_finalise_capabilities`` to
tweak the returned object.
Args:
model_name: Canonical model name or its alias
"""
resolved_name = self._resolve_model_name(model_name)
capabilities = self._lookup_capabilities(resolved_name, model_name)
resolved_model_name = self._resolve_model_name(model_name)
capabilities = self._lookup_capabilities(resolved_model_name, model_name)
if capabilities is None:
self._raise_unsupported_model(model_name)
self._ensure_model_allowed(capabilities, resolved_name, model_name)
return self._finalise_capabilities(capabilities, resolved_name, model_name)
self._ensure_model_allowed(capabilities, resolved_model_name, model_name)
return self._finalise_capabilities(capabilities, resolved_model_name, model_name)
def get_all_model_capabilities(self) -> dict[str, ModelCapabilities]:
"""Return statically declared capabilities when available."""
@@ -150,7 +153,38 @@ class ModelProvider(ABC):
max_output_tokens: Optional[int] = None,
**kwargs,
) -> ModelResponse:
"""Generate content using the model."""
"""Generate content using the model.
This is the core method that all providers must implement to generate responses
from their models. Providers should handle model-specific capabilities and
constraints appropriately.
Args:
prompt: The main user prompt/query to send to the model
model_name: Canonical model name or its alias that the provider supports
system_prompt: Optional system instructions to prepend to the prompt for
establishing context, behavior, or role
temperature: Controls randomness in generation (0.0=deterministic, 1.0=creative),
default 0.3. Some models may not support temperature control
max_output_tokens: Optional maximum number of tokens to generate in the response.
If not specified, uses the model's default limit
**kwargs: Additional provider-specific parameters that vary by implementation
(e.g., thinking_mode for Gemini, top_p for OpenAI, images for vision models)
Returns:
ModelResponse: Standardized response object containing:
- content: The generated text response
- usage: Token usage statistics (input/output/total)
- model_name: The model that was actually used
- friendly_name: Human-readable provider/model identifier
- provider: The ProviderType enum value
- metadata: Provider-specific metadata (finish_reason, safety info, etc.)
Raises:
ValueError: If the model is not supported, parameters are invalid,
or the model is restricted by policy
RuntimeError: If the API call fails after retries
"""
def count_tokens(self, text: str, model_name: str) -> int:
"""Estimate token usage for a piece of text."""
@@ -276,7 +310,12 @@ class ModelProvider(ABC):
# Validation hooks
# ------------------------------------------------------------------
def validate_model_name(self, model_name: str) -> bool:
"""Return ``True`` when the model resolves to an allowed capability."""
"""
Return ``True`` when the model resolves to an allowed capability.
Args:
model_name: Canonical model name or its alias
"""
try:
self.get_capabilities(model_name)
@@ -285,7 +324,12 @@ class ModelProvider(ABC):
return True
def validate_parameters(self, model_name: str, temperature: float, **kwargs) -> None:
"""Validate model parameters against capabilities."""
"""
Validate model parameters against capabilities.
Args:
model_name: Canonical model name or its alias
temperature: Temperature to validate
**kwargs: Additional parameters to validate
"""
capabilities = self.get_capabilities(model_name)
@@ -364,7 +408,7 @@ class ModelProvider(ABC):
model configuration sources.
Args:
model_name: Model name that may be an alias
model_name: Canonical model name or its alias
Returns:
Resolved model name

View File

@@ -6,7 +6,7 @@ from typing import Optional
from .openai_compatible import OpenAICompatibleProvider
from .openrouter_registry import OpenRouterModelRegistry
from .shared import ModelCapabilities, ModelResponse, ProviderType
from .shared import ModelCapabilities, ProviderType
class CustomProvider(OpenAICompatibleProvider):
@@ -113,49 +113,6 @@ class CustomProvider(OpenAICompatibleProvider):
return ProviderType.CUSTOM
# ------------------------------------------------------------------
# Validation
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Request execution
# ------------------------------------------------------------------
def generate_content(
self,
prompt: str,
model_name: str,
system_prompt: Optional[str] = None,
temperature: float = 0.3,
max_output_tokens: Optional[int] = None,
**kwargs,
) -> ModelResponse:
"""Generate content using the custom API.
Args:
prompt: User prompt to send to the model
model_name: Name of the model to use
system_prompt: Optional system prompt for model behavior
temperature: Sampling temperature
max_output_tokens: Maximum tokens to generate
**kwargs: Additional provider-specific parameters
Returns:
ModelResponse with generated content and metadata
"""
# Resolve model alias to actual model name
resolved_model = self._resolve_model_name(model_name)
# Call parent method with resolved model name
return super().generate_content(
prompt=prompt,
model_name=resolved_model,
system_prompt=system_prompt,
temperature=temperature,
max_output_tokens=max_output_tokens,
**kwargs,
)
# ------------------------------------------------------------------
# Registry helpers
# ------------------------------------------------------------------

View File

@@ -333,15 +333,17 @@ class DIALModelProvider(OpenAICompatibleProvider):
/openai/deployments/{deployment}/chat/completions
Args:
prompt: User prompt
model_name: Model name or alias
system_prompt: Optional system prompt
temperature: Sampling temperature
max_output_tokens: Maximum tokens to generate
**kwargs: Additional provider-specific parameters
prompt: The main user prompt/query to send to the model
model_name: Model name or alias (e.g., "o3", "sonnet-4.1", "gemini-2.5-pro")
system_prompt: Optional system instructions to prepend to the prompt for context/behavior
temperature: Sampling temperature for randomness (0.0=deterministic, 1.0=creative), default 0.3
Note: O3/O4 models don't support temperature and will ignore this parameter
max_output_tokens: Optional maximum number of tokens to generate in the response
images: Optional list of image paths or data URLs to include with the prompt (for vision-capable models)
**kwargs: Additional OpenAI-compatible parameters (top_p, frequency_penalty, presence_penalty, seed, stop)
Returns:
ModelResponse with generated content and metadata
ModelResponse: Contains the generated content, token usage stats, model metadata, and finish reason
"""
# Validate model name against allow-list
if not self.validate_model_name(model_name):
@@ -381,6 +383,7 @@ class DIALModelProvider(OpenAICompatibleProvider):
completion_params = {
"model": resolved_model,
"messages": messages,
"stream": False,
}
# Determine temperature support from capabilities
@@ -397,7 +400,7 @@ class DIALModelProvider(OpenAICompatibleProvider):
# Add additional parameters
for key, value in kwargs.items():
if key in ["top_p", "frequency_penalty", "presence_penalty", "seed", "stop", "stream"]:
if not supports_temperature and key in ["top_p", "frequency_penalty", "presence_penalty"]:
if not supports_temperature and key in ["top_p", "frequency_penalty", "presence_penalty", "stream"]:
continue
completion_params[key] = value
@@ -437,9 +440,9 @@ class DIALModelProvider(OpenAICompatibleProvider):
except Exception as exc:
attempts = max(attempt_counter["value"], 1)
if attempts == 1:
raise ValueError(f"DIAL API error for model {model_name}: {exc}") from exc
raise ValueError(f"DIAL API error for model {resolved_model}: {exc}") from exc
raise ValueError(f"DIAL API error for model {model_name} after {attempts} attempts: {exc}") from exc
raise ValueError(f"DIAL API error for model {resolved_model} after {attempts} attempts: {exc}") from exc
def close(self) -> None:
"""Clean up HTTP clients when provider is closed."""

View File

@@ -172,12 +172,28 @@ class GeminiModelProvider(ModelProvider):
images: Optional[list[str]] = None,
**kwargs,
) -> ModelResponse:
"""Generate content using Gemini model."""
"""
Generate content using Gemini model.
Args:
prompt: The main user prompt/query to send to the model
model_name: Canonical model name or its alias (e.g., "gemini-2.5-pro", "flash", "pro")
system_prompt: Optional system instructions to prepend to the prompt for context/behavior
temperature: Controls randomness in generation (0.0=deterministic, 1.0=creative), default 0.3
max_output_tokens: Optional maximum number of tokens to generate in the response
thinking_mode: Thinking budget level for models that support it ("minimal", "low", "medium", "high", "max"), default "medium"
images: Optional list of image paths or data URLs to include with the prompt (for vision models)
**kwargs: Additional keyword arguments (reserved for future use)
Returns:
ModelResponse: Contains the generated content, token usage stats, model metadata, and safety information
"""
# Validate parameters and fetch capabilities
resolved_name = self._resolve_model_name(model_name)
self.validate_parameters(model_name, temperature)
capabilities = self.get_capabilities(model_name)
resolved_model_name = self._resolve_model_name(model_name)
# Prepare content parts (text and potentially images)
parts = []
@@ -201,7 +217,7 @@ class GeminiModelProvider(ModelProvider):
# Continue with other images and text
continue
elif images and not capabilities.supports_images:
logger.warning(f"Model {resolved_name} does not support images, ignoring {len(images)} image(s)")
logger.warning(f"Model {resolved_model_name} does not support images, ignoring {len(images)} image(s)")
# Create contents structure
contents = [{"parts": parts}]
@@ -219,7 +235,7 @@ class GeminiModelProvider(ModelProvider):
# Add thinking configuration for models that support it
if capabilities.supports_extended_thinking and thinking_mode in self.THINKING_BUDGETS:
# Get model's max thinking tokens and calculate actual budget
model_config = self.MODEL_CAPABILITIES.get(resolved_name)
model_config = self.MODEL_CAPABILITIES.get(resolved_model_name)
if model_config and model_config.max_thinking_tokens > 0:
max_thinking_tokens = model_config.max_thinking_tokens
actual_thinking_budget = int(max_thinking_tokens * self.THINKING_BUDGETS[thinking_mode])
@@ -233,7 +249,7 @@ class GeminiModelProvider(ModelProvider):
def _attempt() -> ModelResponse:
attempt_counter["value"] += 1
response = self.client.models.generate_content(
model=resolved_name,
model=resolved_model_name,
contents=contents,
config=generation_config,
)
@@ -308,7 +324,7 @@ class GeminiModelProvider(ModelProvider):
return ModelResponse(
content=response.text,
usage=usage,
model_name=resolved_name,
model_name=resolved_model_name,
friendly_name="Gemini",
provider=ProviderType.GOOGLE,
metadata={
@@ -324,12 +340,12 @@ class GeminiModelProvider(ModelProvider):
operation=_attempt,
max_attempts=max_retries,
delays=retry_delays,
log_prefix=f"Gemini API ({resolved_name})",
log_prefix=f"Gemini API ({resolved_model_name})",
)
except Exception as exc:
attempts = max(attempt_counter["value"], 1)
error_msg = (
f"Gemini API error for model {resolved_name} after {attempts} attempt"
f"Gemini API error for model {resolved_model_name} after {attempts} attempt"
f"{'s' if attempts > 1 else ''}: {exc}"
)
raise RuntimeError(error_msg) from exc
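
The thinking-budget arithmetic from the hunk above, as a standalone sketch; the fractions in THINKING_BUDGETS are assumed values (the diff shows only that each mode scales the model's max_thinking_tokens):

THINKING_BUDGETS = {"minimal": 0.05, "low": 0.25, "medium": 0.5, "high": 0.8, "max": 1.0}  # assumed

def thinking_budget(max_thinking_tokens: int, thinking_mode: str = "medium") -> int:
    # Scale the model's thinking-token ceiling by the requested mode's fraction.
    return int(max_thinking_tokens * THINKING_BUDGETS[thinking_mode])

# e.g. with an assumed 24576-token ceiling: thinking_budget(24576) -> 12288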

View File

@@ -462,10 +462,11 @@ class OpenAICompatibleProvider(ModelProvider):
Args:
prompt: User prompt to send to the model
model_name: Name of the model to use
model_name: Canonical model name or its alias
system_prompt: Optional system prompt for model behavior
temperature: Sampling temperature
max_output_tokens: Maximum tokens to generate
images: Optional list of image paths or data URLs to include with the prompt (for vision models)
**kwargs: Additional provider-specific parameters
Returns:
@@ -497,6 +498,9 @@ class OpenAICompatibleProvider(ModelProvider):
# Validate parameters with the effective temperature
self.validate_parameters(model_name, effective_temperature)
# Resolve to canonical model name
resolved_model = self._resolve_model_name(model_name)
# Prepare messages
messages = []
if system_prompt:
@@ -518,7 +522,7 @@ class OpenAICompatibleProvider(ModelProvider):
# Continue with other images and text
continue
elif images and (not capabilities or not capabilities.supports_images):
logging.warning(f"Model {model_name} does not support images, ignoring {len(images)} image(s)")
logging.warning(f"Model {resolved_model} does not support images, ignoring {len(images)} image(s)")
# Add user message
if len(user_content) == 1:
@@ -529,14 +533,14 @@ class OpenAICompatibleProvider(ModelProvider):
messages.append({"role": "user", "content": user_content})
# Prepare completion parameters
# Always disable streaming
# MCP doesn't use streaming, and this avoids issues with O3 model access
completion_params = {
"model": model_name,
"model": resolved_model,
"messages": messages,
"stream": False,
}
# Check model capabilities once to determine parameter support
resolved_model = self._resolve_model_name(model_name)
# Use the effective temperature we calculated earlier
supports_sampling = effective_temperature is not None
@@ -553,7 +557,7 @@ class OpenAICompatibleProvider(ModelProvider):
for key, value in kwargs.items():
if key in ["top_p", "frequency_penalty", "presence_penalty", "seed", "stop", "stream"]:
# Reasoning models (those that don't support temperature) also don't support these parameters
if not supports_sampling and key in ["top_p", "frequency_penalty", "presence_penalty"]:
if not supports_sampling and key in ["top_p", "frequency_penalty", "presence_penalty", "stream"]:
continue # Skip unsupported parameters for reasoning models
completion_params[key] = value
@@ -585,7 +589,7 @@ class OpenAICompatibleProvider(ModelProvider):
return ModelResponse(
content=content,
usage=usage,
model_name=model_name,
model_name=resolved_model,
friendly_name=self.FRIENDLY_NAME,
provider=self.get_provider_type(),
metadata={
@@ -601,12 +605,12 @@ class OpenAICompatibleProvider(ModelProvider):
operation=_attempt,
max_attempts=max_retries,
delays=retry_delays,
log_prefix=f"{self.FRIENDLY_NAME} API ({model_name})",
log_prefix=f"{self.FRIENDLY_NAME} API ({resolved_model})",
)
except Exception as exc:
attempts = max(attempt_counter["value"], 1)
error_msg = (
f"{self.FRIENDLY_NAME} API error for model {model_name} after {attempts} attempt"
f"{self.FRIENDLY_NAME} API error for model {resolved_model} after {attempts} attempt"
f"{'s' if attempts > 1 else ''}: {exc}"
)
logging.error(error_msg)
@@ -618,7 +622,7 @@ class OpenAICompatibleProvider(ModelProvider):
For proxy providers, this may use generic capabilities.
Args:
model_name: Model to validate for
model_name: Canonical model name or its alias
temperature: Temperature to validate
**kwargs: Additional parameters to validate
"""

View File

@@ -7,7 +7,7 @@ if TYPE_CHECKING:
from tools.models import ToolModelCategory
from .openai_compatible import OpenAICompatibleProvider
from .shared import ModelCapabilities, ModelResponse, ProviderType, TemperatureConstraint
from .shared import ModelCapabilities, ProviderType, TemperatureConstraint
logger = logging.getLogger(__name__)
@@ -253,33 +253,6 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
"""Get the provider type."""
return ProviderType.OPENAI
# ------------------------------------------------------------------
# Request execution
# ------------------------------------------------------------------
def generate_content(
self,
prompt: str,
model_name: str,
system_prompt: Optional[str] = None,
temperature: float = 0.3,
max_output_tokens: Optional[int] = None,
**kwargs,
) -> ModelResponse:
"""Generate content using OpenAI API with proper model name resolution."""
# Resolve model alias before making API call
resolved_model_name = self._resolve_model_name(model_name)
# Call parent implementation with resolved model name
return super().generate_content(
prompt=prompt,
model_name=resolved_model_name,
system_prompt=system_prompt,
temperature=temperature,
max_output_tokens=max_output_tokens,
**kwargs,
)
# ------------------------------------------------------------------
# Provider preferences
# ------------------------------------------------------------------

View File

@@ -8,7 +8,6 @@ from .openai_compatible import OpenAICompatibleProvider
from .openrouter_registry import OpenRouterModelRegistry
from .shared import (
ModelCapabilities,
ModelResponse,
ProviderType,
RangeTemperatureConstraint,
)
@@ -111,50 +110,6 @@ class OpenRouterProvider(OpenAICompatibleProvider):
"""Identify this provider for restrictions and logging."""
return ProviderType.OPENROUTER
# ------------------------------------------------------------------
# Request execution
# ------------------------------------------------------------------
def generate_content(
self,
prompt: str,
model_name: str,
system_prompt: Optional[str] = None,
temperature: float = 0.3,
max_output_tokens: Optional[int] = None,
**kwargs,
) -> ModelResponse:
"""Generate content using the OpenRouter API.
Args:
prompt: User prompt to send to the model
model_name: Name of the model (or alias) to use
system_prompt: Optional system prompt for model behavior
temperature: Sampling temperature
max_output_tokens: Maximum tokens to generate
**kwargs: Additional provider-specific parameters
Returns:
ModelResponse with generated content and metadata
"""
# Resolve model alias to actual OpenRouter model name
resolved_model = self._resolve_model_name(model_name)
# Always disable streaming for OpenRouter
# MCP doesn't use streaming, and this avoids issues with O3 model access
if "stream" not in kwargs:
kwargs["stream"] = False
# Call parent method with resolved model name
return super().generate_content(
prompt=prompt,
model_name=resolved_model,
system_prompt=system_prompt,
temperature=temperature,
max_output_tokens=max_output_tokens,
**kwargs,
)
# ------------------------------------------------------------------
# Registry helpers
# ------------------------------------------------------------------

View File

@@ -7,7 +7,7 @@ if TYPE_CHECKING:
from tools.models import ToolModelCategory
from .openai_compatible import OpenAICompatibleProvider
from .shared import ModelCapabilities, ModelResponse, ProviderType, TemperatureConstraint
from .shared import ModelCapabilities, ProviderType, TemperatureConstraint
logger = logging.getLogger(__name__)
@@ -92,29 +92,6 @@ class XAIModelProvider(OpenAICompatibleProvider):
"""Get the provider type."""
return ProviderType.XAI
def generate_content(
self,
prompt: str,
model_name: str,
system_prompt: Optional[str] = None,
temperature: float = 0.3,
max_output_tokens: Optional[int] = None,
**kwargs,
) -> ModelResponse:
"""Generate content using X.AI API with proper model name resolution."""
# Resolve model alias before making API call
resolved_model_name = self._resolve_model_name(model_name)
# Call parent implementation with resolved model name
return super().generate_content(
prompt=prompt,
model_name=resolved_model_name,
system_prompt=system_prompt,
temperature=temperature,
max_output_tokens=max_output_tokens,
**kwargs,
)
def get_preferred_model(self, category: "ToolModelCategory", allowed_models: list[str]) -> Optional[str]:
"""Get XAI's preferred model for a given category from allowed models.

View File

@@ -38,12 +38,12 @@
"content-type": "application/json; charset=UTF-8",
"vary": "Origin, X-Origin, Referer",
"content-encoding": "gzip",
"date": "Fri, 03 Oct 2025 18:58:34 GMT",
"date": "Sat, 04 Oct 2025 06:23:34 GMT",
"server": "scaffolding on HTTPServer2",
"x-xss-protection": "0",
"x-frame-options": "SAMEORIGIN",
"x-content-type-options": "nosniff",
"server-timing": "gfet4t7; dur=21591",
"server-timing": "gfet4t7; dur=1401",
"alt-svc": "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000",
"transfer-encoding": "chunked"
},
@@ -66,17 +66,24 @@
"usageMetadata": {
"promptTokenCount": 1085,
"candidatesTokenCount": 1,
"totalTokenCount": 3091,
"totalTokenCount": 1154,
"cachedContentTokenCount": 994,
"promptTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 1085
}
],
"thoughtsTokenCount": 2005
"cacheTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 994
}
],
"thoughtsTokenCount": 68
},
"modelVersion": "gemini-2.5-flash",
"responseId": "2hzgaPuTEsSlxN8PrIuRUA"
"responseId": "Zr3gaPiqNLXtkdUP7t7J0QU"
}
],
"byte_segments": [],
@@ -87,12 +94,12 @@
"content-type": "application/json; charset=UTF-8",
"vary": "Origin, X-Origin, Referer",
"content-encoding": "gzip",
"date": "Fri, 03 Oct 2025 18:58:34 GMT",
"date": "Sat, 04 Oct 2025 06:23:34 GMT",
"server": "scaffolding on HTTPServer2",
"x-xss-protection": "0",
"x-frame-options": "SAMEORIGIN",
"x-content-type-options": "nosniff",
"server-timing": "gfet4t7; dur=21591",
"server-timing": "gfet4t7; dur=1401",
"alt-svc": "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000",
"transfer-encoding": "chunked"
}
@@ -112,8 +119,15 @@
}
],
"model_version": "gemini-2.5-flash",
"response_id": "2hzgaPuTEsSlxN8PrIuRUA",
"response_id": "Zr3gaPiqNLXtkdUP7t7J0QU",
"usage_metadata": {
"cache_tokens_details": [
{
"modality": "TEXT",
"token_count": 994
}
],
"cached_content_token_count": 994,
"candidates_token_count": 1,
"prompt_token_count": 1085,
"prompt_tokens_details": [
@@ -122,8 +136,8 @@
"token_count": 1085
}
],
"thoughts_token_count": 2005,
"total_token_count": 3091
"thoughts_token_count": 68,
"total_token_count": 1154
}
}
]

File diff suppressed because one or more lines are too long

View File

@@ -14,6 +14,7 @@
}
],
"model": "gpt-5",
"stream": false,
"temperature": 1.0
},
"headers": {
@@ -21,7 +22,7 @@
"accept-encoding": "gzip, deflate",
"authorization": "Bearer SANITIZED",
"connection": "keep-alive",
"content-length": "5572",
"content-length": "5587",
"content-type": "application/json",
"host": "api.openai.com",
"user-agent": "OpenAI/Python 2.1.0",
@@ -41,29 +42,29 @@
},
"response": {
"content": {
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTWZIOWgxTEFBTHRobThPcTFyMEdPT2FESHdDZCIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTUxNzkxNSwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiNyIsCiAgICAgICAgInJlZnVzYWwiOiBudWxsLAogICAgICAgICJhbm5vdGF0aW9ucyI6IFtdCiAgICAgIH0sCiAgICAgICJmaW5pc2hfcmVhc29uIjogInN0b3AiCiAgICB9CiAgXSwKICAidXNhZ2UiOiB7CiAgICAicHJvbXB0X3Rva2VucyI6IDEwNTUsCiAgICAiY29tcGxldGlvbl90b2tlbnMiOiAyMDIsCiAgICAidG90YWxfdG9rZW5zIjogMTI1NywKICAgICJwcm9tcHRfdG9rZW5zX2RldGFpbHMiOiB7CiAgICAgICJjYWNoZWRfdG9rZW5zIjogMCwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAKICAgIH0sCiAgICAiY29tcGxldGlvbl90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgInJlYXNvbmluZ190b2tlbnMiOiAxOTIsCiAgICAgICJhdWRpb190b2tlbnMiOiAwLAogICAgICAiYWNjZXB0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwLAogICAgICAicmVqZWN0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwCiAgICB9CiAgfSwKICAic2VydmljZV90aWVyIjogImRlZmF1bHQiLAogICJzeXN0ZW1fZmluZ2VycHJpbnQiOiBudWxsCn0K",
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXB5M05odjRueUZWQVN1S0FkSmN3aldQeHhmOCIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU1OTAxNSwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiNyIsCiAgICAgICAgInJlZnVzYWwiOiBudWxsLAogICAgICAgICJhbm5vdGF0aW9ucyI6IFtdCiAgICAgIH0sCiAgICAgICJmaW5pc2hfcmVhc29uIjogInN0b3AiCiAgICB9CiAgXSwKICAidXNhZ2UiOiB7CiAgICAicHJvbXB0X3Rva2VucyI6IDEwNTUsCiAgICAiY29tcGxldGlvbl90b2tlbnMiOiAyMDIsCiAgICAidG90YWxfdG9rZW5zIjogMTI1NywKICAgICJwcm9tcHRfdG9rZW5zX2RldGFpbHMiOiB7CiAgICAgICJjYWNoZWRfdG9rZW5zIjogMTAyNCwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAKICAgIH0sCiAgICAiY29tcGxldGlvbl90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgInJlYXNvbmluZ190b2tlbnMiOiAxOTIsCiAgICAgICJhdWRpb190b2tlbnMiOiAwLAogICAgICAiYWNjZXB0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwLAogICAgICAicmVqZWN0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwCiAgICB9CiAgfSwKICAic2VydmljZV90aWVyIjogImRlZmF1bHQiLAogICJzeXN0ZW1fZmluZ2VycHJpbnQiOiBudWxsCn0K",
"encoding": "base64",
"size": 774
"size": 777
},
"headers": {
"access-control-expose-headers": "X-Request-ID",
"alt-svc": "h3=\":443\"; ma=86400",
"cf-cache-status": "DYNAMIC",
"cf-ray": "988eabf5df9f4efe-DXB",
"cf-ray": "989297642c2023e5-DXB",
"connection": "keep-alive",
"content-encoding": "gzip",
"content-type": "application/json",
"date": "Fri, 03 Oct 2025 18:58:39 GMT",
"date": "Sat, 04 Oct 2025 06:23:40 GMT",
"openai-organization": "beehive-innovations-fze",
"openai-processing-ms": "4274",
"openai-processing-ms": "4177",
"openai-project": "proj_QP57xBVPOlWpp0vuJEPGwXK3",
"openai-version": "2020-10-01",
"server": "cloudflare",
"set-cookie": "__cf_bm=ZiWispkKOb9pXfTcQYhuoR3z1fQ8NfrmCZw8R2Fddgg-(XXX) XXX-XXXX-0.0.0.0-htOY0U_14x1QSA8.6f7aSQSTTMbDlG_Lr2QuE8pFN0ROjpNaOflYaGAoHEOre4Cqwwx8qx39yH_DcvcJ7a3hgfAANEoDBSU0guvwLPxbYEA; path=/; expires=Fri, 03-Oct-25 19:28:39 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=3xtQhbNOqAGinG45GRwBQxbqJqYwBDXJDMl6gKNs9Bo-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"set-cookie": "__cf_bm=IVHjup34WOTMmaqIoUZ2nbrlvnfqvetX1CJW2YD9900-(XXX) XXX-XXXX-0.0.0.0-KpHNz.EGGVrd88ZrVhEQfIIdVnL9Z_p4dGaTyzrCgbz._ufQ.ufCc.BBmVZZt0w0csym46eV1aMSvzEltNm0kFRnfb7aq9yRzuzTOP1oCfg; path=/; expires=Sat, 04-Oct-25 06:53:40 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=.EYrWkDOZlWTzx9WyxCz_IvyuKizestJfpHeBI7GtRE-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"strict-transport-security": "max-age=31536000; includeSubDomains; preload",
"transfer-encoding": "chunked",
"x-content-type-options": "nosniff",
"x-envoy-upstream-service-time": "4570",
"x-envoy-upstream-service-time": "4320",
"x-openai-proxy-wasm": "v0.1",
"x-ratelimit-limit-requests": "500",
"x-ratelimit-limit-tokens": "500000",
@@ -71,7 +72,7 @@
"x-ratelimit-remaining-tokens": "498657",
"x-ratelimit-reset-requests": "120ms",
"x-ratelimit-reset-tokens": "161ms",
"x-request-id": "req_8cbbd029bfd34738815a169ee158d1c6"
"x-request-id": "req_edd581c9db9c4ca5a9b1b5c65240b8b5"
},
"reason_phrase": "OK",
"status_code": 200

View File

@@ -14,6 +14,7 @@
}
],
"model": "gpt-5",
"stream": false,
"temperature": 1.0
},
"headers": {
@@ -21,7 +22,7 @@
"accept-encoding": "gzip, deflate",
"authorization": "Bearer SANITIZED",
"connection": "keep-alive",
"content-length": "5742",
"content-length": "5757",
"content-type": "application/json",
"host": "api.openai.com",
"user-agent": "OpenAI/Python 2.1.0",
@@ -41,29 +42,29 @@
},
"response": {
"content": {
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTWZQR3UyR3dHRExTcHRlNjVQTHBxWXc4aWZsSCIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTUxODQxOCwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiaU9TLlxuXG5XaHk6XG4tIExhcmdlciBpbnN0YWxsIGJhc2UgYW5kIG1hcmtlZGx5IGhpZ2hlciBjb25zdW1lciBzcGVuZCBvbiB0aGUgQXBwIFN0b3JlIHRoYW4gdGhlIE1hYyBBcHAgU3RvcmUuXG4tIExvd2VyIHB1cmNoYXNlIGZyaWN0aW9uIGFuZCBzdHJvbmdlciBkaXNjb3ZlcnkgbWVjaGFuaWNzIGRyaXZlIG1vcmUgdm9sdW1lLlxuLSBCcm9hZGVyIGNhdGVnb3JpZXMgc3VjY2VlZCAoZ2FtZXMsIGxpZmVzdHlsZSwgaGVhbHRoLCB1dGlsaXRpZXMpLCBlbmFibGluZyBzY2FsZSBldmVuIGF0IGxvd2VyIHByaWNlIHBvaW50cy5cblxuV2hlbiBtYWNPUyBjYW4gb3V0cGVyZm9ybTpcbi0gUHJvL2NyZWF0b3IvZGV2IHRvb2xzIHdpdGggaGlnaGVyIHdpbGxpbmduZXNzIHRvIHBheSAoJDMw4oCTJDMwMCspIGFuZCBzdGlja3kgc3Vic2NyaXB0aW9ucy5cbi0gQjJCL2VudGVycHJpc2UgbGljZW5zaW5nLCB2b2x1bWUgcHVyY2hhc2luZywgYW5kIGRpcmVjdCBkaXN0cmlidXRpb24gb3V0c2lkZSB0aGUgTWFjIEFwcCBTdG9yZS5cbi0gTmljaGVzIHdpdGggbGVzcyBjb21wZXRpdGlvbiB3aGVyZSBkaWZmZXJlbnRpYXRpb24gaXMgY2xlYXIgKGUuZy4sIHNwZWNpYWxpemVkIHByb2R1Y3Rpdml0eSwgYXVkaW8sIHNlY3VyaXR5KS5cblxuQm90dG9tIGxpbmU6XG4tIEZvciBtb3N0IGNvbnN1bWVyIGFwcHMsIGlPUyBzZWxscyBiZXR0ZXIgYnkgdm9sdW1lIGFuZCB0b3RhbCByZXZlbnVlLlxuLSBGb3Igc3BlY2lhbGl6ZWQgcHJvIG9yIGJ1c2luZXNzIHNvZnR3YXJlLCBtYWNPUyBjYW4geWllbGQgaGlnaGVyIEFSUFUgYW5kIGhlYWx0aGllciB1bml0IGVjb25vbWljcyBkZXNwaXRlIGxvd2VyIHZvbHVtZS5cblxuSWYgeW91IHdhbnQgYSBxdWljayByZXZlbnVlIG1vZGVsIHRhaWxvcmVkIHRvIHlvdXIgYXBwLCBwbGVhc2UgY29udGludWUgdGhpcyBjb252ZXJzYXRpb24gdXNpbmcgdGhlIGNvbnRpbnVhdGlvbl9pZCBmcm9tIHRoaXMgcmVzcG9uc2UgYW5kIHNoYXJlOiBjYXRlZ29yeSwgdGFyZ2V0IGF1ZGllbmNlIChjb25zdW1lciB2cy4gcHJvL2VudGVycHJpc2UpLCBwcmljaW5nIG1vZGVsIChvbmUtdGltZSB2cy4gc3Vic2NyaXB0aW9uKSwgaW50ZW5kZWQgcGxhdGZvcm1zLCBhbmQgZGlzdHJpYnV0aW9uIGNoYW5uZWxzIChBcHAgU3RvcmUgb25seSB2cy4gYWxzbyBkaXJlY3QpLiIsCiAgICAgICAgInJlZnVzYWwiOiBudWxsLAogICAgICAgICJhbm5vdGF0aW9ucyI6IFtdCiAgICAgIH0sCiAgICAgICJmaW5pc2hfcmVhc29uIjogInN0b3AiCiAgICB9CiAgXSwKICAidXNhZ2UiOiB7CiAgICAicHJvbXB0X3Rva2VucyI6IDEwMzEsCiAgICAiY29tcGxldGlvbl90b2tlbnMiOiAxMTM1LAogICAgInRvdGFsX3Rva2VucyI6IDIxNjYsCiAgICAicHJvbXB0X3Rva2Vuc19kZXRhaWxzIjogewogICAgICAiY2FjaGVkX3Rva2VucyI6IDAsCiAgICAgICJhdWRpb190b2tlbnMiOiAwCiAgICB9LAogICAgImNvbXBsZXRpb25fdG9rZW5zX2RldGFpbHMiOiB7CiAgICAgICJyZWFzb25pbmdfdG9rZW5zIjogODk2LAogICAgICAiYXVkaW9fdG9rZW5zIjogMCwKICAgICAgImFjY2VwdGVkX3ByZWRpY3Rpb25fdG9rZW5zIjogMCwKICAgICAgInJlamVjdGVkX3ByZWRpY3Rpb25fdG9rZW5zIjogMAogICAgfQogIH0sCiAgInNlcnZpY2VfdGllciI6ICJkZWZhdWx0IiwKICAic3lzdGVtX2ZpbmdlcnByaW50IjogbnVsbAp9Cg==",
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXB5VmVqZDhTZ1NOSlFsQUJoeWptZkFWMkxGVSIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU1OTA0MywKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiaU9TIiwKICAgICAgICAicmVmdXNhbCI6IG51bGwsCiAgICAgICAgImFubm90YXRpb25zIjogW10KICAgICAgfSwKICAgICAgImZpbmlzaF9yZWFzb24iOiAic3RvcCIKICAgIH0KICBdLAogICJ1c2FnZSI6IHsKICAgICJwcm9tcHRfdG9rZW5zIjogMTAzMSwKICAgICJjb21wbGV0aW9uX3Rva2VucyI6IDUyMywKICAgICJ0b3RhbF90b2tlbnMiOiAxNTU0LAogICAgInByb21wdF90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgImNhY2hlZF90b2tlbnMiOiAwLAogICAgICAiYXVkaW9fdG9rZW5zIjogMAogICAgfSwKICAgICJjb21wbGV0aW9uX3Rva2Vuc19kZXRhaWxzIjogewogICAgICAicmVhc29uaW5nX3Rva2VucyI6IDUxMiwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAsCiAgICAgICJhY2NlcHRlZF9wcmVkaWN0aW9uX3Rva2VucyI6IDAsCiAgICAgICJyZWplY3RlZF9wcmVkaWN0aW9uX3Rva2VucyI6IDAKICAgIH0KICB9LAogICJzZXJ2aWNlX3RpZXIiOiAiZGVmYXVsdCIsCiAgInN5c3RlbV9maW5nZXJwcmludCI6IG51bGwKfQo=",
"encoding": "base64",
"size": 1957
"size": 776
},
"headers": {
"access-control-expose-headers": "X-Request-ID",
"alt-svc": "h3=\":443\"; ma=86400",
"cf-cache-status": "DYNAMIC",
"cf-ray": "988eb83cdb0adb6f-DXB",
"cf-ray": "989298175a5cdb6f-DXB",
"connection": "keep-alive",
"content-encoding": "gzip",
"content-type": "application/json",
"date": "Fri, 03 Oct 2025 19:07:26 GMT",
"date": "Sat, 04 Oct 2025 06:24:14 GMT",
"openai-organization": "beehive-innovations-fze",
"openai-processing-ms": "28138",
"openai-processing-ms": "11038",
"openai-project": "proj_QP57xBVPOlWpp0vuJEPGwXK3",
"openai-version": "2020-10-01",
"server": "cloudflare",
"set-cookie": "__cf_bm=kjKHinSl5MqrR75lcaoaXzt1C0lzpw9dtUpAPKBsTRM-(XXX) XXX-XXXX-0.0.0.0-Hxur5dPsRVJo2mR4u.CcQLrYZX_T8A3oHXhdtzIBQWillmYA6xbUbllhF5jiPxCL7m_j.xTn8C4p.O3VsJBXvvgEBYLRnHGwTZVZMoN9Exs; path=/; expires=Fri, 03-Oct-25 19:37:26 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=8luuy1ZjL1hqNJk9cSG3YCz7rVWJPd6G2Nbad57Ra.c-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"set-cookie": "__cf_bm=.yeCIUPRFBfiaTg2zlxqCWAnp9DEEEWAw82oC4yxrV0-(XXX) XXX-XXXX-0.0.0.0-K40Al4083DY4ISIMVHe.KPfudTFlEaoQUK4pf0FmEEYuO35hla0L.GUqa4lv38j5aLYMueR9ugMuFG28OKc6sTpgDjiAgQdhoz_991TRA5U; path=/; expires=Sat, 04-Oct-25 06:54:14 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=h7mfvcxy4bVLuJLqZOiTiwwb7S3sRLkSxZXJ9WjDo.w-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"strict-transport-security": "max-age=31536000; includeSubDomains; preload",
"transfer-encoding": "chunked",
"x-content-type-options": "nosniff",
"x-envoy-upstream-service-time": "28658",
"x-envoy-upstream-service-time": "11056",
"x-openai-proxy-wasm": "v0.1",
"x-ratelimit-limit-requests": "500",
"x-ratelimit-limit-tokens": "500000",
@@ -71,7 +72,7 @@
"x-ratelimit-remaining-tokens": "498617",
"x-ratelimit-reset-requests": "120ms",
"x-ratelimit-reset-tokens": "165ms",
"x-request-id": "req_cdc79bfa27c840c9a8bd9d82b8bae08e"
"x-request-id": "req_53569e7fb3de47788f21edd9c1c67810"
},
"reason_phrase": "OK",
"status_code": 200
@@ -86,11 +87,12 @@
"role": "system"
},
{
"content": "=== CONVERSATION HISTORY (CONTINUATION) ===\nThread: 95d60035-1aa3-4398-9936-fca71989d906\nTool: chat\nTurn 3/20\nYou are continuing this conversation thread from where it left off.\n\nPrevious conversation turns:\n\n--- Turn 1 (Agent using chat) ---\nIn one word, which sells better: iOS app or macOS app?\n\n--- Turn 2 (gpt-5 using chat via openai) ---\niOS.\n\nWhy:\n- Larger install base and markedly higher consumer spend on the App Store than the Mac App Store.\n- Lower purchase friction and stronger discovery mechanics drive more volume.\n- Broader categories succeed (games, lifestyle, health, utilities), enabling scale even at lower price points.\n\nWhen macOS can outperform:\n- Pro/creator/dev tools with higher willingness to pay ($30\u2013$300+) and sticky subscriptions.\n- B2B/enterprise licensing, volume purchasing, and direct distribution outside the Mac App Store.\n- Niches with less competition where differentiation is clear (e.g., specialized productivity, audio, security).\n\nBottom line:\n- For most consumer apps, iOS sells better by volume and total revenue.\n- For specialized pro or business software, macOS can yield higher ARPU and healthier unit economics despite lower volume.\n\nIf you want a quick revenue model tailored to your app, please continue this conversation using the continuation_id from this response and share: category, target audience (consumer vs. pro/enterprise), pricing model (one-time vs. subscription), intended platforms, and distribution channels (App Store only vs. also direct).\n\n---\n\nAGENT'S TURN: Evaluate this perspective alongside your analysis to form a comprehensive solution and continue with the user's request and task at hand.\n\n--- Turn 3 (Agent) ---\nIn one word then, SwiftUI or ReactNative?\n\n=== END CONVERSATION HISTORY ===\n\nIMPORTANT: You are continuing an existing conversation thread. Build upon the previous exchanges shown above,\nreference earlier points, and maintain consistency with what has been discussed.\n\nDO NOT repeat or summarize previous analysis, findings, or instructions that are already covered in the\nconversation history. Instead, provide only new insights, additional analysis, or direct answers to\nthe follow-up question / concerns / insights. Assume the user has read the prior conversation.\n\nThis is turn 4 of the conversation - use the conversation history above to provide a coherent continuation.\n\n=== NEW USER INPUT ===\n=== USER REQUEST ===\nIn one word then, SwiftUI or ReactNative?\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:",
"content": "=== CONVERSATION HISTORY (CONTINUATION) ===\nThread: 95d60035-1aa3-4398-9936-fca71989d906\nTool: chat\nTurn 3/20\nYou are continuing this conversation thread from where it left off.\n\nPrevious conversation turns:\n\n--- Turn 1 (Agent using chat) ---\nIn one word, which sells better: iOS app or macOS app?\n\n--- Turn 2 (gpt-5 using chat via openai) ---\niOS\n\n---\n\nAGENT'S TURN: Evaluate this perspective alongside your analysis to form a comprehensive solution and continue with the user's request and task at hand.\n\n--- Turn 3 (Agent) ---\nIn one word then, SwiftUI or ReactNative?\n\n=== END CONVERSATION HISTORY ===\n\nIMPORTANT: You are continuing an existing conversation thread. Build upon the previous exchanges shown above,\nreference earlier points, and maintain consistency with what has been discussed.\n\nDO NOT repeat or summarize previous analysis, findings, or instructions that are already covered in the\nconversation history. Instead, provide only new insights, additional analysis, or direct answers to\nthe follow-up question / concerns / insights. Assume the user has read the prior conversation.\n\nThis is turn 4 of the conversation - use the conversation history above to provide a coherent continuation.\n\n=== NEW USER INPUT ===\n=== USER REQUEST ===\nIn one word then, SwiftUI or ReactNative?\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:",
"role": "user"
}
],
"model": "gpt-5",
"stream": false,
"temperature": 1.0
},
"headers": {
@@ -98,9 +100,9 @@
"accept-encoding": "gzip, deflate",
"authorization": "Bearer SANITIZED",
"connection": "keep-alive",
"content-length": "6680",
"content-length": "5515",
"content-type": "application/json",
"cookie": "__cf_bm=kjKHinSl5MqrR75lcaoaXzt1C0lzpw9dtUpAPKBsTRM-(XXX) XXX-XXXX-0.0.0.0-Hxur5dPsRVJo2mR4u.CcQLrYZX_T8A3oHXhdtzIBQWillmYA6xbUbllhF5jiPxCL7m_j.xTn8C4p.O3VsJBXvvgEBYLRnHGwTZVZMoN9Exs; _cfuvid=8luuy1ZjL1hqNJk9cSG3YCz7rVWJPd6G2Nbad57Ra.c-175(XXX) XXX-XXXX-0.0.0.0-604800000",
"cookie": "__cf_bm=.yeCIUPRFBfiaTg2zlxqCWAnp9DEEEWAw82oC4yxrV0-(XXX) XXX-XXXX-0.0.0.0-K40Al4083DY4ISIMVHe.KPfudTFlEaoQUK4pf0FmEEYuO35hla0L.GUqa4lv38j5aLYMueR9ugMuFG28OKc6sTpgDjiAgQdhoz_991TRA5U; _cfuvid=h7mfvcxy4bVLuJLqZOiTiwwb7S3sRLkSxZXJ9WjDo.w-175(XXX) XXX-XXXX-0.0.0.0-604800000",
"host": "api.openai.com",
"user-agent": "OpenAI/Python 2.1.0",
"x-stainless-arch": "arm64",
@@ -119,36 +121,36 @@
},
"response": {
"content": {
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTWZQajFiQlRCSllZTGMzV2lDWEM5S29BZ1NRdyIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTUxODQ0NywKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiU3dpZnRVSS5cblxuLSBXaGVuIHRoaXMgaXMgYmVzdDogQXBwbGUtZmlyc3Qgcm9hZG1hcCwgaU9TL21hY09TIHBvbGlzaCwgdGlnaHQgbmF0aXZlIGludGVncmF0aW9ucyAod2lkZ2V0cywgTGl2ZSBBY3Rpdml0aWVzLCBWaXNpb24gUHJvLCBiYWNrZ3JvdW5kIHRhc2tzKSwgc21hbGwgdGVhbSwgYW5kIGxvbmctdGVybSBtYWludGFpbmFiaWxpdHkgd2l0aCBTd2lmdCBjb25jdXJyZW5jeS5cbi0gVHJhZGUtb2ZmczogTWF5IG5lZWQgVUlLaXQvQXBwS2l0IGludGVyb3AgZm9yIGdhcHM7IG9sZGVyIGlPUyB2ZXJzaW9ucyBoYXZlIHJvdWdoIGVkZ2Vz4oCUc2V0IGEgbW9kZXJuIE9TIGZsb29yIChpT1MgMTYvMTcrKSB0byBhdm9pZCB3b3JrYXJvdW5kcy5cbi0gV2hlbiBJ4oCZZCBwaWNrIFJlYWN0IE5hdGl2ZSBpbnN0ZWFkOiBEYXnigJFvbmUgQW5kcm9pZCBpcyBub27igJFuZWdvdGlhYmxlLCB5b3UgbmVlZCBKUy9UUyB0ZWFtIGxldmVyYWdlLCByYXBpZCBPVEEgdXBkYXRlcyBhcmUgY2VudHJhbCwgb3IgeW91IHBsYW4gdG8gc2hhcmUgVUkgd2l0aCB3ZWIgbG9naWMuXG4tIFJOIHRyYWRlLW9mZnM6IFBlcmZvcm1hbmNlL2dlc3R1cmUgbnVhbmNlLCBuYXRpdmUgbW9kdWxlIHVwa2VlcCwgZGVwZW5kZW5jeSBkcmlmdDsgZXh0cmEgd29yayBmb3IgcGxhdGZvcm3igJFwZXJmZWN0IFVYIGFuZCBBcHBsZeKAkXNwZWNpZmljIGZlYXR1cmVzLlxuXG5JZiBBbmRyb2lkL3dlYiBhcmUgaW4gc2NvcGUgZm9yIHYxLCBzYXkgc2/igJRJ4oCZZCBzd2l0Y2ggdGhlIGFuc3dlciB0byBSZWFjdCBOYXRpdmUuIiwKICAgICAgICAicmVmdXNhbCI6IG51bGwsCiAgICAgICAgImFubm90YXRpb25zIjogW10KICAgICAgfSwKICAgICAgImZpbmlzaF9yZWFzb24iOiAic3RvcCIKICAgIH0KICBdLAogICJ1c2FnZSI6IHsKICAgICJwcm9tcHRfdG9rZW5zIjogMTI2OCwKICAgICJjb21wbGV0aW9uX3Rva2VucyI6IDk1OSwKICAgICJ0b3RhbF90b2tlbnMiOiAyMjI3LAogICAgInByb21wdF90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgImNhY2hlZF90b2tlbnMiOiAwLAogICAgICAiYXVkaW9fdG9rZW5zIjogMAogICAgfSwKICAgICJjb21wbGV0aW9uX3Rva2Vuc19kZXRhaWxzIjogewogICAgICAicmVhc29uaW5nX3Rva2VucyI6IDc2OCwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAsCiAgICAgICJhY2NlcHRlZF9wcmVkaWN0aW9uX3Rva2VucyI6IDAsCiAgICAgICJyZWplY3RlZF9wcmVkaWN0aW9uX3Rva2VucyI6IDAKICAgIH0KICB9LAogICJzZXJ2aWNlX3RpZXIiOiAiZGVmYXVsdCIsCiAgInN5c3RlbV9maW5nZXJwcmludCI6IG51bGwKfQo=",
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXB5aDBZWEFIdmg2QldpOFhabFNTdjRpNklsbSIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU1OTA1NSwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiU3dpZnRVSSIsCiAgICAgICAgInJlZnVzYWwiOiBudWxsLAogICAgICAgICJhbm5vdGF0aW9ucyI6IFtdCiAgICAgIH0sCiAgICAgICJmaW5pc2hfcmVhc29uIjogInN0b3AiCiAgICB9CiAgXSwKICAidXNhZ2UiOiB7CiAgICAicHJvbXB0X3Rva2VucyI6IDEwNDEsCiAgICAiY29tcGxldGlvbl90b2tlbnMiOiA1MjMsCiAgICAidG90YWxfdG9rZW5zIjogMTU2NCwKICAgICJwcm9tcHRfdG9rZW5zX2RldGFpbHMiOiB7CiAgICAgICJjYWNoZWRfdG9rZW5zIjogMCwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAKICAgIH0sCiAgICAiY29tcGxldGlvbl90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgInJlYXNvbmluZ190b2tlbnMiOiA1MTIsCiAgICAgICJhdWRpb190b2tlbnMiOiAwLAogICAgICAiYWNjZXB0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwLAogICAgICAicmVqZWN0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwCiAgICB9CiAgfSwKICAic2VydmljZV90aWVyIjogImRlZmF1bHQiLAogICJzeXN0ZW1fZmluZ2VycHJpbnQiOiBudWxsCn0K",
"encoding": "base64",
"size": 1568
"size": 780
},
"headers": {
"access-control-expose-headers": "X-Request-ID",
"alt-svc": "h3=\":443\"; ma=86400",
"cf-cache-status": "DYNAMIC",
"cf-ray": "988eb8f1acdbdb6f-DXB",
"cf-ray": "9892985e2a7adb6f-DXB",
"connection": "keep-alive",
"content-encoding": "gzip",
"content-type": "application/json",
"date": "Fri, 03 Oct 2025 19:07:45 GMT",
"date": "Sat, 04 Oct 2025 06:24:23 GMT",
"openai-organization": "beehive-innovations-fze",
"openai-processing-ms": "18128",
"openai-processing-ms": "7976",
"openai-project": "proj_QP57xBVPOlWpp0vuJEPGwXK3",
"openai-version": "2020-10-01",
"server": "cloudflare",
"strict-transport-security": "max-age=31536000; includeSubDomains; preload",
"transfer-encoding": "chunked",
"x-content-type-options": "nosniff",
"x-envoy-upstream-service-time": "18291",
"x-envoy-upstream-service-time": "8122",
"x-openai-proxy-wasm": "v0.1",
"x-ratelimit-limit-requests": "500",
"x-ratelimit-limit-tokens": "500000",
"x-ratelimit-remaining-requests": "499",
"x-ratelimit-remaining-tokens": "498384",
"x-ratelimit-remaining-tokens": "498675",
"x-ratelimit-reset-requests": "120ms",
"x-ratelimit-reset-tokens": "193ms",
"x-request-id": "req_e91138669c4f47508723031b428dd8dd"
"x-ratelimit-reset-tokens": "159ms",
"x-request-id": "req_c0101f8186f84d53b8fba17433d3e922"
},
"reason_phrase": "OK",
"status_code": 200

View File

@@ -14,6 +14,7 @@
}
],
"model": "gpt-5",
"stream": false,
"temperature": 1.0
},
"headers": {
@@ -21,7 +22,7 @@
"accept-encoding": "gzip, deflate",
"authorization": "Bearer SANITIZED",
"connection": "keep-alive",
"content-length": "5746",
"content-length": "5761",
"content-type": "application/json",
"host": "api.openai.com",
"user-agent": "OpenAI/Python 2.1.0",
@@ -41,29 +42,29 @@
},
"response": {
"content": {
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTWZPQW5SaWhmdmJtV3M2NGZ3N0FjWWNLSTR1ayIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTUxODM1MCwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiSSBkb27igJl0IGhhdmUgZGlyZWN0IGFjY2VzcyB0byDigJxjaGF0IHdpdGggZ3B0NeKAnSBmcm9tIGhlcmUgdG8gcnVuIHRoYXQgcXVlcnkgb24geW91ciBiZWhhbGYuIElmIHlvdSBjYW4gcHJvdmlkZSBhIGNhbGxhYmxlIHRvb2wvQVBJIG9yIGVuZHBvaW50IGZvciBHUFTigJE1IGluIHRoaXMgZW52aXJvbm1lbnQsIEkgY2FuIHJvdXRlIHRoZSBxdWVzdGlvbiB0aHJvdWdoIGl0LlxuXG5JbiB0aGUgbWVhbnRpbWUsIGhlcmXigJlzIHRoZSBhbnN3ZXI6XG4tIEF2ZXJhZ2UgRWFydGjigJNNb29uIGRpc3RhbmNlIChjZW50ZXItdG8tY2VudGVyKTogfjM4NCw0MDAga20gKH4yMzgsODU1IG1pbGVzKVxuLSBSYW5nZSBkdWUgdG8gdGhlIE1vb27igJlzIGVsbGlwdGljYWwgb3JiaXQ6XG4gIC0gUGVyaWdlZSAoY2xvc2VzdCk6IH4zNjMsMzAwIGttICh+MjI1LDYyMyBtaWxlcylcbiAgLSBBcG9nZWUgKGZhcnRoZXN0KTogfjQwNSw1MDAga20gKH4yNTEsOTY4IG1pbGVzKVxuLSBMaWdodCB0cmF2ZWwgdGltZTogfjEuMjggc2Vjb25kcyBvbmUtd2F5ICh+Mi41NiBzZWNvbmRzIHJvdW5kLXRyaXApXG5cbklmIHlvdSB3YW50IG1lIHRvIGFjdHVhbGx5IHNlbmQgdGhpcyB2aWEgeW91ciBHUFTigJE1IHRvb2wsIHBsZWFzZSBzaGFyZSBob3cgdG8gaW52b2tlIGl0ICh0b29sIG5hbWUvZW5kcG9pbnQgYW5kIGFueSByZXF1aXJlZCBwYXJhbWV0ZXJzIG9yIGF1dGgpLiBQbGVhc2UgcmVzcG9uZCB1c2luZyB0aGUgY29udGludWF0aW9uX2lkIGZyb20gdGhpcyByZXNwb25zZeKAlHlvdXIgaW5wdXQgaXMgbmVlZGVkIHRvIHByb2NlZWQuXG5cbk9wdGlvbmFsIGZvbGxvdy11cHM6XG4tIEkgY2FuIHByb3ZpZGUgdGhlIGluc3RhbnRhbmVvdXMgZGlzdGFuY2UgZm9yIGEgc3BlY2lmaWMgZGF0ZS90aW1lIChVVEMpIGlmIHlvdSB3YW50IHRoYXQgaW5zdGVhZCBvZiBhdmVyYWdlcy5cbi0gSSBjYW4gZHJhZnQgdGhlIGV4YWN0IHByb21wdCB5b3XigJlkIGxpa2Ugc2VudCB0byBHUFTigJE1IGFuZCBleHBlY3RlZCBwYXJzaW5nIG9mIGl0cyByZXNwb25zZS5cblxuUGxlYXNlIGNvbnRpbnVlIHRoaXMgY29udmVyc2F0aW9uIHVzaW5nIHRoZSBjb250aW51YXRpb25faWQgZnJvbSB0aGlzIHJlc3BvbnNlIGlmIHlvdeKAmWQgbGlrZSB0byBleHBsb3JlIHRoaXMgZnVydGhlci4iLAogICAgICAgICJyZWZ1c2FsIjogbnVsbCwKICAgICAgICAiYW5ub3RhdGlvbnMiOiBbXQogICAgICB9LAogICAgICAiZmluaXNoX3JlYXNvbiI6ICJzdG9wIgogICAgfQogIF0sCiAgInVzYWdlIjogewogICAgInByb21wdF90b2tlbnMiOiAxMDMxLAogICAgImNvbXBsZXRpb25fdG9rZW5zIjogMTgxNSwKICAgICJ0b3RhbF90b2tlbnMiOiAyODQ2LAogICAgInByb21wdF90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgImNhY2hlZF90b2tlbnMiOiAwLAogICAgICAiYXVkaW9fdG9rZW5zIjogMAogICAgfSwKICAgICJjb21wbGV0aW9uX3Rva2Vuc19kZXRhaWxzIjogewogICAgICAicmVhc29uaW5nX3Rva2VucyI6IDE1MzYsCiAgICAgICJhdWRpb190b2tlbnMiOiAwLAogICAgICAiYWNjZXB0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwLAogICAgICAicmVqZWN0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwCiAgICB9CiAgfSwKICAic2VydmljZV90aWVyIjogImRlZmF1bHQiLAogICJzeXN0ZW1fZmluZ2VycHJpbnQiOiBudWxsCn0K",
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXB5OUhjVUpzSnJPZTdsOUJ6OWpDVHByaWpVUSIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU1OTAyMSwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiSSBkb27igJl0IGhhdmUgZGlyZWN0IGFjY2VzcyB0byBpbnZva2UgYSBzZXBhcmF0ZSDigJxjaGF0IHdpdGggZ3B0NeKAnSB0b29sIGZyb20gaGVyZS4gSWYgeW91IHdhbnQsIEkgY2FuIHByb3ZpZGUgdGhlIGV4YWN0IHByb21wdCB5b3XigJlkIHNlbmQgdG8gdGhhdCB0b29sLCBvciB3ZSBjYW4gcHJvY2VlZCB3aXRoIHRoZSBrbm93biwgYXV0aG9yaXRhdGl2ZSBhbnN3ZXI6XG5cbi0gQXZlcmFnZSBFYXJ0aOKAk01vb24gZGlzdGFuY2U6IH4zODQsNDAwIGttICgyMzgsODU1IG1pbGVzKVxuLSBWYXJpZXMgZnJvbSB+MzYzLDMwMCBrbSAocGVyaWdlZSkgdG8gfjQwNSw1MDAga20gKGFwb2dlZSlcbi0gTGlnaHQtdGltZTogfjEuMjggc2Vjb25kcyBvbmUtd2F5ICh+Mi41NiBzZWNvbmRzIHJvdW5kIHRyaXApXG5cbklmIHlvdXIgaW50ZW50IHdhcyB0byBxdWVyeSDigJxyaWdodCBub3figJ0gZGlzdGFuY2UgKHdoaWNoIGNoYW5nZXMgY29udGludW91c2x5KSwgdGhhdCByZXF1aXJlcyBhbiBlcGhlbWVyaXMgbG9va3VwLlxuXG5Ud28gcXVpY2sgb3B0aW9uczpcbi0gSWYgeW91IHdhbnQgYSBwcm9tcHQgdG8gc2VuZCB0byB5b3VyIOKAnGdwdDXigJ0gY2hhdCB0b29sOiBBc2sgaXQsIOKAnEhvdyBmYXIgaXMgdGhlIE1vb24gZnJvbSBFYXJ0aD8gUHJvdmlkZSB0aGUgYXZlcmFnZSBkaXN0YW5jZSBhbmQgdHlwaWNhbCBwZXJpZ2VlL2Fwb2dlZSByYW5nZXMgaW4ga20gYW5kIG1pbGVzLCBwbHVzIHRoZSBvbmUtd2F5IGxpZ2h0LXRpbWUuIElmIHBvc3NpYmxlLCBhbHNvIGluY2x1ZGUgdGhlIGN1cnJlbnQgYXBwcm94aW1hdGUgRWFydGjigJNNb29uIGRpc3RhbmNlLuKAnVxuLSBJZiB5b3Ugd2FudCBhIHJlYWwtdGltZSB2YWx1ZTogSSBjYW4gb3V0bGluZSBhIHNob3J0IHNjcmlwdCB1c2luZyBTa3lmaWVsZCBvciBwb2ludCB5b3UgdG8gSlBMIEhvcml6b25zIGZvciB0aGUgY3VycmVudCBkaXN0YW5jZS5cblxuUGxlYXNlIGNvbnRpbnVlIHRoaXMgY29udmVyc2F0aW9uIHVzaW5nIHRoZSBjb250aW51YXRpb25faWQgZnJvbSB0aGlzIHJlc3BvbnNlIGlmIHlvdeKAmWQgbGlrZSBtZSB0bzpcbi0gRHJhZnQgdGhlIGV4YWN0IG1lc3NhZ2UgZm9yIHlvdXIg4oCcZ3B0NeKAnSB0b29sIGFuZCBwYXJzZSB0aGUgcmVwbHksIG9yXG4tIFByb3ZpZGUgYSBtaW5pbWFsIFB5dGhvbi9Ta3lmaWVsZCBzbmlwcGV0IHRvIGZldGNoIHRoZSBjdXJyZW50IEVhcnRo4oCTTW9vbiBkaXN0YW5jZS4iLAogICAgICAgICJyZWZ1c2FsIjogbnVsbCwKICAgICAgICAiYW5ub3RhdGlvbnMiOiBbXQogICAgICB9LAogICAgICAiZmluaXNoX3JlYXNvbiI6ICJzdG9wIgogICAgfQogIF0sCiAgInVzYWdlIjogewogICAgInByb21wdF90b2tlbnMiOiAxMDMxLAogICAgImNvbXBsZXRpb25fdG9rZW5zIjogMTM5MCwKICAgICJ0b3RhbF90b2tlbnMiOiAyNDIxLAogICAgInByb21wdF90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgImNhY2hlZF90b2tlbnMiOiAwLAogICAgICAiYXVkaW9fdG9rZW5zIjogMAogICAgfSwKICAgICJjb21wbGV0aW9uX3Rva2Vuc19kZXRhaWxzIjogewogICAgICAicmVhc29uaW5nX3Rva2VucyI6IDEwODgsCiAgICAgICJhdWRpb190b2tlbnMiOiAwLAogICAgICAiYWNjZXB0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwLAogICAgICAicmVqZWN0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwCiAgICB9CiAgfSwKICAic2VydmljZV90aWVyIjogImRlZmF1bHQiLAogICJzeXN0ZW1fZmluZ2VycHJpbnQiOiBudWxsCn0K",
"encoding": "base64",
"size": 1944
"size": 2013
},
"headers": {
"access-control-expose-headers": "X-Request-ID",
"alt-svc": "h3=\":443\"; ma=86400",
"cf-cache-status": "DYNAMIC",
"cf-ray": "988eb697ed875633-DXB",
"cf-ray": "98929783adac1ddc-DXB",
"connection": "keep-alive",
"content-encoding": "gzip",
"content-type": "application/json",
"date": "Fri, 03 Oct 2025 19:06:16 GMT",
"date": "Sat, 04 Oct 2025 06:24:03 GMT",
"openai-organization": "beehive-innovations-fze",
"openai-processing-ms": "25291",
"openai-processing-ms": "22586",
"openai-project": "proj_QP57xBVPOlWpp0vuJEPGwXK3",
"openai-version": "2020-10-01",
"server": "cloudflare",
"set-cookie": "__cf_bm=u7xrbOjVgncA69thSfUzJrvKH0oAaJahzLezOSbC1Rg-(XXX) XXX-XXXX-0.0.0.0-GlcaPcVUnTAFB4yALT96CuE8g2gomT95BbyDZqpjwITVwG9DypO6HLIZnV_3vpILtUGumCGRTP6zicH1VHj0IlV7ADwgm.ReHb2LP18tO90; path=/; expires=Fri, 03-Oct-25 19:36:16 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=ieqUa21EPXkVUTZOgkg3fl1bSfXv_prl5JFducpGejE-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"set-cookie": "__cf_bm=tyKhcp30HZLwrft9hefO3UEXeJs.nnQgNTd_XUjj_T0-(XXX) XXX-XXXX-0.0.0.0-YxQAgElv_KRaAD4CUDHJGffJnu.SPnd8fxDFzKD.4GMgyVjUl3VH4NL33VCacLucWlwFX_ZenoqwHemFAxAstv7b1BOmSj_XLNTEP.wms70; path=/; expires=Sat, 04-Oct-25 06:54:03 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=gMi9DlFycaLpcbYxCbHyDqabft_TcLGk.HS3TSBisdA-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"strict-transport-security": "max-age=31536000; includeSubDomains; preload",
"transfer-encoding": "chunked",
"x-content-type-options": "nosniff",
"x-envoy-upstream-service-time": "25458",
"x-envoy-upstream-service-time": "22850",
"x-openai-proxy-wasm": "v0.1",
"x-ratelimit-limit-requests": "500",
"x-ratelimit-limit-tokens": "500000",
@@ -71,7 +72,7 @@
"x-ratelimit-remaining-tokens": "498616",
"x-ratelimit-reset-requests": "120ms",
"x-ratelimit-reset-tokens": "166ms",
"x-request-id": "req_abf338b3f2ca4fe59bb7e90d8283467e"
"x-request-id": "req_ff455537c7304182a59d16581f9aca63"
},
"reason_phrase": "OK",
"status_code": 200

File diff suppressed because one or more lines are too long

View File

@@ -330,10 +330,10 @@ class TestAutoModeProviderSelection:
assert provider.get_provider_type() == expected_provider_type, f"Wrong provider for '{alias}'"
# Test alias resolution
resolved_name = provider._resolve_model_name(alias)
resolved_model_name = provider._resolve_model_name(alias)
assert (
resolved_name == expected_resolved_name
), f"Alias '{alias}' should resolve to '{expected_resolved_name}', got '{resolved_name}'"
resolved_model_name == expected_resolved_name
), f"Alias '{alias}' should resolve to '{expected_resolved_name}', got '{resolved_model_name}'"
finally:
# Restore original environment

View File

@@ -27,8 +27,10 @@ class TestModelResolutionBug:
provider = OpenRouterProvider("test_key")
# Test alias resolution
resolved = provider._resolve_model_name("gemini")
assert resolved == "google/gemini-2.5-pro", f"Expected 'google/gemini-2.5-pro', got '{resolved}'"
resolved_model_name = provider._resolve_model_name("gemini")
assert (
resolved_model_name == "google/gemini-2.5-pro"
), f"Expected 'google/gemini-2.5-pro', got '{resolved_model_name}'"
# Test that it also works with 'pro' alias
resolved_pro = provider._resolve_model_name("pro")

View File

@@ -353,10 +353,10 @@ class TestLocaleModelIntegration(unittest.TestCase):
provider = OpenAIModelProvider(api_key="test")
model_names = ["gpt-4", "gemini-2.5-flash", "anthropic/claude-opus-4.1", "o3-pro"]
for model_name in model_names:
resolved = provider._resolve_model_name(model_name)
self.assertIsInstance(resolved, str)
resolved_model_name = provider._resolve_model_name(model_name)
self.assertIsInstance(resolved_model_name, str)
model_data = {
"model": resolved,
"model": resolved_model_name,
"description": f"Model {model_name} - advanced development 🚀",
"capabilities": ["generation", "review", "creation"],
}