refactor: moved temperature method from base provider to model capabilities

docs: added AGENTS.md for onboarding Codex
This commit is contained in:
Fahad
2025-10-02 09:17:36 +04:00
parent 1dc25f6c3d
commit f461cb4519
4 changed files with 57 additions and 46 deletions

27
AGENTS.md Normal file
View File

@@ -0,0 +1,27 @@
# Repository Guidelines
## Project Structure & Module Organization
Zen MCP Server centers on `server.py`, which exposes MCP entrypoints and coordinates multi-model workflows.
Feature-specific tools live in `tools/`, provider integrations in `providers/`, and shared helpers in `utils/`.
Prompt and system context assets stay in `systemprompts/`, while configuration templates and automation scripts live under `conf/`, `scripts/`, and `docker/`.
Unit tests sit in `tests/`; simulator-driven scenarios and log utilities are in `simulator_tests/` with the `communication_simulator_test.py` harness.
Authoritative documentation and samples live in `docs/`, and runtime diagnostics are rotated in `logs/`.
## Build, Test, and Development Commands
- `source .zen_venv/bin/activate` — activate the managed Python environment.
- `./run-server.sh` — install dependencies, refresh `.env`, and launch the MCP server locally.
- `./code_quality_checks.sh` — run Ruff autofix, Black, isort, and the default pytest suite.
- `python communication_simulator_test.py --quick` — smoke-test orchestration across tools and providers.
- `./run_integration_tests.sh [--with-simulator]` — exercise provider-dependent flows against remote or Ollama models.
## Coding Style & Naming Conventions
Target Python 3.9+ with Black and isort using a 120-character line limit; Ruff enforces pycodestyle, pyflakes, bugbear, comprehension, and pyupgrade rules. Prefer explicit type hints, snake_case modules, and imperative commit-time docstrings. Extend workflows by defining hook or abstract methods instead of checking `hasattr()`/`getattr()`—inheritance-backed contracts keep behavior discoverable and testable.
## Testing Guidelines
Mirror production modules inside `tests/` and name tests `test_<behavior>` or `Test<Feature>` classes. Run `python -m pytest tests/ -v -m "not integration"` before every commit, adding `--cov=. --cov-report=html` for coverage-sensitive changes. Use `python communication_simulator_test.py --verbose` or `--individual <case>` to validate cross-agent flows, and reserve `./run_integration_tests.sh` for provider or transport modifications. Capture relevant excerpts from `logs/mcp_server.log` or `logs/mcp_activity.log` when documenting failures.
## Commit & Pull Request Guidelines
Follow Conventional Commits: `type(scope): summary`, where `type` is one of `feat`, `fix`, `docs`, `style`, `refactor`, `perf`, `test`, `build`, `ci`, or `chore`. Keep commits focused, referencing issues or simulator cases when helpful. Pull requests should outline intent, list validation commands executed, flag configuration or tool toggles, and attach screenshots or log snippets when user-visible behavior changes.
## Security & Configuration Tips
Store API keys and provider URLs in `.env` or your MCP client config; never commit secrets or generated log artifacts. Use `run-server.sh` to regenerate environments and verify connectivity after dependency changes. When adding providers or tools, sanitize prompts and responses, document required environment variables in `docs/`, and update `claude_config_example.json` if new capabilities ship by default.

View File

@@ -83,43 +83,6 @@ class ModelProvider(ABC):
"""Validate if the model name is supported by this provider."""
pass
def get_effective_temperature(self, model_name: str, requested_temperature: float) -> Optional[float]:
"""Get the effective temperature to use for a model given a requested temperature.
This method handles:
- Models that don't support temperature (returns None)
- Fixed temperature models (returns the fixed value)
- Clamping to min/max range for models with constraints
Args:
model_name: The model to get temperature for
requested_temperature: The temperature requested by the user/tool
Returns:
The effective temperature to use, or None if temperature shouldn't be passed
"""
try:
capabilities = self.get_capabilities(model_name)
# Check if model supports temperature at all
if not capabilities.supports_temperature:
return None
# Use temperature constraint to get corrected value
corrected_temp = capabilities.temperature_constraint.get_corrected_value(requested_temperature)
if corrected_temp != requested_temperature:
logger.debug(
f"Adjusting temperature from {requested_temperature} to {corrected_temp} for model {model_name}"
)
return corrected_temp
except Exception as e:
logger.debug(f"Could not determine effective temperature for {model_name}: {e}")
# If we can't get capabilities, return the requested temperature
return requested_temperature
def validate_parameters(self, model_name: str, temperature: float, **kwargs) -> None:
"""Validate model parameters against capabilities.

View File

@@ -469,8 +469,22 @@ class OpenAICompatibleProvider(ModelProvider):
if not self.validate_model_name(model_name):
raise ValueError(f"Model '{model_name}' not in allowed models list. Allowed models: {self.allowed_models}")
# Get effective temperature for this model
effective_temperature = self.get_effective_temperature(model_name, temperature)
capabilities: Optional[ModelCapabilities]
try:
capabilities = self.get_capabilities(model_name)
except Exception as exc:
logging.debug(f"Falling back to generic capabilities for {model_name}: {exc}")
capabilities = None
# Get effective temperature for this model from capabilities when available
if capabilities:
effective_temperature = capabilities.get_effective_temperature(temperature)
if effective_temperature is not None and effective_temperature != temperature:
logging.debug(
f"Adjusting temperature from {temperature} to {effective_temperature} for model {model_name}"
)
else:
effective_temperature = temperature
# Only validate if temperature is not None (meaning the model supports it)
if effective_temperature is not None:
@@ -482,13 +496,6 @@ class OpenAICompatibleProvider(ModelProvider):
if system_prompt:
messages.append({"role": "system", "content": system_prompt})
# Resolve capabilities once for vision/temperature checks
try:
capabilities = self.get_capabilities(model_name)
except Exception as exc:
logging.debug(f"Falling back to generic capabilities for {model_name}: {exc}")
capabilities = None
# Prepare user message with text and potentially images
user_content = []
user_content.append({"type": "text", "text": prompt})

View File

@@ -1,6 +1,7 @@
"""Dataclass describing the feature set of a model exposed by a provider."""
from dataclasses import dataclass, field
from typing import Optional
from .provider_type import ProviderType
from .temperature import RangeTemperatureConstraint, TemperatureConstraint
@@ -33,6 +34,19 @@ class ModelCapabilities:
default_factory=lambda: RangeTemperatureConstraint(0.0, 2.0, 0.3)
)
def get_effective_temperature(self, requested_temperature: float) -> Optional[float]:
"""Return the temperature that should be sent to the provider.
Models that do not support temperature return ``None`` so that callers
can omit the parameter entirely. For supported models, the configured
constraint clamps the requested value into a provider-safe range.
"""
if not self.supports_temperature:
return None
return self.temperature_constraint.get_corrected_value(requested_temperature)
@staticmethod
def collect_aliases(model_configs: dict[str, "ModelCapabilities"]) -> dict[str, list[str]]:
"""Build a mapping of model name to aliases from capability configs."""