feat: centralize environment handling so ZEN_MCP_FORCE_ENV_OVERRIDE is honored correctly
fix: update tests to override the env variables they need instead of relying on the current values from .env
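The call sites throughout this diff import from a new utils/env.py module whose contents are not visible in this view. As a rough sketch of the contract those call sites imply — get_env, env_override_enabled, and reload_env, with ZEN_MCP_FORCE_ENV_OVERRIDE read from the .env file itself rather than the system environment — the module plausibly looks like this (the shipped implementation may differ):

# utils/env.py -- hedged sketch inferred from the call sites in this diff,
# not the actual module shipped with the commit.
import os
from pathlib import Path
from typing import Optional

try:
    from dotenv import dotenv_values
except ImportError:  # dotenv is optional, e.g. under uvx or minimal installs
    dotenv_values = None

_ENV_FILE = Path(__file__).resolve().parent.parent / ".env"
_dotenv_values: dict = {}
_force_override = False


def reload_env(overrides: Optional[dict] = None) -> None:
    """Re-read .env; tests pass overrides (e.g. ZEN_MCP_FORCE_ENV_OVERRIDE)
    to pin behavior without touching the file."""
    global _dotenv_values, _force_override
    _dotenv_values = dict(dotenv_values(_ENV_FILE)) if dotenv_values and _ENV_FILE.exists() else {}
    if overrides:
        _dotenv_values.update(overrides)
    # Honor the flag as written in .env (not the system environment),
    # mirroring the bootstrap logic removed from server.py below.
    _force_override = str(_dotenv_values.get("ZEN_MCP_FORCE_ENV_OVERRIDE", "false")).lower() == "true"


def env_override_enabled() -> bool:
    return _force_override


def get_env(name: str, default: Optional[str] = None) -> Optional[str]:
    """Single lookup point: .env wins when the override is enabled,
    otherwise the process environment wins."""
    if _force_override:
        return _dotenv_values.get(name, os.environ.get(name, default))
    return os.environ.get(name, _dotenv_values.get(name, default))


reload_env()

Note the get_env(...) or default pattern used at the call sites below: it guards against an empty string stored in .env, which get_env would otherwise hand back as-is.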
config.py
@@ -8,7 +8,7 @@ constants used throughout the application.
 Configuration values can be overridden by environment variables where appropriate.
 """
 
-import os
+from utils.env import get_env
 
 # Version and metadata
 # These values are used in server responses and for tracking releases
@@ -25,7 +25,7 @@ __author__ = "Fahad Gilani"
 # This should be a stable, high-performance model suitable for code analysis
 # Can be overridden by setting DEFAULT_MODEL environment variable
 # Special value "auto" means Claude should pick the best model for each task
-DEFAULT_MODEL = os.getenv("DEFAULT_MODEL", "auto")
+DEFAULT_MODEL = get_env("DEFAULT_MODEL", "auto") or "auto"
 
 # Auto mode detection - when DEFAULT_MODEL is "auto", Claude picks the model
 IS_AUTO_MODE = DEFAULT_MODEL.lower() == "auto"
@@ -61,7 +61,7 @@ TEMPERATURE_CREATIVE = 0.7  # For architecture, deep thinking
 # Thinking Mode Defaults
 # DEFAULT_THINKING_MODE_THINKDEEP: Default thinking depth for extended reasoning tool
 # Higher modes use more computational budget but provide deeper analysis
-DEFAULT_THINKING_MODE_THINKDEEP = os.getenv("DEFAULT_THINKING_MODE_THINKDEEP", "high")
+DEFAULT_THINKING_MODE_THINKDEEP = get_env("DEFAULT_THINKING_MODE_THINKDEEP", "high") or "high"
 
 # Consensus Tool Defaults
 # Consensus timeout and rate limiting settings
@@ -117,7 +117,7 @@ def _calculate_mcp_prompt_limit() -> int:
         Maximum character count for user input prompts
     """
     # Check for Claude's MAX_MCP_OUTPUT_TOKENS environment variable
-    max_tokens_str = os.getenv("MAX_MCP_OUTPUT_TOKENS")
+    max_tokens_str = get_env("MAX_MCP_OUTPUT_TOKENS")
 
     if max_tokens_str:
         try:
@@ -143,7 +143,7 @@ MCP_PROMPT_SIZE_LIMIT = _calculate_mcp_prompt_limit()
 # Examples: "fr-FR", "en-US", "zh-CN", "zh-TW", "ja-JP", "ko-KR", "es-ES",
 # "de-DE", "it-IT", "pt-PT"
 # Leave empty for default language (English)
-LOCALE = os.getenv("LOCALE", "")
+LOCALE = get_env("LOCALE", "") or ""
 
 # Threading configuration
 # Simple in-memory conversation threading for stateless MCP environment

@@ -6,6 +6,15 @@ Health check script for Zen MCP Server Docker container
 import os
 import subprocess
 import sys
 from pathlib import Path
 
+try:
+    from utils.env import get_env
+except ImportError:  # pragma: no cover - resolves module path inside container
+    project_root = Path(__file__).resolve().parents[2]
+    if str(project_root) not in sys.path:
+        sys.path.insert(0, str(project_root))
+    from utils.env import get_env  # type: ignore[import-error]
+
+
 def check_process():
@@ -63,14 +72,14 @@ def check_environment():
         "OPENROUTER_API_KEY",
     ]
 
-    has_api_key = any(os.getenv(key) for key in api_keys)
+    has_api_key = any(get_env(key) for key in api_keys)
     if not has_api_key:
         print("No API keys found in environment", file=sys.stderr)
         return False
 
     # Validate API key formats (basic checks)
    for key in api_keys:
-        value = os.getenv(key)
+        value = get_env(key)
         if value:
             if len(value.strip()) < 10:
                 print(f"API key {key} appears too short or invalid", file=sys.stderr)

@@ -1,9 +1,10 @@
 """Custom API provider implementation."""
 
 import logging
-import os
 from typing import Optional
 
+from utils.env import get_env
+
 from .openai_compatible import OpenAICompatibleProvider
 from .openrouter_registry import OpenRouterModelRegistry
 from .shared import ModelCapabilities, ProviderType
@@ -56,9 +57,9 @@ class CustomProvider(OpenAICompatibleProvider):
         """
         # Fall back to environment variables only if not provided
         if not base_url:
-            base_url = os.getenv("CUSTOM_API_URL", "")
+            base_url = get_env("CUSTOM_API_URL", "") or ""
         if not api_key:
-            api_key = os.getenv("CUSTOM_API_KEY", "")
+            api_key = get_env("CUSTOM_API_KEY", "") or ""
 
         if not base_url:
             raise ValueError(

@@ -1,10 +1,11 @@
 """DIAL (Data & AI Layer) model provider implementation."""
 
 import logging
 import os
 import threading
 from typing import Optional
 
+from utils.env import get_env
 
 from .openai_compatible import OpenAICompatibleProvider
 from .shared import ModelCapabilities, ModelResponse, ProviderType, TemperatureConstraint
@@ -209,7 +210,7 @@ class DIALModelProvider(OpenAICompatibleProvider):
             **kwargs: Additional configuration options
         """
         # Get DIAL API host from environment or kwargs
-        dial_host = kwargs.get("base_url") or os.getenv("DIAL_API_HOST") or "https://core.dialx.ai"
+        dial_host = kwargs.get("base_url") or get_env("DIAL_API_HOST") or "https://core.dialx.ai"
 
         # DIAL uses /openai endpoint for OpenAI-compatible API
         if not dial_host.endswith("/openai"):
@@ -218,7 +219,7 @@ class DIALModelProvider(OpenAICompatibleProvider):
         kwargs["base_url"] = dial_host
 
         # Get API version from environment or use default
-        self.api_version = os.getenv("DIAL_API_VERSION", "2024-12-01-preview")
+        self.api_version = get_env("DIAL_API_VERSION", "2024-12-01-preview") or "2024-12-01-preview"
 
         # Add DIAL-specific headers
         # DIAL uses Api-Key header instead of Authorization: Bearer

@@ -3,12 +3,12 @@
 import copy
 import ipaddress
 import logging
-import os
 from typing import Optional
 from urllib.parse import urlparse
 
 from openai import OpenAI
 
+from utils.env import get_env
 from utils.image_utils import validate_image
 
 from .base import ModelProvider
@@ -112,7 +112,7 @@ class OpenAICompatibleProvider(ModelProvider):
         # Get provider-specific allowed models
         provider_type = self.get_provider_type().value.upper()
         env_var = f"{provider_type}_ALLOWED_MODELS"
-        models_str = os.getenv(env_var, "")
+        models_str = get_env(env_var, "") or ""
 
         if models_str:
             # Parse and normalize to lowercase for case-insensitive comparison
@@ -165,10 +165,25 @@
             logging.info(f"Using extended timeouts for custom endpoint: {self.base_url}")
 
         # Allow override via kwargs or environment variables in future, for now...
-        connect_timeout = kwargs.get("connect_timeout", float(os.getenv("CUSTOM_CONNECT_TIMEOUT", default_connect)))
-        read_timeout = kwargs.get("read_timeout", float(os.getenv("CUSTOM_READ_TIMEOUT", default_read)))
-        write_timeout = kwargs.get("write_timeout", float(os.getenv("CUSTOM_WRITE_TIMEOUT", default_write)))
-        pool_timeout = kwargs.get("pool_timeout", float(os.getenv("CUSTOM_POOL_TIMEOUT", default_pool)))
+        connect_timeout = kwargs.get("connect_timeout")
+        if connect_timeout is None:
+            connect_timeout_raw = get_env("CUSTOM_CONNECT_TIMEOUT")
+            connect_timeout = float(connect_timeout_raw) if connect_timeout_raw is not None else float(default_connect)
+
+        read_timeout = kwargs.get("read_timeout")
+        if read_timeout is None:
+            read_timeout_raw = get_env("CUSTOM_READ_TIMEOUT")
+            read_timeout = float(read_timeout_raw) if read_timeout_raw is not None else float(default_read)
+
+        write_timeout = kwargs.get("write_timeout")
+        if write_timeout is None:
+            write_timeout_raw = get_env("CUSTOM_WRITE_TIMEOUT")
+            write_timeout = float(write_timeout_raw) if write_timeout_raw is not None else float(default_write)
+
+        pool_timeout = kwargs.get("pool_timeout")
+        if pool_timeout is None:
+            pool_timeout_raw = get_env("CUSTOM_POOL_TIMEOUT")
+            pool_timeout = float(pool_timeout_raw) if pool_timeout_raw is not None else float(default_pool)
 
         timeout = httpx.Timeout(connect=connect_timeout, read=read_timeout, write=write_timeout, pool=pool_timeout)

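The four repeated timeout blocks above implement the same precedence: an explicit kwarg wins, then the CUSTOM_*_TIMEOUT value visible to get_env (process environment or .env, depending on ZEN_MCP_FORCE_ENV_OVERRIDE), then the computed default. A hypothetical helper showing that logic once — not code from this commit:

from utils.env import get_env

def _resolve_timeout(kwargs: dict, name: str, env_var: str, default: float) -> float:
    # Precedence: explicit kwarg > environment (via get_env) > default.
    value = kwargs.get(name)
    if value is None:
        raw = get_env(env_var)
        value = float(raw) if raw is not None else float(default)
    return value

# e.g. read_timeout = _resolve_timeout(kwargs, "read_timeout", "CUSTOM_READ_TIMEOUT", default_read)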
@@ -1,9 +1,10 @@
 """OpenRouter provider implementation."""
 
 import logging
-import os
 from typing import Optional
 
+from utils.env import get_env
+
 from .openai_compatible import OpenAICompatibleProvider
 from .openrouter_registry import OpenRouterModelRegistry
 from .shared import (
@@ -35,8 +36,9 @@ class OpenRouterProvider(OpenAICompatibleProvider):
 
     # Custom headers required by OpenRouter
     DEFAULT_HEADERS = {
-        "HTTP-Referer": os.getenv("OPENROUTER_REFERER", "https://github.com/BeehiveInnovations/zen-mcp-server"),
-        "X-Title": os.getenv("OPENROUTER_TITLE", "Zen MCP Server"),
+        "HTTP-Referer": get_env("OPENROUTER_REFERER", "https://github.com/BeehiveInnovations/zen-mcp-server")
+        or "https://github.com/BeehiveInnovations/zen-mcp-server",
+        "X-Title": get_env("OPENROUTER_TITLE", "Zen MCP Server") or "Zen MCP Server",
     }
 
     # Model registry for managing configurations and aliases

@@ -2,10 +2,11 @@
 
 import importlib.resources
 import logging
 import os
 from pathlib import Path
 from typing import Optional
 
+from utils.env import get_env
 
 # Import handled via importlib.resources.files() calls directly
 from utils.file_utils import read_json_file
@@ -50,7 +51,7 @@ class OpenRouterModelRegistry:
             self.config_path = Path(config_path)
         else:
             # Check environment variable first
-            env_path = os.getenv("CUSTOM_MODELS_CONFIG_PATH")
+            env_path = get_env("CUSTOM_MODELS_CONFIG_PATH")
             if env_path:
                 # Environment variable path
                 self.config_path = Path(env_path)

@@ -1,9 +1,10 @@
 """Model provider registry for managing available providers."""
 
 import logging
 import os
 from typing import TYPE_CHECKING, Optional
 
+from utils.env import get_env
 
 from .base import ModelProvider
 from .shared import ProviderType
@@ -102,7 +103,7 @@ class ModelProviderRegistry:
             provider = provider_class(api_key=api_key)
         else:
             # Regular class - need to handle URL requirement
-            custom_url = os.getenv("CUSTOM_API_URL", "")
+            custom_url = get_env("CUSTOM_API_URL", "") or ""
             if not custom_url:
                 if api_key:  # Key is set but URL is missing
                     logging.warning("CUSTOM_API_KEY set but CUSTOM_API_URL missing – skipping Custom provider")
@@ -116,7 +117,7 @@ class ModelProviderRegistry:
         # For Gemini, check if custom base URL is configured
         if not api_key:
             return None
-        gemini_base_url = os.getenv("GEMINI_BASE_URL")
+        gemini_base_url = get_env("GEMINI_BASE_URL")
         provider_kwargs = {"api_key": api_key}
         if gemini_base_url:
             provider_kwargs["base_url"] = gemini_base_url
@@ -327,7 +328,7 @@ class ModelProviderRegistry:
         if not env_var:
             return None
 
-        return os.getenv(env_var)
+        return get_env(env_var)
 
     @classmethod
     def _get_allowed_models_for_provider(cls, provider: ModelProvider, provider_type: ProviderType) -> list[str]:

server.py
@@ -28,35 +28,6 @@ from logging.handlers import RotatingFileHandler
 from pathlib import Path
 from typing import Any, Optional
 
-# Try to load environment variables from .env file if dotenv is available
-# This is optional - environment variables can still be passed directly
-try:
-    from dotenv import dotenv_values, load_dotenv
-
-    # Load environment variables from .env file in the script's directory
-    # This ensures .env is loaded regardless of the current working directory
-    script_dir = Path(__file__).parent
-    env_file = script_dir / ".env"
-
-    # First load only to read ZEN_MCP_FORCE_ENV_OVERRIDE, then reload with proper override setting
-    # Use a temporary environment to read just this configuration variable
-    temp_env = {}
-    if env_file.exists():
-        temp_env = dotenv_values(env_file)
-
-    # Check if we should force override based on .env file content (not system env)
-    force_override = temp_env.get("ZEN_MCP_FORCE_ENV_OVERRIDE", "false").lower() == "true"
-
-    # Load .env file with appropriate override setting
-    load_dotenv(dotenv_path=env_file, override=force_override)
-
-    # Store override setting for logging after logger is configured
-    _zen_mcp_force_override = force_override
-except ImportError:
-    # dotenv not available - this is fine, environment variables can still be passed directly
-    # This commonly happens when running via uvx or in minimal environments
-    pass
-
 from mcp.server import Server  # noqa: E402
 from mcp.server.models import InitializationOptions  # noqa: E402
 from mcp.server.stdio import stdio_server  # noqa: E402
@@ -95,10 +66,11 @@ from tools import ( # noqa: E402
     VersionTool,
 )
 from tools.models import ToolOutput  # noqa: E402
+from utils.env import env_override_enabled, get_env  # noqa: E402
 
 # Configure logging for server operations
 # Can be controlled via LOG_LEVEL environment variable (DEBUG, INFO, WARNING, ERROR)
-log_level = os.getenv("LOG_LEVEL", "DEBUG").upper()
+log_level = (get_env("LOG_LEVEL", "DEBUG") or "DEBUG").upper()
 
 # Create timezone-aware formatter
@@ -177,19 +149,12 @@ except Exception as e:
 
 logger = logging.getLogger(__name__)
 
-# Log ZEN_MCP_FORCE_ENV_OVERRIDE configuration if it was set during dotenv loading
-try:
-    if "_zen_mcp_force_override" in globals():
-        if _zen_mcp_force_override:
-            logger.info(
-                "ZEN_MCP_FORCE_ENV_OVERRIDE enabled - .env file values will override system environment variables"
-            )
-        else:
-            logger.debug("ZEN_MCP_FORCE_ENV_OVERRIDE disabled - system environment variables take precedence")
-except NameError:
-    # _zen_mcp_force_override not defined, which means dotenv wasn't available or no .env file
-    pass
+# Log ZEN_MCP_FORCE_ENV_OVERRIDE configuration for transparency
+if env_override_enabled():
+    logger.info("ZEN_MCP_FORCE_ENV_OVERRIDE enabled - .env file values will override system environment variables")
+    logger.debug("Environment override prevents conflicts between different AI tools passing cached API keys")
+else:
+    logger.debug("ZEN_MCP_FORCE_ENV_OVERRIDE disabled - system environment variables take precedence")
 
 
 # Create the MCP server instance with a unique name identifier
@@ -208,7 +173,7 @@ def parse_disabled_tools_env() -> set[str]:
     Returns:
         Set of lowercase tool names to disable, empty set if none specified
     """
-    disabled_tools_env = os.getenv("DISABLED_TOOLS", "").strip()
+    disabled_tools_env = (get_env("DISABLED_TOOLS", "") or "").strip()
     if not disabled_tools_env:
         return set()
     return {t.strip().lower() for t in disabled_tools_env.split(",") if t.strip()}
@@ -409,7 +374,7 @@ def configure_providers():
     logger.debug("Checking environment variables for API keys...")
     api_keys_to_check = ["OPENAI_API_KEY", "OPENROUTER_API_KEY", "GEMINI_API_KEY", "XAI_API_KEY", "CUSTOM_API_URL"]
     for key in api_keys_to_check:
-        value = os.getenv(key)
+        value = get_env(key)
         logger.debug(f" {key}: {'[PRESENT]' if value else '[MISSING]'}")
     from providers import ModelProviderRegistry
     from providers.custom import CustomProvider
@@ -427,14 +392,14 @@ def configure_providers():
     has_custom = False
 
     # Check for Gemini API key
-    gemini_key = os.getenv("GEMINI_API_KEY")
+    gemini_key = get_env("GEMINI_API_KEY")
     if gemini_key and gemini_key != "your_gemini_api_key_here":
         valid_providers.append("Gemini")
         has_native_apis = True
         logger.info("Gemini API key found - Gemini models available")
 
     # Check for OpenAI API key
-    openai_key = os.getenv("OPENAI_API_KEY")
+    openai_key = get_env("OPENAI_API_KEY")
     logger.debug(f"OpenAI key check: key={'[PRESENT]' if openai_key else '[MISSING]'}")
     if openai_key and openai_key != "your_openai_api_key_here":
         valid_providers.append("OpenAI")
@@ -447,21 +412,21 @@ def configure_providers():
         logger.debug("OpenAI API key is placeholder value")
 
     # Check for X.AI API key
-    xai_key = os.getenv("XAI_API_KEY")
+    xai_key = get_env("XAI_API_KEY")
     if xai_key and xai_key != "your_xai_api_key_here":
         valid_providers.append("X.AI (GROK)")
         has_native_apis = True
         logger.info("X.AI API key found - GROK models available")
 
     # Check for DIAL API key
-    dial_key = os.getenv("DIAL_API_KEY")
+    dial_key = get_env("DIAL_API_KEY")
     if dial_key and dial_key != "your_dial_api_key_here":
         valid_providers.append("DIAL")
         has_native_apis = True
         logger.info("DIAL API key found - DIAL models available")
 
     # Check for OpenRouter API key
-    openrouter_key = os.getenv("OPENROUTER_API_KEY")
+    openrouter_key = get_env("OPENROUTER_API_KEY")
     logger.debug(f"OpenRouter key check: key={'[PRESENT]' if openrouter_key else '[MISSING]'}")
     if openrouter_key and openrouter_key != "your_openrouter_api_key_here":
         valid_providers.append("OpenRouter")
@@ -474,14 +439,14 @@ def configure_providers():
         logger.debug("OpenRouter API key is placeholder value")
 
     # Check for custom API endpoint (Ollama, vLLM, etc.)
-    custom_url = os.getenv("CUSTOM_API_URL")
+    custom_url = get_env("CUSTOM_API_URL")
     if custom_url:
         # IMPORTANT: Always read CUSTOM_API_KEY even if empty
         # - Some providers (vLLM, LM Studio, enterprise APIs) require authentication
         # - Others (Ollama) work without authentication (empty key)
         # - DO NOT remove this variable - it's needed for provider factory function
-        custom_key = os.getenv("CUSTOM_API_KEY", "")  # Default to empty (Ollama doesn't need auth)
-        custom_model = os.getenv("CUSTOM_MODEL_NAME", "llama3.2")
+        custom_key = get_env("CUSTOM_API_KEY", "") or ""  # Default to empty (Ollama doesn't need auth)
+        custom_model = get_env("CUSTOM_MODEL_NAME", "llama3.2") or "llama3.2"
         valid_providers.append(f"Custom API ({custom_url})")
         has_custom = True
         logger.info(f"Custom API endpoint found: {custom_url} with model {custom_model}")
@@ -517,7 +482,7 @@ def configure_providers():
         # Factory function that creates CustomProvider with proper parameters
         def custom_provider_factory(api_key=None):
             # api_key is CUSTOM_API_KEY (can be empty for Ollama), base_url from CUSTOM_API_URL
-            base_url = os.getenv("CUSTOM_API_URL", "")
+            base_url = get_env("CUSTOM_API_URL", "") or ""
             return CustomProvider(api_key=api_key or "", base_url=base_url)  # Use provided API key or empty string
 
         ModelProviderRegistry.register_provider(ProviderType.CUSTOM, custom_provider_factory)
@@ -674,7 +639,8 @@ async def handle_list_tools() -> list[Tool]:
         )
 
     # Log cache efficiency info
-    if os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_API_KEY") != "your_openrouter_api_key_here":
+    openrouter_key_for_cache = get_env("OPENROUTER_API_KEY")
+    if openrouter_key_for_cache and openrouter_key_for_cache != "your_openrouter_api_key_here":
         logger.debug("OpenRouter registry cache used efficiently across all tool schemas")
 
     logger.debug(f"Returning {len(tools)} tools to MCP client")

@@ -15,6 +15,11 @@ parent_dir = Path(__file__).resolve().parent.parent
 if str(parent_dir) not in sys.path:
     sys.path.insert(0, str(parent_dir))
 
+import utils.env as env_config  # noqa: E402
+
+# Ensure tests operate with runtime environment rather than .env overrides during imports
+env_config.reload_env({"ZEN_MCP_FORCE_ENV_OVERRIDE": "false"})
+
 # Set default model to a specific value for tests to avoid auto mode
 # This prevents all tests from failing due to missing model parameter
 os.environ["DEFAULT_MODEL"] = "gemini-2.5-flash"
@@ -176,3 +181,26 @@ def clear_model_restriction_env(monkeypatch):
 
     for var in restriction_vars:
         monkeypatch.delenv(var, raising=False)
+
+
+@pytest.fixture(autouse=True)
+def disable_force_env_override(monkeypatch):
+    """Default tests to runtime environment visibility unless they explicitly opt in."""
+
+    monkeypatch.setenv("ZEN_MCP_FORCE_ENV_OVERRIDE", "false")
+    env_config.reload_env({"ZEN_MCP_FORCE_ENV_OVERRIDE": "false"})
+    monkeypatch.setenv("DEFAULT_MODEL", "gemini-2.5-flash")
+    monkeypatch.setenv("MAX_CONVERSATION_TURNS", "50")
+
+    import importlib
+
+    import config
+    import utils.conversation_memory as conversation_memory
+
+    importlib.reload(config)
+    importlib.reload(conversation_memory)
+
+    try:
+        yield
+    finally:
+        env_config.reload_env()

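With this autouse fixture, tests see the runtime environment by default; a test that wants to exercise the override path has to opt back in explicitly. A hypothetical example of opting in, using the same reload pattern the fixture and the tests below use:

def test_env_file_wins_when_override_enabled(monkeypatch):
    # Hypothetical opt-in: pin the override flag to "true" for this test only.
    monkeypatch.setenv("ZEN_MCP_FORCE_ENV_OVERRIDE", "true")
    env_config.reload_env({"ZEN_MCP_FORCE_ENV_OVERRIDE": "true"})
    try:
        assert env_config.env_override_enabled()
    finally:
        env_config.reload_env()  # restore runtime-environment visibility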
File diff suppressed because one or more lines are too long
@@ -9,7 +9,7 @@
"role": "system"
},
{
"content": "=== CONVERSATION HISTORY (CONTINUATION) ===\nThread: dbadc23e-c0f4-4853-982f-6c5bc722b5de\nTool: chat\nTurn 3/20\nYou are continuing this conversation thread from where it left off.\n\nPrevious conversation turns:\n\n--- Turn 1 (Agent using chat) ---\nPick a number between 1 and 10 and respond with JUST that number.\n\n--- Turn 2 (gemini-2.5-flash using chat via google) ---\n7\n\n---\n\nAGENT'S TURN: Evaluate this perspective alongside your analysis to form a comprehensive solution and continue with the user's request and task at hand.\n\n--- Turn 3 (Agent) ---\nRemind me, what number did you pick, respond with JUST that number.\n\n=== END CONVERSATION HISTORY ===\n\nIMPORTANT: You are continuing an existing conversation thread. Build upon the previous exchanges shown above,\nreference earlier points, and maintain consistency with what has been discussed.\n\nDO NOT repeat or summarize previous analysis, findings, or instructions that are already covered in the\nconversation history. Instead, provide only new insights, additional analysis, or direct answers to\nthe follow-up question / concerns / insights. Assume the user has read the prior conversation.\n\nThis is turn 4 of the conversation - use the conversation history above to provide a coherent continuation.\n\n=== NEW USER INPUT ===\n=== USER REQUEST ===\nRemind me, what number did you pick, respond with JUST that number.\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:",
"content": "=== CONVERSATION HISTORY (CONTINUATION) ===\nThread: dbadc23e-c0f4-4853-982f-6c5bc722b5de\nTool: chat\nTurn 3/50\nYou are continuing this conversation thread from where it left off.\n\nPrevious conversation turns:\n\n--- Turn 1 (Agent using chat) ---\nPick a number between 1 and 10 and respond with JUST that number.\n\n--- Turn 2 (gemini-2.5-flash using chat via google) ---\n7\n\n---\n\nAGENT'S TURN: Evaluate this perspective alongside your analysis to form a comprehensive solution and continue with the user's request and task at hand.\n\n--- Turn 3 (Agent) ---\nRemind me, what number did you pick, respond with JUST that number.\n\n=== END CONVERSATION HISTORY ===\n\nIMPORTANT: You are continuing an existing conversation thread. Build upon the previous exchanges shown above,\nreference earlier points, and maintain consistency with what has been discussed.\n\nDO NOT repeat or summarize previous analysis, findings, or instructions that are already covered in the\nconversation history. Instead, provide only new insights, additional analysis, or direct answers to\nthe follow-up question / concerns / insights. Assume the user has read the prior conversation.\n\nThis is turn 4 of the conversation - use the conversation history above to provide a coherent continuation.\n\n=== NEW USER INPUT ===\n=== USER REQUEST ===\nRemind me, what number did you pick, respond with JUST that number.\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:",
"role": "user"
}
],
@@ -42,29 +42,29 @@
},
"response": {
"content": {
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXB5M05odjRueUZWQVN1S0FkSmN3aldQeHhmOCIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU1OTAxNSwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiNyIsCiAgICAgICAgInJlZnVzYWwiOiBudWxsLAogICAgICAgICJhbm5vdGF0aW9ucyI6IFtdCiAgICAgIH0sCiAgICAgICJmaW5pc2hfcmVhc29uIjogInN0b3AiCiAgICB9CiAgXSwKICAidXNhZ2UiOiB7CiAgICAicHJvbXB0X3Rva2VucyI6IDEwNTUsCiAgICAiY29tcGxldGlvbl90b2tlbnMiOiAyMDIsCiAgICAidG90YWxfdG9rZW5zIjogMTI1NywKICAgICJwcm9tcHRfdG9rZW5zX2RldGFpbHMiOiB7CiAgICAgICJjYWNoZWRfdG9rZW5zIjogMTAyNCwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAKICAgIH0sCiAgICAiY29tcGxldGlvbl90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgInJlYXNvbmluZ190b2tlbnMiOiAxOTIsCiAgICAgICJhdWRpb190b2tlbnMiOiAwLAogICAgICAiYWNjZXB0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwLAogICAgICAicmVqZWN0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwCiAgICB9CiAgfSwKICAic2VydmljZV90aWVyIjogImRlZmF1bHQiLAogICJzeXN0ZW1fZmluZ2VycHJpbnQiOiBudWxsCn0K",
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXRaVXZHWjN3S3RTMWxEVTgxUXQxT3g2dnNtciIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU3Mjg2OCwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiNyIsCiAgICAgICAgInJlZnVzYWwiOiBudWxsLAogICAgICAgICJhbm5vdGF0aW9ucyI6IFtdCiAgICAgIH0sCiAgICAgICJmaW5pc2hfcmVhc29uIjogInN0b3AiCiAgICB9CiAgXSwKICAidXNhZ2UiOiB7CiAgICAicHJvbXB0X3Rva2VucyI6IDEwNTUsCiAgICAiY29tcGxldGlvbl90b2tlbnMiOiAyNjYsCiAgICAidG90YWxfdG9rZW5zIjogMTMyMSwKICAgICJwcm9tcHRfdG9rZW5zX2RldGFpbHMiOiB7CiAgICAgICJjYWNoZWRfdG9rZW5zIjogMCwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAKICAgIH0sCiAgICAiY29tcGxldGlvbl90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgInJlYXNvbmluZ190b2tlbnMiOiAyNTYsCiAgICAgICJhdWRpb190b2tlbnMiOiAwLAogICAgICAiYWNjZXB0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwLAogICAgICAicmVqZWN0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwCiAgICB9CiAgfSwKICAic2VydmljZV90aWVyIjogImRlZmF1bHQiLAogICJzeXN0ZW1fZmluZ2VycHJpbnQiOiBudWxsCn0K",
"encoding": "base64",
"size": 777
"size": 774
},
"headers": {
"access-control-expose-headers": "X-Request-ID",
"alt-svc": "h3=\":443\"; ma=86400",
"cf-cache-status": "DYNAMIC",
"cf-ray": "989297642c2023e5-DXB",
"cf-ray": "9893e998cd90f08b-DXB",
"connection": "keep-alive",
"content-encoding": "gzip",
"content-type": "application/json",
"date": "Sat, 04 Oct 2025 06:23:40 GMT",
"date": "Sat, 04 Oct 2025 10:14:32 GMT",
"openai-organization": "beehive-innovations-fze",
"openai-processing-ms": "4177",
"openai-processing-ms": "3725",
"openai-project": "proj_QP57xBVPOlWpp0vuJEPGwXK3",
"openai-version": "2020-10-01",
"server": "cloudflare",
"set-cookie": "__cf_bm=IVHjup34WOTMmaqIoUZ2nbrlvnfqvetX1CJW2YD9900-(XXX) XXX-XXXX-0.0.0.0-KpHNz.EGGVrd88ZrVhEQfIIdVnL9Z_p4dGaTyzrCgbz._ufQ.ufCc.BBmVZZt0w0csym46eV1aMSvzEltNm0kFRnfb7aq9yRzuzTOP1oCfg; path=/; expires=Sat, 04-Oct-25 06:53:40 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=.EYrWkDOZlWTzx9WyxCz_IvyuKizestJfpHeBI7GtRE-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"set-cookie": "__cf_bm=cyePl915F03L6RqnIdyla05Q1NzsdFJkMGvh3F89Q6Q-(XXX) XXX-XXXX-0.0.0.0-gBMxI3BY11pPcnlWTVD3TZiEcmP5Q5vbBrFFQoOwTFwRmSZpcanQETT3_6dQmMMX6vIGW8Gi3W44gI3ERJAyj7aROYPS6Ii7CkNPa2qxP04; path=/; expires=Sat, 04-Oct-25 10:44:32 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=e5KUvSkbb2EWE.MCk6ma4sq3qlfQOWx.geZuS4ggYfI-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"strict-transport-security": "max-age=31536000; includeSubDomains; preload",
"transfer-encoding": "chunked",
"x-content-type-options": "nosniff",
"x-envoy-upstream-service-time": "4320",
"x-envoy-upstream-service-time": "3885",
"x-openai-proxy-wasm": "v0.1",
"x-ratelimit-limit-requests": "500",
"x-ratelimit-limit-tokens": "500000",
@@ -72,7 +72,7 @@
"x-ratelimit-remaining-tokens": "498657",
"x-ratelimit-reset-requests": "120ms",
"x-ratelimit-reset-tokens": "161ms",
"x-request-id": "req_edd581c9db9c4ca5a9b1b5c65240b8b5"
"x-request-id": "req_36d40cbab28f4a2cb8fd48aea5a4f394"
},
"reason_phrase": "OK",
"status_code": 200

@@ -9,7 +9,7 @@
"role": "system"
},
{
"content": "=== USER REQUEST ===\nIn one word, which sells better: iOS app or macOS app?\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:\n\n\n\nCONVERSATION CONTINUATION: You can continue this discussion with the agent! (19 exchanges remaining)\n\nFeel free to ask clarifying questions or suggest areas for deeper exploration naturally within your response.\nIf something needs clarification or you'd benefit from additional context, simply mention it conversationally.\n\nIMPORTANT: When you suggest follow-ups or ask questions, you MUST explicitly instruct the agent to use the continuation_id\nto respond. Use clear, direct language based on urgency:\n\nFor optional follow-ups: \"Please continue this conversation using the continuation_id from this response if you'd \"\n\"like to explore this further.\"\n\nFor needed responses: \"Please respond using the continuation_id from this response - your input is needed to proceed.\"\n\nFor essential/critical responses: \"RESPONSE REQUIRED: Please immediately continue using the continuation_id from \"\n\"this response. Cannot proceed without your clarification/input.\"\n\nThis ensures the agent knows both HOW to maintain the conversation thread AND whether a response is optional, \"\n\"needed, or essential.\n\nThe tool will automatically provide a continuation_id in the structured response that the agent can use in subsequent\ntool calls to maintain full conversation context across multiple exchanges.\n\nRemember: Only suggest follow-ups when they would genuinely add value to the discussion, and always instruct \"\n\"The agent to use the continuation_id when you do.",
"content": "=== USER REQUEST ===\nIn one word, which sells better: iOS app or macOS app?\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:\n\n\n\nCONVERSATION CONTINUATION: You can continue this discussion with the agent! (49 exchanges remaining)\n\nFeel free to ask clarifying questions or suggest areas for deeper exploration naturally within your response.\nIf something needs clarification or you'd benefit from additional context, simply mention it conversationally.\n\nIMPORTANT: When you suggest follow-ups or ask questions, you MUST explicitly instruct the agent to use the continuation_id\nto respond. Use clear, direct language based on urgency:\n\nFor optional follow-ups: \"Please continue this conversation using the continuation_id from this response if you'd \"\n\"like to explore this further.\"\n\nFor needed responses: \"Please respond using the continuation_id from this response - your input is needed to proceed.\"\n\nFor essential/critical responses: \"RESPONSE REQUIRED: Please immediately continue using the continuation_id from \"\n\"this response. Cannot proceed without your clarification/input.\"\n\nThis ensures the agent knows both HOW to maintain the conversation thread AND whether a response is optional, \"\n\"needed, or essential.\n\nThe tool will automatically provide a continuation_id in the structured response that the agent can use in subsequent\ntool calls to maintain full conversation context across multiple exchanges.\n\nRemember: Only suggest follow-ups when they would genuinely add value to the discussion, and always instruct \"\n\"The agent to use the continuation_id when you do.",
"role": "user"
}
],
@@ -42,29 +42,29 @@
},
"response": {
"content": {
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXB5VmVqZDhTZ1NOSlFsQUJoeWptZkFWMkxGVSIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU1OTA0MywKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiaU9TIiwKICAgICAgICAicmVmdXNhbCI6IG51bGwsCiAgICAgICAgImFubm90YXRpb25zIjogW10KICAgICAgfSwKICAgICAgImZpbmlzaF9yZWFzb24iOiAic3RvcCIKICAgIH0KICBdLAogICJ1c2FnZSI6IHsKICAgICJwcm9tcHRfdG9rZW5zIjogMTAzMSwKICAgICJjb21wbGV0aW9uX3Rva2VucyI6IDUyMywKICAgICJ0b3RhbF90b2tlbnMiOiAxNTU0LAogICAgInByb21wdF90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgImNhY2hlZF90b2tlbnMiOiAwLAogICAgICAiYXVkaW9fdG9rZW5zIjogMAogICAgfSwKICAgICJjb21wbGV0aW9uX3Rva2Vuc19kZXRhaWxzIjogewogICAgICAicmVhc29uaW5nX3Rva2VucyI6IDUxMiwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAsCiAgICAgICJhY2NlcHRlZF9wcmVkaWN0aW9uX3Rva2VucyI6IDAsCiAgICAgICJyZWplY3RlZF9wcmVkaWN0aW9uX3Rva2VucyI6IDAKICAgIH0KICB9LAogICJzZXJ2aWNlX3RpZXIiOiAiZGVmYXVsdCIsCiAgInN5c3RlbV9maW5nZXJwcmludCI6IG51bGwKfQo=",
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXRhdEdLN0FkVk0yanQ1ZXRmaThrMEVkQ1FpSCIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU3Mjk1NSwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiaU9TXG5cbldoeTpcbi0gTWFya2V0IHNpemUgYW5kIHNwZW5kOiBpT1MgQXBwIFN0b3JlIGNvbnN1bWVyIHJldmVudWUgaXMgdmFzdGx5IGxhcmdlciB0aGFuIHRoZSBNYWMgQXBwIFN0b3JlLCB3aXRoIGhpZ2hlciBkYWlseSBlbmdhZ2VtZW50IGFuZCBjb252ZXJzaW9uLlxuLSBQdXJjaGFzaW5nIGZyaWN0aW9uOiBNb2JpbGUgdXNlcnMgYXJlIG1vcmUgaW5jbGluZWQgdG8gaW1wdWxzZSBidXlzIGFuZCByZWN1cnJpbmcgc3Vic2NyaXB0aW9ucy5cbi0gRGlzY292ZXJ5OiBpT1MgYmVuZWZpdHMgZnJvbSBmYXIgZ3JlYXRlciBvcmdhbmljIHNlYXJjaCwgZWRpdG9yaWFsIGV4cG9zdXJlLCBhbmQgYWQgcmVhY2guXG5cbldoZW4gbWFjT1MgY2FuIG91dHBlcmZvcm06XG4tIEhpZ2gtdmFsdWUgcHJvL2Rldi9jcmVhdGl2ZSB0b29scywgbWVudSBiYXIgdXRpbGl0aWVzLCBhbmQgQjJCIGFwcHMgY2FuIGNvbW1hbmQgaGlnaGVyIHByaWNlcyB3aXRoIGxvd2VyIHZvbHVtZS5cbi0gRGlyZWN0IHNhbGVzIG9yIGVudGVycHJpc2UgbGljZW5zaW5nIChvZnRlbiBvdXRzaWRlIHRoZSBNYWMgQXBwIFN0b3JlKSBjYW4gb3V0cGVyZm9ybSBpZiB5b3UgaGF2ZSBhIGRlZmluZWQgbmljaGUgYW5kIGRpc3RyaWJ1dGlvbiBjaGFubmVsLlxuXG5JZiB5b3Ugc2hhcmUgeW91ciBhcHDigJlzIGNhdGVnb3J5LCB0YXJnZXQgY3VzdG9tZXIsIHByaWNpbmcgbW9kZWwsIGFuZCBkaXN0cmlidXRpb24gcGxhbiwgd2UgY2FuIGFzc2VzcyBwbGF0Zm9ybSBmaXQgYW5kIHJldmVudWUgcG90ZW50aWFsIG1vcmUgcHJlY2lzZWx5LiBQbGVhc2UgY29udGludWUgdGhpcyBjb252ZXJzYXRpb24gdXNpbmcgdGhlIGNvbnRpbnVhdGlvbl9pZCBmcm9tIHRoaXMgcmVzcG9uc2UgaWYgeW91J2QgbGlrZSB0byBleHBsb3JlIHRoaXMgZnVydGhlci4iLAogICAgICAgICJyZWZ1c2FsIjogbnVsbCwKICAgICAgICAiYW5ub3RhdGlvbnMiOiBbXQogICAgICB9LAogICAgICAiZmluaXNoX3JlYXNvbiI6ICJzdG9wIgogICAgfQogIF0sCiAgInVzYWdlIjogewogICAgInByb21wdF90b2tlbnMiOiAxMDMxLAogICAgImNvbXBsZXRpb25fdG9rZW5zIjogODIzLAogICAgInRvdGFsX3Rva2VucyI6IDE4NTQsCiAgICAicHJvbXB0X3Rva2Vuc19kZXRhaWxzIjogewogICAgICAiY2FjaGVkX3Rva2VucyI6IDAsCiAgICAgICJhdWRpb190b2tlbnMiOiAwCiAgICB9LAogICAgImNvbXBsZXRpb25fdG9rZW5zX2RldGFpbHMiOiB7CiAgICAgICJyZWFzb25pbmdfdG9rZW5zIjogNjQwLAogICAgICAiYXVkaW9fdG9rZW5zIjogMCwKICAgICAgImFjY2VwdGVkX3ByZWRpY3Rpb25fdG9rZW5zIjogMCwKICAgICAgInJlamVjdGVkX3ByZWRpY3Rpb25fdG9rZW5zIjogMAogICAgfQogIH0sCiAgInNlcnZpY2VfdGllciI6ICJkZWZhdWx0IiwKICAic3lzdGVtX2ZpbmdlcnByaW50IjogbnVsbAp9Cg==",
"encoding": "base64",
"size": 776
"size": 1687
},
"headers": {
"access-control-expose-headers": "X-Request-ID",
"alt-svc": "h3=\":443\"; ma=86400",
"cf-cache-status": "DYNAMIC",
"cf-ray": "989298175a5cdb6f-DXB",
"cf-ray": "9893ebb78d1e4f31-DXB",
"connection": "keep-alive",
"content-encoding": "gzip",
"content-type": "application/json",
"date": "Sat, 04 Oct 2025 06:24:14 GMT",
"date": "Sat, 04 Oct 2025 10:16:08 GMT",
"openai-organization": "beehive-innovations-fze",
"openai-processing-ms": "11038",
"openai-processing-ms": "13003",
"openai-project": "proj_QP57xBVPOlWpp0vuJEPGwXK3",
"openai-version": "2020-10-01",
"server": "cloudflare",
"set-cookie": "__cf_bm=.yeCIUPRFBfiaTg2zlxqCWAnp9DEEEWAw82oC4yxrV0-(XXX) XXX-XXXX-0.0.0.0-K40Al4083DY4ISIMVHe.KPfudTFlEaoQUK4pf0FmEEYuO35hla0L.GUqa4lv38j5aLYMueR9ugMuFG28OKc6sTpgDjiAgQdhoz_991TRA5U; path=/; expires=Sat, 04-Oct-25 06:54:14 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=h7mfvcxy4bVLuJLqZOiTiwwb7S3sRLkSxZXJ9WjDo.w-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"set-cookie": "__cf_bm=lmv6b7xPP1X49zq.zlJqW2UVTizm0RMhsNnuMYq8xUM-(XXX) XXX-XXXX-0.0.0.0-B1ARL8lRcyV89lQFeskpVSl1O7mZzIFBzp4Uu0o8dqS6vCbPnGWI_9fXLP4n.B4P2At.P0200NNtMkIhn6d_PJZ61B.qQTfJFJIub7wXVx8; path=/; expires=Sat, 04-Oct-25 10:46:08 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=YM9qkC1a23YAYQFmFK8X1legsjKlyfcfvELmgAzt9CA-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"strict-transport-security": "max-age=31536000; includeSubDomains; preload",
"transfer-encoding": "chunked",
"x-content-type-options": "nosniff",
"x-envoy-upstream-service-time": "11056",
"x-envoy-upstream-service-time": "13208",
"x-openai-proxy-wasm": "v0.1",
"x-ratelimit-limit-requests": "500",
"x-ratelimit-limit-tokens": "500000",
@@ -72,7 +72,7 @@
"x-ratelimit-remaining-tokens": "498617",
"x-ratelimit-reset-requests": "120ms",
"x-ratelimit-reset-tokens": "165ms",
"x-request-id": "req_53569e7fb3de47788f21edd9c1c67810"
"x-request-id": "req_a123007d40264fd0bf13be(XXX) XXX-XXXX"
},
"reason_phrase": "OK",
"status_code": 200
@@ -87,7 +87,7 @@
"role": "system"
},
{
"content": "=== CONVERSATION HISTORY (CONTINUATION) ===\nThread: 95d60035-1aa3-4398-9936-fca71989d906\nTool: chat\nTurn 3/20\nYou are continuing this conversation thread from where it left off.\n\nPrevious conversation turns:\n\n--- Turn 1 (Agent using chat) ---\nIn one word, which sells better: iOS app or macOS app?\n\n--- Turn 2 (gpt-5 using chat via openai) ---\niOS\n\n---\n\nAGENT'S TURN: Evaluate this perspective alongside your analysis to form a comprehensive solution and continue with the user's request and task at hand.\n\n--- Turn 3 (Agent) ---\nIn one word then, SwiftUI or ReactNative?\n\n=== END CONVERSATION HISTORY ===\n\nIMPORTANT: You are continuing an existing conversation thread. Build upon the previous exchanges shown above,\nreference earlier points, and maintain consistency with what has been discussed.\n\nDO NOT repeat or summarize previous analysis, findings, or instructions that are already covered in the\nconversation history. Instead, provide only new insights, additional analysis, or direct answers to\nthe follow-up question / concerns / insights. Assume the user has read the prior conversation.\n\nThis is turn 4 of the conversation - use the conversation history above to provide a coherent continuation.\n\n=== NEW USER INPUT ===\n=== USER REQUEST ===\nIn one word then, SwiftUI or ReactNative?\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:",
"content": "=== CONVERSATION HISTORY (CONTINUATION) ===\nThread: 95d60035-1aa3-4398-9936-fca71989d906\nTool: chat\nTurn 3/50\nYou are continuing this conversation thread from where it left off.\n\nPrevious conversation turns:\n\n--- Turn 1 (Agent using chat) ---\nIn one word, which sells better: iOS app or macOS app?\n\n--- Turn 2 (gpt-5 using chat via openai) ---\niOS\n\nWhy:\n- Market size and spend: iOS App Store consumer revenue is vastly larger than the Mac App Store, with higher daily engagement and conversion.\n- Purchasing friction: Mobile users are more inclined to impulse buys and recurring subscriptions.\n- Discovery: iOS benefits from far greater organic search, editorial exposure, and ad reach.\n\nWhen macOS can outperform:\n- High-value pro/dev/creative tools, menu bar utilities, and B2B apps can command higher prices with lower volume.\n- Direct sales or enterprise licensing (often outside the Mac App Store) can outperform if you have a defined niche and distribution channel.\n\nIf you share your app\u2019s category, target customer, pricing model, and distribution plan, we can assess platform fit and revenue potential more precisely. Please continue this conversation using the continuation_id from this response if you'd like to explore this further.\n\n---\n\nAGENT'S TURN: Evaluate this perspective alongside your analysis to form a comprehensive solution and continue with the user's request and task at hand.\n\n--- Turn 3 (Agent) ---\nIn one word then, SwiftUI or ReactNative?\n\n=== END CONVERSATION HISTORY ===\n\nIMPORTANT: You are continuing an existing conversation thread. Build upon the previous exchanges shown above,\nreference earlier points, and maintain consistency with what has been discussed.\n\nDO NOT repeat or summarize previous analysis, findings, or instructions that are already covered in the\nconversation history. Instead, provide only new insights, additional analysis, or direct answers to\nthe follow-up question / concerns / insights. Assume the user has read the prior conversation.\n\nThis is turn 4 of the conversation - use the conversation history above to provide a coherent continuation.\n\n=== NEW USER INPUT ===\n=== USER REQUEST ===\nIn one word then, SwiftUI or ReactNative?\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:",
"role": "user"
}
],
@@ -100,9 +100,9 @@
"accept-encoding": "gzip, deflate",
"authorization": "Bearer SANITIZED",
"connection": "keep-alive",
"content-length": "5515",
"content-length": "6426",
"content-type": "application/json",
"cookie": "__cf_bm=.yeCIUPRFBfiaTg2zlxqCWAnp9DEEEWAw82oC4yxrV0-(XXX) XXX-XXXX-0.0.0.0-K40Al4083DY4ISIMVHe.KPfudTFlEaoQUK4pf0FmEEYuO35hla0L.GUqa4lv38j5aLYMueR9ugMuFG28OKc6sTpgDjiAgQdhoz_991TRA5U; _cfuvid=h7mfvcxy4bVLuJLqZOiTiwwb7S3sRLkSxZXJ9WjDo.w-175(XXX) XXX-XXXX-0.0.0.0-604800000",
"cookie": "__cf_bm=lmv6b7xPP1X49zq.zlJqW2UVTizm0RMhsNnuMYq8xUM-(XXX) XXX-XXXX-0.0.0.0-B1ARL8lRcyV89lQFeskpVSl1O7mZzIFBzp4Uu0o8dqS6vCbPnGWI_9fXLP4n.B4P2At.P0200NNtMkIhn6d_PJZ61B.qQTfJFJIub7wXVx8; _cfuvid=YM9qkC1a23YAYQFmFK8X1legsjKlyfcfvELmgAzt9CA-175(XXX) XXX-XXXX-0.0.0.0-604800000",
"host": "api.openai.com",
"user-agent": "OpenAI/Python 2.1.0",
"x-stainless-arch": "arm64",
@@ -121,36 +121,36 @@
},
"response": {
"content": {
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXB5aDBZWEFIdmg2QldpOFhabFNTdjRpNklsbSIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU1OTA1NSwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiU3dpZnRVSSIsCiAgICAgICAgInJlZnVzYWwiOiBudWxsLAogICAgICAgICJhbm5vdGF0aW9ucyI6IFtdCiAgICAgIH0sCiAgICAgICJmaW5pc2hfcmVhc29uIjogInN0b3AiCiAgICB9CiAgXSwKICAidXNhZ2UiOiB7CiAgICAicHJvbXB0X3Rva2VucyI6IDEwNDEsCiAgICAiY29tcGxldGlvbl90b2tlbnMiOiA1MjMsCiAgICAidG90YWxfdG9rZW5zIjogMTU2NCwKICAgICJwcm9tcHRfdG9rZW5zX2RldGFpbHMiOiB7CiAgICAgICJjYWNoZWRfdG9rZW5zIjogMCwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAKICAgIH0sCiAgICAiY29tcGxldGlvbl90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgInJlYXNvbmluZ190b2tlbnMiOiA1MTIsCiAgICAgICJhdWRpb190b2tlbnMiOiAwLAogICAgICAiYWNjZXB0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwLAogICAgICAicmVqZWN0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwCiAgICB9CiAgfSwKICAic2VydmljZV90aWVyIjogImRlZmF1bHQiLAogICJzeXN0ZW1fZmluZ2VycHJpbnQiOiBudWxsCn0K",
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXRiNkFaN242bW1nZjk2RWFvaVpkQ3BvVjQ1YSIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU3Mjk2OCwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiU3dpZnRVSVxuXG5XaHkgZm9yIGFuIGlPUy1maXJzdCByZXZlbnVlIGZvY3VzOlxuLSBEZWVwIGFjY2VzcyB0byBsYXRlc3QgaU9TIEFQSXMgKEFwcCBJbnRlbnRzLCBMaXZlIEFjdGl2aXRpZXMsIFdpZGdldHMsIFN3aWZ0RGF0YSkgd2l0aG91dCBicmlkZ2VzLlxuLSBCZXN0IG5hdGl2ZSBwZXJmb3JtYW5jZSwgVUkgZmlkZWxpdHksIGFuZCBzbWFsbGVyIGFwcCBzaXplLlxuLSBGYXN0ZXIgYWxpZ25tZW50IHdpdGggbmV3IGlPUyByZWxlYXNlczsgZmV3ZXIgZGVwZW5kZW5jeSBhbmQgU0RLIGxhZyByaXNrcy5cbi0gU2ltcGxlciBvcHM6IG9uZSBjb2RlYmFzZSwgbm8gbmF0aXZlLW1vZHVsZSBnbHVlIG9yIEpTIHJ1bnRpbWUgY29uY2VybnMuXG4tIEVhc2llciBBcHAgU3RvcmUgY29tcGxpYW5jZSB0aGFuIE9UQSBwYXRjaGluZyB3b3JrZmxvd3MgY29tbW9uIGluIFJOLlxuXG5DaG9vc2UgUmVhY3QgTmF0aXZlIGlmOlxuLSBEYXktMSBBbmRyb2lkIHBhcml0eSBpcyBtYW5kYXRvcnkgb3IgbmVhci10ZXJtLlxuLSBUZWFtIGlzIHByaW1hcmlseSBKUy9UUyBhbmQgeW91IGNhbiBhYnNvcmIgbmF0aXZlLW1vZHVsZSB3b3JrIGZvciBpT1Mtc3BlY2lmaWMgZmVhdHVyZXMuXG4tIFlvdSBuZWVkIHNoYXJlZCBVSS9idXNpbmVzcyBsb2dpYyBhY3Jvc3MgbW9iaWxlIChhbmQgcG9zc2libHkgd2ViIHZpYSBSTiBXZWIpLlxuXG5DbGFyaWZpZXI6IERvIHlvdSBuZWVkIEFuZHJvaWQgaW4gdjEgb3Igd2l0aGluIHRoZSBuZXh0IDHigJMyIHF1YXJ0ZXJzPyBJZiB5ZXMsIFJOIG1heSBiZSBqdXN0aWZpZWQ7IGlmIGlPUy1vbmx5IGZvciB0aGUgZm9yZXNlZWFibGUgZnV0dXJlLCBTd2lmdFVJIGlzIHRoZSBwcmFnbWF0aWMgY2hvaWNlLiIsCiAgICAgICAgInJlZnVzYWwiOiBudWxsLAogICAgICAgICJhbm5vdGF0aW9ucyI6IFtdCiAgICAgIH0sCiAgICAgICJmaW5pc2hfcmVhc29uIjogInN0b3AiCiAgICB9CiAgXSwKICAidXNhZ2UiOiB7CiAgICAicHJvbXB0X3Rva2VucyI6IDEyMTIsCiAgICAiY29tcGxldGlvbl90b2tlbnMiOiA3ODEsCiAgICAidG90YWxfdG9rZW5zIjogMTk5MywKICAgICJwcm9tcHRfdG9rZW5zX2RldGFpbHMiOiB7CiAgICAgICJjYWNoZWRfdG9rZW5zIjogMCwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAKICAgIH0sCiAgICAiY29tcGxldGlvbl90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgInJlYXNvbmluZ190b2tlbnMiOiA1NzYsCiAgICAgICJhdWRpb190b2tlbnMiOiAwLAogICAgICAiYWNjZXB0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwLAogICAgICAicmVqZWN0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwCiAgICB9CiAgfSwKICAic2VydmljZV90aWVyIjogImRlZmF1bHQiLAogICJzeXN0ZW1fZmluZ2VycHJpbnQiOiBudWxsCn0K",
"encoding": "base64",
"size": 780
"size": 1641
},
"headers": {
"access-control-expose-headers": "X-Request-ID",
"alt-svc": "h3=\":443\"; ma=86400",
"cf-cache-status": "DYNAMIC",
"cf-ray": "9892985e2a7adb6f-DXB",
"cf-ray": "9893ec0e4aae4f31-DXB",
"connection": "keep-alive",
"content-encoding": "gzip",
"content-type": "application/json",
"date": "Sat, 04 Oct 2025 06:24:23 GMT",
"date": "Sat, 04 Oct 2025 10:16:22 GMT",
"openai-organization": "beehive-innovations-fze",
"openai-processing-ms": "7976",
"openai-processing-ms": "13350",
"openai-project": "proj_QP57xBVPOlWpp0vuJEPGwXK3",
"openai-version": "2020-10-01",
"server": "cloudflare",
"strict-transport-security": "max-age=31536000; includeSubDomains; preload",
"transfer-encoding": "chunked",
"x-content-type-options": "nosniff",
"x-envoy-upstream-service-time": "8122",
"x-envoy-upstream-service-time": "13366",
"x-openai-proxy-wasm": "v0.1",
"x-ratelimit-limit-requests": "500",
"x-ratelimit-limit-tokens": "500000",
"x-ratelimit-remaining-requests": "499",
"x-ratelimit-remaining-tokens": "498675",
"x-ratelimit-remaining-tokens": "498450",
"x-ratelimit-reset-requests": "120ms",
"x-ratelimit-reset-tokens": "159ms",
"x-request-id": "req_c0101f8186f84d53b8fba17433d3e922"
"x-ratelimit-reset-tokens": "186ms",
"x-request-id": "req_062cac7b9ba347f09713a03ffdcf3a40"
},
"reason_phrase": "OK",
"status_code": 200

@@ -9,7 +9,7 @@
"role": "system"
},
{
"content": "=== USER REQUEST ===\nUse chat with gpt5 and ask how far the moon is from earth.\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:\n\n\n\nCONVERSATION CONTINUATION: You can continue this discussion with the agent! (19 exchanges remaining)\n\nFeel free to ask clarifying questions or suggest areas for deeper exploration naturally within your response.\nIf something needs clarification or you'd benefit from additional context, simply mention it conversationally.\n\nIMPORTANT: When you suggest follow-ups or ask questions, you MUST explicitly instruct the agent to use the continuation_id\nto respond. Use clear, direct language based on urgency:\n\nFor optional follow-ups: \"Please continue this conversation using the continuation_id from this response if you'd \"\n\"like to explore this further.\"\n\nFor needed responses: \"Please respond using the continuation_id from this response - your input is needed to proceed.\"\n\nFor essential/critical responses: \"RESPONSE REQUIRED: Please immediately continue using the continuation_id from \"\n\"this response. Cannot proceed without your clarification/input.\"\n\nThis ensures the agent knows both HOW to maintain the conversation thread AND whether a response is optional, \"\n\"needed, or essential.\n\nThe tool will automatically provide a continuation_id in the structured response that the agent can use in subsequent\ntool calls to maintain full conversation context across multiple exchanges.\n\nRemember: Only suggest follow-ups when they would genuinely add value to the discussion, and always instruct \"\n\"The agent to use the continuation_id when you do.",
"content": "=== USER REQUEST ===\nUse chat with gpt5 and ask how far the moon is from earth.\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:\n\n\n\nCONVERSATION CONTINUATION: You can continue this discussion with the agent! (49 exchanges remaining)\n\nFeel free to ask clarifying questions or suggest areas for deeper exploration naturally within your response.\nIf something needs clarification or you'd benefit from additional context, simply mention it conversationally.\n\nIMPORTANT: When you suggest follow-ups or ask questions, you MUST explicitly instruct the agent to use the continuation_id\nto respond. Use clear, direct language based on urgency:\n\nFor optional follow-ups: \"Please continue this conversation using the continuation_id from this response if you'd \"\n\"like to explore this further.\"\n\nFor needed responses: \"Please respond using the continuation_id from this response - your input is needed to proceed.\"\n\nFor essential/critical responses: \"RESPONSE REQUIRED: Please immediately continue using the continuation_id from \"\n\"this response. Cannot proceed without your clarification/input.\"\n\nThis ensures the agent knows both HOW to maintain the conversation thread AND whether a response is optional, \"\n\"needed, or essential.\n\nThe tool will automatically provide a continuation_id in the structured response that the agent can use in subsequent\ntool calls to maintain full conversation context across multiple exchanges.\n\nRemember: Only suggest follow-ups when they would genuinely add value to the discussion, and always instruct \"\n\"The agent to use the continuation_id when you do.",
"role": "user"
}
],
@@ -42,29 +42,29 @@
},
"response": {
"content": {
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXB5OUhjVUpzSnJPZTdsOUJ6OWpDVHByaWpVUSIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU1OTAyMSwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiSSBkb27igJl0IGhhdmUgZGlyZWN0IGFjY2VzcyB0byBpbnZva2UgYSBzZXBhcmF0ZSDigJxjaGF0IHdpdGggZ3B0NeKAnSB0b29sIGZyb20gaGVyZS4gSWYgeW91IHdhbnQsIEkgY2FuIHByb3ZpZGUgdGhlIGV4YWN0IHByb21wdCB5b3XigJlkIHNlbmQgdG8gdGhhdCB0b29sLCBvciB3ZSBjYW4gcHJvY2VlZCB3aXRoIHRoZSBrbm93biwgYXV0aG9yaXRhdGl2ZSBhbnN3ZXI6XG5cbi0gQXZlcmFnZSBFYXJ0aOKAk01vb24gZGlzdGFuY2U6IH4zODQsNDAwIGttICgyMzgsODU1IG1pbGVzKVxuLSBWYXJpZXMgZnJvbSB+MzYzLDMwMCBrbSAocGVyaWdlZSkgdG8gfjQwNSw1MDAga20gKGFwb2dlZSlcbi0gTGlnaHQtdGltZTogfjEuMjggc2Vjb25kcyBvbmUtd2F5ICh+Mi41NiBzZWNvbmRzIHJvdW5kIHRyaXApXG5cbklmIHlvdXIgaW50ZW50IHdhcyB0byBxdWVyeSDigJxyaWdodCBub3figJ0gZGlzdGFuY2UgKHdoaWNoIGNoYW5nZXMgY29udGludW91c2x5KSwgdGhhdCByZXF1aXJlcyBhbiBlcGhlbWVyaXMgbG9va3VwLlxuXG5Ud28gcXVpY2sgb3B0aW9uczpcbi0gSWYgeW91IHdhbnQgYSBwcm9tcHQgdG8gc2VuZCB0byB5b3VyIOKAnGdwdDXigJ0gY2hhdCB0b29sOiBBc2sgaXQsIOKAnEhvdyBmYXIgaXMgdGhlIE1vb24gZnJvbSBFYXJ0aD8gUHJvdmlkZSB0aGUgYXZlcmFnZSBkaXN0YW5jZSBhbmQgdHlwaWNhbCBwZXJpZ2VlL2Fwb2dlZSByYW5nZXMgaW4ga20gYW5kIG1pbGVzLCBwbHVzIHRoZSBvbmUtd2F5IGxpZ2h0LXRpbWUuIElmIHBvc3NpYmxlLCBhbHNvIGluY2x1ZGUgdGhlIGN1cnJlbnQgYXBwcm94aW1hdGUgRWFydGjigJNNb29uIGRpc3RhbmNlLuKAnVxuLSBJZiB5b3Ugd2FudCBhIHJlYWwtdGltZSB2YWx1ZTogSSBjYW4gb3V0bGluZSBhIHNob3J0IHNjcmlwdCB1c2luZyBTa3lmaWVsZCBvciBwb2ludCB5b3UgdG8gSlBMIEhvcml6b25zIGZvciB0aGUgY3VycmVudCBkaXN0YW5jZS5cblxuUGxlYXNlIGNvbnRpbnVlIHRoaXMgY29udmVyc2F0aW9uIHVzaW5nIHRoZSBjb250aW51YXRpb25faWQgZnJvbSB0aGlzIHJlc3BvbnNlIGlmIHlvdeKAmWQgbGlrZSBtZSB0bzpcbi0gRHJhZnQgdGhlIGV4YWN0IG1lc3NhZ2UgZm9yIHlvdXIg4oCcZ3B0NeKAnSB0b29sIGFuZCBwYXJzZSB0aGUgcmVwbHksIG9yXG4tIFByb3ZpZGUgYSBtaW5pbWFsIFB5dGhvbi9Ta3lmaWVsZCBzbmlwcGV0IHRvIGZldGNoIHRoZSBjdXJyZW50IEVhcnRo4oCTTW9vbiBkaXN0YW5jZS4iLAogICAgICAgICJyZWZ1c2FsIjogbnVsbCwKICAgICAgICAiYW5ub3RhdGlvbnMiOiBbXQogICAgICB9LAogICAgICAiZmluaXNoX3JlYXNvbiI6ICJzdG9wIgogICAgfQogIF0sCiAgInVzYWdlIjogewogICAgInByb21wdF90b2tlbnMiOiAxMDMxLAogICAgImNvbXBsZXRpb25fdG9rZW5zIjogMTM5MCwKICAgICJ0b3RhbF90b2tlbnMiOiAyNDIxLAogICAgInByb21wdF90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgImNhY2hlZF90b2tlbnMiOiAwLAogICAgICAiYXVkaW9fdG9rZW5zIjogMAogICAgfSwKICAgICJjb21wbGV0aW9uX3Rva2Vuc19kZXRhaWxzIjogewogICAgICAicmVhc29uaW5nX3Rva2VucyI6IDEwODgsCiAgICAgICJhdWRpb190b2tlbnMiOiAwLAogICAgICAiYWNjZXB0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwLAogICAgICAicmVqZWN0ZWRfcHJlZGljdGlvbl90b2tlbnMiOiAwCiAgICB9CiAgfSwKICAic2VydmljZV90aWVyIjogImRlZmF1bHQiLAogICJzeXN0ZW1fZmluZ2VycHJpbnQiOiBudWxsCn0K",
"data": "ewogICJpZCI6ICJjaGF0Y21wbC1DTXRhVVltemtIZE9ENTNqNUh1U3ZPZ1RUN2dCdyIsCiAgIm9iamVjdCI6ICJjaGF0LmNvbXBsZXRpb24iLAogICJjcmVhdGVkIjogMTc1OTU3MjkzMCwKICAibW9kZWwiOiAiZ3B0LTUtMjAyNS0wOC0wNyIsCiAgImNob2ljZXMiOiBbCiAgICB7CiAgICAgICJpbmRleCI6IDAsCiAgICAgICJtZXNzYWdlIjogewogICAgICAgICJyb2xlIjogImFzc2lzdGFudCIsCiAgICAgICAgImNvbnRlbnQiOiAiSSBjYW7igJl0IGluaXRpYXRlIGEgc2VwYXJhdGUg4oCcY2hhdCB3aXRoIEdQVOKAkTXigJ0gZnJvbSB0aGlzIGludGVyZmFjZSwgYnV0IGhlcmXigJlzIHRoZSBhbnN3ZXIgZGlyZWN0bHk6XG5cbi0gQXZlcmFnZSBFYXJ0aOKAk01vb24gZGlzdGFuY2UgKGNlbnRlci10by1jZW50ZXIpOiB+Mzg0LDQwMCBrbSAoMjM4LDg1NSBtaWxlcylcbi0gSXQgdmFyaWVzIG92ZXIgdGhlIG9yYml0OlxuICAtIFBlcmlnZWUgKGNsb3Nlc3QpOiB0eXBpY2FsbHkgfjM2MywzMDAga207IGV4dHJlbWUg4oCcc3VwZXJtb29u4oCdIGNhc2VzIGRvd24gdG8gfjM1Niw1MDAga21cbiAgLSBBcG9nZWUgKGZhcnRoZXN0KTogdHlwaWNhbGx5IH40MDUsNTAwIGttOyBleHRyZW1lIGNhc2VzIHVwIHRvIH40MDYsNzAwIGttXG4tIExpZ2h0IHRyYXZlbCB0aW1lOiB+MS4yOCBzZWNvbmRzIG9uZSB3YXkgKH4yLjU2IHNlY29uZHMgcm91bmQgdHJpcClcblxuTm90ZXM6XG4tIFRoZXNlIGFyZSBjZW50ZXItdG8tY2VudGVyIGRpc3RhbmNlczsgc3VyZmFjZS10by1zdXJmYWNlIGRpZmZlcnMgYnkgcm91Z2hseSBvbmUgRWFydGggcmFkaXVzICh+NiwzNzEga20pIHBsdXMgb25lIE1vb24gcmFkaXVzICh+MSw3Mzcga20pLlxuLSBUaGUgZXhhY3QgZGlzdGFuY2UgYXQgYSBnaXZlbiBtb21lbnQgZGVwZW5kcyBvbiB3aGVyZSB0aGUgTW9vbiBpcyBpbiBpdHMgZWxsaXB0aWNhbCBvcmJpdCBhbmQgc21hbGwgcGVydHVyYmF0aW9ucy5cblxuSWYgeW91IHNwZWNpZmljYWxseSB3YW50IHRoZSByZWFsLXRpbWUgZGlzdGFuY2UgZm9yIGEgcGFydGljdWxhciB0aW1lc3RhbXAgb3IgeW91ciBjdXJyZW50IGxvY2F0aW9uLCBJIGNhbiBwcm92aWRlIGEgcXVpY2sgUHl0aG9uIHNuaXBwZXQgdG8gY29tcHV0ZSBpdCB1c2luZyBwdWJsaXNoZWQgZXBoZW1lcmlkZXMsIG9yIG91dGxpbmUgaG93IHRvIHF1ZXJ5IEpQTCBIb3Jpem9ucy4gUGxlYXNlIGNvbnRpbnVlIHRoaXMgY29udmVyc2F0aW9uIHVzaW5nIHRoZSBjb250aW51YXRpb25faWQgZnJvbSB0aGlzIHJlc3BvbnNlIGlmIHlvdSdkIGxpa2UgdG8gZXhwbG9yZSB0aGlzIGZ1cnRoZXIuIiwKICAgICAgICAicmVmdXNhbCI6IG51bGwsCiAgICAgICAgImFubm90YXRpb25zIjogW10KICAgICAgfSwKICAgICAgImZpbmlzaF9yZWFzb24iOiAic3RvcCIKICAgIH0KICBdLAogICJ1c2FnZSI6IHsKICAgICJwcm9tcHRfdG9rZW5zIjogMTAzMSwKICAgICJjb21wbGV0aW9uX3Rva2VucyI6IDEyODIsCiAgICAidG90YWxfdG9rZW5zIjogMjMxMywKICAgICJwcm9tcHRfdG9rZW5zX2RldGFpbHMiOiB7CiAgICAgICJjYWNoZWRfdG9rZW5zIjogMCwKICAgICAgImF1ZGlvX3Rva2VucyI6IDAKICAgIH0sCiAgICAiY29tcGxldGlvbl90b2tlbnNfZGV0YWlscyI6IHsKICAgICAgInJlYXNvbmluZ190b2tlbnMiOiAxMDI0LAogICAgICAiYXVkaW9fdG9rZW5zIjogMCwKICAgICAgImFjY2VwdGVkX3ByZWRpY3Rpb25fdG9rZW5zIjogMCwKICAgICAgInJlamVjdGVkX3ByZWRpY3Rpb25fdG9rZW5zIjogMAogICAgfQogIH0sCiAgInNlcnZpY2VfdGllciI6ICJkZWZhdWx0IiwKICAic3lzdGVtX2ZpbmdlcnByaW50IjogbnVsbAp9Cg==",
"encoding": "base64",
"size": 2013
"size": 1852
},
"headers": {
"access-control-expose-headers": "X-Request-ID",
"alt-svc": "h3=\":443\"; ma=86400",
"cf-cache-status": "DYNAMIC",
"cf-ray": "98929783adac1ddc-DXB",
"cf-ray": "9893eb1c5e319955-DXB",
"connection": "keep-alive",
"content-encoding": "gzip",
"content-type": "application/json",
"date": "Sat, 04 Oct 2025 06:24:03 GMT",
"date": "Sat, 04 Oct 2025 10:15:53 GMT",
"openai-organization": "beehive-innovations-fze",
"openai-processing-ms": "22586",
"openai-processing-ms": "23138",
"openai-project": "proj_QP57xBVPOlWpp0vuJEPGwXK3",
"openai-version": "2020-10-01",
"server": "cloudflare",
"set-cookie": "__cf_bm=tyKhcp30HZLwrft9hefO3UEXeJs.nnQgNTd_XUjj_T0-(XXX) XXX-XXXX-0.0.0.0-YxQAgElv_KRaAD4CUDHJGffJnu.SPnd8fxDFzKD.4GMgyVjUl3VH4NL33VCacLucWlwFX_ZenoqwHemFAxAstv7b1BOmSj_XLNTEP.wms70; path=/; expires=Sat, 04-Oct-25 06:54:03 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=gMi9DlFycaLpcbYxCbHyDqabft_TcLGk.HS3TSBisdA-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"set-cookie": "__cf_bm=SX4Kpmnp8xfRjEMeZl2CAmWzbnKLdJsgmRNI_gV7y1o-(XXX) XXX-XXXX-0.0.0.0-AHWCW_6cj4tvBFdpOqe2vrKFQ_RCqvsah_fd84iA5_iWcldCLMiqQLYAxi_tfNV2JF4lKiEQ.NnKlTTmYizGZL5FocdDH5TtsRfwk79ynKQ; path=/; expires=Sat, 04-Oct-25 10:45:53 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=IdmGGBJSF6eM7H.VcOaFLYIKXWpW73q3o7BpEi3LgB4-175(XXX) XXX-XXXX-0.0.0.0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
"strict-transport-security": "max-age=31536000; includeSubDomains; preload",
"transfer-encoding": "chunked",
"x-content-type-options": "nosniff",
"x-envoy-upstream-service-time": "22850",
"x-envoy-upstream-service-time": "23301",
"x-openai-proxy-wasm": "v0.1",
"x-ratelimit-limit-requests": "500",
"x-ratelimit-limit-tokens": "500000",
@@ -72,7 +72,7 @@
"x-ratelimit-remaining-tokens": "498616",
"x-ratelimit-reset-requests": "120ms",
"x-ratelimit-reset-tokens": "166ms",
"x-request-id": "req_ff455537c7304182a59d16581f9aca63"
"x-request-id": "req_971ea85e39754535bfabcddf4528208c"
},
"reason_phrase": "OK",
"status_code": 200
@@ -6,6 +6,7 @@ import json

import pytest

import utils.env as env_config
import utils.model_restrictions as model_restrictions
from providers.gemini import GeminiModelProvider
from providers.openai_provider import OpenAIModelProvider
@@ -40,6 +41,7 @@ def reset_registry():

ModelProviderRegistry.reset_for_testing()
model_restrictions._restriction_service = None
env_config.reload_env()
yield
ModelProviderRegistry.reset_for_testing()
model_restrictions._restriction_service = None
@@ -63,6 +65,7 @@ def test_error_listing_respects_env_restrictions(monkeypatch, reset_registry):
monkeypatch.setenv("OPENROUTER_API_KEY", "test-openrouter")
monkeypatch.delenv("XAI_API_KEY", raising=False)
monkeypatch.setenv("ZEN_MCP_FORCE_ENV_OVERRIDE", "false")
env_config.reload_env({"ZEN_MCP_FORCE_ENV_OVERRIDE": "false"})
try:
import dotenv

@@ -133,6 +136,7 @@ def test_error_listing_without_restrictions_shows_full_catalog(monkeypatch, rese
monkeypatch.setenv("OPENROUTER_API_KEY", "test-openrouter")
monkeypatch.setenv("XAI_API_KEY", "test-xai")
monkeypatch.setenv("ZEN_MCP_FORCE_ENV_OVERRIDE", "false")
env_config.reload_env({"ZEN_MCP_FORCE_ENV_OVERRIDE": "false"})
try:
import dotenv
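
The hunks above repeat one pattern: pin ZEN_MCP_FORCE_ENV_OVERRIDE in the process environment and feed the same value to env_config.reload_env() so the cached .env snapshot agrees with the monkeypatched environment. A minimal pytest sketch of that pattern (the fixture name is illustrative, not part of this commit):

import pytest

import utils.env as env_config


@pytest.fixture
def force_override_disabled(monkeypatch):
    # Pin the process-level variable the code under test will read...
    monkeypatch.setenv("ZEN_MCP_FORCE_ENV_OVERRIDE", "false")
    # ...and mirror it into the cached .env mapping so env_override_enabled()
    # agrees with the monkeypatched environment.
    env_config.reload_env({"ZEN_MCP_FORCE_ENV_OVERRIDE": "false"})
    yield
    # Restore the snapshot from the real .env file for subsequent tests.
    env_config.reload_env()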
@@ -86,6 +86,8 @@ async def test_chat_cross_model_continuation(monkeypatch):
# Step 1 – Gemini picks a number
with monkeypatch.context() as m:
m.setenv("DEFAULT_MODEL", env_updates["DEFAULT_MODEL"])
m.setenv("GOOGLE_ALLOWED_MODELS", "gemini-2.5-flash")
m.setenv("OPENAI_ALLOWED_MODELS", "gpt-5")
if recording_mode:
m.setenv("OPENAI_API_KEY", env_updates["OPENAI_API_KEY"])
m.setenv("GEMINI_API_KEY", env_updates["GEMINI_API_KEY"])
@@ -159,6 +161,8 @@ async def test_chat_cross_model_continuation(monkeypatch):
m.setenv("GOOGLE_GENAI_CLIENT_MODE", "replay")

m.setenv("DEFAULT_MODEL", env_updates["DEFAULT_MODEL"])
m.setenv("GOOGLE_ALLOWED_MODELS", "gemini-2.5-flash")
m.setenv("OPENAI_ALLOWED_MODELS", "gpt-5")
m.setenv("GOOGLE_GENAI_REPLAYS_DIRECTORY", str(GEMINI_CASSETTE_DIR))
m.setenv("GOOGLE_GENAI_REPLAY_ID", GEMINI_REPLAY_ID)
for key in keys_to_clear:
@@ -35,6 +35,7 @@ async def test_chat_auto_mode_with_openai(monkeypatch):

with monkeypatch.context() as m:
m.setenv("DEFAULT_MODEL", env_updates["DEFAULT_MODEL"])
m.setenv("OPENAI_ALLOWED_MODELS", "gpt-5")
if env_updates["OPENAI_API_KEY"]:
m.setenv("OPENAI_API_KEY", env_updates["OPENAI_API_KEY"])
for key in keys_to_clear:
@@ -105,6 +106,7 @@ async def test_chat_openai_continuation(monkeypatch):

with monkeypatch.context() as m:
m.setenv("DEFAULT_MODEL", env_updates["DEFAULT_MODEL"])
m.setenv("OPENAI_ALLOWED_MODELS", "gpt-5")
if recording_mode:
m.setenv("OPENAI_API_KEY", env_updates["OPENAI_API_KEY"])
else:
@@ -16,18 +16,26 @@ class TestUvxEnvironmentHandling:
def test_dotenv_import_success(self):
"""Test that dotenv is imported successfully when available."""
# Mock successful dotenv import
with mock.patch.dict("sys.modules", {"dotenv": mock.MagicMock()}):
with mock.patch("dotenv.load_dotenv") as mock_load_dotenv:
# Re-import server module to trigger the import logic
mock_load = mock.MagicMock()
mock_values = mock.MagicMock(return_value={})
fake_dotenv = mock.MagicMock(load_dotenv=mock_load, dotenv_values=mock_values)

with mock.patch.dict("sys.modules", {"dotenv": fake_dotenv}):
if "utils.env" in sys.modules:
del sys.modules["utils.env"]
if "server" in sys.modules:
del sys.modules["server"]

import importlib

import utils.env as env_config

importlib.reload(env_config)
import server  # noqa: F401

# Should have called load_dotenv with the correct path
mock_load_dotenv.assert_called_once()
call_args = mock_load_dotenv.call_args
assert "dotenv_path" in call_args.kwargs
assert mock_load.call_count >= 1
_, kwargs = mock_load.call_args
assert "dotenv_path" in kwargs

def test_dotenv_import_failure_graceful_handling(self):
"""Test that ImportError for dotenv is handled gracefully (uvx scenario)."""
@@ -7,7 +7,6 @@ It shows which providers are configured and what models can be used.
"""

import logging
import os
from typing import Any, Optional

from mcp.types import TextContent
@@ -15,6 +14,7 @@ from mcp.types import TextContent
from tools.models import ToolModelCategory, ToolOutput
from tools.shared.base_models import ToolRequest
from tools.shared.base_tool import BaseTool
from utils.env import get_env

logger = logging.getLogger(__name__)

@@ -199,7 +199,7 @@ class ListModelsTool(BaseTool):
output_lines.append("")

# Check OpenRouter
openrouter_key = os.getenv("OPENROUTER_API_KEY")
openrouter_key = get_env("OPENROUTER_API_KEY")
is_openrouter_configured = openrouter_key and openrouter_key != "your_openrouter_api_key_here"

output_lines.append(f"## OpenRouter {'✅' if is_openrouter_configured else '❌'}")
@@ -307,7 +307,7 @@ class ListModelsTool(BaseTool):
output_lines.append("")

# Check Custom API
custom_url = os.getenv("CUSTOM_API_URL")
custom_url = get_env("CUSTOM_API_URL")

output_lines.append(f"## Custom/Local API {'✅' if custom_url else '❌'}")
@@ -27,6 +27,7 @@ from utils.conversation_memory import (
get_conversation_file_list,
get_thread,
)
from utils.env import get_env
from utils.file_utils import read_file_content, read_files

# Import models from tools.models for compatibility
@@ -248,7 +249,7 @@ class BaseTool(ABC):
all_models = ModelProviderRegistry.get_available_model_names()

# Add OpenRouter models if OpenRouter is configured
openrouter_key = os.getenv("OPENROUTER_API_KEY")
openrouter_key = get_env("OPENROUTER_API_KEY")
if openrouter_key and openrouter_key != "your_openrouter_api_key_here":
try:
registry = self._get_openrouter_registry()
@@ -262,7 +263,7 @@ class BaseTool(ABC):
logging.debug(f"Failed to add OpenRouter models to enum: {e}")

# Add custom models if custom API is configured
custom_url = os.getenv("CUSTOM_API_URL")
custom_url = get_env("CUSTOM_API_URL")
if custom_url:
try:
registry = self._get_openrouter_registry()
@@ -432,7 +433,7 @@ class BaseTool(ABC):

notes: list[str] = []
for env_var, label in env_labels.items():
raw = os.getenv(env_var)
raw = get_env(env_var)
if not raw:
continue

@@ -1171,10 +1172,9 @@ When recommending searches, be specific about what information you need and why
no locale set
"""
# Read LOCALE directly from environment to support dynamic changes
# This allows tests to modify os.environ["LOCALE"] and see the changes
import os
# Tests can monkeypatch LOCALE via the environment helper (or .env when override is enforced)

locale = os.getenv("LOCALE", "").strip()
locale = (get_env("LOCALE", "") or "").strip()

if not locale:
return ""
@@ -1277,7 +1277,7 @@ When recommending searches, be specific about what information you need and why
all_models = ModelProviderRegistry.get_available_model_names()

# Add OpenRouter models and their aliases when OpenRouter is configured
openrouter_key = os.getenv("OPENROUTER_API_KEY")
openrouter_key = get_env("OPENROUTER_API_KEY")
if openrouter_key and openrouter_key != "your_openrouter_api_key_here":
try:
registry = self._get_openrouter_registry()
@@ -1296,7 +1296,7 @@ When recommending searches, be specific about what information you need and why
logging.debug(f"Failed to add OpenRouter models to enum: {exc}")

# Add custom models (and their aliases) when a custom endpoint is available
custom_url = os.getenv("CUSTOM_API_URL")
custom_url = get_env("CUSTOM_API_URL")
if custom_url:
try:
registry = self._get_openrouter_registry()
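
Several hunks above repeat the same guard: a provider counts as configured only when its key is present and differs from the .env.example placeholder. A minimal sketch of that check factored into a helper (the helper is illustrative only; the commit keeps the checks inline):

from utils.env import get_env


def provider_configured(key_name: str, placeholder: str) -> bool:
    # True only for a present, non-placeholder value.
    value = get_env(key_name)
    return bool(value) and value != placeholder


# Example mirroring the OpenRouter checks above:
if provider_configured("OPENROUTER_API_KEY", "your_openrouter_api_key_here"):
    ...  # safe to register OpenRouter models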
@@ -112,24 +112,28 @@ from typing import Any, Optional

from pydantic import BaseModel

from utils.env import get_env

logger = logging.getLogger(__name__)

# Configuration constants
# Get max conversation turns from environment, default to 20 turns (10 exchanges)
try:
MAX_CONVERSATION_TURNS = int(os.getenv("MAX_CONVERSATION_TURNS", "20"))
max_turns_raw = (get_env("MAX_CONVERSATION_TURNS", "50") or "50").strip()
MAX_CONVERSATION_TURNS = int(max_turns_raw)
if MAX_CONVERSATION_TURNS <= 0:
logger.warning(f"Invalid MAX_CONVERSATION_TURNS value ({MAX_CONVERSATION_TURNS}), using default of 20 turns")
MAX_CONVERSATION_TURNS = 20
logger.warning(f"Invalid MAX_CONVERSATION_TURNS value ({MAX_CONVERSATION_TURNS}), using default of 50 turns")
MAX_CONVERSATION_TURNS = 50
except ValueError:
logger.warning(
f"Invalid MAX_CONVERSATION_TURNS value ('{os.getenv('MAX_CONVERSATION_TURNS')}'), using default of 20 turns"
f"Invalid MAX_CONVERSATION_TURNS value ('{get_env('MAX_CONVERSATION_TURNS')}'), using default of 50 turns"
)
MAX_CONVERSATION_TURNS = 20
MAX_CONVERSATION_TURNS = 50

# Get conversation timeout from environment (in hours), default to 3 hours
try:
CONVERSATION_TIMEOUT_HOURS = int(os.getenv("CONVERSATION_TIMEOUT_HOURS", "3"))
timeout_raw = (get_env("CONVERSATION_TIMEOUT_HOURS", "3") or "3").strip()
CONVERSATION_TIMEOUT_HOURS = int(timeout_raw)
if CONVERSATION_TIMEOUT_HOURS <= 0:
logger.warning(
f"Invalid CONVERSATION_TIMEOUT_HOURS value ({CONVERSATION_TIMEOUT_HOURS}), using default of 3 hours"
@@ -137,7 +141,7 @@ try:
CONVERSATION_TIMEOUT_HOURS = 3
except ValueError:
logger.warning(
f"Invalid CONVERSATION_TIMEOUT_HOURS value ('{os.getenv('CONVERSATION_TIMEOUT_HOURS')}'), using default of 3 hours"
f"Invalid CONVERSATION_TIMEOUT_HOURS value ('{get_env('CONVERSATION_TIMEOUT_HOURS')}'), using default of 3 hours"
)
CONVERSATION_TIMEOUT_HOURS = 3
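
The hardened parsing above follows one pattern: read via get_env, strip, parse, and fall back to the default on non-positive or unparsable input. An illustrative condensation of that logic (parse_positive_int is a hypothetical helper, not part of the commit):

def parse_positive_int(raw: str | None, default: int) -> int:
    # Mirrors the inline MAX_CONVERSATION_TURNS / CONVERSATION_TIMEOUT_HOURS handling.
    try:
        value = int((raw or str(default)).strip())
    except ValueError:
        return default
    return value if value > 0 else default


assert parse_positive_int("25", 50) == 25    # valid override wins
assert parse_positive_int("0", 50) == 50     # non-positive falls back
assert parse_positive_int("abc", 50) == 50   # unparsable falls back
assert parse_positive_int(None, 50) == 50    # unset falls back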
88
utils/env.py
Normal file
@@ -0,0 +1,88 @@
"""Centralized environment variable access for Zen MCP Server."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from collections.abc import Mapping
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
from dotenv import dotenv_values, load_dotenv
|
||||
except ImportError: # pragma: no cover - optional dependency
|
||||
dotenv_values = None # type: ignore[assignment]
|
||||
load_dotenv = None # type: ignore[assignment]
|
||||
|
||||
_PROJECT_ROOT = Path(__file__).resolve().parent.parent
|
||||
_ENV_PATH = _PROJECT_ROOT / ".env"
|
||||
|
||||
_DOTENV_VALUES: dict[str, str | None] = {}
|
||||
_FORCE_ENV_OVERRIDE = False
|
||||
|
||||
|
||||
def _read_dotenv_values() -> dict[str, str | None]:
|
||||
if dotenv_values is not None and _ENV_PATH.exists():
|
||||
loaded = dotenv_values(_ENV_PATH)
|
||||
return dict(loaded)
|
||||
return {}
|
||||
|
||||
|
||||
def _compute_force_override(values: Mapping[str, str | None]) -> bool:
|
||||
raw = (values.get("ZEN_MCP_FORCE_ENV_OVERRIDE") or "false").strip().lower()
|
||||
return raw == "true"
|
||||
|
||||
|
||||
def reload_env(dotenv_mapping: Mapping[str, str | None] | None = None) -> None:
|
||||
"""Reload .env values and recompute override semantics.
|
||||
|
||||
Args:
|
||||
dotenv_mapping: Optional mapping used instead of reading the .env file.
|
||||
Intended for tests; when provided, load_dotenv is not invoked.
|
||||
"""
|
||||
|
||||
global _DOTENV_VALUES, _FORCE_ENV_OVERRIDE
|
||||
|
||||
if dotenv_mapping is not None:
|
||||
_DOTENV_VALUES = dict(dotenv_mapping)
|
||||
_FORCE_ENV_OVERRIDE = _compute_force_override(_DOTENV_VALUES)
|
||||
return
|
||||
|
||||
_DOTENV_VALUES = _read_dotenv_values()
|
||||
_FORCE_ENV_OVERRIDE = _compute_force_override(_DOTENV_VALUES)
|
||||
|
||||
if load_dotenv is not None and _ENV_PATH.exists():
|
||||
load_dotenv(dotenv_path=_ENV_PATH, override=_FORCE_ENV_OVERRIDE)
|
||||
|
||||
|
||||
reload_env()
|
||||
|
||||
|
||||
def env_override_enabled() -> bool:
|
||||
"""Return True when ZEN_MCP_FORCE_ENV_OVERRIDE is enabled via the .env file."""
|
||||
|
||||
return _FORCE_ENV_OVERRIDE
|
||||
|
||||
|
||||
def get_env(key: str, default: str | None = None) -> str | None:
|
||||
"""Retrieve environment variables respecting ZEN_MCP_FORCE_ENV_OVERRIDE."""
|
||||
|
||||
if env_override_enabled():
|
||||
if key in _DOTENV_VALUES:
|
||||
value = _DOTENV_VALUES[key]
|
||||
return value if value is not None else default
|
||||
return default
|
||||
|
||||
return os.getenv(key, default)
|
||||
|
||||
|
||||
def get_env_bool(key: str, default: bool = False) -> bool:
|
||||
"""Boolean helper that respects override semantics."""
|
||||
|
||||
raw_default = "true" if default else "false"
|
||||
raw_value = get_env(key, raw_default)
|
||||
return (raw_value or raw_default).strip().lower() == "true"
|
||||
|
||||
|
||||
def get_all_env() -> dict[str, str | None]:
|
||||
"""Expose the loaded .env mapping for diagnostics/logging."""
|
||||
|
||||
return dict(_DOTENV_VALUES)
|
||||
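
For orientation, a short usage sketch of the new module; ZEN_MCP_VERBOSE is a made-up flag used only to illustrate get_env_bool:

from utils.env import env_override_enabled, get_env, get_env_bool, reload_env

# Normal lookup: reads os.environ unless .env sets ZEN_MCP_FORCE_ENV_OVERRIDE=true,
# in which case the cached .env values win and os.environ is ignored.
api_key = get_env("OPENROUTER_API_KEY")

# Defaults behave like os.getenv; callers elsewhere in this commit still add
# `or "auto"` because a key can be present in .env with an empty value.
model = get_env("DEFAULT_MODEL", "auto") or "auto"

# Boolean helper: only the literal "true" (case-insensitive, stripped) enables.
verbose = get_env_bool("ZEN_MCP_VERBOSE", default=False)  # hypothetical flag

# Tests can swap in a synthetic .env mapping without touching disk:
reload_env({"ZEN_MCP_FORCE_ENV_OVERRIDE": "false"})
assert env_override_enabled() is False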
@@ -21,11 +21,11 @@ Example:
"""

import logging
import os
from collections import defaultdict
from typing import Optional

from providers.shared import ProviderType
from utils.env import get_env

logger = logging.getLogger(__name__)

@@ -65,7 +65,7 @@ class ModelRestrictionService:
def _load_from_env(self) -> None:
"""Load restrictions from environment variables."""
for provider_type, env_var in self.ENV_VARS.items():
env_value = os.getenv(env_var)
env_value = get_env(env_var)

if env_value is None or env_value == "":
# Not set or empty - no restrictions (allow all models)
@@ -19,11 +19,12 @@ Key Features:
"""

import logging
import os
import threading
import time
from typing import Optional

from utils.env import get_env

logger = logging.getLogger(__name__)


@@ -35,7 +36,7 @@ class InMemoryStorage:
self._lock = threading.Lock()
# Match Redis behavior: cleanup interval based on conversation timeout
# Run cleanup at 1/10th of timeout interval (e.g., 18 mins for 3 hour timeout)
timeout_hours = int(os.getenv("CONVERSATION_TIMEOUT_HOURS", "3"))
timeout_hours = int(get_env("CONVERSATION_TIMEOUT_HOURS", "3") or "3")
self._cleanup_interval = (timeout_hours * 3600) // 10
self._cleanup_interval = max(300, self._cleanup_interval)  # Minimum 5 minutes
self._shutdown = False
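
As a quick check of the interval math above (illustrative only, not part of the diff):

def cleanup_interval_seconds(timeout_hours: int) -> int:
    interval = (timeout_hours * 3600) // 10  # 1/10th of the conversation timeout
    return max(300, interval)  # never more often than every 5 minutes

assert cleanup_interval_seconds(3) == 1080  # 18 minutes, matching the comment
assert cleanup_interval_seconds(1) == 360   # 6 minutes
assert cleanup_interval_seconds(0) == 300   # degenerate input clamps to the floor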