Migration from Docker to Standalone Python Server (#73)

* Migration from Docker to standalone server
  - Migration handling
  - Fixed tests
  - Use simpler in-memory storage
  - Support for concurrent logging to disk
  - Simplified direct connections to localhost

* Migration from Docker/Redis to standalone script
  - Updated tests
  - Updated run script
  - Fixed requirements
  - Use dotenv for environment configuration (see the sketch below)
  - Ask once whether the user would like to install the MCP server in Claude Desktop
  - Updated docs
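To make the "Use dotenv" item concrete, here is a minimal sketch of loading configuration from a `.env` file, assuming the `python-dotenv` package. `CUSTOM_API_URL` appears elsewhere in this PR; `GEMINI_API_KEY` is an assumed example name, not confirmed by this diff.

```python
# Minimal sketch of dotenv-based configuration (assumes python-dotenv).
# GEMINI_API_KEY is an assumed example; CUSTOM_API_URL appears in this PR.
import os

from dotenv import load_dotenv

# Copy key=value pairs from .env into os.environ (real env vars take precedence).
load_dotenv()

gemini_api_key = os.getenv("GEMINI_API_KEY")
custom_api_url = os.getenv("CUSTOM_API_URL", "http://localhost:11434/v1")
```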

* More cleanup; removed remaining references to Docker

* Cleanup

* Comments

* Fixed tests

* Fix GitHub Actions workflow for standalone Python architecture

- Install requirements-dev.txt for pytest and testing dependencies
- Remove Docker setup from simulation tests (now standalone)
- Simplify linting job to use requirements-dev.txt
- Update simulation tests to run directly without Docker

Fixes unit test failures in CI due to missing pytest dependency.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* Remove simulation tests from GitHub Actions

- Remove simulation-tests job that makes real API calls
- Keep only unit tests (mocked, no API costs) and linting
- Simulation tests should be run manually with real API keys
- Reduces CI costs and complexity

GitHub Actions now only runs:
- Unit tests (569 tests, all mocked)
- Code quality checks (ruff, black)

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* Fixed tests

* Fixed tests

---------

Co-authored-by: Claude <noreply@anthropic.com>
Author: Beehive Innovations
Date: 2025-06-18 23:41:22 +04:00
Committed by: GitHub
Parent: 9d72545ecd
Commit: 4151c3c3a5
121 files changed, 2842 insertions(+), 3168 deletions(-)

View File

@@ -2,8 +2,8 @@
 from .base import ModelCapabilities, ModelProvider, ModelResponse
 from .gemini import GeminiModelProvider
-from .openai import OpenAIModelProvider
 from .openai_compatible import OpenAICompatibleProvider
+from .openai_provider import OpenAIModelProvider
 from .openrouter import OpenRouterProvider
 from .registry import ModelProviderRegistry

View File

@@ -40,7 +40,7 @@ class CustomProvider(OpenAICompatibleProvider):
         api_key: API key for the custom endpoint. Can be empty string for
             providers that don't require authentication (like Ollama).
             Falls back to CUSTOM_API_KEY environment variable if not provided.
-        base_url: Base URL for the custom API endpoint (e.g., 'http://host.docker.internal:11434/v1').
+        base_url: Base URL for the custom API endpoint (e.g., 'http://localhost:11434/v1').
             Falls back to CUSTOM_API_URL environment variable if not provided.
         **kwargs: Additional configuration passed to parent OpenAI-compatible provider
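For illustration, a hedged usage sketch of the updated default: pointing the provider at a local Ollama endpoint. The import path and constructor signature are inferred from the docstring above, so treat them as assumptions.

```python
# Hypothetical usage sketch; import path and signature are inferred
# from the docstring above, not confirmed by this diff.
from providers.custom import CustomProvider

# Ollama exposes an OpenAI-compatible API on localhost and needs no key,
# so an empty api_key is allowed here.
provider = CustomProvider(
    api_key="",
    base_url="http://localhost:11434/v1",  # previously host.docker.internal
)
```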

View File

@@ -453,20 +453,13 @@ class GeminiModelProvider(ModelProvider):
             mime_type = header.split(";")[0].split(":")[1]
             return {"inline_data": {"mime_type": mime_type, "data": data}}
         else:
-            # Handle file path - translate for Docker environment
+            # Handle file path
             from utils.file_types import get_image_mime_type
-            from utils.file_utils import translate_path_for_environment
-            translated_path = translate_path_for_environment(image_path)
-            logger.debug(f"Translated image path from '{image_path}' to '{translated_path}'")
-            if not os.path.exists(translated_path):
-                logger.warning(f"Image file not found: {translated_path} (original: {image_path})")
+            if not os.path.exists(image_path):
+                logger.warning(f"Image file not found: {image_path}")
                 return None
-            # Use translated path for all subsequent operations
-            image_path = translated_path
             # Detect MIME type from file extension using centralized mappings
             ext = os.path.splitext(image_path)[1].lower()
             mime_type = get_image_mime_type(ext)
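Condensed, the simplified file-path branch above amounts to: check existence, derive the MIME type from the extension, and base64-encode the bytes. A sketch under those assumptions (`get_image_mime_type` comes from the diff; the rest, including the function name, is illustrative):

```python
# Illustrative condensation of the simplified image handling above.
# get_image_mime_type comes from the diff; the function itself is a sketch.
import base64
import logging
import os

from utils.file_types import get_image_mime_type

logger = logging.getLogger(__name__)


def image_to_inline_data(image_path: str):
    """Read a local image into Gemini's inline_data dict, or return None."""
    if not os.path.exists(image_path):
        logger.warning(f"Image file not found: {image_path}")
        return None
    ext = os.path.splitext(image_path)[1].lower()
    mime_type = get_image_mime_type(ext)
    with open(image_path, "rb") as f:
        data = base64.b64encode(f.read()).decode("utf-8")
    return {"inline_data": {"mime_type": mime_type, "data": data}}
```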

View File

@@ -151,10 +151,6 @@ class OpenAICompatibleProvider(ModelProvider):
         if hostname in ["localhost", "127.0.0.1", "::1"]:
             return True
-        # Check for Docker internal hostnames (like host.docker.internal)
-        if hostname and ("docker.internal" in hostname or "host.docker.internal" in hostname):
-            return True
-
         # Check for private network ranges (local network)
         if hostname:
             try:
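With the Docker hostnames gone, the surviving check reduces to literal localhost names plus private-range IPs. A standard-library sketch of that shape (illustrative, not the project's exact function):

```python
# Illustrative sketch of a localhost/private-network check using only the
# standard library; the project's actual helper may differ in detail.
import ipaddress
from urllib.parse import urlparse


def is_local_url(url: str) -> bool:
    hostname = urlparse(url).hostname
    if not hostname:
        return False
    if hostname in ("localhost", "127.0.0.1", "::1"):
        return True
    try:
        # Private ranges such as 192.168.0.0/16 or 10.0.0.0/8 count as local.
        return ipaddress.ip_address(hostname).is_private
    except ValueError:
        return False  # a DNS name rather than an IP literal
```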
@@ -201,26 +197,70 @@ class OpenAICompatibleProvider(ModelProvider):
     def client(self):
         """Lazy initialization of OpenAI client with security checks and timeout configuration."""
         if self._client is None:
-            client_kwargs = {
-                "api_key": self.api_key,
-            }
+            import os

-            if self.base_url:
-                client_kwargs["base_url"] = self.base_url
+            import httpx

-            if self.organization:
-                client_kwargs["organization"] = self.organization
+            # Temporarily disable proxy environment variables to prevent httpx from detecting them
+            original_env = {}
+            proxy_env_vars = ["HTTP_PROXY", "HTTPS_PROXY", "ALL_PROXY", "http_proxy", "https_proxy", "all_proxy"]

-            # Add default headers if any
-            if self.DEFAULT_HEADERS:
-                client_kwargs["default_headers"] = self.DEFAULT_HEADERS.copy()
+            for var in proxy_env_vars:
+                if var in os.environ:
+                    original_env[var] = os.environ[var]
+                    del os.environ[var]

-            # Add configured timeout settings
-            if hasattr(self, "timeout_config") and self.timeout_config:
-                client_kwargs["timeout"] = self.timeout_config
-                logging.debug(f"OpenAI client initialized with custom timeout: {self.timeout_config}")
+            try:
+                # Create a custom httpx client that explicitly avoids proxy parameters
+                timeout_config = (
+                    self.timeout_config
+                    if hasattr(self, "timeout_config") and self.timeout_config
+                    else httpx.Timeout(30.0)
+                )

-            self._client = OpenAI(**client_kwargs)
+                # Create httpx client with minimal config to avoid proxy conflicts
+                # Note: proxies parameter was removed in httpx 0.28.0
+                http_client = httpx.Client(
+                    timeout=timeout_config,
+                    follow_redirects=True,
+                )
+
+                # Keep client initialization minimal to avoid proxy parameter conflicts
+                client_kwargs = {
+                    "api_key": self.api_key,
+                    "http_client": http_client,
+                }
+
+                if self.base_url:
+                    client_kwargs["base_url"] = self.base_url
+
+                if self.organization:
+                    client_kwargs["organization"] = self.organization
+
+                # Add default headers if any
+                if self.DEFAULT_HEADERS:
+                    client_kwargs["default_headers"] = self.DEFAULT_HEADERS.copy()
+
+                logging.debug(f"OpenAI client initialized with custom httpx client and timeout: {timeout_config}")
+
+                # Create OpenAI client with custom httpx client
+                self._client = OpenAI(**client_kwargs)
+            except Exception as e:
+                # If all else fails, try absolute minimal client without custom httpx
+                logging.warning(f"Failed to create client with custom httpx, falling back to minimal config: {e}")
+                try:
+                    minimal_kwargs = {"api_key": self.api_key}
+                    if self.base_url:
+                        minimal_kwargs["base_url"] = self.base_url
+                    self._client = OpenAI(**minimal_kwargs)
+                except Exception as fallback_error:
+                    logging.error(f"Even minimal OpenAI client creation failed: {fallback_error}")
+                    raise
+            finally:
+                # Restore original proxy environment variables
+                for var, value in original_env.items():
+                    os.environ[var] = value

         return self._client
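The save/delete/restore dance around the proxy variables above is self-contained enough to factor into a context manager. A minimal sketch of that pattern, standard library only (the helper name is hypothetical):

```python
# Hypothetical refactoring of the proxy-suppression pattern shown above
# into a reusable context manager; standard library only.
import os
from contextlib import contextmanager

PROXY_ENV_VARS = ["HTTP_PROXY", "HTTPS_PROXY", "ALL_PROXY", "http_proxy", "https_proxy", "all_proxy"]


@contextmanager
def proxy_env_suppressed():
    """Temporarily drop proxy variables so httpx cannot pick them up."""
    saved = {var: os.environ.pop(var) for var in PROXY_ENV_VARS if var in os.environ}
    try:
        yield
    finally:
        # Restore the variables even if client construction raised.
        os.environ.update(saved)
```

Client construction would then sit inside `with proxy_env_suppressed(): ...`, keeping the restore logic in one place.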
@@ -480,7 +520,7 @@ class OpenAICompatibleProvider(ModelProvider):
                 # Log retry attempt
                 logging.warning(
-                    f"{self.FRIENDLY_NAME} API error for model {model_name}, attempt {attempt + 1}/{max_retries}: {str(e)}. Retrying in {delay}s..."
+                    f"{self.FRIENDLY_NAME} error for model {model_name}, attempt {attempt + 1}/{max_retries}: {str(e)}. Retrying in {delay}s..."
                 )
                 time.sleep(delay)
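For context, the loop that emits this message follows a common bounded-retry shape; a generic sketch, not the project's exact code (the real loop's delay schedule and error classification are not visible in this hunk):

```python
# Generic bounded-retry sketch; delay schedule and error handling in the
# real provider code are not shown in this diff.
import logging
import time


def call_with_retries(fn, max_retries: int = 4, delay: float = 1.0):
    for attempt in range(max_retries):
        try:
            return fn()
        except Exception as e:
            if attempt == max_retries - 1:
                raise  # out of attempts; surface the last error
            logging.warning(
                f"error, attempt {attempt + 1}/{max_retries}: {e}. Retrying in {delay}s..."
            )
            time.sleep(delay)
```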
@@ -738,19 +778,11 @@ class OpenAICompatibleProvider(ModelProvider):
             # Handle data URL: data:image/png;base64,iVBORw0...
             return {"type": "image_url", "image_url": {"url": image_path}}
         else:
-            # Handle file path - translate for Docker environment
-            from utils.file_utils import translate_path_for_environment
-            translated_path = translate_path_for_environment(image_path)
-            logging.debug(f"Translated image path from '{image_path}' to '{translated_path}'")
-            if not os.path.exists(translated_path):
-                logging.warning(f"Image file not found: {translated_path} (original: {image_path})")
+            # Handle file path
+            if not os.path.exists(image_path):
+                logging.warning(f"Image file not found: {image_path}")
                 return None
-            # Use translated path for all subsequent operations
-            image_path = translated_path
             # Detect MIME type from file extension using centralized mappings
             from utils.file_types import get_image_mime_type
View File

@@ -6,7 +6,7 @@ from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Optional
-from utils.file_utils import read_json_file, translate_path_for_environment
+from utils.file_utils import read_json_file
 from .base import ModelCapabilities, ProviderType, RangeTemperatureConstraint
@@ -59,19 +59,17 @@ class OpenRouterModelRegistry:
         # Determine config path
         if config_path:
-            # Direct config_path parameter - translate for Docker if needed
-            translated_path = translate_path_for_environment(config_path)
-            self.config_path = Path(translated_path)
+            # Direct config_path parameter
+            self.config_path = Path(config_path)
         else:
             # Check environment variable first
             env_path = os.getenv("CUSTOM_MODELS_CONFIG_PATH")
             if env_path:
-                # Environment variable path - translate for Docker if needed
-                translated_path = translate_path_for_environment(env_path)
-                self.config_path = Path(translated_path)
+                # Environment variable path
+                self.config_path = Path(env_path)
             else:
                 # Default to conf/custom_models.json - use relative path from this file
-                # This works both in development and container environments
+                # This works in development environment
                 self.config_path = Path(__file__).parent.parent / "conf" / "custom_models.json"

         # Load configuration
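After this change the lookup order is simply: explicit `config_path` argument, then the `CUSTOM_MODELS_CONFIG_PATH` environment variable, then the bundled `conf/custom_models.json` relative to the module. A condensed sketch of that logic (the standalone helper function is hypothetical; the registry does this inline):

```python
# Hypothetical condensation of the path resolution shown above; the
# registry performs this inline rather than via a helper function.
import os
from pathlib import Path


def resolve_config_path(config_path=None) -> Path:
    if config_path:
        return Path(config_path)  # 1. explicit argument wins
    env_path = os.getenv("CUSTOM_MODELS_CONFIG_PATH")
    if env_path:
        return Path(env_path)  # 2. environment variable
    # 3. default: conf/custom_models.json next to the package
    return Path(__file__).parent.parent / "conf" / "custom_models.json"
```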