Merge pull request #131 from GiGiDKR/feat-local_support_with_UTF-8_encoding-update

feat: locale support with UTF-8 encoding
This commit is contained in:
Beehive Innovations
2025-06-27 08:02:14 -07:00
committed by GitHub
25 changed files with 1845 additions and 75 deletions

View File

@@ -524,7 +524,7 @@ of the evidence, even when it strongly points in one direction.""",
"provider_used": provider.get_provider_type().value,
}
return [TextContent(type="text", text=json.dumps(response_data, indent=2))]
return [TextContent(type="text", text=json.dumps(response_data, indent=2, ensure_ascii=False))]
# Otherwise, use standard workflow execution
return await super().execute_workflow(arguments)

View File

@@ -1084,6 +1084,26 @@ Consider requesting searches for:
When recommending searches, be specific about what information you need and why it would improve your analysis. Always remember to instruct Claude to use the continuation_id from this response when providing search results."""
def get_language_instruction(self) -> str:
    """Build a language directive from the LOCALE environment variable.

    Returns:
        str: An instruction to prepend to the prompt telling the model which
        language to respond in, or an empty string when LOCALE is unset or
        blank.
    """
    # Import here and read the variable on every call so that runtime (and
    # test) changes to os.environ["LOCALE"] are picked up dynamically.
    import os

    configured = os.getenv("LOCALE", "").strip()
    if configured:
        # Minimal directive; the locale value is interpolated verbatim.
        return f"Always respond in {configured}.\n\n"
    return ""
# === ABSTRACT METHODS FOR SIMPLE TOOLS ===
@abstractmethod

View File

@@ -387,24 +387,23 @@ class SimpleTool(BaseTool):
follow_up_instructions = get_follow_up_instructions(0)
prompt = f"{prompt}\n\n{follow_up_instructions}"
logger.debug(f"Added follow-up instructions for new {self.get_name()} conversation")
# Validate images if any were provided
logger.debug(
f"Added follow-up instructions for new {self.get_name()} conversation"
) # Validate images if any were provided
if images:
image_validation_error = self._validate_image_limits(
images, model_context=self._model_context, continuation_id=continuation_id
)
if image_validation_error:
return [TextContent(type="text", text=json.dumps(image_validation_error))]
return [TextContent(type="text", text=json.dumps(image_validation_error, ensure_ascii=False))]
# Get and validate temperature against model constraints
temperature, temp_warnings = self.get_validated_temperature(request, self._model_context)
# Log any temperature corrections
for warning in temp_warnings:
# Get thinking mode with defaults
logger.warning(warning)
# Get thinking mode with defaults
thinking_mode = self.get_request_thinking_mode(request)
if thinking_mode is None:
thinking_mode = self.get_default_thinking_mode()
@@ -413,7 +412,9 @@ class SimpleTool(BaseTool):
provider = self._model_context.provider
# Get system prompt for this tool
system_prompt = self.get_system_prompt()
base_system_prompt = self.get_system_prompt()
language_instruction = self.get_language_instruction()
system_prompt = language_instruction + base_system_prompt
# Generate AI response using the provider
logger.info(f"Sending request to {provider.get_provider_type().value} API for {self.get_name()}")

View File

@@ -715,7 +715,7 @@ class BaseWorkflowMixin(ABC):
if continuation_id:
self.store_conversation_turn(continuation_id, response_data, request)
return [TextContent(type="text", text=json.dumps(response_data, indent=2))]
return [TextContent(type="text", text=json.dumps(response_data, indent=2, ensure_ascii=False))]
except Exception as e:
logger.error(f"Error in {self.get_name()} work: {e}", exc_info=True)
@@ -728,7 +728,7 @@ class BaseWorkflowMixin(ABC):
# Add metadata to error responses too
self._add_workflow_metadata(error_data, arguments)
return [TextContent(type="text", text=json.dumps(error_data, indent=2))]
return [TextContent(type="text", text=json.dumps(error_data, indent=2, ensure_ascii=False))]
# Hook methods for tool customization
@@ -1233,7 +1233,7 @@ class BaseWorkflowMixin(ABC):
# - file_context (internal optimization info)
# - required_actions (internal workflow instructions)
return json.dumps(clean_data, indent=2)
return json.dumps(clean_data, indent=2, ensure_ascii=False)
# Core workflow logic methods
@@ -1265,7 +1265,9 @@ class BaseWorkflowMixin(ABC):
# Promote the special status to the main response
special_status = expert_analysis["status"]
response_data["status"] = special_status
response_data["content"] = expert_analysis.get("raw_analysis", json.dumps(expert_analysis))
response_data["content"] = expert_analysis.get(
"raw_analysis", json.dumps(expert_analysis, ensure_ascii=False)
)
del response_data["expert_analysis"]
# Update next steps for special status
@@ -1524,20 +1526,22 @@ class BaseWorkflowMixin(ABC):
error_data = {"status": "error", "content": "No arguments provided"}
# Add basic metadata even for validation errors
error_data["metadata"] = {"tool_name": self.get_name()}
return [TextContent(type="text", text=json.dumps(error_data))]
return [TextContent(type="text", text=json.dumps(error_data, ensure_ascii=False))]
# Delegate to execute_workflow
return await self.execute_workflow(arguments)
except Exception as e:
logger.error(f"Error in {self.get_name()} tool execution: {e}", exc_info=True)
error_data = {"status": "error", "content": f"Error in {self.get_name()}: {str(e)}"}
# Add metadata to error responses
error_data = {
"status": "error",
"content": f"Error in {self.get_name()}: {str(e)}",
} # Add metadata to error responses
self._add_workflow_metadata(error_data, arguments)
return [
TextContent(
type="text",
text=json.dumps(error_data),
text=json.dumps(error_data, ensure_ascii=False),
)
]