feat: Add LOCALE variable support for responses with UTF-8 JSON encoding
Description: This feature adds support for UTF-8 encoding in JSON responses, allowing proper handling of special characters and emojis.

- Implement unit tests for UTF-8 encoding across model providers, including Gemini, OpenAI, and OpenAI Compatible.
- Validate UTF-8 support in token counting, content generation, and error handling.
- Introduce tests for JSON serialization ensuring proper handling of French characters and emojis (sketched below).
- Create tests for language instruction generation based on locale settings.
- Validate UTF-8 handling in workflow tools, including AnalyzeTool, CodereviewTool, and DebugIssueTool.
- Ensure that all tests check for correct UTF-8 character preservation and proper JSON formatting.
- Add integration tests to verify the interaction between locale settings and model responses.
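The serialization behavior under test: json.dumps escapes non-ASCII characters to \uXXXX sequences by default, while ensure_ascii=False preserves them verbatim. A minimal sketch of the kind of assertion involved, with an illustrative payload rather than one lifted from the actual test suite:

import json

def test_utf8_preserved_in_json_response():
    # Hypothetical payload mixing French characters and an emoji.
    response_data = {"content": "Révision du code terminée ✅"}

    # Default behavior: non-ASCII characters are escaped to \uXXXX.
    escaped = json.dumps(response_data, indent=2)
    assert "\\u" in escaped

    # With ensure_ascii=False, UTF-8 characters survive unescaped.
    preserved = json.dumps(response_data, indent=2, ensure_ascii=False)
    assert "Révision du code terminée ✅" in preserved

    # Both forms decode back to the same data.
    assert json.loads(escaped) == json.loads(preserved) == response_data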
@@ -512,7 +512,10 @@ of the evidence, even when it strongly points in one direction.""",
                 "provider_used": provider.get_provider_type().value,
             }
 
-            return [TextContent(type="text", text=json.dumps(response_data, indent=2))]
+            return [TextContent(
+                type="text",
+                text=json.dumps(response_data, indent=2, ensure_ascii=False)
+            )]
 
         # Otherwise, use standard workflow execution
         return await super().execute_workflow(arguments)
@@ -1067,6 +1067,22 @@ Consider requesting searches for:
 
 When recommending searches, be specific about what information you need and why it would improve your analysis. Always remember to instruct Claude to use the continuation_id from this response when providing search results."""
 
+    def get_language_instruction(self) -> str:
+        """
+        Generate language instruction based on LOCALE configuration.
+
+        Returns:
+            str: Language instruction to prepend to prompt, or empty string if
+                 no locale set
+        """
+        from config import LOCALE
+
+        if not LOCALE or not LOCALE.strip():
+            return ""
+
+        # Simple language instruction
+        return f"Always respond in {LOCALE.strip()}.\n\n"
+
     # === ABSTRACT METHODS FOR SIMPLE TOOLS ===
 
     @abstractmethod
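For context, a minimal sketch of how the new instruction is meant to compose with a tool's system prompt; the locale value and prompt text below are hypothetical, assuming config.LOCALE holds a plain string such as "fr-FR":

def get_language_instruction(locale: str) -> str:
    # Same logic as the method added above, parameterized for illustration.
    if not locale or not locale.strip():
        return ""
    return f"Always respond in {locale.strip()}.\n\n"

base_system_prompt = "You are a code analysis assistant."

# With a locale set, the instruction is prepended to the system prompt,
# mirroring the SimpleTool change later in this commit.
system_prompt = get_language_instruction("fr-FR") + base_system_prompt
assert system_prompt.startswith("Always respond in fr-FR.")

# With no locale configured, the system prompt is unchanged.
assert get_language_instruction("") + base_system_prompt == base_system_prompt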
@@ -372,24 +372,24 @@ class SimpleTool(BaseTool):
 
                 follow_up_instructions = get_follow_up_instructions(0)
                 prompt = f"{prompt}\n\n{follow_up_instructions}"
-                logger.debug(f"Added follow-up instructions for new {self.get_name()} conversation")
-
-            # Validate images if any were provided
+                logger.debug(f"Added follow-up instructions for new {self.get_name()} conversation")  # Validate images if any were provided
             if images:
                 image_validation_error = self._validate_image_limits(
                     images, model_context=self._model_context, continuation_id=continuation_id
                 )
                 if image_validation_error:
-                    return [TextContent(type="text", text=json.dumps(image_validation_error))]
+                    return [TextContent(
+                        type="text",
+                        text=json.dumps(image_validation_error, ensure_ascii=False)
+                    )]
 
             # Get and validate temperature against model constraints
             temperature, temp_warnings = self.get_validated_temperature(request, self._model_context)
 
             # Log any temperature corrections
             for warning in temp_warnings:
                 logger.warning(warning)
-
             # Get thinking mode with defaults
             thinking_mode = self.get_request_thinking_mode(request)
             if thinking_mode is None:
                 thinking_mode = self.get_default_thinking_mode()
@@ -398,7 +398,9 @@ class SimpleTool(BaseTool):
             provider = self._model_context.provider
 
             # Get system prompt for this tool
-            system_prompt = self.get_system_prompt()
+            base_system_prompt = self.get_system_prompt()
+            language_instruction = self.get_language_instruction()
+            system_prompt = language_instruction + base_system_prompt
 
             # Generate AI response using the provider
             logger.info(f"Sending request to {provider.get_provider_type().value} API for {self.get_name()}")
@@ -715,7 +715,10 @@ class BaseWorkflowMixin(ABC):
             if continuation_id:
                 self.store_conversation_turn(continuation_id, response_data, request)
 
-            return [TextContent(type="text", text=json.dumps(response_data, indent=2))]
+            return [TextContent(
+                type="text",
+                text=json.dumps(response_data, indent=2, ensure_ascii=False)
+            )]
 
         except Exception as e:
             logger.error(f"Error in {self.get_name()} work: {e}", exc_info=True)
@@ -728,7 +731,10 @@ class BaseWorkflowMixin(ABC):
             # Add metadata to error responses too
             self._add_workflow_metadata(error_data, arguments)
 
-            return [TextContent(type="text", text=json.dumps(error_data, indent=2))]
+            return [TextContent(
+                type="text",
+                text=json.dumps(error_data, indent=2, ensure_ascii=False)
+            )]
 
     # Hook methods for tool customization
 
@@ -1233,7 +1239,7 @@ class BaseWorkflowMixin(ABC):
         # - file_context (internal optimization info)
         # - required_actions (internal workflow instructions)
 
-        return json.dumps(clean_data, indent=2)
+        return json.dumps(clean_data, indent=2, ensure_ascii=False)
 
     # Core workflow logic methods
 
@@ -1265,7 +1271,10 @@ class BaseWorkflowMixin(ABC):
                 # Promote the special status to the main response
                 special_status = expert_analysis["status"]
                 response_data["status"] = special_status
-                response_data["content"] = expert_analysis.get("raw_analysis", json.dumps(expert_analysis))
+                response_data["content"] = expert_analysis.get(
+                    "raw_analysis",
+                    json.dumps(expert_analysis, ensure_ascii=False)
+                )
                 del response_data["expert_analysis"]
 
                 # Update next steps for special status
@@ -1524,20 +1533,22 @@ class BaseWorkflowMixin(ABC):
 
                 error_data = {"status": "error", "content": "No arguments provided"}
                 # Add basic metadata even for validation errors
                 error_data["metadata"] = {"tool_name": self.get_name()}
-                return [TextContent(type="text", text=json.dumps(error_data))]
+                return [TextContent(
+                    type="text",
+                    text=json.dumps(error_data, ensure_ascii=False)
+                )]
 
             # Delegate to execute_workflow
             return await self.execute_workflow(arguments)
 
         except Exception as e:
             logger.error(f"Error in {self.get_name()} tool execution: {e}", exc_info=True)
-            error_data = {"status": "error", "content": f"Error in {self.get_name()}: {str(e)}"}
-            # Add metadata to error responses
+            error_data = {"status": "error", "content": f"Error in {self.get_name()}: {str(e)}"}  # Add metadata to error responses
             self._add_workflow_metadata(error_data, arguments)
             return [
                 TextContent(
                     type="text",
-                    text=json.dumps(error_data),
+                    text=json.dumps(error_data, ensure_ascii=False),
                 )
             ]