feat: Add LOCALE variable support for responses with UTF-8 JSON encoding

Description: This feature adds UTF-8 support to JSON responses: payloads are serialized with ensure_ascii=False so accented characters and emojis are returned as-is rather than as \uXXXX escape sequences, as the snippet below illustrates.
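
For reference (not part of the commit's code), Python's json.dumps escapes non-ASCII characters by default; passing ensure_ascii=False is what preserves the UTF-8 text:

    import json

    data = {"message": "Café ☕", "status": "terminé"}

    # Default behaviour: non-ASCII characters are escaped as \uXXXX sequences.
    print(json.dumps(data))                      # {"message": "Caf\u00e9 \u2615", "status": "termin\u00e9"}

    # With ensure_ascii=False, UTF-8 characters pass through verbatim.
    print(json.dumps(data, ensure_ascii=False))  # {"message": "Café ☕", "status": "terminé"}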

- Implement unit tests for UTF-8 encoding in various model providers including Gemini, OpenAI, and OpenAI Compatible.
- Validate UTF-8 support in token counting, content generation, and error handling.
- Introduce tests for JSON serialization ensuring proper handling of French characters and emojis (a sketch of such a check follows this list).
- Create tests for language instruction generation based on locale settings.
- Validate UTF-8 handling in workflow tools including AnalyzeTool, CodereviewTool, and DebugIssueTool.
- Ensure that all tests check for correct UTF-8 character preservation and proper JSON formatting.
- Add integration tests to verify the interaction between locale settings and model responses.
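
A minimal sketch of the serialization check described above, using hypothetical test and variable names rather than the commit's actual test code:

    import json
    import unittest


    class TestUTF8JSONSerialization(unittest.TestCase):
        # Hypothetical example; the real tests live in the files changed by this commit.

        def test_french_characters_and_emoji_are_preserved(self):
            payload = {"status": "terminé", "note": "Révision du code 🚀"}
            serialized = json.dumps(payload, ensure_ascii=False)

            # Characters must appear literally, not as \uXXXX escapes.
            self.assertIn("terminé", serialized)
            self.assertIn("🚀", serialized)
            self.assertNotIn("\\u", serialized)

            # Round-tripping must reproduce the original data exactly.
            self.assertEqual(json.loads(serialized), payload)


    if __name__ == "__main__":
        unittest.main()
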
OhMyApps
2025-06-22 19:13:02 +02:00
parent 132c6ca025
commit e9c5662b3a
22 changed files with 1994 additions and 49 deletions


@@ -715,7 +715,10 @@ class BaseWorkflowMixin(ABC):
             if continuation_id:
                 self.store_conversation_turn(continuation_id, response_data, request)
 
-            return [TextContent(type="text", text=json.dumps(response_data, indent=2))]
+            return [TextContent(
+                type="text",
+                text=json.dumps(response_data, indent=2, ensure_ascii=False)
+            )]
 
         except Exception as e:
             logger.error(f"Error in {self.get_name()} work: {e}", exc_info=True)
@@ -728,7 +731,10 @@ class BaseWorkflowMixin(ABC):
             # Add metadata to error responses too
             self._add_workflow_metadata(error_data, arguments)
 
-            return [TextContent(type="text", text=json.dumps(error_data, indent=2))]
+            return [TextContent(
+                type="text",
+                text=json.dumps(error_data, indent=2, ensure_ascii=False)
+            )]
 
     # Hook methods for tool customization
@@ -1233,7 +1239,7 @@ class BaseWorkflowMixin(ABC):
         # - file_context (internal optimization info)
         # - required_actions (internal workflow instructions)
-        return json.dumps(clean_data, indent=2)
+        return json.dumps(clean_data, indent=2, ensure_ascii=False)
 
     # Core workflow logic methods
@@ -1265,7 +1271,10 @@ class BaseWorkflowMixin(ABC):
                 # Promote the special status to the main response
                 special_status = expert_analysis["status"]
                 response_data["status"] = special_status
-                response_data["content"] = expert_analysis.get("raw_analysis", json.dumps(expert_analysis))
+                response_data["content"] = expert_analysis.get(
+                    "raw_analysis",
+                    json.dumps(expert_analysis, ensure_ascii=False)
+                )
                 del response_data["expert_analysis"]
 
                 # Update next steps for special status
@@ -1524,20 +1533,22 @@ class BaseWorkflowMixin(ABC):
                 error_data = {"status": "error", "content": "No arguments provided"}
                 # Add basic metadata even for validation errors
                 error_data["metadata"] = {"tool_name": self.get_name()}
-                return [TextContent(type="text", text=json.dumps(error_data))]
+                return [TextContent(
+                    type="text",
+                    text=json.dumps(error_data, ensure_ascii=False)
+                )]
 
             # Delegate to execute_workflow
             return await self.execute_workflow(arguments)
 
         except Exception as e:
             logger.error(f"Error in {self.get_name()} tool execution: {e}", exc_info=True)
-            error_data = {"status": "error", "content": f"Error in {self.get_name()}: {str(e)}"}
-            # Add metadata to error responses
+            error_data = {"status": "error", "content": f"Error in {self.get_name()}: {str(e)}"}  # Add metadata to error responses
             self._add_workflow_metadata(error_data, arguments)
             return [
                 TextContent(
                     type="text",
-                    text=json.dumps(error_data),
+                    text=json.dumps(error_data, ensure_ascii=False),
                 )
             ]