fix: format error handling
@@ -285,53 +285,53 @@ def test_mcp_tools_integration():
    response_data = {
        "status": "success",
        "content_type": "markdown",
        "content": """# Analysis Completed Successfully ✅
        "content": """# Analyse Terminée avec Succès ✅

## Analysis Summary
## Résumé de l'Analyse

The architectural analysis of the project has been **successfully** completed. Here are the main results:
L'analyse architecturale du projet a été **terminée** avec succès. Voici les principaux résultats :

### 🎯 Achieved Goals
- ✅ Complete code review
- ✅ Identification of performance issues
- ✅ Improvement recommendations generated
### 🎯 Objectifs Atteints
- ✅ Révision complète du code
- ✅ Identification des problèmes de performance
- ✅ Recommandations d'amélioration générées

### 📊 Analyzed Metrics
| Metric | Value | Status |
|--------|-------|--------|
| Cyclomatic complexity | 12 | 🟡 Acceptable |
| Test coverage | 85% | 🟢 Good |
| External dependencies | 23 | 🟠 To be reviewed |
### 📊 Métriques Analysées
| Métrique | Valeur | Statut |
|----------|--------|--------|
| Complexité cyclomatique | 12 | 🟡 Acceptable |
| Couverture de tests | 85% | 🟢 Bon |
| Dépendances externes | 23 | 🟠 À réviser |

### 🔍 Identified Issues
### 🔍 Problèmes Identifiés

#### 🔴 Critical
No critical issues detected.
#### 🔴 Critique
Aucun problème critique détecté.

#### 🟠 High
1. **Query performance**: Optimization needed
2. **Memory management**: Potential leaks detected
#### 🟠 Élevé
1. **Performance des requêtes** : Optimisation nécessaire
2. **Gestion mémoire** : Fuites potentielles détectées

#### 🟡 Medium
1. **Documentation**: Some functions lack comments
2. **Unit tests**: Coverage to be improved
#### 🟡 Moyen
1. **Documentation** : Certaines fonctions manquent de commentaires
2. **Tests unitaires** : Couverture à améliorer

### 🚀 Priority Recommendations
### 🚀 Recommandations Prioritaires

1. **DB Optimization**: Implement Redis cache
2. **Refactoring**: Separate responsibilities
3. **Documentation**: Add missing docstrings
4. **Tests**: Increase coverage to 90%+
1. **Optimisation DB** : Implémenter un cache Redis
2. **Refactoring** : Séparer les responsabilités
3. **Documentation** : Ajouter les docstrings manquantes
4. **Tests** : Augmenter la couverture à 90%+

### 📈 Next Steps
### 📈 Prochaines Étapes

- [ ] Implement caching system
- [ ] Refactor identified modules
- [ ] Complete documentation
- [ ] Run regression tests
- [ ] Implémenter le système de cache
- [ ] Refactorer les modules identifiés
- [ ] Compléter la documentation
- [ ] Exécuter les tests de régression

---
*Analysis automatically generated by MCP Zen* 🤖
*Analyse générée automatiquement par MCP Zen* 🤖
""",
        "metadata": {
            "tool_name": "analyze",

@@ -384,16 +384,10 @@ class TestLocalizationIntegration(unittest.TestCase):
        # English
        os.environ["LOCALE"] = "en-US"
        instruction_en = tool.get_language_instruction()
        self.assertIn("en-US", instruction_en)

        # Spanish
        os.environ["LOCALE"] = "es-ES"
        instruction_es = tool.get_language_instruction()  # Spanish
        self.assertIn("en-US", instruction_en)  # Spanish
        os.environ["LOCALE"] = "es-ES"
        instruction_es = tool.get_language_instruction()
        self.assertIn("es-ES", instruction_es)

        # Chinese
        self.assertIn("es-ES", instruction_es)  # Chinese
        os.environ["LOCALE"] = "zh-CN"
        instruction_zh = tool.get_language_instruction()
        self.assertIn("zh-CN", instruction_zh)

@@ -6,14 +6,14 @@ and the generation of properly encoded JSON responses.
import json
import os
import unittest
from unittest.mock import Mock, patch
from unittest.mock import AsyncMock, Mock, patch

from tools.analyze import AnalyzeTool
from tools.codereview import CodeReviewTool
from tools.debug import DebugIssueTool


class TestWorkflowToolsUTF8(unittest.TestCase):
class TestWorkflowToolsUTF8(unittest.IsolatedAsyncioTestCase):
    """Tests for UTF-8 encoding in workflow tools."""

    def setUp(self):
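
Note: switching the base class from `unittest.TestCase` to `unittest.IsolatedAsyncioTestCase` is what lets the `async def test_*` methods in the hunks below be awaited by the test runner itself. A minimal, self-contained sketch of that pattern (illustrative names only, not code from this commit):

    import asyncio
    import unittest


    class ExampleAsyncCase(unittest.IsolatedAsyncioTestCase):
        async def test_runner_manages_the_event_loop(self):
            # IsolatedAsyncioTestCase creates and closes an event loop per test,
            # so the body can await directly without calling asyncio.run().
            await asyncio.sleep(0)
            self.assertEqual(1 + 1, 2)


    if __name__ == "__main__":
        unittest.main()
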
@@ -48,7 +48,7 @@ class TestWorkflowToolsUTF8(unittest.TestCase):
        # Test JSON serialization with ensure_ascii=False
        json_str = json.dumps(test_response, indent=2, ensure_ascii=False)

        # UTF-8 checks
        # Check UTF-8 characters are preserved
        self.assertIn("🔍", json_str)

        # No escaped characters
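
Context for the checks above: with the default `ensure_ascii=True`, `json.dumps` escapes every non-ASCII character as a `\uXXXX` sequence, so the emoji assertion would fail; `ensure_ascii=False` writes the characters through unchanged. A standalone illustration (sample data only):

    import json

    data = {"résultat": "terminée 🎉"}

    # Default behaviour: non-ASCII characters are escaped.
    print(json.dumps(data))
    # -> {"r\u00e9sultat": "termin\u00e9e \ud83c\udf89"}

    # ensure_ascii=False keeps the UTF-8 characters readable.
    print(json.dumps(data, ensure_ascii=False))
    # -> {"résultat": "terminée 🎉"}
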
@@ -60,22 +60,24 @@ class TestWorkflowToolsUTF8(unittest.TestCase):
        self.assertEqual(len(parsed["issues_found"]), 1)

    @patch("tools.shared.base_tool.BaseTool.get_model_provider")
    def test_analyze_tool_utf8_response(self, mock_get_provider):
    async def test_analyze_tool_utf8_response(self, mock_get_provider):
        """Test that the analyze tool returns correct UTF-8 responses."""
        # Mock provider
        mock_provider = Mock()
        mock_provider.get_provider_type.return_value = Mock(value="test")
        mock_provider.generate_content.return_value = Mock(
        mock_provider.generate_content = AsyncMock(
            return_value=Mock(
                content="Architectural analysis complete. Recommendations: improve modularity.",
                usage={},
                model_name="test-model",
                metadata={},
            )
        )
        mock_get_provider.return_value = mock_provider

        # Test the tool
        analyze_tool = AnalyzeTool()
        result = analyze_tool.execute(
        result = await analyze_tool.execute(
            {
                "step": "Analyze system architecture to identify issues",
                "step_number": 1,
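
Note on the `Mock` to `AsyncMock` change above: once `execute` awaits `generate_content`, a plain `Mock` return value raises `TypeError` because it is not awaitable, while calling an `AsyncMock` yields a coroutine. A small sketch of the difference (names are illustrative, not this project's API):

    import asyncio
    from unittest.mock import AsyncMock, Mock


    async def call_provider(provider):
        # Stands in for tool code that awaits the provider method.
        return await provider.generate_content("prompt")


    provider = Mock()
    provider.generate_content = AsyncMock(return_value="ok")
    print(asyncio.run(call_provider(provider)))  # -> ok

    broken = Mock()
    broken.generate_content.return_value = "ok"  # plain value, not awaitable
    # asyncio.run(call_provider(broken))  # TypeError: object str can't be used in 'await' expression
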
@@ -106,13 +108,14 @@ class TestWorkflowToolsUTF8(unittest.TestCase):
        self.assertIn("fr-FR", system_prompt)

    @patch("tools.shared.base_tool.BaseTool.get_model_provider")
    def test_codereview_tool_french_findings(self, mock_get_provider):
    async def test_codereview_tool_french_findings(self, mock_get_provider):
        """Test that the codereview tool produces findings in French."""
        # Mock with analysis in French
        mock_provider = Mock()
        mock_provider.get_provider_type.return_value = Mock(value="test")
        mock_provider.supports_thinking_mode.return_value = False
        mock_provider.generate_content.return_value = Mock(
        mock_provider.generate_content = AsyncMock(
            return_value=Mock(
                content=json.dumps(
                    {
                        "status": "analysis_complete",
@@ -139,11 +142,12 @@ class TestWorkflowToolsUTF8(unittest.TestCase):
                model_name="test-model",
                metadata={},
            )
        )
        mock_get_provider.return_value = mock_provider

        # Test the tool
        codereview_tool = CodeReviewTool()
        result = codereview_tool.execute(
        result = await codereview_tool.execute(
            {
                "step": "Complete review of Python code",
                "step_number": 1,
@@ -177,22 +181,24 @@ class TestWorkflowToolsUTF8(unittest.TestCase):
        self.assertIn("✅", analysis)

    @patch("tools.shared.base_tool.BaseTool.get_model_provider")
    def test_debug_tool_french_error_analysis(self, mock_get_provider):
    async def test_debug_tool_french_error_analysis(self, mock_get_provider):
        """Test that the debug tool analyzes errors in French."""
        # Mock provider
        mock_provider = Mock()
        mock_provider.get_provider_type.return_value = Mock(value="test")
        mock_provider.generate_content.return_value = Mock(
        mock_provider.generate_content = AsyncMock(
            return_value=Mock(
                content="Error analyzed: variable 'données' not defined. Probable cause: missing import.",
                usage={},
                model_name="test-model",
                metadata={},
            )
        )
        mock_get_provider.return_value = mock_provider

        # Test the debug tool
        debug_tool = DebugIssueTool()
        result = debug_tool.execute(
        result = await debug_tool.execute(
            {
                "step": "Analyze NameError in data processing file",
                "step_number": 1,
@@ -220,67 +226,51 @@ class TestWorkflowToolsUTF8(unittest.TestCase):
        response_str = json.dumps(response_data, ensure_ascii=False)
        self.assertIn("données", response_str)

    def test_json_utf8_serialization(self):
        """Test UTF-8 serialization with ensure_ascii=False."""
        # Test data with French characters and emojis
    def test_utf8_emoji_preservation_in_workflow_responses(self):
        """Test that emojis are preserved in workflow tool responses."""
        # Mock workflow response with various emojis
        test_data = {
            "analyse": {
                "statut": "terminée",
                "résultat": "Aucun problème critique détecté",
                "recommandations": [
                    "Améliorer la documentation",
                    "Optimiser les performances",
                    "Ajouter des tests unitaires",
            "status": "analysis_complete",
            "severity_indicators": {
                "critical": "🔴",
                "high": "🟠",
                "medium": "🟡",
                "low": "🟢",
                "success": "✅",
                "error": "❌",
                "warning": "⚠️",
            },
            "progress": "Analysis completed 🎉",
            "recommendations": [
                "Optimize performance 🚀",
                "Improve documentation 📚",
                "Add unit tests 🧪",
            ],
                "métadonnées": {
                    "créé_par": "Développeur Principal",
                    "date_création": "2024-01-01",
                    "dernière_modification": "2024-01-15",
                },
                "émojis_status": {
                    "critique": "🔴",
                    "élevé": "🟠",
                    "moyen": "🟡",
                    "faible": "🟢",
                    "succès": "✅",
                    "erreur": "❌",
                },
            }
        }

        # Test with ensure_ascii=False
        json_correct = json.dumps(test_data, ensure_ascii=False, indent=2)
        # Test JSON encoding with ensure_ascii=False
        json_str = json.dumps(test_data, ensure_ascii=False, indent=2)

        # Checks
        utf8_terms = [
            "terminée",
            "résultat",
            "détecté",
            "Améliorer",
            "créé_par",
            "Développeur",
            "création",
            "métadonnées",
            "dernière",
            "émojis_status",
            "élevé",
        ]
        # Check emojis are preserved
        self.assertIn("🔴", json_str)
        self.assertIn("🟠", json_str)
        self.assertIn("🟡", json_str)
        self.assertIn("🟢", json_str)
        self.assertIn("✅", json_str)
        self.assertIn("❌", json_str)
        self.assertIn("⚠️", json_str)
        self.assertIn("🎉", json_str)
        self.assertIn("🚀", json_str)
        self.assertIn("📚", json_str)
        self.assertIn("🧪", json_str)

        emojis = ["🔴", "🟠", "🟡", "🟢", "✅", "❌"]
        # No escaped Unicode
        self.assertNotIn("\\u", json_str)

        for term in utf8_terms:
            self.assertIn(term, json_correct)

        for emoji in emojis:
            self.assertIn(emoji, json_correct)

        # Check for escaped characters
        self.assertNotIn("\\u", json_correct)

        # Test parsing
        parsed = json.loads(json_correct)
        self.assertEqual(parsed["analyse"]["statut"], "terminée")
        self.assertEqual(parsed["analyse"]["émojis_status"]["critique"], "🔴")
        # Test parsing preserves emojis
        parsed = json.loads(json_str)
        self.assertEqual(parsed["severity_indicators"]["critical"], "🔴")
        self.assertEqual(parsed["progress"], "Analysis completed 🎉")


if __name__ == "__main__":