feat: refactored and improved codereview in line with precommit. Reviews are now either external (default) or internal. This removes the anxiety and wasted tokens when Claude incorrectly decides to be 'confident' about its own changes and bungles things up.

fix: Minor tweaks to prompts
fix: Improved support for smaller models that struggle with strict structured JSON output (see the parsing sketch below)
docs: Rearranged the reasons to use the MCP so they appear above the collapsed quick start section
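
The JSON-output fix is implemented in the diff further down. As a rough sketch of the strategy, assuming a standalone helper named parse_model_output (a hypothetical name, not the project's actual method):

import json
import re


def parse_model_output(raw: str) -> dict:
    """Leniently parse a model response: unwrap markdown fences, fall back to plain text."""
    content = raw.strip()

    # Smaller models often wrap JSON in a ```json ... ``` fence; unwrap it first.
    if "```" in content:
        match = re.search(r"```(?:json)?\s*(.*?)\s*```", content, re.DOTALL)
        if match:
            content = match.group(1).strip()

    try:
        # Well-behaved models return strict JSON and parse directly.
        return json.loads(content)
    except json.JSONDecodeError:
        # Anything else is still useful analysis; return it as plain text rather than failing.
        return {"status": "analysis_complete", "raw_analysis": raw, "format": "text"}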
Author: Fahad
Date:   2025-08-21 14:04:32 +04:00
Parent: d30c212029
Commit: 80d21e57c0
6 changed files with 300 additions and 122 deletions


@@ -23,6 +23,7 @@ Features:
 import json
 import logging
 import os
+import re
 from abc import ABC, abstractmethod
 from typing import Any, Optional
@@ -1518,16 +1519,32 @@ class BaseWorkflowMixin(ABC):
 )
 if model_response.content:
+    content = model_response.content.strip()
+    # Try to extract JSON from markdown code blocks if present
+    if "```json" in content or "```" in content:
+        json_match = re.search(r"```(?:json)?\s*(.*?)\s*```", content, re.DOTALL)
+        if json_match:
+            content = json_match.group(1).strip()
     try:
         # Try to parse as JSON
-        analysis_result = json.loads(model_response.content.strip())
+        analysis_result = json.loads(content)
         return analysis_result
-    except json.JSONDecodeError:
-        # Return as text if not valid JSON
+    except json.JSONDecodeError as e:
+        # Log the parse error with more details but don't fail
+        logger.info(
+            f"[{self.get_name()}] Expert analysis returned non-JSON response (this is OK for smaller models). "
+            f"Parse error: {str(e)}. Response length: {len(model_response.content)} chars."
+        )
+        logger.debug(f"First 500 chars of response: {model_response.content[:500]!r}")
+        # Still return the analysis as plain text - this is valid
         return {
             "status": "analysis_complete",
             "raw_analysis": model_response.content,
             "parse_error": "Response was not valid JSON",
+            "format": "text",  # Indicate it's plain text, not an error
+            "note": "Analysis provided in plain text format",
         }
 else:
     return {"error": "No response from model", "status": "empty_response"}