🚀 Major Enhancement: Workflow-Based Tool Architecture v5.5.0 (#95)

* WIP: new workflow architecture

* WIP: further improvements and cleanup

* WIP: cleanup and docs, replace old tool with new

* WIP: new planner implementation using workflow

* WIP: precommit tool working as a workflow instead of a basic tool
Support for passing `use_assistant_model=False` to skip external models completely and use Claude only
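
A minimal sketch of how a workflow step might honor such a flag; apart from `use_assistant_model` itself, every name below is hypothetical, not the repo's actual API:

```python
# Hypothetical sketch only: `use_assistant_model` is the real flag; the class
# and method names are invented for illustration.
from typing import Any, Optional


class WorkflowStep:
    def expert_analysis(self, request: dict[str, Any]) -> Optional[str]:
        # Passing use_assistant_model=False skips the external-model call
        # entirely, leaving Claude to complete the workflow on its own.
        if not request.get("use_assistant_model", True):
            return None
        return self._call_external_model(request)

    def _call_external_model(self, request: dict[str, Any]) -> str:
        raise NotImplementedError  # provider call would go here
```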

* WIP: precommit workflow version swapped in for the old one

* WIP: codereview

* WIP: replaced codereview

* WIP: replaced refactor

* WIP: workflow for thinkdeep

* WIP: ensure files get embedded correctly

* WIP: thinkdeep replaced with workflow version

* WIP: improved messaging when an external model's response is received

* WIP: analyze tool swapped

* WIP: updated tests
* Extract only the content when building history
* Use "relevant_files" for workflow tools only


* WIP: fixed missing parameter in get_completion_next_steps_message

* Fixed tests
Request files consistently

* Fixed tests

* New testgen workflow tool
Updated docs

* Swap testgen workflow

* Fix CI test failures by excluding API-dependent tests

- Update GitHub Actions workflow to exclude simulation tests that require API keys
- Fix collaboration tests to properly mock workflow tool expert analysis calls
- Update test assertions to handle new workflow tool response format
- Ensure unit tests run without external API dependencies in CI (a pytest-flavored sketch follows this list)
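
The exclusion itself lives in the GitHub Actions YAML; as a sketch of the same idea expressed in pytest, assuming hypothetical env-var and marker names:

```python
# conftest.py -- illustrative sketch; the commit implements the exclusion in
# the Actions workflow, and the env-var and marker names here are assumed.
import os

import pytest


def pytest_collection_modifyitems(config, items):
    if os.environ.get("OPENAI_API_KEY") or os.environ.get("GEMINI_API_KEY"):
        return  # API keys available: run the full suite, simulations included
    skip_api = pytest.mark.skip(reason="requires external API keys")
    for item in items:
        if "simulation" in item.keywords:  # tests tagged @pytest.mark.simulation
            item.add_marker(skip_api)
```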

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* WIP - Update tests to match new tools

---------

Co-authored-by: Claude <noreply@anthropic.com>
Author: Beehive Innovations
Date: 2025-06-21 00:08:11 +04:00
Committed by: GitHub
Parent: 4dae6e457e
Commit: 69a3121452
76 changed files with 17111 additions and 7725 deletions

```diff
@@ -3,90 +3,91 @@ Tests for the Consensus tool
 """
 
 import json
-import unittest
-from unittest.mock import Mock, patch
+from unittest.mock import patch
+
+import pytest
 
 from tools.consensus import ConsensusTool, ModelConfig
 
 
-class TestConsensusTool(unittest.TestCase):
+class TestConsensusTool:
     """Test cases for the Consensus tool"""
 
-    def setUp(self):
+    def setup_method(self):
         """Set up test fixtures"""
         self.tool = ConsensusTool()
 
     def test_tool_metadata(self):
         """Test tool metadata is correct"""
-        self.assertEqual(self.tool.get_name(), "consensus")
-        self.assertTrue("MULTI-MODEL CONSENSUS" in self.tool.get_description())
-        self.assertEqual(self.tool.get_default_temperature(), 0.2)
+        assert self.tool.get_name() == "consensus"
+        assert "MULTI-MODEL CONSENSUS" in self.tool.get_description()
+        assert self.tool.get_default_temperature() == 0.2
 
     def test_input_schema(self):
         """Test input schema is properly defined"""
         schema = self.tool.get_input_schema()
-        self.assertEqual(schema["type"], "object")
-        self.assertIn("prompt", schema["properties"])
-        self.assertIn("models", schema["properties"])
-        self.assertEqual(schema["required"], ["prompt", "models"])
+        assert schema["type"] == "object"
+        assert "prompt" in schema["properties"]
+        assert "models" in schema["properties"]
+        assert schema["required"] == ["prompt", "models"]
 
         # Check that schema includes model configuration information
         models_desc = schema["properties"]["models"]["description"]
 
         # Check description includes object format
-        self.assertIn("model configurations", models_desc)
-        self.assertIn("specific stance and custom instructions", models_desc)
+        assert "model configurations" in models_desc
+        assert "specific stance and custom instructions" in models_desc
 
         # Check example shows new format
-        self.assertIn("'model': 'o3'", models_desc)
-        self.assertIn("'stance': 'for'", models_desc)
-        self.assertIn("'stance_prompt'", models_desc)
+        assert "'model': 'o3'" in models_desc
+        assert "'stance': 'for'" in models_desc
+        assert "'stance_prompt'" in models_desc
 
     def test_normalize_stance_basic(self):
         """Test basic stance normalization"""
         # Test basic stances
-        self.assertEqual(self.tool._normalize_stance("for"), "for")
-        self.assertEqual(self.tool._normalize_stance("against"), "against")
-        self.assertEqual(self.tool._normalize_stance("neutral"), "neutral")
-        self.assertEqual(self.tool._normalize_stance(None), "neutral")
+        assert self.tool._normalize_stance("for") == "for"
+        assert self.tool._normalize_stance("against") == "against"
+        assert self.tool._normalize_stance("neutral") == "neutral"
+        assert self.tool._normalize_stance(None) == "neutral"
 
     def test_normalize_stance_synonyms(self):
         """Test stance synonym normalization"""
         # Supportive synonyms
-        self.assertEqual(self.tool._normalize_stance("support"), "for")
-        self.assertEqual(self.tool._normalize_stance("favor"), "for")
+        assert self.tool._normalize_stance("support") == "for"
+        assert self.tool._normalize_stance("favor") == "for"
 
         # Critical synonyms
-        self.assertEqual(self.tool._normalize_stance("critical"), "against")
-        self.assertEqual(self.tool._normalize_stance("oppose"), "against")
+        assert self.tool._normalize_stance("critical") == "against"
+        assert self.tool._normalize_stance("oppose") == "against"
 
         # Case insensitive
-        self.assertEqual(self.tool._normalize_stance("FOR"), "for")
-        self.assertEqual(self.tool._normalize_stance("Support"), "for")
-        self.assertEqual(self.tool._normalize_stance("AGAINST"), "against")
-        self.assertEqual(self.tool._normalize_stance("Critical"), "against")
+        assert self.tool._normalize_stance("FOR") == "for"
+        assert self.tool._normalize_stance("Support") == "for"
+        assert self.tool._normalize_stance("AGAINST") == "against"
+        assert self.tool._normalize_stance("Critical") == "against"
 
         # Test unknown stances default to neutral
-        self.assertEqual(self.tool._normalize_stance("supportive"), "neutral")
-        self.assertEqual(self.tool._normalize_stance("maybe"), "neutral")
-        self.assertEqual(self.tool._normalize_stance("contra"), "neutral")
-        self.assertEqual(self.tool._normalize_stance("random"), "neutral")
+        assert self.tool._normalize_stance("supportive") == "neutral"
+        assert self.tool._normalize_stance("maybe") == "neutral"
+        assert self.tool._normalize_stance("contra") == "neutral"
+        assert self.tool._normalize_stance("random") == "neutral"
 
     def test_model_config_validation(self):
         """Test ModelConfig validation"""
         # Valid config
         config = ModelConfig(model="o3", stance="for", stance_prompt="Custom prompt")
-        self.assertEqual(config.model, "o3")
-        self.assertEqual(config.stance, "for")
-        self.assertEqual(config.stance_prompt, "Custom prompt")
+        assert config.model == "o3"
+        assert config.stance == "for"
+        assert config.stance_prompt == "Custom prompt"
 
         # Default stance
         config = ModelConfig(model="flash")
-        self.assertEqual(config.stance, "neutral")
-        self.assertIsNone(config.stance_prompt)
+        assert config.stance == "neutral"
+        assert config.stance_prompt is None
 
         # Test that empty model is handled by validation elsewhere
         # Pydantic allows empty strings by default, but the tool validates it
         config = ModelConfig(model="")
-        self.assertEqual(config.model, "")
+        assert config.model == ""
 
     def test_validate_model_combinations(self):
         """Test model combination validation with ModelConfig objects"""
@@ -98,8 +99,8 @@ class TestConsensusTool(unittest.TestCase):
             ModelConfig(model="o3", stance="against"),
         ]
         valid, skipped = self.tool._validate_model_combinations(configs)
-        self.assertEqual(len(valid), 4)
-        self.assertEqual(len(skipped), 0)
+        assert len(valid) == 4
+        assert len(skipped) == 0
 
         # Test max instances per combination (2)
         configs = [
@@ -109,9 +110,9 @@ class TestConsensusTool(unittest.TestCase):
             ModelConfig(model="pro", stance="against"),
         ]
         valid, skipped = self.tool._validate_model_combinations(configs)
-        self.assertEqual(len(valid), 3)
-        self.assertEqual(len(skipped), 1)
-        self.assertIn("max 2 instances", skipped[0])
+        assert len(valid) == 3
+        assert len(skipped) == 1
+        assert "max 2 instances" in skipped[0]
 
         # Test unknown stances get normalized to neutral
         configs = [
@@ -120,31 +121,31 @@ class TestConsensusTool(unittest.TestCase):
             ModelConfig(model="grok"),  # Already neutral
         ]
         valid, skipped = self.tool._validate_model_combinations(configs)
-        self.assertEqual(len(valid), 3)  # All are valid (normalized to neutral)
-        self.assertEqual(len(skipped), 0)  # None skipped
+        assert len(valid) == 3  # All are valid (normalized to neutral)
+        assert len(skipped) == 0  # None skipped
 
         # Verify normalization worked
-        self.assertEqual(valid[0].stance, "neutral")  # maybe -> neutral
-        self.assertEqual(valid[1].stance, "neutral")  # kinda -> neutral
-        self.assertEqual(valid[2].stance, "neutral")  # already neutral
+        assert valid[0].stance == "neutral"  # maybe -> neutral
+        assert valid[1].stance == "neutral"  # kinda -> neutral
+        assert valid[2].stance == "neutral"  # already neutral
 
     def test_get_stance_enhanced_prompt(self):
         """Test stance-enhanced prompt generation"""
         # Test that stance prompts are injected correctly
         for_prompt = self.tool._get_stance_enhanced_prompt("for")
-        self.assertIn("SUPPORTIVE PERSPECTIVE", for_prompt)
+        assert "SUPPORTIVE PERSPECTIVE" in for_prompt
 
         against_prompt = self.tool._get_stance_enhanced_prompt("against")
-        self.assertIn("CRITICAL PERSPECTIVE", against_prompt)
+        assert "CRITICAL PERSPECTIVE" in against_prompt
 
         neutral_prompt = self.tool._get_stance_enhanced_prompt("neutral")
-        self.assertIn("BALANCED PERSPECTIVE", neutral_prompt)
+        assert "BALANCED PERSPECTIVE" in neutral_prompt
 
         # Test custom stance prompt
         custom_prompt = "Focus on user experience and business value"
         enhanced = self.tool._get_stance_enhanced_prompt("for", custom_prompt)
-        self.assertIn(custom_prompt, enhanced)
-        self.assertNotIn("SUPPORTIVE PERSPECTIVE", enhanced)  # Should use custom instead
+        assert custom_prompt in enhanced
+        assert "SUPPORTIVE PERSPECTIVE" not in enhanced  # Should use custom instead
 
     def test_format_consensus_output(self):
         """Test consensus output formatting"""
@@ -158,21 +159,41 @@ class TestConsensusTool(unittest.TestCase):
         output = self.tool._format_consensus_output(responses, skipped)
         output_data = json.loads(output)
 
-        self.assertEqual(output_data["status"], "consensus_success")
-        self.assertEqual(output_data["models_used"], ["o3:for", "pro:against"])
-        self.assertEqual(output_data["models_skipped"], skipped)
-        self.assertEqual(output_data["models_errored"], ["grok"])
-        self.assertIn("next_steps", output_data)
+        assert output_data["status"] == "consensus_success"
+        assert output_data["models_used"] == ["o3:for", "pro:against"]
+        assert output_data["models_skipped"] == skipped
+        assert output_data["models_errored"] == ["grok"]
+        assert "next_steps" in output_data
 
-    @patch("tools.consensus.ConsensusTool.get_model_provider")
-    async def test_execute_with_model_configs(self, mock_get_provider):
+    @pytest.mark.asyncio
+    @patch("tools.consensus.ConsensusTool._get_consensus_responses")
+    async def test_execute_with_model_configs(self, mock_get_responses):
         """Test execute with ModelConfig objects"""
-        # Mock provider
-        mock_provider = Mock()
-        mock_response = Mock()
-        mock_response.content = "Test response"
-        mock_provider.generate_content.return_value = mock_response
-        mock_get_provider.return_value = mock_provider
+        # Mock responses directly at the consensus level
+        mock_responses = [
+            {
+                "model": "o3",
+                "stance": "for",  # support normalized to for
+                "status": "success",
+                "verdict": "This is good for user benefits",
+                "metadata": {"provider": "openai", "usage": None, "custom_stance_prompt": True},
+            },
+            {
+                "model": "pro",
+                "stance": "against",  # critical normalized to against
+                "status": "success",
+                "verdict": "There are technical risks to consider",
+                "metadata": {"provider": "gemini", "usage": None, "custom_stance_prompt": True},
+            },
+            {
+                "model": "grok",
+                "stance": "neutral",
+                "status": "success",
+                "verdict": "Balanced perspective on the proposal",
+                "metadata": {"provider": "xai", "usage": None, "custom_stance_prompt": False},
+            },
+        ]
+        mock_get_responses.return_value = mock_responses
 
         # Test with ModelConfig objects including custom stance prompts
         models = [
@@ -183,21 +204,20 @@ class TestConsensusTool(unittest.TestCase):
 
         result = await self.tool.execute({"prompt": "Test prompt", "models": models})
 
-        # Verify all models were called
-        self.assertEqual(mock_get_provider.call_count, 3)
-
-        # Check that response contains expected format
+        # Verify the response structure
         response_text = result[0].text
         response_data = json.loads(response_text)
 
-        self.assertEqual(response_data["status"], "consensus_success")
-        self.assertEqual(len(response_data["models_used"]), 3)
+        assert response_data["status"] == "consensus_success"
+        assert len(response_data["models_used"]) == 3
 
-        # Verify stance normalization worked
+        # Verify stance normalization worked in the models_used field
         models_used = response_data["models_used"]
-        self.assertIn("o3:for", models_used)  # support -> for
-        self.assertIn("pro:against", models_used)  # critical -> against
-        self.assertIn("grok", models_used)  # neutral (no suffix)
+        assert "o3:for" in models_used  # support -> for
+        assert "pro:against" in models_used  # critical -> against
+        assert "grok" in models_used  # neutral (no stance suffix)
 
 
 if __name__ == "__main__":
+    import unittest
+
     unittest.main()
```
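
Worth noting in the last two hunks: the test now patches `_get_consensus_responses` and returns canned per-model dicts instead of mocking each provider, so the stance-normalization assertions no longer depend on provider wiring — which is what lets this suite run in CI without any API keys.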