fix: improve conversation retrieval
tests/test_conversation_continuation_integration.py | 36 lines added (new file)
@@ -0,0 +1,36 @@
"""Integration test for conversation continuation persistence."""

from tools.chat import ChatRequest, ChatTool
from utils.conversation_memory import get_thread
from utils.storage_backend import get_storage_backend


def test_first_response_persisted_in_conversation_history():
    """Ensure the assistant's initial reply is stored for newly created threads."""
    # Clear in-memory storage to avoid cross-test contamination
    storage = get_storage_backend()
    storage._store.clear()  # type: ignore[attr-defined]

    tool = ChatTool()
    request = ChatRequest(prompt="First question?", model="local-llama")
    response_text = "Here is the initial answer."

    # Mimic the first tool invocation (no continuation_id supplied)
    continuation_data = tool._create_continuation_offer(request, model_info={"model_name": "local-llama"})
    tool._create_continuation_offer_response(
        response_text,
        continuation_data,
        request,
        {"model_name": "local-llama", "provider": "custom"},
    )

    thread_id = continuation_data["continuation_id"]
    thread = get_thread(thread_id)

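    # Both turns (the user prompt and the assistant reply) should now be
    # persisted on the thread, in order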
    assert thread is not None
    assert [turn.role for turn in thread.turns] == ["user", "assistant"]
    assert thread.turns[-1].content == response_text

    # Cleanup storage for subsequent tests
    storage._store.clear()  # type: ignore[attr-defined]
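To run this test on its own (assuming the repository uses pytest, as the bare asserts and test_ naming suggest):

    pytest tests/test_conversation_continuation_integration.py -q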