| author | haoyuren <13851610112@163.com> | 2026-02-12 12:45:24 -0600 |
|---|---|---|
| committer | haoyuren <13851610112@163.com> | 2026-02-12 12:45:24 -0600 |
| commit | c8fae0256c91a0ebe495270aa15baa2f27211268 (patch) | |
| tree | efc908a9fb259a18809ab5151a15fc0f1e10fdf1 /backend/council.py | |
| parent | 92e1fccb1bdcf1bab7221aa9ed90f9dc72529131 (diff) | |
Multi-turn conversation, stop generation, SSE fix, and UI improvements
- Multi-turn context: all council stages now receive conversation history
(user messages + Stage 3 chairman responses) for coherent follow-ups
- Stop generation: abort streaming mid-request, restore the query to the input box
- SSE parsing: buffer-based chunking so JSON payloads are never split across packets (see the sketch after the commit message)
- Atomic storage: user + assistant messages saved together after completion,
preventing dangling messages on abort
- GFM markdown: tables, strikethrough via remark-gfm plugin + table styles
- Performance: memoize user messages and completed assistant messages; only
  re-render the active streaming message
- Model config: gpt-5.2, claude-opus-4.6 as chairman
- Always show input box for multi-turn conversations
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
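The SSE fix itself lands in the frontend JavaScript, which this diff (filtered to `backend/council.py`) doesn't show. For illustration only, here is the same buffer-based chunking idea in Python — the `SSEBuffer` name, the `[DONE]` sentinel, and the sample chunks are assumptions for this sketch, not code from the repo:

```python
import json
from typing import Any, Iterator


class SSEBuffer:
    """Accumulate raw stream chunks; yield only complete SSE 'data:' payloads.

    A network read can end mid-line, splitting a JSON payload across two
    chunks; parsing each chunk independently then fails. Buffering until the
    blank-line event delimiter arrives guarantees every json.loads() call
    sees a complete payload.
    """

    def __init__(self) -> None:
        self._buf = ""

    def feed(self, chunk: str) -> Iterator[Any]:
        # Never parse the tail of the buffer: only events terminated by the
        # "\n\n" delimiter are complete; the remainder waits for more data.
        self._buf += chunk
        while "\n\n" in self._buf:
            event, self._buf = self._buf.split("\n\n", 1)
            for line in event.splitlines():
                if line.startswith("data: "):
                    payload = line[len("data: "):]
                    if payload != "[DONE]":  # assumed end-of-stream sentinel
                        yield json.loads(payload)


# Demo: the JSON object is split across two chunks but parses exactly once.
buf = SSEBuffer()
for chunk in ['data: {"tok', 'en": "hi"}\n\ndata: [DONE]\n\n']:
    for msg in buf.feed(chunk):
        print(msg)  # {'token': 'hi'}
```

The point of the buffer is that `json.loads` only ever runs on a payload whose terminating blank line has arrived, so a packet boundary in the middle of a JSON object can no longer raise a parse error.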
Diffstat (limited to 'backend/council.py')
| -rw-r--r-- | backend/council.py | 56 |
1 file changed, 45 insertions, 11 deletions
```diff
diff --git a/backend/council.py b/backend/council.py
index 5069abe..6facbd8 100644
--- a/backend/council.py
+++ b/backend/council.py
@@ -1,21 +1,46 @@
 """3-stage LLM Council orchestration."""
-from typing import List, Dict, Any, Tuple
+from typing import List, Dict, Any, Tuple, Optional
 
 from .openrouter import query_models_parallel, query_model
 from .config import COUNCIL_MODELS, CHAIRMAN_MODEL
 
 
-async def stage1_collect_responses(user_query: str) -> List[Dict[str, Any]]:
+def _build_messages(
+    conversation_history: Optional[List[Dict[str, str]]],
+    current_content: str
+) -> List[Dict[str, str]]:
+    """
+    Build a messages list with conversation history + current user message.
+
+    Args:
+        conversation_history: List of {"role": "user"/"assistant", "content": ...} dicts
+        current_content: The current message content to append as user
+
+    Returns:
+        Messages list for the OpenRouter API
+    """
+    messages = []
+    if conversation_history:
+        messages.extend(conversation_history)
+    messages.append({"role": "user", "content": current_content})
+    return messages
+
+
+async def stage1_collect_responses(
+    user_query: str,
+    conversation_history: Optional[List[Dict[str, str]]] = None
+) -> List[Dict[str, Any]]:
     """
     Stage 1: Collect individual responses from all council models.
 
     Args:
         user_query: The user's question
+        conversation_history: Optional list of prior conversation messages
 
     Returns:
         List of dicts with 'model' and 'response' keys
     """
-    messages = [{"role": "user", "content": user_query}]
+    messages = _build_messages(conversation_history, user_query)
 
     # Query all models in parallel
     responses = await query_models_parallel(COUNCIL_MODELS, messages)
@@ -34,7 +59,8 @@ async def stage1_collect_responses(user_query: str) -> List[Dict[str, Any]]:
 
 async def stage2_collect_rankings(
     user_query: str,
-    stage1_results: List[Dict[str, Any]]
+    stage1_results: List[Dict[str, Any]],
+    conversation_history: Optional[List[Dict[str, str]]] = None
 ) -> Tuple[List[Dict[str, Any]], Dict[str, str]]:
     """
     Stage 2: Each model ranks the anonymized responses.
@@ -42,6 +68,7 @@ async def stage2_collect_rankings(
     Args:
         user_query: The original user query
         stage1_results: Results from Stage 1
+        conversation_history: Optional list of prior conversation messages
 
     Returns:
         Tuple of (rankings list, label_to_model mapping)
@@ -92,7 +119,7 @@ FINAL RANKING:
 
 Now provide your evaluation and ranking:"""
 
-    messages = [{"role": "user", "content": ranking_prompt}]
+    messages = _build_messages(conversation_history, ranking_prompt)
 
     # Get rankings from all council models in parallel
     responses = await query_models_parallel(COUNCIL_MODELS, messages)
@@ -115,7 +142,8 @@ Now provide your evaluation and ranking:"""
 async def stage3_synthesize_final(
     user_query: str,
     stage1_results: List[Dict[str, Any]],
-    stage2_results: List[Dict[str, Any]]
+    stage2_results: List[Dict[str, Any]],
+    conversation_history: Optional[List[Dict[str, str]]] = None
 ) -> Dict[str, Any]:
     """
     Stage 3: Chairman synthesizes final response.
@@ -124,6 +152,7 @@ async def stage3_synthesize_final(
         user_query: The original user query
         stage1_results: Individual model responses from Stage 1
         stage2_results: Rankings from Stage 2
+        conversation_history: Optional list of prior conversation messages
 
     Returns:
         Dict with 'model' and 'response' keys
@@ -156,7 +185,7 @@ Your task as Chairman is to synthesize all of this information into a single, co
 
 Provide a clear, well-reasoned final answer that represents the council's collective wisdom:"""
 
-    messages = [{"role": "user", "content": chairman_prompt}]
+    messages = _build_messages(conversation_history, chairman_prompt)
 
     # Query the chairman model
     response = await query_model(CHAIRMAN_MODEL, messages)
@@ -293,18 +322,22 @@ Title:"""
     return title
 
 
-async def run_full_council(user_query: str) -> Tuple[List, List, Dict, Dict]:
+async def run_full_council(
+    user_query: str,
+    conversation_history: Optional[List[Dict[str, str]]] = None
+) -> Tuple[List, List, Dict, Dict]:
     """
     Run the complete 3-stage council process.
 
     Args:
         user_query: The user's question
+        conversation_history: Optional list of prior conversation messages
 
     Returns:
         Tuple of (stage1_results, stage2_results, stage3_result, metadata)
     """
     # Stage 1: Collect individual responses
-    stage1_results = await stage1_collect_responses(user_query)
+    stage1_results = await stage1_collect_responses(user_query, conversation_history)
 
     # If no models responded successfully, return error
     if not stage1_results:
@@ -314,7 +347,7 @@ async def run_full_council(user_query: str) -> Tuple[List, List, Dict, Dict]:
         }, {}
 
     # Stage 2: Collect rankings
-    stage2_results, label_to_model = await stage2_collect_rankings(user_query, stage1_results)
+    stage2_results, label_to_model = await stage2_collect_rankings(user_query, stage1_results, conversation_history)
 
     # Calculate aggregate rankings
     aggregate_rankings = calculate_aggregate_rankings(stage2_results, label_to_model)
@@ -323,7 +356,8 @@ async def run_full_council(user_query: str) -> Tuple[List, List, Dict, Dict]:
     stage3_result = await stage3_synthesize_final(
         user_query,
         stage1_results,
-        stage2_results
+        stage2_results,
+        conversation_history
     )
 
     # Prepare metadata
```
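To see how the threaded-through `conversation_history` is meant to be used end to end, here is a minimal caller-side sketch. It assumes the `backend.council` module path from the diffstat and invented queries; per the commit message, the real app assembles this history from stored user messages and Stage 3 chairman responses:

```python
import asyncio

from backend.council import run_full_council  # path per the diffstat


async def main() -> None:
    history: list[dict[str, str]] = []

    # Turn 1: no prior context, conversation_history defaults to None.
    _, _, stage3, _ = await run_full_council("What is RAG?")
    history.append({"role": "user", "content": "What is RAG?"})
    # Only the Stage 3 chairman response enters the history.
    history.append({"role": "assistant", "content": stage3["response"]})

    # Turn 2: every stage now prepends the prior exchange via _build_messages.
    _, _, stage3, _ = await run_full_council(
        "How does it differ from fine-tuning?",
        conversation_history=history,
    )
    print(stage3["response"])


asyncio.run(main())
```

Recording only the chairman's Stage 3 answer as the assistant turn keeps the history compact while still giving all three stages the context they need for coherent follow-ups.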
