From 77be59bc0a6353e98846b9c9bfa2d566efea8b1f Mon Sep 17 00:00:00 2001
From: YurenHao0426
Date: Fri, 13 Feb 2026 21:43:34 +0000
Subject: Add LLM Council mode for multi-model consensus

3-stage council orchestration: parallel model queries (Stage 1), anonymous
peer ranking (Stage 2), and streamed chairman synthesis (Stage 3). Includes
scope-aware file resolution for Google/Claude providers so upstream file
attachments are visible to all providers.

- Backend: council.py orchestrator, /api/run_council_stream endpoint,
  query_model_full() non-streaming wrapper, resolve_provider() helper,
  resolve_scoped_file_ids() for Google/Claude scope parity with OpenAI
- Frontend: council toggle UI, model checkbox selector, chairman picker,
  SSE event parsing, tabbed Stage 1/2/3 response display
- Canvas: amber council node indicator with Users icon

Co-Authored-By: Claude Opus 4.6
---
 backend/app/services/llm.py | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/backend/app/services/llm.py b/backend/app/services/llm.py
index 7efdce0..c22ada3 100644
--- a/backend/app/services/llm.py
+++ b/backend/app/services/llm.py
@@ -4,7 +4,7 @@ from typing import AsyncGenerator, List, Dict, Any, Optional
 import openai
 import google.generativeai as genai
 import anthropic
-from app.schemas import LLMConfig, Message, Role, Context
+from app.schemas import LLMConfig, Message, Role, Context, ModelProvider

 logger = logging.getLogger("contextflow.llm")

@@ -599,6 +599,33 @@ async def llm_streamer(
             yield f"Error calling LLM: {primary_error} (OpenRouter fallback also failed: {fallback_error})"


+def resolve_provider(model_name: str) -> ModelProvider:
+    """Determine the provider from a model name string."""
+    name = model_name.lower()
+    if any(name.startswith(p) for p in ('claude',)):
+        return ModelProvider.CLAUDE
+    if any(name.startswith(p) for p in ('gemini',)):
+        return ModelProvider.GOOGLE
+    # Default to OpenAI for gpt-*, o1, o3, etc.
+    return ModelProvider.OPENAI
+
+
+async def query_model_full(
+    context: Context,
+    user_prompt: str,
+    config: LLMConfig,
+    attachments=None,
+    tools=None,
+    openrouter_api_key=None,
+    images=None,
+) -> str:
+    """Collect full response from llm_streamer (non-streaming wrapper)."""
+    chunks = []
+    async for chunk in llm_streamer(context, user_prompt, config, attachments, tools, openrouter_api_key, images):
+        chunks.append(chunk)
+    return "".join(chunks)
+
+
 async def generate_title(user_prompt: str, response: str, api_key: str = None) -> str:
     """
     Generate a short title (3-4 words) for a Q-A pair using gpt-5-nano.
--
cgit v1.2.3
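
Editor's note: a minimal sketch of how Stage 1's parallel fan-out could sit on
top of the two helpers this patch adds. The council.py orchestrator is not
shown in this diff, so the function name stage1_parallel_queries, the model
list, and the LLMConfig field names (model, provider) are assumptions for
illustration, not the committed implementation.

import asyncio
from app.schemas import Context, LLMConfig
from app.services.llm import query_model_full, resolve_provider

async def stage1_parallel_queries(
    context: Context, prompt: str, model_names: list[str]
) -> dict[str, str]:
    """Query each council member concurrently; collect full responses."""
    async def ask(name: str) -> str:
        # resolve_provider() maps claude-* -> CLAUDE, gemini-* -> GOOGLE,
        # and defaults to OPENAI, so each member hits the right backend.
        config = LLMConfig(model=name, provider=resolve_provider(name))  # field names assumed
        return await query_model_full(context, prompt, config)

    # Fan out all queries at once rather than awaiting them sequentially.
    responses = await asyncio.gather(*(ask(n) for n in model_names))
    return dict(zip(model_names, responses))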
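Similarly, a sketch of the anonymization step behind Stage 2's "anonymous peer
ranking": relabeling each Stage 1 answer so reviewers cannot favor a model by
name. The labeling scheme, shuffling, and prompt layout below are assumptions;
the actual Stage 2 logic lives in council.py, outside this diff.

import random

def anonymize_responses(responses: dict[str, str]) -> tuple[dict[str, str], str]:
    """Map model names to anonymous labels and build a ranking prompt body."""
    names = list(responses)
    random.shuffle(names)  # decouple label order from council member order
    labels = {name: f"Response {chr(ord('A') + i)}" for i, name in enumerate(names)}
    prompt = "\n\n".join(f"{labels[n]}:\n{responses[n]}" for n in names)
    return labels, prompt

Stage 3 would then feed the ranked, de-anonymized responses to the chairman
model and stream its synthesis, presumably via the existing llm_streamer().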