author     YurenHao0426 <blackhao0426@gmail.com>  2026-02-13 23:08:05 +0000
committer  YurenHao0426 <blackhao0426@gmail.com>  2026-02-13 23:08:05 +0000
commit     cb59ecf3ac3b38ba883fc74bf810ae9e82e2a469 (patch)
tree       d0cab16f3ddb7708528ceb3cbb126d9437aed91b /backend/app/main.py
parent     2adacdbfa1d1049a0497e55f2b3ed00551bf876f (diff)
Add LLM Debate mode for multi-round iterative model discussions
Implements a debate feature alongside Council mode where 2-6 models engage in multi-round discussions with configurable judge modes (external judge, self-convergence, display-only), debate formats (free discussion, structured opposition, iterative improvement, custom), and early termination conditions.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
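For orientation, here is a minimal sketch of the request body the new endpoint expects. The field names are taken from the request.* accesses in the handler below; the values, model names, and enum wire spellings are illustrative assumptions, not taken from the schema definitions:

    # Hypothetical payload for POST /api/run_debate_stream.
    # Field names mirror the handler below; all values are assumptions.
    payload = {
        "user_prompt": "Is eventual consistency acceptable here?",
        "incoming_contexts": [],            # merged into one Context before the debate
        "merge_strategy": "smart",          # assumed wire value for MergeStrategy.SMART
        "attached_file_ids": [],
        "scopes": [],
        "system_prompt": None,
        "temperature": 0.7,
        "reasoning_effort": None,
        "enable_google_search": False,
        "debate_models": [                  # 2-6 members, per the commit message
            {"model_name": "gpt-4o"},       # per-member fields, when None, fall back
            {"model_name": "claude-sonnet-4"},  # to the request-level values
        ],
        "judge_mode": "external_judge",     # assumed wire value for DebateJudgeMode.EXTERNAL_JUDGE
        "judge_model": {"model_name": "gpt-4o"},
        "debate_format": "free_discussion", # assumed value; formats listed in the commit message
        "max_rounds": 3,
        "custom_format_prompt": None,
    }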
Diffstat (limited to 'backend/app/main.py')
-rw-r--r--  backend/app/main.py  129
1 file changed, 128 insertions(+), 1 deletion(-)
diff --git a/backend/app/main.py b/backend/app/main.py
index 304c74f..89c5dd0 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -8,9 +8,10 @@ from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse, FileResponse
from fastapi import UploadFile, File, Form
from pydantic import BaseModel
-from app.schemas import NodeRunRequest, NodeRunResponse, MergeStrategy, Role, Message, Context, LLMConfig, ModelProvider, ReasoningEffort, CouncilRunRequest
+from app.schemas import NodeRunRequest, NodeRunResponse, MergeStrategy, Role, Message, Context, LLMConfig, ModelProvider, ReasoningEffort, CouncilRunRequest, DebateRunRequest, DebateJudgeMode
from app.services.llm import llm_streamer, generate_title, get_openai_client, get_anthropic_client, resolve_provider
from app.services.council import council_event_stream
+from app.services.debate import debate_event_stream
from app.auth import auth_router, get_current_user, get_current_user_optional, init_db, User, get_db
from app.auth.utils import get_password_hash
from dotenv import load_dotenv
@@ -584,6 +585,132 @@ async def run_council_stream(
)
+@app.post("/api/run_debate_stream")
+async def run_debate_stream(
+ request: DebateRunRequest,
+ user: str = DEFAULT_USER,
+ current_user: User | None = Depends(get_current_user_optional),
+):
+ """
+ Run a multi-round LLM Debate and stream SSE events.
+ """
+ resolved = resolve_user(current_user, user)
+ username = resolved.username if resolved else DEFAULT_USER
+
+ # Merge incoming contexts
+ raw_messages = []
+ for ctx in request.incoming_contexts:
+ raw_messages.extend(ctx.messages)
+ if request.merge_strategy == MergeStrategy.SMART:
+ final_messages = smart_merge_messages(raw_messages)
+ else:
+ final_messages = raw_messages
+ execution_context = Context(messages=final_messages)
+
+ # Extract images from attached files
+ images, non_image_file_ids = extract_image_attachments(username, request.attached_file_ids)
+
+ openrouter_key = get_user_api_key(resolved, "openrouter")
+
+ # Build LLMConfig + attachments + tools for each debate member
+ member_configs: list[LLMConfig] = []
+ attachments_per_model: list[list[dict] | None] = []
+ tools_per_model: list[list[dict] | None] = []
+
+ for member in request.debate_models:
+ provider = resolve_provider(member.model_name)
+ provider_str = provider.value
+ api_key = get_user_api_key(resolved, provider_str)
+
+ config = LLMConfig(
+ provider=provider,
+ model_name=member.model_name,
+ temperature=member.temperature if member.temperature is not None else request.temperature,
+ system_prompt=request.system_prompt,
+ api_key=api_key,
+ reasoning_effort=member.reasoning_effort if member.reasoning_effort is not None else request.reasoning_effort,
+ enable_google_search=member.enable_google_search if member.enable_google_search is not None else request.enable_google_search,
+ )
+ member_configs.append(config)
+
+ # Prepare provider-specific file attachments
+ tools: list[dict] = []
+ attachments: list[dict] = []
+ scoped_file_ids = resolve_scoped_file_ids(username, request.scopes, non_image_file_ids)
+
+ if provider == ModelProvider.OPENAI:
+ vs_ids, debug_refs, filters = await prepare_openai_vector_search(
+ user=username,
+ attached_ids=non_image_file_ids,
+ scopes=request.scopes,
+ llm_config=config,
+ )
+ if not vs_ids:
+ try:
+ client = get_openai_client(config.api_key)
+ vs_id = await ensure_user_vector_store(username, client)
+ if vs_id:
+ vs_ids = [vs_id]
+ except Exception:
+ pass
+ if vs_ids:
+ tool_def = {"type": "file_search", "vector_store_ids": vs_ids}
+ if filters:
+ tool_def["filters"] = filters
+ tools.append(tool_def)
+ elif provider == ModelProvider.GOOGLE:
+ attachments = await prepare_attachments(
+ user=username,
+ target_provider=provider,
+ attached_ids=scoped_file_ids,
+ llm_config=config,
+ )
+ elif provider == ModelProvider.CLAUDE:
+ attachments = await prepare_attachments(
+ user=username,
+ target_provider=provider,
+ attached_ids=scoped_file_ids,
+ llm_config=config,
+ )
+
+ attachments_per_model.append(attachments or None)
+ tools_per_model.append(tools or None)
+
+ # Build judge config (if external_judge mode)
+ judge_config = None
+ if request.judge_mode == DebateJudgeMode.EXTERNAL_JUDGE and request.judge_model:
+ judge = request.judge_model
+ judge_provider = resolve_provider(judge.model_name)
+ judge_api_key = get_user_api_key(resolved, judge_provider.value)
+ judge_config = LLMConfig(
+ provider=judge_provider,
+ model_name=judge.model_name,
+ temperature=judge.temperature if judge.temperature is not None else request.temperature,
+ system_prompt=request.system_prompt,
+ api_key=judge_api_key,
+ reasoning_effort=judge.reasoning_effort if judge.reasoning_effort is not None else request.reasoning_effort,
+ enable_google_search=judge.enable_google_search if judge.enable_google_search is not None else request.enable_google_search,
+ )
+
+ return StreamingResponse(
+ debate_event_stream(
+ user_prompt=request.user_prompt,
+ context=execution_context,
+ member_configs=member_configs,
+ judge_config=judge_config,
+ judge_mode=request.judge_mode,
+ debate_format=request.debate_format,
+ max_rounds=request.max_rounds,
+ custom_format_prompt=request.custom_format_prompt,
+ attachments_per_model=attachments_per_model,
+ tools_per_model=tools_per_model,
+ openrouter_api_key=openrouter_key,
+ images=images,
+ ),
+ media_type="text/event-stream",
+ )
+
+
class TitleRequest(BaseModel):
user_prompt: str
response: str
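For completeness, a minimal sketch of a client consuming the new endpoint's SSE stream, reusing the payload sketched above. httpx, the host/port, and the event framing are assumptions; the actual event payloads are whatever debate_event_stream emits, which this diff does not show:

    import httpx  # assumed client library; any SSE-capable HTTP client works

    # Stream server-sent events from the debate endpoint.
    with httpx.stream(
        "POST",
        "http://localhost:8000/api/run_debate_stream",  # host/port are assumptions
        json=payload,
        timeout=None,  # debates are long-running; disable the read timeout
    ) as resp:
        for line in resp.iter_lines():
            if line.startswith("data:"):
                print(line[len("data:"):].strip())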