summary | refs | log | tree | commit | diff
path: root/backend/app/main.py
diff options
context:
space:
mode:
author    YurenHao0426 <blackhao0426@gmail.com>  2026-02-13 22:46:06 +0000
committer YurenHao0426 <blackhao0426@gmail.com>  2026-02-13 22:46:06 +0000
commit    2adacdbfa1d1049a0497e55f2b3ed00551bf876f (patch)
tree      7bb712d5d85e42aff8b7afe5da56a496ca82d9bd /backend/app/main.py
parent    77be59bc0a6353e98846b9c9bfa2d566efea8b1f (diff)
Add per-model council settings, Quick Chat council mode, and per-member trace selection
Council members now support individual temperature, reasoning effort, web search, and context trace overrides. Quick Chat inherits council config from the source node and streams through the 3-stage council pipeline. Blueprint loading migrates old string[] council formats to CouncilMemberConfig[].

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Diffstat (limited to 'backend/app/main.py')
-rw-r--r--backend/app/main.py34
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/backend/app/main.py b/backend/app/main.py
index 9370a32..304c74f 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -472,12 +472,13 @@ async def run_council_stream(
openrouter_key = get_user_api_key(resolved, "openrouter")
- # Build LLMConfig + attachments for each council member
+ # Build LLMConfig + attachments + per-member contexts for each council member
member_configs: list[LLMConfig] = []
attachments_per_model: list[list[dict] | None] = []
tools_per_model: list[list[dict] | None] = []
+ contexts_per_model: list[Context | None] = []
- all_model_names = [m.model_name for m in request.council_models] + [request.chairman_model]
+ all_model_names = [m.model_name for m in request.council_models] + [request.chairman_model.model_name]
for member in request.council_models:
provider = resolve_provider(member.model_name)
@@ -487,10 +488,11 @@ async def run_council_stream(
config = LLMConfig(
provider=provider,
model_name=member.model_name,
- temperature=request.temperature,
+ temperature=member.temperature if member.temperature is not None else request.temperature,
system_prompt=request.system_prompt,
api_key=api_key,
- reasoning_effort=request.reasoning_effort,
+ reasoning_effort=member.reasoning_effort if member.reasoning_effort is not None else request.reasoning_effort,
+ enable_google_search=member.enable_google_search if member.enable_google_search is not None else request.enable_google_search,
)
member_configs.append(config)
@@ -539,16 +541,31 @@ async def run_council_stream(
attachments_per_model.append(attachments or None)
tools_per_model.append(tools or None)
+ # Per-member context override
+ if member.incoming_contexts:
+ raw = []
+ for ctx in member.incoming_contexts:
+ raw.extend(ctx.messages)
+ if request.merge_strategy == MergeStrategy.SMART:
+ merged = smart_merge_messages(raw)
+ else:
+ merged = raw
+ contexts_per_model.append(Context(messages=merged))
+ else:
+ contexts_per_model.append(None) # Use shared context
+
# Build chairman config
- chairman_provider = resolve_provider(request.chairman_model)
+ chairman = request.chairman_model
+ chairman_provider = resolve_provider(chairman.model_name)
chairman_api_key = get_user_api_key(resolved, chairman_provider.value)
chairman_config = LLMConfig(
provider=chairman_provider,
- model_name=request.chairman_model,
- temperature=request.temperature,
+ model_name=chairman.model_name,
+ temperature=chairman.temperature if chairman.temperature is not None else request.temperature,
system_prompt=request.system_prompt,
api_key=chairman_api_key,
- reasoning_effort=request.reasoning_effort,
+ reasoning_effort=chairman.reasoning_effort if chairman.reasoning_effort is not None else request.reasoning_effort,
+ enable_google_search=chairman.enable_google_search if chairman.enable_google_search is not None else request.enable_google_search,
)
return StreamingResponse(
@@ -561,6 +578,7 @@ async def run_council_stream(
tools_per_model=tools_per_model,
openrouter_api_key=openrouter_key,
images=images,
+ contexts_per_model=contexts_per_model,
),
media_type="text/event-stream",
)