diff options
Diffstat (limited to 'backend/app/main.py')
| -rw-r--r-- | backend/app/main.py | 34 |
1 file changed, 26 insertions, 8 deletions
diff --git a/backend/app/main.py b/backend/app/main.py
index 9370a32..304c74f 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -472,12 +472,13 @@ async def run_council_stream(
     openrouter_key = get_user_api_key(resolved, "openrouter")
 
-    # Build LLMConfig + attachments for each council member
+    # Build LLMConfig + attachments + per-member contexts for each council member
     member_configs: list[LLMConfig] = []
     attachments_per_model: list[list[dict] | None] = []
     tools_per_model: list[list[dict] | None] = []
+    contexts_per_model: list[Context | None] = []
 
-    all_model_names = [m.model_name for m in request.council_models] + [request.chairman_model]
+    all_model_names = [m.model_name for m in request.council_models] + [request.chairman_model.model_name]
 
     for member in request.council_models:
         provider = resolve_provider(member.model_name)
 
@@ -487,10 +488,11 @@ async def run_council_stream(
         config = LLMConfig(
             provider=provider,
             model_name=member.model_name,
-            temperature=request.temperature,
+            temperature=member.temperature if member.temperature is not None else request.temperature,
             system_prompt=request.system_prompt,
             api_key=api_key,
-            reasoning_effort=request.reasoning_effort,
+            reasoning_effort=member.reasoning_effort if member.reasoning_effort is not None else request.reasoning_effort,
+            enable_google_search=member.enable_google_search if member.enable_google_search is not None else request.enable_google_search,
         )
         member_configs.append(config)
 
@@ -539,16 +541,31 @@ async def run_council_stream(
         attachments_per_model.append(attachments or None)
         tools_per_model.append(tools or None)
 
+        # Per-member context override
+        if member.incoming_contexts:
+            raw = []
+            for ctx in member.incoming_contexts:
+                raw.extend(ctx.messages)
+            if request.merge_strategy == MergeStrategy.SMART:
+                merged = smart_merge_messages(raw)
+            else:
+                merged = raw
+            contexts_per_model.append(Context(messages=merged))
+        else:
+            contexts_per_model.append(None)  # Use shared context
+
     # Build chairman config
-    chairman_provider = resolve_provider(request.chairman_model)
+    chairman = request.chairman_model
+    chairman_provider = resolve_provider(chairman.model_name)
     chairman_api_key = get_user_api_key(resolved, chairman_provider.value)
     chairman_config = LLMConfig(
         provider=chairman_provider,
-        model_name=request.chairman_model,
-        temperature=request.temperature,
+        model_name=chairman.model_name,
+        temperature=chairman.temperature if chairman.temperature is not None else request.temperature,
         system_prompt=request.system_prompt,
         api_key=chairman_api_key,
-        reasoning_effort=request.reasoning_effort,
+        reasoning_effort=chairman.reasoning_effort if chairman.reasoning_effort is not None else request.reasoning_effort,
+        enable_google_search=chairman.enable_google_search if chairman.enable_google_search is not None else request.enable_google_search,
     )
 
     return StreamingResponse(
@@ -561,6 +578,7 @@ async def run_council_stream(
             tools_per_model=tools_per_model,
             openrouter_api_key=openrouter_key,
             images=images,
+            contexts_per_model=contexts_per_model,
         ),
         media_type="text/event-stream",
     )
