| author | karpathy <andrej.karpathy@gmail.com> | 2025-11-22 15:24:47 -0800 |
|---|---|---|
| committer | karpathy <andrej.karpathy@gmail.com> | 2025-11-22 15:24:47 -0800 |
| commit | 87b4a178ec24cfaeca06ee433a592055fcf0068b | |
| tree | bed1b3a1d9ae99cd1c20dd07ab3ccc46a0be0101 | |
| parent | 827bfd3d3ecc34ac5f6a21003c785460d1b02d2b | |
a bit more progressive update and single turn
Diffstat (limited to 'backend')
| -rw-r--r-- | backend/main.py | 76 |
1 file changed, 75 insertions, 1 deletion
```diff
diff --git a/backend/main.py b/backend/main.py
index e896bf2..e33ce59 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -2,12 +2,15 @@
 from fastapi import FastAPI, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import StreamingResponse
 from pydantic import BaseModel
 from typing import List, Dict, Any
 import uuid
+import json
+import asyncio
 
 from . import storage
-from .council import run_full_council, generate_conversation_title
+from .council import run_full_council, generate_conversation_title, stage1_collect_responses, stage2_collect_rankings, stage3_synthesize_final, calculate_aggregate_rankings
 
 app = FastAPI(title="LLM Council API")
 
 
@@ -120,6 +123,77 @@ async def send_message(conversation_id: str, request: SendMessageRequest):
     }
 
 
+@app.post("/api/conversations/{conversation_id}/message/stream")
+async def send_message_stream(conversation_id: str, request: SendMessageRequest):
+    """
+    Send a message and stream the 3-stage council process.
+    Returns Server-Sent Events as each stage completes.
+    """
+    # Check if conversation exists
+    conversation = storage.get_conversation(conversation_id)
+    if conversation is None:
+        raise HTTPException(status_code=404, detail="Conversation not found")
+
+    # Check if this is the first message
+    is_first_message = len(conversation["messages"]) == 0
+
+    async def event_generator():
+        try:
+            # Add user message
+            storage.add_user_message(conversation_id, request.content)
+
+            # Start title generation in parallel (don't await yet)
+            title_task = None
+            if is_first_message:
+                title_task = asyncio.create_task(generate_conversation_title(request.content))
+
+            # Stage 1: Collect responses
+            yield f"data: {json.dumps({'type': 'stage1_start'})}\n\n"
+            stage1_results = await stage1_collect_responses(request.content)
+            yield f"data: {json.dumps({'type': 'stage1_complete', 'data': stage1_results})}\n\n"
+
+            # Stage 2: Collect rankings
+            yield f"data: {json.dumps({'type': 'stage2_start'})}\n\n"
+            stage2_results, label_to_model = await stage2_collect_rankings(request.content, stage1_results)
+            aggregate_rankings = calculate_aggregate_rankings(stage2_results, label_to_model)
+            yield f"data: {json.dumps({'type': 'stage2_complete', 'data': stage2_results, 'metadata': {'label_to_model': label_to_model, 'aggregate_rankings': aggregate_rankings}})}\n\n"
+
+            # Stage 3: Synthesize final answer
+            yield f"data: {json.dumps({'type': 'stage3_start'})}\n\n"
+            stage3_result = await stage3_synthesize_final(request.content, stage1_results, stage2_results)
+            yield f"data: {json.dumps({'type': 'stage3_complete', 'data': stage3_result})}\n\n"
+
+            # Wait for title generation if it was started
+            if title_task:
+                title = await title_task
+                storage.update_conversation_title(conversation_id, title)
+                yield f"data: {json.dumps({'type': 'title_complete', 'data': {'title': title}})}\n\n"
+
+            # Save complete assistant message
+            storage.add_assistant_message(
+                conversation_id,
+                stage1_results,
+                stage2_results,
+                stage3_result
+            )
+
+            # Send completion event
+            yield f"data: {json.dumps({'type': 'complete'})}\n\n"
+
+        except Exception as e:
+            # Send error event
+            yield f"data: {json.dumps({'type': 'error', 'message': str(e)})}\n\n"
+
+    return StreamingResponse(
+        event_generator(),
+        media_type="text/event-stream",
+        headers={
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+        }
+    )
+
+
 if __name__ == "__main__":
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=8001)
```
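Each event the endpoint emits is a single `data: <json>` line followed by a blank separator line, so a client only needs to read the response line by line and JSON-decode each payload. A minimal consumer sketch, assuming the server from this commit is running on localhost:8001 and `httpx` is installed; the conversation id and prompt are placeholders, not part of the commit:

```python
import asyncio
import json

import httpx


async def consume_stream(conversation_id: str, content: str) -> None:
    """Read the council's SSE stream and print each event type as it arrives."""
    url = f"http://localhost:8001/api/conversations/{conversation_id}/message/stream"
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream("POST", url, json={"content": content}) as response:
            async for line in response.aiter_lines():
                # Events are framed as "data: <json>"; skip the blank separator lines.
                if not line.startswith("data: "):
                    continue
                event = json.loads(line[len("data: "):])
                print(event["type"])
                if event["type"] in ("complete", "error"):
                    break


if __name__ == "__main__":
    asyncio.run(consume_stream("some-conversation-id", "Hello, council!"))
```

The `timeout=None` matters here: the council stages can take well over httpx's default 5-second timeout before the next event arrives.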

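One readability note on `event_generator()`: the `f"data: {json.dumps(...)}\n\n"` framing is repeated for every event. Factoring it into a tiny helper would keep the SSE wire format in one place; a sketch of the idea, where the `sse_event` name is hypothetical and not part of this commit:

```python
import json
from typing import Any


def sse_event(event_type: str, **fields: Any) -> str:
    """Frame a payload as a single Server-Sent Events message."""
    payload = {"type": event_type, **fields}
    return f"data: {json.dumps(payload)}\n\n"


# Inside event_generator(), the stage-1 events would then read:
#   yield sse_event("stage1_start")
#   yield sse_event("stage1_complete", data=stage1_results)
```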