author     blackhao <13851610112@163.com>    2025-12-06 01:30:57 -0600
committer  blackhao <13851610112@163.com>    2025-12-06 01:30:57 -0600
commit     93dbe11014cf967690727c25e89d9d1075008c24 (patch)
tree       e168becbfff0e699f49021c1b3de6918e7f0a124 /backend/app/main.py
parent     bcb44d5a7c4b17afd7ba64be5b497d74afc69fb6 (diff)
Diffstat (limited to 'backend/app/main.py')

-rw-r--r--  backend/app/main.py  38

1 file changed, 36 insertions(+), 2 deletions(-)
```diff
diff --git a/backend/app/main.py b/backend/app/main.py
index 48cb89f..65fa3a3 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -1,8 +1,9 @@
 from fastapi import FastAPI, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import StreamingResponse
-from app.schemas import NodeRunRequest, NodeRunResponse, MergeStrategy, Role, Message, Context
-from app.services.llm import llm_streamer
+from pydantic import BaseModel
+from app.schemas import NodeRunRequest, NodeRunResponse, MergeStrategy, Role, Message, Context, LLMConfig, ModelProvider, ReasoningEffort
+from app.services.llm import llm_streamer, generate_title
 from dotenv import load_dotenv
 import os
 
@@ -83,3 +84,36 @@ async def run_node_stream(request: NodeRunRequest):
         llm_streamer(execution_context, request.user_prompt, request.config),
         media_type="text/event-stream"
     )
+
+class TitleRequest(BaseModel):
+    user_prompt: str
+    response: str
+
+class TitleResponse(BaseModel):
+    title: str
+
+@app.post("/api/generate_title", response_model=TitleResponse)
+async def generate_title_endpoint(request: TitleRequest):
+    """
+    Generate a short title for a Q-A pair using gpt-5-nano.
+    Returns 3-4 short English words summarizing the topic.
+    """
+    title = await generate_title(request.user_prompt, request.response)
+    return TitleResponse(title=title)
+
+
+class SummarizeRequest(BaseModel):
+    content: str
+    model: str  # Model to use for summarization
+
+class SummarizeResponse(BaseModel):
+    summary: str
+
+@app.post("/api/summarize", response_model=SummarizeResponse)
+async def summarize_endpoint(request: SummarizeRequest):
+    """
+    Summarize the given content using the specified model.
+    """
+    from app.services.llm import summarize_content
+    summary = await summarize_content(request.content, request.model)
+    return SummarizeResponse(summary=summary)
```
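For reference, the two endpoints added by this commit can be exercised with any HTTP client. The sketch below is illustrative only: the `httpx` dependency, the localhost base URL, and the sample payloads are assumptions, not part of the commit.

```python
# Hypothetical client sketch -- httpx and the localhost URL are assumptions,
# not part of the commit. Start the backend first, e.g. `uvicorn app.main:app`.
import httpx

BASE = "http://localhost:8000"

# Title a Q-A pair (POST /api/generate_title -> TitleResponse).
r = httpx.post(f"{BASE}/api/generate_title", json={
    "user_prompt": "What is backpropagation?",
    "response": "Backpropagation computes gradients of the loss with the chain rule.",
})
print(r.json()["title"])  # expected: 3-4 short English words

# Summarize content with a caller-chosen model (POST /api/summarize -> SummarizeResponse).
r = httpx.post(f"{BASE}/api/summarize", json={
    "content": "Long document text to condense...",
    "model": "gpt-5-nano",
})
print(r.json()["summary"])
```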

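The commit imports `generate_title` (and, lazily inside the handler, `summarize_content`) from `app.services.llm`, but their bodies are not part of this diff. Below is a minimal sketch of the contract those helpers appear to satisfy, assuming the async OpenAI SDK; only the signatures are implied by the diff, and the client setup and prompts here are guesses.

```python
# Hypothetical sketch of app/services/llm.py helpers -- the diff only implies
# the signatures; the OpenAI SDK usage and prompt wording are assumptions.
from openai import AsyncOpenAI

client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment

async def generate_title(user_prompt: str, response: str) -> str:
    """Return a 3-4 word English title for a Q-A pair (per the endpoint docstring)."""
    completion = await client.chat.completions.create(
        model="gpt-5-nano",  # model named in the endpoint docstring
        messages=[
            {"role": "system",
             "content": "Summarize the topic of this Q-A pair in 3-4 short English words."},
            {"role": "user", "content": f"Q: {user_prompt}\nA: {response}"},
        ],
    )
    return completion.choices[0].message.content.strip()

async def summarize_content(content: str, model: str) -> str:
    """Summarize the given content using the caller-specified model."""
    completion = await client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": "Summarize the following content concisely."},
            {"role": "user", "content": content},
        ],
    )
    return completion.choices[0].message.content.strip()
```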