summary refs log tree commit diff
path: root/backend/app/services/llm.py
diff options
context:
space:
mode:
Diffstat (limited to 'backend/app/services/llm.py')
-rw-r--r--backend/app/services/llm.py29
1 files changed, 28 insertions, 1 deletions
diff --git a/backend/app/services/llm.py b/backend/app/services/llm.py
index 7efdce0..c22ada3 100644
--- a/backend/app/services/llm.py
+++ b/backend/app/services/llm.py
@@ -4,7 +4,7 @@ from typing import AsyncGenerator, List, Dict, Any, Optional
import openai
import google.generativeai as genai
import anthropic
-from app.schemas import LLMConfig, Message, Role, Context
+from app.schemas import LLMConfig, Message, Role, Context, ModelProvider
logger = logging.getLogger("contextflow.llm")
@@ -599,6 +599,33 @@ async def llm_streamer(
yield f"Error calling LLM: {primary_error} (OpenRouter fallback also failed: {fallback_error})"
def resolve_provider(model_name: str) -> ModelProvider:
    """Determine the provider from a model name string.

    Matching is case-insensitive and prefix-based:
    ``claude*`` -> CLAUDE, ``gemini*`` -> GOOGLE, and anything else
    (gpt-*, o1, o3, ...) falls through to OPENAI.

    Args:
        model_name: Raw model identifier, e.g. "claude-3-opus" or "gpt-4o".

    Returns:
        The matching ModelProvider enum member.
    """
    name = model_name.lower()
    # str.startswith accepts a tuple of prefixes directly — no need to wrap
    # a single-element tuple in any(). Tuples kept so new prefixes (e.g.
    # "anthropic/") can be appended without restructuring.
    if name.startswith(('claude',)):
        return ModelProvider.CLAUDE
    if name.startswith(('gemini',)):
        return ModelProvider.GOOGLE
    # Default to OpenAI for gpt-*, o1, o3, etc.
    return ModelProvider.OPENAI
+
+
async def query_model_full(
    context: Context,
    user_prompt: str,
    config: LLMConfig,
    attachments=None,
    tools=None,
    openrouter_api_key=None,
    images=None,
) -> str:
    """Run llm_streamer to completion and return the concatenated text.

    Non-streaming convenience wrapper: takes the same arguments as
    llm_streamer, buffers every yielded chunk, and returns the full
    response as a single string.
    """
    parts = [
        piece
        async for piece in llm_streamer(
            context,
            user_prompt,
            config,
            attachments,
            tools,
            openrouter_api_key,
            images,
        )
    ]
    return "".join(parts)
+
+
async def generate_title(user_prompt: str, response: str, api_key: str = None) -> str:
"""
Generate a short title (3-4 words) for a Q-A pair using gpt-5-nano.