 backend/app/main.py                       |  38
 backend/app/schemas.py                    |   7
 backend/app/services/llm.py               | 384
 frontend/src/App.tsx                      | 115
 frontend/src/components/LeftSidebar.tsx   | 134
 frontend/src/components/Sidebar.tsx       | 419
 frontend/src/components/nodes/LLMNode.tsx |  22
 frontend/src/store/flowStore.ts           | 191
 8 files changed, 1221 insertions(+), 89 deletions(-)
diff --git a/backend/app/main.py b/backend/app/main.py
index 48cb89f..65fa3a3 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -1,8 +1,9 @@
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
-from app.schemas import NodeRunRequest, NodeRunResponse, MergeStrategy, Role, Message, Context
-from app.services.llm import llm_streamer
+from pydantic import BaseModel
+from app.schemas import NodeRunRequest, NodeRunResponse, MergeStrategy, Role, Message, Context, LLMConfig, ModelProvider, ReasoningEffort
+from app.services.llm import llm_streamer, generate_title
from dotenv import load_dotenv
import os
@@ -83,3 +84,36 @@ async def run_node_stream(request: NodeRunRequest):
llm_streamer(execution_context, request.user_prompt, request.config),
media_type="text/event-stream"
)
+
+class TitleRequest(BaseModel):
+ user_prompt: str
+ response: str
+
+class TitleResponse(BaseModel):
+ title: str
+
+@app.post("/api/generate_title", response_model=TitleResponse)
+async def generate_title_endpoint(request: TitleRequest):
+ """
+ Generate a short title for a Q-A pair using gpt-5-nano.
+ Returns a 2-3 word English title summarizing the topic.
+ """
+ title = await generate_title(request.user_prompt, request.response)
+ return TitleResponse(title=title)
+
+
+class SummarizeRequest(BaseModel):
+ content: str
+ model: str # Model to use for summarization
+
+class SummarizeResponse(BaseModel):
+ summary: str
+
+@app.post("/api/summarize", response_model=SummarizeResponse)
+async def summarize_endpoint(request: SummarizeRequest):
+ """
+ Summarize the given content using the specified model.
+ """
+ from app.services.llm import summarize_content
+ summary = await summarize_content(request.content, request.model)
+ return SummarizeResponse(summary=summary)
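A quick way to exercise the two new endpoints (a minimal sketch, assuming the backend runs on http://localhost:8000, which is where the frontend fetch calls point; payload fields mirror TitleRequest and SummarizeRequest above):

```python
import requests

BASE = "http://localhost:8000"  # assumption: matches the frontend fetch URLs

# /api/generate_title takes a Q-A pair and returns {"title": "..."}
r = requests.post(f"{BASE}/api/generate_title", json={
    "user_prompt": "How to sort a list in Python?",
    "response": "Use sorted() for a new list or list.sort() in place.",
})
print(r.json()["title"])  # e.g. "Python Sorting"

# /api/summarize takes raw content plus a model name and returns {"summary": "..."}
r = requests.post(f"{BASE}/api/summarize", json={
    "content": "Some long response text to condense...",
    "model": "gpt-5-nano",
})
print(r.json()["summary"])
```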
diff --git a/backend/app/schemas.py b/backend/app/schemas.py
index ac90bc1..bd8ebe7 100644
--- a/backend/app/schemas.py
+++ b/backend/app/schemas.py
@@ -24,6 +24,11 @@ class ModelProvider(str, Enum):
OPENAI = "openai"
GOOGLE = "google"
+class ReasoningEffort(str, Enum):
+ LOW = "low"
+ MEDIUM = "medium"
+ HIGH = "high"
+
class LLMConfig(BaseModel):
provider: ModelProvider
model_name: str
@@ -31,6 +36,8 @@ class LLMConfig(BaseModel):
max_tokens: int = 1000
system_prompt: Optional[str] = None
api_key: Optional[str] = None # Optional override, usually from env
+ enable_google_search: bool = False
+ reasoning_effort: ReasoningEffort = ReasoningEffort.MEDIUM # For OpenAI reasoning models
class MergeStrategy(str, Enum):
RAW = "raw"
diff --git a/backend/app/services/llm.py b/backend/app/services/llm.py
index 958ab4c..b372f9e 100644
--- a/backend/app/services/llm.py
+++ b/backend/app/services/llm.py
@@ -34,57 +34,206 @@ async def stream_openai(messages: list[Message], config: LLMConfig) -> AsyncGene
for msg in messages:
openai_messages.append({"role": msg.role.value, "content": msg.content})
- stream = await client.chat.completions.create(
- model=config.model_name,
- messages=openai_messages,
- temperature=config.temperature,
- max_tokens=config.max_tokens,
- stream=True
+ # Models that ONLY support Responses API (no Chat Completions fallback)
+ responses_only_models = ['gpt-5-pro']
+
+ # Models that CAN use Responses API (and thus support web_search tool)
+ responses_capable_models = [
+ 'gpt-5', 'gpt-5-chat-latest', 'gpt-5-mini', 'gpt-5-nano',
+ 'gpt-5-pro', 'gpt-5.1', 'gpt-5.1-chat-latest', 'o3'
+ ]
+
+ # Use Responses API if:
+ # 1. Model ONLY supports Responses API, OR
+ # 2. User wants web search AND model is capable of Responses API
+ use_responses_api = (
+ config.model_name in responses_only_models or
+ (config.enable_google_search and config.model_name in responses_capable_models)
)
+ if use_responses_api:
+ # Debug: Confirm config reception
+ # yield f"[Debug: Config Search={config.enable_google_search}, Model={config.model_name}]\n"
+
+ # Use new client.responses.create API with Polling Strategy
+ # Convert messages to Responses API format (same as Chat Completions)
+ # Responses API accepts input as array of message objects
+
+ # Filter out system messages (use instructions instead) and format for Responses API
+ input_messages = []
+ for msg in openai_messages:
+ if msg['role'] != 'system': # System prompt goes to instructions
+ input_messages.append({
+ "role": msg['role'],
+ "content": msg['content']
+ })
+
+ resp_params = {
+ "model": config.model_name,
+ "input": input_messages, # Full conversation history
+ "stream": False, # Disable stream to get immediate ID
+ "background": True, # Enable background mode for async execution
+ "store": True
+ }
+
+ # Add reasoning effort (not supported by chat-latest models)
+ models_without_effort = ['gpt-5-chat-latest', 'gpt-5.1-chat-latest']
+ if config.model_name not in models_without_effort:
+ resp_params["reasoning"] = {"effort": config.reasoning_effort.value}
+
+ # Enable Web Search if requested (Reusing enable_google_search flag as generic web_search flag)
+ if config.enable_google_search:
+ resp_params["tools"] = [{"type": "web_search"}]
+ resp_params["tool_choice"] = "auto"
+ # Debugging tool injection
+ # yield "[Debug: Web Search Tool Injected]" # Uncomment to debug
+
+ if config.system_prompt:
+ resp_params["instructions"] = config.system_prompt
+
+ # 1. Create Response (Async/Background)
+ # This returns a Response object immediately with status 'queued' or 'in_progress'
+ initial_resp = await client.responses.create(**resp_params)
+ response_id = initial_resp.id
+
+ # 2. Poll for Completion
+ import asyncio
+ # Poll for up to 10 minutes
+ for _ in range(300):
+ final_resp = await client.responses.retrieve(response_id)
+
+ if final_resp.status == 'completed':
+ # Parse final response object
+ found_content = False
+ if hasattr(final_resp, 'output'):
+ for out in final_resp.output:
+ out_type = getattr(out, 'type', None)
+ out_content = getattr(out, 'content', None)
+
+ if out_type == 'message' and out_content:
+ for c in out_content:
+ c_type = getattr(c, 'type', None)
+ if c_type == 'output_text':
+ text_val = getattr(c, 'text', None)
+ if text_val:
+ yield text_val
+ found_content = True
+
+ if not found_content:
+ yield f"\n[Debug: Completed but no content. Resp: {final_resp}]"
+ return
+
+ elif final_resp.status in ['failed', 'cancelled', 'expired']:
+ error_msg = getattr(final_resp, 'error', 'Unknown error')
+ yield f"\n[Error: Response generation {final_resp.status}: {error_msg}]"
+ return
+
+ # Still in_progress
+ await asyncio.sleep(2)
+
+ yield "\n[Error: Polling timed out]"
+ return
+
+ # Standard Chat Completions API
+ # Prepare parameters
+ req_params = {
+ "model": config.model_name,
+ "messages": openai_messages,
+ "stream": True
+ }
+
+ # Identify reasoning models
+ is_reasoning_model = config.model_name in [
+ 'gpt-5', 'gpt-5-chat-latest', 'gpt-5-mini', 'gpt-5-nano',
+ 'gpt-5-pro', 'gpt-5.1', 'gpt-5.1-chat-latest', 'o3',
+ 'o1', 'o1-mini', 'o1-preview'
+ ]
+
+ if is_reasoning_model:
+ # Reasoning models use max_completion_tokens
+ if config.max_tokens:
+ req_params["max_completion_tokens"] = config.max_tokens
+ # IMPORTANT: Reasoning models often DO NOT support 'temperature'.
+ # We skip adding it.
+ else:
+ req_params["max_tokens"] = config.max_tokens
+ req_params["temperature"] = config.temperature
+
+ stream = await client.chat.completions.create(**req_params)
+
async for chunk in stream:
- if chunk.choices[0].delta.content:
- yield chunk.choices[0].delta.content
+ if chunk.choices and chunk.choices[0].delta:
+ delta = chunk.choices[0].delta
+ if delta.content:
+ yield delta.content
+ elif delta.tool_calls:
+ # Tool-call deltas only appear when tools were sent; this Chat Completions
+ # path never sends tools, so ignore them along with empty boundary deltas.
+ pass
+ elif getattr(delta, 'refusal', None):
+ yield f"[Refusal: {delta.refusal}]"
async def stream_google(messages: list[Message], config: LLMConfig) -> AsyncGenerator[str, None]:
- configure_google(config.api_key)
- model = genai.GenerativeModel(config.model_name)
-
- # Google Generative AI history format:
- # [{"role": "user", "parts": ["..."]}, {"role": "model", "parts": ["..."]}]
- # System prompt is usually set on model init or prepended.
+ # Use new Google GenAI SDK (google-genai)
+ from google import genai
+ from google.genai import types
- history = []
- # If system prompt exists, we might prepend it to the first user message or use specific system instruction if supported
- # Gemini 1.5 Pro supports system instructions. For simplicity, let's prepend to history if possible or context.
+ key = config.api_key or os.getenv("GOOGLE_API_KEY")
+ if not key:
+ raise ValueError("Google API Key not found")
+
+ client = genai.Client(api_key=key)
- system_instruction = config.system_prompt
- if system_instruction:
- model = genai.GenerativeModel(config.model_name, system_instruction=system_instruction)
+ # Configure Tools (Google Search)
+ tools = None
+ if config.enable_google_search:
+ # Enable Google Search Grounding
+ tools = [types.Tool(google_search=types.GoogleSearch())]
- # Convert messages
- # Note: Gemini strictly requires user/model alternation in history usually.
- # We will need to handle this. For MVP, we assume the input is clean or we blindly map.
- for msg in messages:
+ # Configure Generation
+ gen_config = types.GenerateContentConfig(
+ temperature=config.temperature,
+ max_output_tokens=config.max_tokens,
+ system_instruction=config.system_prompt,
+ tools=tools
+ )
+
+ # Prepare history: split off the last user message as the prompt,
+ # falling back to a placeholder if the conversation doesn't end on a user turn.
+ prompt_msg = "..."
+ history_msgs = messages
+ if messages and messages[-1].role == Role.USER:
+ prompt_msg = messages[-1].content
+ history_msgs = messages[:-1]
+
+ history_content = []
+ for msg in history_msgs:
role = "user" if msg.role == Role.USER else "model"
- history.append({"role": role, "parts": [msg.content]})
-
- # The last message should be the prompt, strictly speaking, `chat.send_message` takes the new message
- # But if we are treating everything as history...
- # Let's separate the last user message as the prompt if possible.
+ history_content.append(types.Content(
+ role=role,
+ parts=[types.Part(text=msg.content)]
+ ))
+
+ # Use Async Client via .aio
+ chat_session = client.aio.chats.create(
+ model=config.model_name,
+ history=history_content,
+ config=gen_config
+ )
+
+ # Streaming call: in the google-genai SDK, the async chat session streams
+ # via send_message_stream, which resolves to an async iterator of chunks.
+ response_stream = await chat_session.send_message_stream(prompt_msg)
- if history and history[-1]["role"] == "user":
- last_msg = history.pop()
- chat = model.start_chat(history=history)
- response_stream = await chat.send_message_async(last_msg["parts"][0], stream=True)
- else:
- # If the last message is not user, we might be in a weird state.
- # Just send an empty prompt or handle error?
- # For now, assume the user always provides a prompt in the node.
- chat = model.start_chat(history=history)
- response_stream = await chat.send_message_async("...", stream=True) # Fallback
-
async for chunk in response_stream:
+ # Access text safely
if chunk.text:
yield chunk.text
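The background + polling pattern used in the Responses path above is easier to see in isolation. A stripped-down sketch of the same flow (assuming an AsyncOpenAI client and the SDK's output_text convenience accessor; values are illustrative):

```python
import asyncio
from openai import AsyncOpenAI

async def run_background(prompt: str) -> str:
    client = AsyncOpenAI()
    # background=True returns immediately with status 'queued' or 'in_progress'
    resp = await client.responses.create(
        model="gpt-5-pro", input=prompt, background=True, store=True, stream=False
    )
    for _ in range(300):  # ~10 minutes at 2 s per poll, as above
        resp = await client.responses.retrieve(resp.id)
        if resp.status == "completed":
            return resp.output_text  # concatenated output_text parts
        if resp.status in ("failed", "cancelled", "expired"):
            raise RuntimeError(f"response generation {resp.status}")
        await asyncio.sleep(2)
    raise TimeoutError("polling timed out")
```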
@@ -114,3 +263,158 @@ async def llm_streamer(context: Context, user_prompt: str, config: LLMConfig) ->
except Exception as e:
yield f"Error calling LLM: {str(e)}"
+
+async def generate_title(user_prompt: str, response: str) -> str:
+ """
+ Generate a short title (2-3 words) for a Q-A pair using gpt-5-nano.
+ Uses the Responses API (required for the gpt-5 series) in synchronous mode (no background).
+ """
+ client = get_openai_client()
+
+ instructions = """TASK: Extract a short topic title from the given Q&A. Do NOT answer the question - only extract the topic.
+
+Rules:
+- Output 2-3 short words OR 2 longer words
+- No punctuation, no quotes, no explanation
+- Capitalize each word
+- Be specific to the topic discussed
+- Output ONLY the title, nothing else
+
+Examples:
+Q: "How to sort a list in Python?" -> "Python Sorting"
+Q: "What is React state?" -> "React State"
+Q: "Explain AWS Lambda pricing" -> "Lambda Pricing"
+Q: "Who are you?" -> "AI Identity"
+Q: "What's the weather in NYC?" -> "NYC Weather\""""
+
+ # Truncate to avoid token limits
+ truncated_prompt = user_prompt[:300] if len(user_prompt) > 300 else user_prompt
+ truncated_response = response[:300] if len(response) > 300 else response
+
+ input_text = f"Question: {truncated_prompt}\n\nAnswer: {truncated_response}"
+
+ try:
+ print(f"[generate_title] Called with prompt: {truncated_prompt[:50]}...")
+
+ # Use Responses API for gpt-5-nano (synchronous, no background)
+ # Note: max_output_tokens includes reasoning tokens, so needs to be higher
+ resp = await client.responses.create(
+ model="gpt-5-nano",
+ input=input_text,
+ instructions=instructions,
+ max_output_tokens=500, # Higher to accommodate reasoning tokens
+ reasoning={"effort": "low"}, # Minimize reasoning for simple task
+ stream=False
+ )
+
+ print(f"[generate_title] Response status: {getattr(resp, 'status', 'unknown')}")
+ print(f"[generate_title] Response output: {getattr(resp, 'output', 'no output')}")
+
+ # Response should be completed immediately (no polling needed)
+ if hasattr(resp, 'output'):
+ for out in resp.output:
+ if getattr(out, 'type', None) == 'message':
+ content = getattr(out, 'content', [])
+ for c in content:
+ if getattr(c, 'type', None) == 'output_text':
+ title = getattr(c, 'text', '').strip()
+ # Clean up
+ title = title.strip('"\'')
+ print(f"[generate_title] Extracted title: {title}")
+ if title:
+ return title
+
+ print("[generate_title] No title found, returning default")
+ return "New Question"
+
+ except Exception as e:
+ print(f"Title generation error: {e}")
+ return "New Question"
+
+
+async def summarize_content(content: str, model: str) -> str:
+ """
+ Summarize the given content using the specified model.
+ Supports both OpenAI and Gemini models.
+ """
+ instructions = """Summarize the following content concisely.
+Keep the key points and main ideas.
+Output only the summary, no preamble."""
+
+ # Truncate very long content
+ max_content = 8000
+ if len(content) > max_content:
+ content = content[:max_content] + "\n\n[Content truncated...]"
+
+ try:
+ if model.startswith('gemini'):
+ # Use Gemini
+ from google import genai
+ from google.genai import types
+
+ key = os.getenv("GOOGLE_API_KEY")
+ if not key:
+ return "Error: Google API Key not found"
+
+ client = genai.Client(api_key=key)
+
+ gen_config = types.GenerateContentConfig(
+ temperature=0.3,
+ max_output_tokens=1000,
+ system_instruction=instructions
+ )
+
+ response = await client.aio.models.generate_content(
+ model=model,
+ contents=content,
+ config=gen_config
+ )
+
+ return response.text or "No summary generated"
+
+ else:
+ # Use OpenAI
+ client = get_openai_client()
+
+ # Check if model needs Responses API
+ responses_api_models = [
+ 'gpt-5', 'gpt-5-chat-latest', 'gpt-5-mini', 'gpt-5-nano',
+ 'gpt-5-pro', 'gpt-5.1', 'gpt-5.1-chat-latest', 'o3'
+ ]
+
+ if model in responses_api_models:
+ # Use Responses API
+ resp = await client.responses.create(
+ model=model,
+ input=content,
+ instructions=instructions,
+ max_output_tokens=2000,
+ stream=False
+ )
+
+ if hasattr(resp, 'output'):
+ for out in resp.output:
+ if getattr(out, 'type', None) == 'message':
+ for c in getattr(out, 'content', []):
+ if getattr(c, 'type', None) == 'output_text':
+ return getattr(c, 'text', '') or "No summary generated"
+
+ return "No summary generated"
+ else:
+ # Use Chat Completions API
+ result = await client.chat.completions.create(
+ model=model,
+ messages=[
+ {"role": "system", "content": instructions},
+ {"role": "user", "content": content}
+ ],
+ max_tokens=1000,
+ temperature=0.3
+ )
+
+ return result.choices[0].message.content or "No summary generated"
+
+ except Exception as e:
+ print(f"Summarization error: {e}")
+ return f"Error: {str(e)}"
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index 8c52751..9ec1340 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -13,6 +13,7 @@ import 'reactflow/dist/style.css';
import useFlowStore from './store/flowStore';
import LLMNode from './components/nodes/LLMNode';
import Sidebar from './components/Sidebar';
+import LeftSidebar from './components/LeftSidebar';
import { ContextMenu } from './components/ContextMenu';
import { Plus } from 'lucide-react';
@@ -31,12 +32,19 @@ function Flow() {
deleteEdge,
deleteNode,
deleteBranch,
- setSelectedNode
+ setSelectedNode,
+ toggleNodeDisabled,
+ archiveNode,
+ createNodeFromArchive,
+ toggleTraceDisabled
} = useFlowStore();
const reactFlowWrapper = useRef<HTMLDivElement>(null);
const { project } = useReactFlow();
const [menu, setMenu] = useState<{ x: number; y: number; type: 'pane' | 'node' | 'edge'; id?: string } | null>(null);
+
+ const [isLeftOpen, setIsLeftOpen] = useState(true);
+ const [isRightOpen, setIsRightOpen] = useState(true);
const onPaneClick = () => {
setSelectedNode(null);
@@ -73,6 +81,7 @@ function Flow() {
systemPrompt: '',
userPrompt: '',
mergeStrategy: 'smart',
+ reasoningEffort: 'medium', // Default for reasoning models
messages: [],
traces: [],
outgoingTraces: [],
@@ -86,12 +95,44 @@ function Flow() {
};
const onNodeClick = (_: any, node: Node) => {
+ // Don't select disabled nodes
+ const nodeData = node.data as any;
+ if (nodeData?.disabled) return;
setSelectedNode(node.id);
};
+ const onDragOver = (event: React.DragEvent) => {
+ event.preventDefault();
+ event.dataTransfer.dropEffect = 'copy';
+ };
+
+ const onDrop = (event: React.DragEvent) => {
+ event.preventDefault();
+
+ const archiveId = event.dataTransfer.getData('archiveId');
+ if (!archiveId) return;
+
+ const bounds = reactFlowWrapper.current?.getBoundingClientRect();
+ if (!bounds) return;
+
+ const position = project({
+ x: event.clientX - bounds.left,
+ y: event.clientY - bounds.top
+ });
+
+ createNodeFromArchive(archiveId, position);
+ };
+
return (
<div style={{ width: '100vw', height: '100vh', display: 'flex' }}>
- <div style={{ flex: 1, height: '100%' }} ref={reactFlowWrapper}>
+ <LeftSidebar isOpen={isLeftOpen} onToggle={() => setIsLeftOpen(!isLeftOpen)} />
+
+ <div
+ style={{ flex: 1, height: '100%', position: 'relative' }}
+ ref={reactFlowWrapper}
+ onDragOver={onDragOver}
+ onDrop={onDrop}
+ >
<ReactFlow
nodes={nodes}
edges={edges}
@@ -139,28 +180,64 @@ function Flow() {
}
}
}
- ] : menu.type === 'node' ? [
- {
- label: 'Delete Node (Cascade)',
- danger: true,
- onClick: () => menu.id && deleteBranch(menu.id)
- }
- ] : [
- {
- label: 'Disconnect',
- onClick: () => menu.id && deleteEdge(menu.id)
- },
- {
- label: 'Delete Branch',
- danger: true,
- onClick: () => menu.id && deleteBranch(undefined, menu.id)
+ ] : menu.type === 'node' ? (() => {
+ const targetNode = nodes.find(n => n.id === menu.id);
+ const isDisabled = targetNode?.data?.disabled;
+
+ // If disabled, only show Enable option
+ if (isDisabled) {
+ return [
+ {
+ label: 'Enable Node',
+ onClick: () => menu.id && toggleNodeDisabled(menu.id)
+ }
+ ];
}
- ]
+
+ // Normal node menu
+ return [
+ {
+ label: 'Disable Node',
+ onClick: () => menu.id && toggleNodeDisabled(menu.id)
+ },
+ {
+ label: 'Add to Archive',
+ onClick: () => menu.id && archiveNode(menu.id)
+ },
+ {
+ label: 'Delete Node (Cascade)',
+ danger: true,
+ onClick: () => menu.id && deleteBranch(menu.id)
+ }
+ ];
+ })() : (() => {
+ // Check if any node connected to this edge is disabled
+ const targetEdge = edges.find(e => e.id === menu.id);
+ const sourceNode = nodes.find(n => n.id === targetEdge?.source);
+ const targetNode = nodes.find(n => n.id === targetEdge?.target);
+ const isTraceDisabled = sourceNode?.data?.disabled || targetNode?.data?.disabled;
+
+ return [
+ {
+ label: isTraceDisabled ? 'Enable Trace' : 'Disable Trace',
+ onClick: () => menu.id && toggleTraceDisabled(menu.id)
+ },
+ {
+ label: 'Disconnect',
+ onClick: () => menu.id && deleteEdge(menu.id)
+ },
+ {
+ label: 'Delete Branch',
+ danger: true,
+ onClick: () => menu.id && deleteBranch(undefined, menu.id)
+ }
+ ];
+ })()
}
/>
)}
</div>
- <Sidebar />
+ <Sidebar isOpen={isRightOpen} onToggle={() => setIsRightOpen(!isRightOpen)} />
</div>
);
}
diff --git a/frontend/src/components/LeftSidebar.tsx b/frontend/src/components/LeftSidebar.tsx
new file mode 100644
index 0000000..fa8b471
--- /dev/null
+++ b/frontend/src/components/LeftSidebar.tsx
@@ -0,0 +1,134 @@
+import React, { useState } from 'react';
+import { Folder, FileText, Archive, ChevronLeft, ChevronRight, Trash2, MessageSquare } from 'lucide-react';
+import useFlowStore from '../store/flowStore';
+
+interface LeftSidebarProps {
+ isOpen: boolean;
+ onToggle: () => void;
+}
+
+const LeftSidebar: React.FC<LeftSidebarProps> = ({ isOpen, onToggle }) => {
+ const [activeTab, setActiveTab] = useState<'project' | 'files' | 'archive'>('project');
+ const { archivedNodes, removeFromArchive, createNodeFromArchive } = useFlowStore();
+
+ const handleDragStart = (e: React.DragEvent, archiveId: string) => {
+ e.dataTransfer.setData('archiveId', archiveId);
+ e.dataTransfer.effectAllowed = 'copy';
+ };
+
+ if (!isOpen) {
+ return (
+ <div className="border-r border-gray-200 h-screen bg-white flex flex-col items-center py-4 w-12 z-10 transition-all duration-300">
+ <button
+ onClick={onToggle}
+ className="p-2 hover:bg-gray-100 rounded mb-4"
+ title="Expand"
+ >
+ <ChevronRight size={20} className="text-gray-500" />
+ </button>
+ {/* Icons when collapsed */}
+ <div className="flex flex-col gap-4">
+ <Folder size={20} className={activeTab === 'project' ? "text-blue-500" : "text-gray-400"} />
+ <FileText size={20} className={activeTab === 'files' ? "text-blue-500" : "text-gray-400"} />
+ <Archive size={20} className={activeTab === 'archive' ? "text-blue-500" : "text-gray-400"} />
+ </div>
+ </div>
+ );
+ }
+
+ return (
+ <div className="w-64 border-r border-gray-200 h-screen flex flex-col bg-white shadow-xl z-10 transition-all duration-300">
+ {/* Header */}
+ <div className="p-3 border-b border-gray-200 flex justify-between items-center bg-gray-50">
+ <h2 className="font-bold text-sm text-gray-700 uppercase">Workspace</h2>
+ <button
+ onClick={onToggle}
+ className="p-1 hover:bg-gray-200 rounded"
+ >
+ <ChevronLeft size={16} className="text-gray-500" />
+ </button>
+ </div>
+
+ {/* Tabs */}
+ <div className="flex border-b border-gray-200">
+ <button
+ onClick={() => setActiveTab('project')}
+ className={`flex-1 p-3 text-xs flex justify-center items-center gap-2 ${activeTab === 'project' ? 'border-b-2 border-blue-500 text-blue-600 font-medium' : 'text-gray-600 hover:bg-gray-50'}`}
+ >
+ <Folder size={14} /> Project
+ </button>
+ <button
+ onClick={() => setActiveTab('files')}
+ className={`flex-1 p-3 text-xs flex justify-center items-center gap-2 ${activeTab === 'files' ? 'border-b-2 border-blue-500 text-blue-600 font-medium' : 'text-gray-600 hover:bg-gray-50'}`}
+ >
+ <FileText size={14} /> Files
+ </button>
+ <button
+ onClick={() => setActiveTab('archive')}
+ className={`flex-1 p-3 text-xs flex justify-center items-center gap-2 ${activeTab === 'archive' ? 'border-b-2 border-blue-500 text-blue-600 font-medium' : 'text-gray-600 hover:bg-gray-50'}`}
+ >
+ <Archive size={14} /> Archive
+ </button>
+ </div>
+
+ {/* Content Area */}
+ <div className="flex-1 overflow-y-auto p-4 text-sm text-gray-500">
+ {activeTab === 'project' && (
+ <div className="flex flex-col items-center justify-center h-full opacity-50">
+ <Folder size={48} className="mb-2" />
+ <p>Project settings coming soon</p>
+ </div>
+ )}
+ {activeTab === 'files' && (
+ <div className="flex flex-col items-center justify-center h-full opacity-50">
+ <FileText size={48} className="mb-2" />
+ <p>File manager coming soon</p>
+ </div>
+ )}
+ {activeTab === 'archive' && (
+ <div className="space-y-2">
+ {archivedNodes.length === 0 ? (
+ <div className="flex flex-col items-center justify-center h-40 opacity-50">
+ <Archive size={32} className="mb-2" />
+ <p className="text-xs text-center">
+ No archived nodes.<br/>
+ Right-click a node → "Add to Archive"
+ </p>
+ </div>
+ ) : (
+ <>
+ <p className="text-xs text-gray-400 mb-2">Drag to canvas to create a copy</p>
+ {archivedNodes.map((archived) => (
+ <div
+ key={archived.id}
+ draggable
+ onDragStart={(e) => handleDragStart(e, archived.id)}
+ className="p-2 bg-gray-50 border border-gray-200 rounded-md cursor-grab hover:bg-gray-100 hover:border-gray-300 transition-colors group"
+ >
+ <div className="flex items-center justify-between">
+ <div className="flex items-center gap-2">
+ <MessageSquare size={14} className="text-gray-500" />
+ <span className="text-sm font-medium truncate max-w-[140px]">{archived.label}</span>
+ </div>
+ <button
+ onClick={() => removeFromArchive(archived.id)}
+ className="opacity-0 group-hover:opacity-100 p-1 hover:bg-red-100 rounded text-gray-400 hover:text-red-500 transition-all"
+ title="Remove from archive"
+ >
+ <Trash2 size={12} />
+ </button>
+ </div>
+ <div className="text-[10px] text-gray-400 mt-1">{archived.model}</div>
+ </div>
+ ))}
+ </>
+ )}
+ </div>
+ )}
+ </div>
+ </div>
+ );
+};
+
+export default LeftSidebar;
+
diff --git a/frontend/src/components/Sidebar.tsx b/frontend/src/components/Sidebar.tsx
index f62f3cb..165028c 100644
--- a/frontend/src/components/Sidebar.tsx
+++ b/frontend/src/components/Sidebar.tsx
@@ -2,24 +2,75 @@ import React, { useState, useEffect } from 'react';
import useFlowStore from '../store/flowStore';
import type { NodeData } from '../store/flowStore';
import ReactMarkdown from 'react-markdown';
-import { Play, Settings, Info, Save } from 'lucide-react';
+import { Play, Settings, Info, Save, ChevronLeft, ChevronRight, Maximize2, Edit3, X, Check, FileText, Loader2 } from 'lucide-react';
-const Sidebar = () => {
+interface SidebarProps {
+ isOpen: boolean;
+ onToggle: () => void;
+}
+
+const Sidebar: React.FC<SidebarProps> = ({ isOpen, onToggle }) => {
const { nodes, selectedNodeId, updateNodeData, getActiveContext } = useFlowStore();
const [activeTab, setActiveTab] = useState<'interact' | 'settings' | 'debug'>('interact');
const [streamBuffer, setStreamBuffer] = useState('');
+
+ // Response Modal & Edit states
+ const [isModalOpen, setIsModalOpen] = useState(false);
+ const [isEditing, setIsEditing] = useState(false);
+ const [editedResponse, setEditedResponse] = useState('');
+
+ // Summary states
+ const [showSummaryModal, setShowSummaryModal] = useState(false);
+ const [summaryModel, setSummaryModel] = useState('gpt-5-nano');
+ const [isSummarizing, setIsSummarizing] = useState(false);
const selectedNode = nodes.find((n) => n.id === selectedNodeId);
- // Reset stream buffer when node changes
+ // Reset stream buffer and modal states when node changes
useEffect(() => {
setStreamBuffer('');
+ setIsModalOpen(false);
+ setIsEditing(false);
}, [selectedNodeId]);
+
+ // Sync editedResponse when entering edit mode
+ useEffect(() => {
+ if (isEditing && selectedNode) {
+ setEditedResponse(selectedNode.data.response || '');
+ }
+ }, [isEditing, selectedNode?.data.response]);
+
+ if (!isOpen) {
+ return (
+ <div className="border-l border-gray-200 h-screen bg-white flex flex-col items-center py-4 w-12 z-10 transition-all duration-300">
+ <button
+ onClick={onToggle}
+ className="p-2 hover:bg-gray-100 rounded mb-4"
+ title="Expand"
+ >
+ <ChevronLeft size={20} className="text-gray-500" />
+ </button>
+ {selectedNode && (
+ <div className="writing-vertical text-xs font-bold text-gray-500 uppercase tracking-widest mt-4" style={{ writingMode: 'vertical-rl' }}>
+ {selectedNode.data.label}
+ </div>
+ )}
+ </div>
+ );
+ }
if (!selectedNode) {
return (
- <div className="w-96 border-l border-gray-200 h-screen p-4 bg-gray-50 text-gray-500 text-center flex flex-col justify-center">
- <p>Select a node to edit</p>
+ <div className="w-96 border-l border-gray-200 h-screen flex flex-col bg-white shadow-xl z-10 transition-all duration-300">
+ <div className="p-3 border-b border-gray-200 flex justify-between items-center bg-gray-50">
+ <span className="text-sm font-medium text-gray-500">Details</span>
+ <button onClick={onToggle} className="p-1 hover:bg-gray-200 rounded">
+ <ChevronRight size={16} className="text-gray-500" />
+ </button>
+ </div>
+ <div className="flex-1 p-4 bg-gray-50 text-gray-500 text-center flex flex-col justify-center">
+ <p>Select a node to edit</p>
+ </div>
</div>
);
}
@@ -43,11 +94,13 @@ const Sidebar = () => {
user_prompt: selectedNode.data.userPrompt,
merge_strategy: selectedNode.data.mergeStrategy || 'smart',
config: {
- provider: selectedNode.data.model.includes('gpt') ? 'openai' : 'google',
+ provider: selectedNode.data.model.includes('gpt') || selectedNode.data.model === 'o3' ? 'openai' : 'google',
model_name: selectedNode.data.model,
temperature: selectedNode.data.temperature,
system_prompt: selectedNode.data.systemPrompt,
api_key: selectedNode.data.apiKey,
+ enable_google_search: selectedNode.data.enableGoogleSearch !== false, // Default true
+ reasoning_effort: selectedNode.data.reasoningEffort || 'medium', // For reasoning models
}
})
});
@@ -85,6 +138,10 @@ const Sidebar = () => {
response: fullResponse,
messages: [...context, newUserMsg, newAssistantMsg] as any
});
+
+ // Auto-generate title using gpt-5-nano (async, non-blocking)
+ // Always regenerate title after each query
+ generateTitle(selectedNode.id, selectedNode.data.userPrompt, fullResponse);
} catch (error) {
console.error(error);
@@ -95,17 +152,85 @@ const Sidebar = () => {
const handleChange = (field: keyof NodeData, value: any) => {
updateNodeData(selectedNode.id, { [field]: value });
};
+
+ const handleSaveEdit = () => {
+ if (!selectedNode) return;
+ updateNodeData(selectedNode.id, { response: editedResponse });
+ setIsEditing(false);
+ };
+
+ const handleCancelEdit = () => {
+ setIsEditing(false);
+ setEditedResponse(selectedNode?.data.response || '');
+ };
+
+ // Summarize response
+ const handleSummarize = async () => {
+ if (!selectedNode?.data.response) return;
+
+ setIsSummarizing(true);
+ setShowSummaryModal(false);
+
+ try {
+ const res = await fetch('http://localhost:8000/api/summarize', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ content: selectedNode.data.response,
+ model: summaryModel
+ })
+ });
+
+ if (res.ok) {
+ const data = await res.json();
+ if (data.summary) {
+ // Replace response with summary
+ updateNodeData(selectedNode.id, { response: data.summary });
+ }
+ }
+ } catch (error) {
+ console.error('Summarization failed:', error);
+ } finally {
+ setIsSummarizing(false);
+ }
+ };
+
+ // Auto-generate title using gpt-5-nano
+ const generateTitle = async (nodeId: string, userPrompt: string, response: string) => {
+ try {
+ const res = await fetch('http://localhost:8000/api/generate_title', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ user_prompt: userPrompt, response })
+ });
+
+ if (res.ok) {
+ const data = await res.json();
+ if (data.title) {
+ updateNodeData(nodeId, { label: data.title });
+ }
+ }
+ } catch (error) {
+ console.error('Failed to generate title:', error);
+ // Silently fail - keep the original title
+ }
+ };
return (
- <div className="w-96 border-l border-gray-200 h-screen flex flex-col bg-white shadow-xl z-10">
+ <div className="w-96 border-l border-gray-200 h-screen flex flex-col bg-white shadow-xl z-10 transition-all duration-300">
{/* Header */}
- <div className="p-4 border-b border-gray-200 bg-gray-50">
- <input
- type="text"
- value={selectedNode.data.label}
- onChange={(e) => handleChange('label', e.target.value)}
- className="font-bold text-lg bg-transparent border-none focus:ring-0 focus:outline-none w-full"
- />
+ <div className="p-4 border-b border-gray-200 bg-gray-50 flex flex-col gap-2">
+ <div className="flex justify-between items-center">
+ <input
+ type="text"
+ value={selectedNode.data.label}
+ onChange={(e) => handleChange('label', e.target.value)}
+ className="font-bold text-lg bg-transparent border-none focus:ring-0 focus:outline-none w-full"
+ />
+ <button onClick={onToggle} className="p-1 hover:bg-gray-200 rounded shrink-0">
+ <ChevronRight size={16} className="text-gray-500" />
+ </button>
+ </div>
<div className="flex items-center justify-between mt-1">
<div className="text-xs px-2 py-1 bg-blue-100 text-blue-700 rounded uppercase">
{selectedNode.data.status}
@@ -146,13 +271,41 @@ const Sidebar = () => {
<label className="block text-sm font-medium text-gray-700 mb-1">Model</label>
<select
value={selectedNode.data.model}
- onChange={(e) => handleChange('model', e.target.value)}
+ onChange={(e) => {
+ const newModel = e.target.value;
+ // Auto-set temperature to 1 for reasoning models
+ const reasoningModels = [
+ 'gpt-5', 'gpt-5-chat-latest', 'gpt-5-mini', 'gpt-5-nano',
+ 'gpt-5-pro', 'gpt-5.1', 'gpt-5.1-chat-latest', 'o3'
+ ];
+ const isReasoning = reasoningModels.includes(newModel);
+
+ if (isReasoning) {
+ handleChange('temperature', 1);
+ }
+ handleChange('model', newModel);
+ }}
className="w-full border border-gray-300 rounded-md p-2 text-sm"
>
- <option value="gpt-4o">GPT-4o</option>
- <option value="gpt-4o-mini">GPT-4o Mini</option>
- <option value="gemini-1.5-pro">Gemini 1.5 Pro</option>
- <option value="gemini-1.5-flash">Gemini 1.5 Flash</option>
+ <optgroup label="Gemini">
+ <option value="gemini-2.5-flash">gemini-2.5-flash</option>
+ <option value="gemini-2.5-flash-lite">gemini-2.5-flash-lite</option>
+ <option value="gemini-3-pro-preview">gemini-3-pro-preview</option>
+ </optgroup>
+ <optgroup label="OpenAI (Standard)">
+ <option value="gpt-4.1">gpt-4.1</option>
+ <option value="gpt-4o">gpt-4o</option>
+ </optgroup>
+ <optgroup label="OpenAI (Reasoning)">
+ <option value="gpt-5">gpt-5</option>
+ <option value="gpt-5-chat-latest">gpt-5-chat-latest</option>
+ <option value="gpt-5-mini">gpt-5-mini</option>
+ <option value="gpt-5-nano">gpt-5-nano</option>
+ <option value="gpt-5-pro">gpt-5-pro</option>
+ <option value="gpt-5.1">gpt-5.1</option>
+ <option value="gpt-5.1-chat-latest">gpt-5.1-chat-latest</option>
+ <option value="o3">o3</option>
+ </optgroup>
</select>
</div>
@@ -216,10 +369,65 @@ const Sidebar = () => {
</button>
<div className="mt-6">
- <label className="block text-sm font-medium text-gray-700 mb-2">Response</label>
- <div className="bg-gray-50 p-3 rounded-md border border-gray-200 min-h-[150px] text-sm prose prose-sm max-w-none">
- <ReactMarkdown>{selectedNode.data.response || streamBuffer}</ReactMarkdown>
+ <div className="flex items-center justify-between mb-2">
+ <label className="block text-sm font-medium text-gray-700">Response</label>
+ <div className="flex gap-1">
+ {selectedNode.data.response && (
+ <>
+ <button
+ onClick={() => setShowSummaryModal(true)}
+ disabled={isSummarizing}
+ className="p-1 hover:bg-gray-200 rounded text-gray-500 hover:text-gray-700 disabled:opacity-50"
+ title="Summarize"
+ >
+ {isSummarizing ? <Loader2 className="animate-spin" size={14} /> : <FileText size={14} />}
+ </button>
+ <button
+ onClick={() => setIsEditing(true)}
+ className="p-1 hover:bg-gray-200 rounded text-gray-500 hover:text-gray-700"
+ title="Edit Response"
+ >
+ <Edit3 size={14} />
+ </button>
+ <button
+ onClick={() => setIsModalOpen(true)}
+ className="p-1 hover:bg-gray-200 rounded text-gray-500 hover:text-gray-700"
+ title="Expand"
+ >
+ <Maximize2 size={14} />
+ </button>
+ </>
+ )}
+ </div>
</div>
+
+ {isEditing ? (
+ <div className="space-y-2">
+ <textarea
+ value={editedResponse}
+ onChange={(e) => setEditedResponse(e.target.value)}
+ className="w-full border border-blue-300 rounded-md p-2 text-sm min-h-[200px] font-mono focus:ring-2 focus:ring-blue-500"
+ />
+ <div className="flex gap-2 justify-end">
+ <button
+ onClick={handleCancelEdit}
+ className="px-3 py-1 text-sm text-gray-600 hover:bg-gray-100 rounded flex items-center gap-1"
+ >
+ <X size={14} /> Cancel
+ </button>
+ <button
+ onClick={handleSaveEdit}
+ className="px-3 py-1 text-sm bg-blue-600 text-white rounded hover:bg-blue-700 flex items-center gap-1"
+ >
+ <Check size={14} /> Save
+ </button>
+ </div>
+ </div>
+ ) : (
+ <div className="bg-gray-50 p-3 rounded-md border border-gray-200 min-h-[150px] text-sm prose prose-sm max-w-none">
+ <ReactMarkdown>{selectedNode.data.response || streamBuffer}</ReactMarkdown>
+ </div>
+ )}
</div>
</div>
)}
@@ -242,7 +450,15 @@ const Sidebar = () => {
</div>
<div>
- <label className="block text-sm font-medium text-gray-700 mb-1">Temperature ({selectedNode.data.temperature})</label>
+ <label className="block text-sm font-medium text-gray-700 mb-1">
+ Temperature ({selectedNode.data.temperature})
+ {[
+ 'gpt-5', 'gpt-5-chat-latest', 'gpt-5-mini', 'gpt-5-nano',
+ 'gpt-5-pro', 'gpt-5.1', 'gpt-5.1-chat-latest', 'o3'
+ ].includes(selectedNode.data.model) && (
+ <span className="text-xs text-orange-500 ml-2">(Locked for Reasoning Model)</span>
+ )}
+ </label>
<input
type="range"
min="0"
@@ -250,9 +466,37 @@ const Sidebar = () => {
step="0.1"
value={selectedNode.data.temperature}
onChange={(e) => handleChange('temperature', parseFloat(e.target.value))}
- className="w-full"
+ disabled={[
+ 'gpt-5', 'gpt-5-chat-latest', 'gpt-5-mini', 'gpt-5-nano',
+ 'gpt-5-pro', 'gpt-5.1', 'gpt-5.1-chat-latest', 'o3'
+ ].includes(selectedNode.data.model)}
+ className="w-full disabled:opacity-50 disabled:cursor-not-allowed"
/>
</div>
+
+ {/* Reasoning Effort - Only for OpenAI reasoning models (except chat-latest) */}
+ {[
+ 'gpt-5', 'gpt-5-mini', 'gpt-5-nano',
+ 'gpt-5-pro', 'gpt-5.1', 'o3'
+ ].includes(selectedNode.data.model) && (
+ <div>
+ <label className="block text-sm font-medium text-gray-700 mb-1">
+ Reasoning Effort
+ </label>
+ <select
+ value={selectedNode.data.reasoningEffort || 'medium'}
+ onChange={(e) => handleChange('reasoningEffort', e.target.value)}
+ className="w-full border border-gray-300 rounded-md p-2 text-sm"
+ >
+ <option value="low">Low (Faster, less thorough)</option>
+ <option value="medium">Medium (Balanced)</option>
+ <option value="high">High (Slower, more thorough)</option>
+ </select>
+ <p className="text-xs text-gray-500 mt-1">
+ Controls how much reasoning the model performs before responding. Higher = more tokens used.
+ </p>
+ </div>
+ )}
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">API Key (Optional)</label>
@@ -274,6 +518,22 @@ const Sidebar = () => {
placeholder="Global system prompt will be used if empty..."
/>
</div>
+
+ {(selectedNode.data.model.startsWith('gemini') ||
+ selectedNode.data.model.startsWith('gpt-5') ||
+ ['o3', 'o4-mini', 'gpt-4o'].includes(selectedNode.data.model)) && (
+ <div className="flex items-center gap-2 mt-4">
+ <input
+ type="checkbox"
+ id="web-search"
+ checked={selectedNode.data.enableGoogleSearch !== false} // Default to true
+ onChange={(e) => handleChange('enableGoogleSearch', e.target.checked)}
+ />
+ <label htmlFor="web-search" className="text-sm font-medium text-gray-700 select-none cursor-pointer">
+ Enable Web Search
+ </label>
+ </div>
+ )}
</div>
)}
@@ -294,6 +554,117 @@ const Sidebar = () => {
</div>
)}
</div>
+
+ {/* Response Modal */}
+ {isModalOpen && selectedNode && (
+ <div className="fixed inset-0 bg-black/50 flex items-center justify-center z-50" onClick={() => setIsModalOpen(false)}>
+ <div
+ className="bg-white rounded-lg shadow-2xl w-[80vw] max-w-4xl max-h-[80vh] flex flex-col"
+ onClick={(e) => e.stopPropagation()}
+ >
+ {/* Modal Header */}
+ <div className="flex items-center justify-between p-4 border-b border-gray-200">
+ <h3 className="font-semibold text-lg">{selectedNode.data.label} - Response</h3>
+ <div className="flex gap-2">
+ {!isEditing && (
+ <button
+ onClick={() => setIsEditing(true)}
+ className="px-3 py-1 text-sm text-gray-600 hover:bg-gray-100 rounded flex items-center gap-1"
+ >
+ <Edit3 size={14} /> Edit
+ </button>
+ )}
+ <button
+ onClick={() => { setIsModalOpen(false); setIsEditing(false); }}
+ className="p-1 hover:bg-gray-200 rounded text-gray-500"
+ >
+ <X size={18} />
+ </button>
+ </div>
+ </div>
+
+ {/* Modal Content */}
+ <div className="flex-1 overflow-y-auto p-6">
+ {isEditing ? (
+ <textarea
+ value={editedResponse}
+ onChange={(e) => setEditedResponse(e.target.value)}
+ className="w-full h-full min-h-[400px] border border-gray-300 rounded-md p-3 text-sm font-mono focus:ring-2 focus:ring-blue-500 resize-y"
+ />
+ ) : (
+ <div className="prose prose-sm max-w-none">
+ <ReactMarkdown>{selectedNode.data.response}</ReactMarkdown>
+ </div>
+ )}
+ </div>
+
+ {/* Modal Footer (only when editing) */}
+ {isEditing && (
+ <div className="flex justify-end gap-2 p-4 border-t border-gray-200">
+ <button
+ onClick={handleCancelEdit}
+ className="px-4 py-2 text-sm text-gray-600 hover:bg-gray-100 rounded flex items-center gap-1"
+ >
+ <X size={14} /> Cancel
+ </button>
+ <button
+ onClick={() => { handleSaveEdit(); setIsModalOpen(false); }}
+ className="px-4 py-2 text-sm bg-blue-600 text-white rounded hover:bg-blue-700 flex items-center gap-1"
+ >
+ <Check size={14} /> Save Changes
+ </button>
+ </div>
+ )}
+ </div>
+ </div>
+ )}
+
+ {/* Summary Model Selection Modal */}
+ {showSummaryModal && (
+ <div className="fixed inset-0 bg-black/50 flex items-center justify-center z-50" onClick={() => setShowSummaryModal(false)}>
+ <div
+ className="bg-white rounded-lg shadow-2xl w-80 p-4"
+ onClick={(e) => e.stopPropagation()}
+ >
+ <h3 className="font-semibold text-lg mb-4">Summarize Response</h3>
+
+ <div className="mb-4">
+ <label className="block text-sm font-medium text-gray-700 mb-2">Select Model</label>
+ <select
+ value={summaryModel}
+ onChange={(e) => setSummaryModel(e.target.value)}
+ className="w-full border border-gray-300 rounded-md p-2 text-sm"
+ >
+ <optgroup label="Fast (Recommended)">
+ <option value="gpt-5-nano">gpt-5-nano</option>
+ <option value="gpt-5-mini">gpt-5-mini</option>
+ <option value="gemini-2.5-flash-lite">gemini-2.5-flash-lite</option>
+ <option value="gemini-2.5-flash">gemini-2.5-flash</option>
+ </optgroup>
+ <optgroup label="Standard">
+ <option value="gpt-4o">gpt-4o</option>
+ <option value="gpt-5">gpt-5</option>
+ </optgroup>
+ </select>
+ </div>
+
+ <div className="flex justify-end gap-2">
+ <button
+ onClick={() => setShowSummaryModal(false)}
+ className="px-3 py-2 text-sm text-gray-600 hover:bg-gray-100 rounded"
+ >
+ Cancel
+ </button>
+ <button
+ onClick={handleSummarize}
+ className="px-3 py-2 text-sm bg-blue-600 text-white rounded hover:bg-blue-700 flex items-center gap-1"
+ >
+ <FileText size={14} /> Summarize
+ </button>
+ </div>
+ </div>
+ </div>
+ )}
</div>
);
};
diff --git a/frontend/src/components/nodes/LLMNode.tsx b/frontend/src/components/nodes/LLMNode.tsx
index cdd402c..592ab5b 100644
--- a/frontend/src/components/nodes/LLMNode.tsx
+++ b/frontend/src/components/nodes/LLMNode.tsx
@@ -49,18 +49,32 @@ const LLMNode = ({ id, data, selected }: NodeProps<NodeData>) => {
const inputsToShow = Math.max(maxConnectedIndex + 2, 1);
+ const isDisabled = data.disabled;
+
return (
- <div className={`px-4 py-2 shadow-md rounded-md bg-white border-2 min-w-[200px] ${selected ? 'border-blue-500' : 'border-gray-200'}`}>
+ <div
+ className={`px-4 py-2 shadow-md rounded-md border-2 min-w-[200px] transition-all ${
+ isDisabled
+ ? 'bg-gray-100 border-gray-300 opacity-50 cursor-not-allowed'
+ : selected
+ ? 'bg-white border-blue-500'
+ : 'bg-white border-gray-200'
+ }`}
+ style={{ pointerEvents: isDisabled ? 'none' : 'auto' }}
+ >
<div className="flex items-center mb-2">
- <div className="rounded-full w-8 h-8 flex justify-center items-center bg-gray-100">
+ <div className={`rounded-full w-8 h-8 flex justify-center items-center ${isDisabled ? 'bg-gray-200' : 'bg-gray-100'}`}>
{data.status === 'loading' ? (
<Loader2 className="w-4 h-4 animate-spin text-blue-500" />
) : (
- <MessageSquare className="w-4 h-4 text-gray-600" />
+ <MessageSquare className={`w-4 h-4 ${isDisabled ? 'text-gray-400' : 'text-gray-600'}`} />
)}
</div>
<div className="ml-2">
- <div className="text-sm font-bold truncate max-w-[150px]">{data.label}</div>
+ <div className={`text-sm font-bold truncate max-w-[150px] ${isDisabled ? 'text-gray-400' : ''}`}>
+ {data.label}
+ {isDisabled && <span className="text-xs ml-1">(disabled)</span>}
+ </div>
<div className="text-xs text-gray-500">{data.model}</div>
</div>
</div>
diff --git a/frontend/src/store/flowStore.ts b/frontend/src/store/flowStore.ts
index d2114aa..0c90357 100644
--- a/frontend/src/store/flowStore.ts
+++ b/frontend/src/store/flowStore.ts
@@ -38,6 +38,9 @@ export interface NodeData {
systemPrompt: string;
userPrompt: string;
mergeStrategy: 'raw' | 'smart';
+ enableGoogleSearch?: boolean;
+ reasoningEffort: 'low' | 'medium' | 'high'; // For OpenAI reasoning models
+ disabled?: boolean; // Greyed out, no interaction
// Traces logic
traces: Trace[]; // INCOMING Traces
@@ -53,10 +56,21 @@ export interface NodeData {
export type LLMNode = Node<NodeData>;
+// Archived node template (for reuse)
+export interface ArchivedNode {
+ id: string;
+ label: string;
+ model: string;
+ systemPrompt: string;
+ temperature: number;
+ reasoningEffort: 'low' | 'medium' | 'high';
+}
+
interface FlowState {
nodes: LLMNode[];
edges: Edge[];
selectedNodeId: string | null;
+ archivedNodes: ArchivedNode[]; // Stored node templates
onNodesChange: OnNodesChange;
onEdgesChange: OnEdgesChange;
@@ -72,6 +86,16 @@ interface FlowState {
deleteEdge: (edgeId: string) => void;
deleteNode: (nodeId: string) => void;
deleteBranch: (startNodeId?: string, startEdgeId?: string) => void;
+
+ // Archive actions
+ toggleNodeDisabled: (nodeId: string) => void;
+ archiveNode: (nodeId: string) => void;
+ removeFromArchive: (archiveId: string) => void;
+ createNodeFromArchive: (archiveId: string, position: { x: number; y: number }) => void;
+
+ // Trace disable
+ toggleTraceDisabled: (edgeId: string) => void;
+ updateEdgeStyles: () => void;
propagateTraces: () => void;
}
@@ -90,6 +114,7 @@ const useFlowStore = create<FlowState>((set, get) => ({
nodes: [],
edges: [],
selectedNodeId: null,
+ archivedNodes: [],
onNodesChange: (changes: NodeChange[]) => {
set({
@@ -274,6 +299,172 @@ const useFlowStore = create<FlowState>((set, get) => ({
get().propagateTraces();
},
+ toggleNodeDisabled: (nodeId: string) => {
+ const node = get().nodes.find(n => n.id === nodeId);
+ if (node) {
+ const newDisabled = !node.data.disabled;
+ // Update node data AND draggable property
+ set(state => ({
+ nodes: state.nodes.map(n => {
+ if (n.id === nodeId) {
+ return {
+ ...n,
+ draggable: !newDisabled, // Disable dragging when node is disabled
+ selectable: !newDisabled, // Disable selection when node is disabled
+ data: { ...n.data, disabled: newDisabled }
+ };
+ }
+ return n;
+ })
+ }));
+ // Update edge styles to reflect disabled state
+ setTimeout(() => get().updateEdgeStyles(), 0);
+ }
+ },
+
+ archiveNode: (nodeId: string) => {
+ const node = get().nodes.find(n => n.id === nodeId);
+ if (!node) return;
+
+ const archived: ArchivedNode = {
+ id: `archive_${Date.now()}`,
+ label: node.data.label,
+ model: node.data.model,
+ systemPrompt: node.data.systemPrompt,
+ temperature: node.data.temperature,
+ reasoningEffort: node.data.reasoningEffort || 'medium'
+ };
+
+ set(state => ({
+ archivedNodes: [...state.archivedNodes, archived]
+ }));
+ },
+
+ removeFromArchive: (archiveId: string) => {
+ set(state => ({
+ archivedNodes: state.archivedNodes.filter(a => a.id !== archiveId)
+ }));
+ },
+
+ createNodeFromArchive: (archiveId: string, position: { x: number; y: number }) => {
+ const archived = get().archivedNodes.find(a => a.id === archiveId);
+ if (!archived) return;
+
+ const newNode: LLMNode = {
+ id: `node_${Date.now()}`,
+ type: 'llmNode',
+ position,
+ data: {
+ label: archived.label,
+ model: archived.model,
+ temperature: archived.temperature,
+ systemPrompt: archived.systemPrompt,
+ userPrompt: '',
+ mergeStrategy: 'smart',
+ reasoningEffort: archived.reasoningEffort,
+ traces: [],
+ outgoingTraces: [],
+ forkedTraces: [],
+ activeTraceIds: [],
+ response: '',
+ status: 'idle',
+ inputs: 1
+ }
+ };
+
+ get().addNode(newNode);
+ },
+
+ toggleTraceDisabled: (edgeId: string) => {
+ const { edges, nodes } = get();
+ const edge = edges.find(e => e.id === edgeId);
+ if (!edge) return;
+
+ // Find all nodes connected through this trace (BIDIRECTIONAL)
+ const nodesInTrace = new Set<string>();
+ const visitedEdges = new Set<string>();
+
+ // Traverse downstream (source -> target direction)
+ const traverseDownstream = (currentNodeId: string) => {
+ nodesInTrace.add(currentNodeId);
+
+ const outgoing = edges.filter(e => e.source === currentNodeId);
+ outgoing.forEach(nextEdge => {
+ if (visitedEdges.has(nextEdge.id)) return;
+ visitedEdges.add(nextEdge.id);
+ traverseDownstream(nextEdge.target);
+ });
+ };
+
+ // Traverse upstream (target -> source direction)
+ const traverseUpstream = (currentNodeId: string) => {
+ nodesInTrace.add(currentNodeId);
+
+ const incoming = edges.filter(e => e.target === currentNodeId);
+ incoming.forEach(prevEdge => {
+ if (visitedEdges.has(prevEdge.id)) return;
+ visitedEdges.add(prevEdge.id);
+ traverseUpstream(prevEdge.source);
+ });
+ };
+
+ // Start bidirectional traversal from clicked edge
+ visitedEdges.add(edge.id);
+
+ // Go upstream from source (including source itself)
+ traverseUpstream(edge.source);
+
+ // Go downstream from target (including target itself)
+ traverseDownstream(edge.target);
+
+ // Check if any node in this trace is disabled
+ const anyDisabled = Array.from(nodesInTrace).some(
+ nodeId => nodes.find(n => n.id === nodeId)?.data.disabled
+ );
+
+ // Toggle: if any disabled -> enable all, else disable all
+ const newDisabledState = !anyDisabled;
+
+ set(state => ({
+ nodes: state.nodes.map(node => {
+ if (nodesInTrace.has(node.id)) {
+ return {
+ ...node,
+ draggable: !newDisabledState,
+ selectable: !newDisabledState,
+ data: { ...node.data, disabled: newDisabledState }
+ };
+ }
+ return node;
+ })
+ }));
+
+ // Update edge styles
+ get().updateEdgeStyles();
+ },
+
+ updateEdgeStyles: () => {
+ const { nodes, edges } = get();
+
+ const updatedEdges = edges.map(edge => {
+ const sourceNode = nodes.find(n => n.id === edge.source);
+ const targetNode = nodes.find(n => n.id === edge.target);
+
+ const isDisabled = sourceNode?.data.disabled || targetNode?.data.disabled;
+
+ return {
+ ...edge,
+ style: {
+ ...edge.style,
+ opacity: isDisabled ? 0.3 : 1,
+ strokeDasharray: isDisabled ? '5,5' : undefined
+ }
+ };
+ });
+
+ set({ edges: updatedEdges });
+ },
+
propagateTraces: () => {
const { nodes, edges } = get();