from pydantic import BaseModel, Field
from typing import List, Optional, Dict, Any, Union
from enum import Enum
import time


class Role(str, Enum):
    """Speaker role of a chat message."""

    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"


class Message(BaseModel):
    """A single chat message, with provenance metadata."""

    id: str = Field(..., description="Unique ID for the message")
    role: Role
    content: str
    timestamp: float = Field(default_factory=time.time)
    # Metadata to track where this message came from
    source_node_id: Optional[str] = None
    model_used: Optional[str] = None


class Context(BaseModel):
    """An ordered conversation history passed between nodes."""

    # default_factory keeps each instance's list independent and matches
    # the Field(...) style used elsewhere in this module
    messages: List[Message] = Field(default_factory=list)


class ModelProvider(str, Enum):
    """Supported LLM backends."""

    OPENAI = "openai"
    GOOGLE = "google"
    CLAUDE = "claude"


class ReasoningEffort(str, Enum):
    """Reasoning-effort level forwarded to reasoning-capable models."""

    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"


class LLMConfig(BaseModel):
    """Per-call LLM settings for a single model invocation."""

    provider: ModelProvider
    model_name: str
    temperature: float = 0.7
    max_tokens: Optional[int] = None
    system_prompt: Optional[str] = None
    api_key: Optional[str] = None  # Optional override, usually from env
    enable_google_search: bool = False
    # For OpenAI reasoning models
    reasoning_effort: ReasoningEffort = ReasoningEffort.MEDIUM


class MergeStrategy(str, Enum):
    """How multiple incoming contexts are combined."""

    RAW = "raw"
    SMART = "smart"


class NodeRunRequest(BaseModel):
    """Request payload for running a single node."""

    node_id: str
    incoming_contexts: List[Context] = Field(default_factory=list)
    user_prompt: str
    config: LLMConfig
    merge_strategy: MergeStrategy = MergeStrategy.SMART
    attached_file_ids: List[str] = Field(default_factory=list)
    # Scopes for file_search filtering: ["project_path/node_id", ...]
    # Contains all project/node combinations in the current trace
    scopes: List[str] = Field(default_factory=list)


class CouncilMemberConfig(BaseModel):
    """Per-member model settings; None fields fall back to request defaults."""

    model_name: str  # e.g. "gpt-5", "claude-opus-4-6", "gemini-3-pro-preview"
    temperature: Optional[float] = None  # None = use request default
    reasoning_effort: Optional[ReasoningEffort] = None
    enable_google_search: Optional[bool] = None
    incoming_contexts: Optional[List[Context]] = None  # Per-member context override


class CouncilRunRequest(BaseModel):
    """Request payload for a multi-model council run with a chairman synthesis."""

    node_id: str
    # Default context for all members
    incoming_contexts: List[Context] = Field(default_factory=list)
    user_prompt: str
    council_models: List[CouncilMemberConfig]  # 2-6 models
    chairman_model: CouncilMemberConfig  # Model config for synthesis
    system_prompt: Optional[str] = None
    temperature: float = 0.7
    reasoning_effort: ReasoningEffort = ReasoningEffort.MEDIUM
    enable_google_search: bool = False
    merge_strategy: MergeStrategy = MergeStrategy.SMART
    attached_file_ids: List[str] = Field(default_factory=list)
    scopes: List[str] = Field(default_factory=list)


class DebateJudgeMode(str, Enum):
    """How a debate is resolved into a final answer."""

    EXTERNAL_JUDGE = "external_judge"
    SELF_CONVERGENCE = "self_convergence"
    DISPLAY_ONLY = "display_only"


class DebateFormat(str, Enum):
    """Structure imposed on the debate rounds."""

    FREE_DISCUSSION = "free_discussion"
    STRUCTURED_OPPOSITION = "structured_opposition"
    ITERATIVE_IMPROVEMENT = "iterative_improvement"
    CUSTOM = "custom"


class DebateRunRequest(BaseModel):
    """Request payload for a multi-model debate run."""

    node_id: str
    incoming_contexts: List[Context] = Field(default_factory=list)
    user_prompt: str
    debate_models: List[CouncilMemberConfig]  # 2-6 models
    judge_model: Optional[CouncilMemberConfig] = None
    judge_mode: DebateJudgeMode = DebateJudgeMode.EXTERNAL_JUDGE
    debate_format: DebateFormat = DebateFormat.FREE_DISCUSSION
    custom_format_prompt: Optional[str] = None  # Used when debate_format is CUSTOM
    max_rounds: int = 5
    system_prompt: Optional[str] = None
    temperature: float = 0.7
    reasoning_effort: ReasoningEffort = ReasoningEffort.MEDIUM
    enable_google_search: bool = False
    merge_strategy: MergeStrategy = MergeStrategy.SMART
    attached_file_ids: List[str] = Field(default_factory=list)
    scopes: List[str] = Field(default_factory=list)


class NodeRunResponse(BaseModel):
    """Result of a node run: the updated context plus the model's reply."""

    node_id: str
    output_context: Context
    response_content: str
    raw_response: Optional[Dict[str, Any]] = None
    usage: Optional[Dict[str, Any]] = None