summaryrefslogtreecommitdiff
path: root/src/personalization/evaluation/baselines/no_memory.py
diff options
context:
space:
mode:
authorYurenHao0426 <blackhao0426@gmail.com>2026-01-27 09:57:37 -0600
committerYurenHao0426 <blackhao0426@gmail.com>2026-01-27 09:57:37 -0600
commitdc801c07cf38b0c495686463e6ca6f871a64440e (patch)
tree599f03114775921dbc472403c701f4a3a8ea188a /src/personalization/evaluation/baselines/no_memory.py
parente43b3f8aa36c198b95c1e46bea2eaf3893b13dc3 (diff)
Add collaborativeagents module and update gitignore
- Add collaborativeagents subproject with adapters, agents, and evaluation modules
- Update .gitignore to exclude large binary files (.whl, .tar), wandb logs, and results

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Diffstat (limited to 'src/personalization/evaluation/baselines/no_memory.py')
-rw-r--r--src/personalization/evaluation/baselines/no_memory.py143
1 files changed, 143 insertions, 0 deletions
diff --git a/src/personalization/evaluation/baselines/no_memory.py b/src/personalization/evaluation/baselines/no_memory.py
new file mode 100644
index 0000000..bf4a7cf
--- /dev/null
+++ b/src/personalization/evaluation/baselines/no_memory.py
@@ -0,0 +1,143 @@
+"""
+No Memory Baseline (T1)
+
+A simple agent that has no memory of previous sessions.
+Only sees the current conversation history within a session.
+"""
+
+from typing import List, Dict, Any, Optional
+import os
+
+from .base import BaselineAgent, AgentResponse
+
+
# System prompt prepended to every NoMemoryAgent conversation.
# NOTE: this is runtime text sent to the LLM — changing it changes agent behavior.
AGENT_SYSTEM_PROMPT = """You are a helpful AI assistant helping users solve problems.

Guidelines:
- If the user's request is unclear, ask for clarification
- Provide clear, well-structured answers
- Adapt to user feedback and preferences expressed in the conversation
- Be helpful and do your best to solve the user's problem

Your output should be a direct response to the user."""
+
+
class NoMemoryAgent(BaselineAgent):
    """
    T1: Base model with no memory.

    This agent:
    - Has no memory across sessions
    - Only uses current conversation context
    - Represents the baseline "no personalization" case
    """

    def __init__(
        self,
        model_name: str = "llama-8b",
        api_base: Optional[str] = None,
        api_key: Optional[str] = None,
        max_new_tokens: int = 512,
        temperature: float = 0.7,
        **kwargs
    ):
        """Create the agent and eagerly initialize the LLM client.

        Args:
            model_name: Model identifier passed to the chat-completions API.
            api_base: OpenAI-compatible endpoint; falls back to the
                ``OPENAI_API_BASE`` env var, then a local default.
            api_key: API key; falls back to ``OPENAI_API_KEY``, then "EMPTY"
                (vLLM-style servers accept any non-empty key).
            max_new_tokens: Cap on generated tokens per response.
            temperature: Sampling temperature.
            **kwargs: Forwarded to :class:`BaselineAgent`.
        """
        super().__init__(model_name, **kwargs)

        # Explicit args win over environment variables, which win over defaults.
        self.api_base = api_base or os.getenv("OPENAI_API_BASE", "http://localhost:8003/v1")
        self.api_key = api_key or os.getenv("OPENAI_API_KEY", "EMPTY")
        self.max_new_tokens = max_new_tokens
        self.temperature = temperature

        # Initialize client (sets self.client, possibly to None on failure).
        self._init_client()

    def _init_client(self):
        """Initialize the OpenAI-compatible client.

        On any failure (missing package, bad config) ``self.client`` is set to
        None and :meth:`respond` degrades to a canned fallback answer, so the
        evaluation harness can still run without a live LLM.
        """
        try:
            import openai
            self.client = openai.OpenAI(
                base_url=self.api_base,
                api_key=self.api_key,
            )
        except Exception as e:
            print(f"Warning: Could not initialize OpenAI client: {e}")
            self.client = None

    def _build_messages(
        self,
        conversation_history: List[Dict[str, str]],
        query: str,
    ) -> List[Dict[str, str]]:
        """Build chat messages: system prompt, then history, then the query.

        The query is appended unless the history already ends with the same
        *user* turn.

        Args:
            conversation_history: Prior turns, each a dict with "role" and
                "content" keys.
            query: The current user query.

        Returns:
            A list of role/content message dicts ready for the chat API.
        """
        messages = [{"role": "system", "content": AGENT_SYSTEM_PROMPT}]

        # Copy history into fresh dicts so callers' objects are never mutated.
        for msg in conversation_history:
            messages.append({
                "role": msg["role"],
                "content": msg["content"],
            })

        # BUGFIX: the duplicate check must also verify the role. Previously
        # only content was compared, so a trailing *assistant* message that
        # echoed the query would wrongly suppress appending the user turn.
        last = conversation_history[-1] if conversation_history else None
        if last is None or last.get("role") != "user" or last["content"] != query:
            messages.append({"role": "user", "content": query})

        return messages

    def respond(
        self,
        user_id: str,
        query: str,
        conversation_history: List[Dict[str, str]],
        **kwargs
    ) -> AgentResponse:
        """Generate a response using only the current conversation context.

        Args:
            user_id: Ignored — this baseline keeps no per-user state.
            query: Current user query.
            conversation_history: Current-session turns only.

        Returns:
            An :class:`AgentResponse` with the answer and debug metadata.
        """
        messages = self._build_messages(conversation_history, query)

        if self.client is None:
            # Fallback for testing without LLM
            return AgentResponse(
                answer=f"[NoMemoryAgent] Response to: {query[:50]}...",
                debug_info={"mode": "fallback", "num_messages": len(messages)},
            )

        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=messages,
                max_tokens=self.max_new_tokens,
                temperature=self.temperature,
            )

            # The SDK types `message.content` as Optional — never return None.
            answer = response.choices[0].message.content or ""

            return AgentResponse(
                answer=answer,
                debug_info={
                    "num_messages": len(messages),
                    "prompt_tokens": response.usage.prompt_tokens if response.usage else 0,
                    "completion_tokens": response.usage.completion_tokens if response.usage else 0,
                },
            )

        except Exception as e:
            # Best-effort degradation: surface the error in debug_info instead
            # of crashing the evaluation loop.
            print(f"Error calling LLM: {e}")
            return AgentResponse(
                answer=f"I apologize, but I encountered an error. Let me try again: {query[:100]}",
                debug_info={"error": str(e)},
            )

    def end_session(self, user_id: str, conversation: List[Dict[str, str]]):
        """No-op: a no-memory agent persists nothing across sessions."""
        pass

    def reset_user(self, user_id: str):
        """No-op: there is no per-user state to reset."""
        pass

    def get_name(self) -> str:
        """Return a display name including the underlying model."""
        return f"NoMemory({self.model_name})"
+
+