"""
Adapter to integrate PersonalizedLLM with the CollaborativeAgents benchmark.

This adapter wraps PersonalizedLLM to work as a CollaboratorAgent in the
MULTISESSIONCOLLAB framework while maintaining all personalization features.
"""

import sys
from pathlib import Path
from typing import Optional, List, Dict, Any
from dataclasses import dataclass, field

import numpy as np

# Add paths
_project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(_project_root / "src"))

# Import from your personalization system
from personalization.serving.personalized_llm import (
    PersonalizedLLM,
    AssistantResponse,
    Feedback,
    create_personalized_llm
)


@dataclass
class AdapterConfig:
    """Configuration for the PersonalizedLLM adapter."""
    # PersonalizedLLM config
    mode: str = "full"  # "full", "nopersonal", "vanilla"
    eval_mode: bool = True
    enable_preference_extraction: bool = True
    enable_rl_updates: bool = True
    use_user_vector: bool = True  # Whether to use user vector in policy scoring

    # Paths - computed relative to project root
    # Note: Using empty_store to start fresh - RAG will accumulate memories during evaluation
    _project_root: str = field(default_factory=lambda: str(Path(__file__).parent.parent.parent))
    user_store_path: str = ""
    memory_cards_path: str = ""
    memory_embeddings_path: str = ""
    item_projection_path: str = ""

    # Multi-GPU assignment
    device_assignment: Optional[Dict[str, str]] = None

    # LLM backend selection
    llm_name: str = "qwen_1_5b"  # Use "llama_8b_vllm" for vLLM backend

    # Shared model mode for multi-threaded efficiency
    use_shared_models: bool = False  # If True, share embedding/reranker across parallel workers

    # Reranker selection: "qwen3" (8B) or "bge" (278M)
    reranker_type: str = "qwen3"

    # Best-of-N sampling: generate N responses and pick the best (for RAG methods)
    best_of_n: int = 1

    # Reward mode: "keyword" (legacy heuristic), "llm" (GPT-4o-mini), or "llm_local" (local vLLM)
    reward_mode: str = "keyword"

    # vLLM URL for local reward model (only used when reward_mode="llm_local")
    reward_vllm_url: str = "http://localhost:8005/v1"

    # Retrieval optimizations
    enable_query_transform: bool = False  # Transform queries for better retrieval matching
    enable_global_preferences: bool = False  # Separate global prefs that bypass retrieval
    enable_preference_rewrite: bool = False  # Use LLM to rewrite/merge retrieved preferences

    # Dynamic topk settings
    dynamic_topk: bool = False  # Use dynamic selection based on rerank score distribution
    dynamic_min_k: int = 3  # Minimum preferences to select
    dynamic_max_k: int = 8  # Maximum preferences to select
    dynamic_score_ratio: float = 0.5  # Threshold = top_score * ratio

    # RL learning rate overrides
    eta_long: Optional[float] = None  # Override RL learning rate for z_long (default 0.01)
    eta_short: Optional[float] = None  # Override RL learning rate for z_short (default 0.05)

    # Session-level preference consolidation
    enable_preference_consolidation: bool = False  # Consolidate preferences at session end
    consolidation_threshold: int = 5  # Min preferences before consolidation kicks in

    # Reward mapping for user behavior
    preference_enforcement_reward: float = -0.8  # Negative reward when user enforces
    disappointment_expression_reward: float = -0.4  # Milder negative for disappointment
    positive_feedback_reward: float = 0.5  # When user expresses satisfaction
    no_enforcement_reward: float = 0.1  # Small positive when user doesn't enforce (good turn)
    task_completion_reward: float = 1.0  # When task is solved correctly

    def __post_init__(self):
        root = Path(self._project_root)
        if not self.user_store_path:
            self.user_store_path = str(root / "data/users/collab_eval_store.npz")
        if not self.memory_cards_path:
            self.memory_cards_path = str(root / "data/corpora/empty_store/memory_cards.jsonl")
        if not self.memory_embeddings_path:
            self.memory_embeddings_path = str(root / "data/corpora/empty_store/memory_embeddings.npy")
        if not self.item_projection_path:
            self.item_projection_path = str(root / "data/corpora/item_projection.npz")
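

def _example_build_config() -> AdapterConfig:
    """Illustrative sketch only (not used by the benchmark): construct a config
    with overridden settings. Path fields left empty are filled in by
    __post_init__ relative to the project root."""
    return AdapterConfig(
        mode="full",
        eta_long=0.1,                        # 10x the default z_long learning rate
        preference_enforcement_reward=-1.0,  # harsher penalty than the -0.8 default
    )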


class PersonalizedLLMAdapter:
    """
    Adapter that wraps PersonalizedLLM for use in CollaborativeAgents.

    This adapter:
    1. Translates the CollaborativeAgents conversation format into PersonalizedLLM calls
    2. Converts user simulator signals to reward/gating for REINFORCE
    3. Tracks metrics for evaluation
    4. Supports all baseline modes
    """

    def __init__(self, config: AdapterConfig = None):
        self.config = config or AdapterConfig()
        self._llm: Optional[PersonalizedLLM] = None
        self._initialized = False

        # Session tracking
        self._current_user_id: Optional[str] = None
        self._turn_counter: int = 0
        self._session_metrics: Dict[str, Any] = {}

        # Metrics accumulation
        self._total_enforcements: int = 0
        self._total_disappointments: int = 0
        self._total_turns: int = 0

    def initialize(self):
        """Initialize the PersonalizedLLM instance."""
        if self._initialized:
            return

        shared_mode_str = " (shared models)" if self.config.use_shared_models else ""
        print(f"[Adapter] Initializing PersonalizedLLM with LLM: {self.config.llm_name}{shared_mode_str}...")
        self._llm = PersonalizedLLM(
            mode=self.config.mode,
            eval_mode=self.config.eval_mode,
            enable_preference_extraction=self.config.enable_preference_extraction,
            enable_rl_updates=self.config.enable_rl_updates,
            user_store_path=self.config.user_store_path,
            memory_cards_path=self.config.memory_cards_path,
            memory_embeddings_path=self.config.memory_embeddings_path,
            item_projection_path=self.config.item_projection_path,
            device_assignment=self.config.device_assignment,
            llm_name=self.config.llm_name,
            use_shared_models=self.config.use_shared_models,
            reranker_type=self.config.reranker_type,
            best_of_n=self.config.best_of_n,
            reward_mode=self.config.reward_mode,
            reward_vllm_url=self.config.reward_vllm_url,
            enable_query_transform=self.config.enable_query_transform,
            enable_global_preferences=self.config.enable_global_preferences,
            enable_preference_rewrite=self.config.enable_preference_rewrite,
            dynamic_topk=self.config.dynamic_topk,
            dynamic_min_k=self.config.dynamic_min_k,
            dynamic_max_k=self.config.dynamic_max_k,
            dynamic_score_ratio=self.config.dynamic_score_ratio,
            eta_long=self.config.eta_long,
            eta_short=self.config.eta_short,
            enable_preference_consolidation=self.config.enable_preference_consolidation,
            consolidation_threshold=self.config.consolidation_threshold,
        )
        self._initialized = True
        print("[Adapter] Initialization complete.")

    def start_session(self, user_id: str, user_profile: dict = None):
        """
        Start a new session for a user.

        Args:
            user_id: Unique user identifier
            user_profile: Optional user profile with preferences (for ground truth)
        """
        if not self._initialized:
            self.initialize()

        self._current_user_id = user_id
        self._turn_counter = 0
        self._session_metrics = {
            "user_id": user_id,
            "enforcements": 0,
            "disappointments": 0,
            "turns": 0,
            "rewards_applied": [],
        }

        # Reset session (keeps z_long, clears z_short and history)
        self._llm.reset_session(user_id)

    def generate_response(
        self,
        query: str,
        conversation_history: List[Dict[str, str]] = None
    ) -> Dict[str, Any]:
        """
        Generate a response using PersonalizedLLM.

        Args:
            query: Current user query
            conversation_history: Previous conversation (for context, though
                                  PersonalizedLLM tracks its own history)

        Returns:
            Dict with 'response', 'reasoning', and debug info
        """
        if not self._initialized:
            self.initialize()

        # Call PersonalizedLLM
        result: AssistantResponse = self._llm.chat(self._current_user_id, query)

        self._turn_counter += 1
        self._session_metrics["turns"] = self._turn_counter

        # Handle None result defensively
        if result is None:
            return {"response": "[Error: LLM returned None]", "reasoning": "", "debug": {}}

        # Format response for CollaborativeAgents
        answer = result.answer if result.answer else "[No answer generated]"
        debug_info = result.debug if result.debug else None
        usage_info = result.usage if result.usage else None

        return {
            "response": answer,
            "reasoning": f"Retrieved {len(debug_info.selected_memory_notes) if debug_info else 0} memories",
            "debug": {
                "selected_memories": debug_info.selected_memory_notes if debug_info else [],
                "memory_scores": debug_info.selected_memory_scores if debug_info else [],
                "extracted_preferences": debug_info.extracted_preferences if debug_info else [],
                "user_vector_norm": debug_info.extra.get("z_long_norm", 0) if debug_info and debug_info.extra else 0,
                "usage": {
                    "prompt_tokens": usage_info.prompt_tokens if usage_info else 0,
                    "completion_tokens": usage_info.completion_tokens if usage_info else 0,
                    "total_tokens": usage_info.total_tokens if usage_info else 0,
                } if usage_info else {}
            }
        }

    def prepare_prompt(
        self,
        query: str,
        conversation_history: List[Dict[str, str]] = None
    ) -> tuple:
        """
        Prepare prompt for batch processing without calling LLM.

        This method does all preparation (embedding, memory retrieval) and
        returns messages for batched vLLM call.

        Args:
            query: Current user query
            conversation_history: Previous conversation

        Returns:
            Tuple of (messages, context) where messages is ready for vLLM batch
            and context is needed for process_response().
        """
        if not self._initialized:
            self.initialize()

        # Use chat_prepare from PersonalizedLLM
        # skip_extraction=False to enable preference extraction from user messages
        # skip_auto_reward=True because batch framework handles rewards via process_user_turn
        result = self._llm.chat_prepare(self._current_user_id, query, skip_extraction=False, skip_auto_reward=True)
        return result["messages"], result["context"]

    def process_response(
        self,
        response: str,
        context: dict
    ) -> Dict[str, Any]:
        """
        Process LLM response after batch call.

        This method takes the LLM response and context from prepare_prompt(),
        does post-processing, and returns the formatted result.

        Args:
            response: LLM response text from batched vLLM call
            context: Context dict from prepare_prompt()

        Returns:
            Dict with 'response', 'reasoning', and debug info
        """
        # Use chat_complete from PersonalizedLLM
        result: AssistantResponse = self._llm.chat_complete(response, context)

        self._turn_counter += 1
        self._session_metrics["turns"] = self._turn_counter

        # Handle None result defensively
        if result is None:
            return {"response": "[Error: LLM returned None]", "reasoning": "", "debug": {}}

        # Format response for CollaborativeAgents
        answer = result.answer if result.answer else "[No answer generated]"
        debug_info = result.debug if result.debug else None
        usage_info = result.usage if result.usage else None

        return {
            "response": answer,
            "reasoning": f"Retrieved {len(debug_info.selected_memory_notes) if debug_info else 0} memories",
            "debug": {
                "selected_memories": debug_info.selected_memory_notes if debug_info else [],
                "memory_scores": debug_info.selected_memory_scores if debug_info else [],
                "extracted_preferences": debug_info.extracted_preferences if debug_info else [],
                "user_vector_norm": debug_info.extra.get("z_long_norm", 0) if debug_info and debug_info.extra else 0,
                "usage": {
                    "prompt_tokens": usage_info.prompt_tokens if usage_info else 0,
                    "completion_tokens": usage_info.completion_tokens if usage_info else 0,
                    "total_tokens": usage_info.total_tokens if usage_info else 0,
                } if usage_info else {}
            }
        }
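
    # Illustrative batch flow (a sketch; `vllm_batch_generate` is a hypothetical
    # helper standing in for whatever batched vLLM call the framework makes):
    #
    #   prepared = [ad.prepare_prompt(q) for ad, q in zip(adapters, queries)]
    #   texts = vllm_batch_generate([msgs for msgs, _ in prepared])
    #   results = [ad.process_response(text, ctx)
    #              for ad, text, (_, ctx) in zip(adapters, texts, prepared)]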

    def process_user_turn(
        self,
        user_response: str,
        enforce_preferences: bool = False,
        express_disappointment: bool = False,
        express_satisfaction: bool = False,
        draft_answer_updated: bool = False
    ):
        """
        Process user turn and derive reward signal for REINFORCE.

        Args:
            user_response: The user's response text
            enforce_preferences: Whether user explicitly enforced preferences
            express_disappointment: Whether user expressed disappointment
            express_satisfaction: Whether user expressed satisfaction
            draft_answer_updated: Whether user updated their draft answer

        This is called AFTER generate_response and BEFORE the next turn.
        """
        # Derive reward from user behavior
        # Key insight: ALWAYS give a reward signal, not just for enforcement
        # - Enforcement: negative reward (user had to correct agent)
        # - No enforcement: small positive reward (agent did well)
        # - Satisfaction/progress: larger positive reward
        gating = 1.0  # Always apply

        if enforce_preferences:
            reward = self.config.preference_enforcement_reward  # -0.8
            self._session_metrics["enforcements"] += 1
            self._total_enforcements += 1

        elif express_disappointment:
            reward = self.config.disappointment_expression_reward  # -0.4
            self._session_metrics["disappointments"] += 1
            self._total_disappointments += 1

        elif express_satisfaction or draft_answer_updated:
            reward = self.config.positive_feedback_reward  # +0.5

        else:
            # No enforcement = good turn, give small positive reward
            reward = self.config.no_enforcement_reward  # +0.1

        # Apply feedback to PersonalizedLLM (always, not just when reward != 0)
        if self.config.enable_rl_updates:
            # Debug: check if pending_rl_update exists
            ctx = self._llm._sessions.get(self._current_user_id)
            has_pending = ctx is not None and ctx.pending_rl_update is not None
            has_chosen = has_pending and len(
                ctx.pending_rl_update.get("last_chosen_indices", [])
            ) > 0
            print(f"[DEBUG-RL] User={self._current_user_id} reward={reward:.2f} "
                  f"has_pending={has_pending} has_chosen={has_chosen}")
            feedback = Feedback(
                user_id=self._current_user_id,
                turn_id=self._turn_counter - 1,
                reward=reward,
                gating=gating,
                meta={
                    "enforce": enforce_preferences,
                    "disappointment": express_disappointment,
                    "satisfaction": express_satisfaction,
                }
            )
            self._llm.apply_feedback(feedback)
            self._session_metrics["rewards_applied"].append(reward)
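
    # Signal precedence within a turn (sketch): enforcement outranks
    # disappointment, which outranks satisfaction; any other turn earns the
    # small default positive reward. With the default config:
    #
    #   adapter.process_user_turn("Use bullets, I said.", enforce_preferences=True)  # -0.8
    #   adapter.process_user_turn("Hmm, not quite.", express_disappointment=True)    # -0.4
    #   adapter.process_user_turn("Perfect, thanks!", express_satisfaction=True)     # +0.5
    #   adapter.process_user_turn("Next, sort results by date.")                     # +0.1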

    def end_session(self, task_success: bool = False) -> Dict[str, Any]:
        """
        End the current session and return metrics.

        Args:
            task_success: Whether the task was solved correctly

        Returns:
            Session metrics dictionary
        """
        # Apply final reward for task completion
        if task_success and self.config.enable_rl_updates:
            feedback = Feedback(
                user_id=self._current_user_id,
                turn_id=self._turn_counter,
                reward=self.config.task_completion_reward,
                gating=1.0,
                meta={"task_success": True}
            )
            self._llm.apply_feedback(feedback)
            self._session_metrics["rewards_applied"].append(
                self.config.task_completion_reward
            )

        self._session_metrics["task_success"] = task_success
        self._total_turns += self._turn_counter

        return self._session_metrics.copy()

    def reset_user(self, user_id: str):
        """Completely reset a user (new experiment)."""
        if self._initialized:
            self._llm.reset_user(user_id)

    def get_user_vector(self, user_id: str) -> Optional[np.ndarray]:
        """Get the user's z_long vector for analysis."""
        if not self._initialized:
            return None

        state = self._llm._user_store.get_state(user_id)
        return state.z_long.copy()

    def get_user_state_summary(self, user_id: str) -> Dict[str, Any]:
        """Get summary of user state for analysis."""
        if not self._initialized:
            return {}

        return self._llm.get_user_state_summary(user_id)

    def persist(self):
        """Save all state to disk."""
        if self._initialized:
            self._llm.persist()

    def export_all_user_vectors(self) -> Dict[str, Dict[str, Any]]:
        """
        Export all user vectors with full state for analysis.

        Returns:
            Dict mapping user_id to dict containing:
            - z_long: np.ndarray (long-term user vector)
            - z_short: np.ndarray (short-term user vector)
            - z_long_norm: float
            - z_short_norm: float
            - reward_ma: float (reward moving average)
        """
        if not self._initialized:
            return {}

        result = {}
        for user_id, state in self._llm._user_store._states.items():
            result[user_id] = {
                "z_long": state.z_long.tolist(),
                "z_short": state.z_short.tolist(),
                "z_long_norm": float(np.linalg.norm(state.z_long)),
                "z_short_norm": float(np.linalg.norm(state.z_short)),
                "reward_ma": float(state.reward_ma),
            }
        return result

    def export_user_vectors_npz(self, output_path: str) -> None:
        """
        Export all user vectors to a numpy .npz file for efficient storage and analysis.

        Args:
            output_path: Path to save the .npz file

        The saved file contains:
        - user_ids: array of user IDs
        - z_long: [n_users, k] array of long-term vectors
        - z_short: [n_users, k] array of short-term vectors
        - reward_ma: [n_users] array of reward moving averages
        """
        if not self._initialized:
            return

        states = self._llm._user_store._states
        if not states:
            return

        user_ids = list(states.keys())
        z_long = np.stack([states[uid].z_long for uid in user_ids])
        z_short = np.stack([states[uid].z_short for uid in user_ids])
        reward_ma = np.array([states[uid].reward_ma for uid in user_ids])

        np.savez(
            output_path,
            user_ids=np.array(user_ids),
            z_long=z_long,
            z_short=z_short,
            reward_ma=reward_ma,
        )
        print(f"[Adapter] Exported {len(user_ids)} user vectors to {output_path}")

    # =========================================================================
    # CollaborativeAgents Interface Methods
    # =========================================================================

    def __call__(
        self,
        messages: List[Dict[str, str]],
        user_profile: dict = None,
        **kwargs
    ) -> str:
        """
        Callable interface for CollaborativeAgents ConversationGenerator.

        Args:
            messages: Conversation history in [{"role": "user/assistant", "content": "..."}]
            user_profile: Optional user profile

        Returns:
            Response string
        """
        if not messages:
            return "How can I help you?"

        # Get the last user message
        last_user_msg = None
        for msg in reversed(messages):
            if msg["role"] == "user":
                last_user_msg = msg["content"]
                break

        if last_user_msg is None:
            return "How can I help you?"

        result = self.generate_response(last_user_msg, messages)
        return result["response"]
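

# Example (sketch): because of __call__, a configured adapter can be handed
# directly to ConversationGenerator wherever a collaborator callable is
# expected:
#
#   adapter = PersonalizedLLMAdapter()
#   adapter.start_session("user_42")
#   reply = adapter([{"role": "user", "content": "Draft a summary."}])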


# =============================================================================
# Baseline Adapter Factory
# =============================================================================

def create_baseline_adapter(
    baseline_name: str,
    device_assignment: dict = None,
    use_vllm: bool = False,
    use_shared_models: bool = False,
    reward_mode: str = "keyword",
    reward_vllm_url: str = "http://localhost:8005/v1",
) -> PersonalizedLLMAdapter:
    """
    Create an adapter configured for a specific baseline.

    Args:
        baseline_name: One of:
            - "vanilla": No memory or personalization
            - "contextual": Full history in context (truncate if overflow)
            - "reflection": CollaborativeAgents' agent_notes approach
            - "reflection_grpo": Reflection + GRPO training
            - "all_memory": All extracted memories in context (no retrieval)
            - "rag": Extractor + RAG (no user vector)
            - "rag_vector": Full personalization (Extractor + RAG + User Vector)
        device_assignment: GPU assignment dict
        use_vllm: If True, use vLLM HTTP API for LLM inference (much faster)
        use_shared_models: If True, share embedding/reranker models across parallel
            workers. ESSENTIAL for parallel profile processing to avoid OOM.
        reward_mode: Global reward mode ("keyword", "llm", or "llm_local")
        reward_vllm_url: vLLM URL for local reward model (when reward_mode="llm_local")

    Returns:
        Configured adapter (PersonalizedLLMAdapter or baseline-specific adapter)
    """
    # Select LLM backend
    llm_name = "llama_8b_vllm" if use_vllm else "llama_8b"
    configs = {
        # Baseline 1: Vanilla - no memory at all
        "vanilla": AdapterConfig(
            mode="vanilla",
            enable_preference_extraction=False,
            enable_rl_updates=False,
            use_user_vector=False,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
        ),
        # Baseline 2: Contextual - full history in context
        # This needs a separate adapter (ContextualAdapter)
        "contextual": None,  # Handled separately
        # Baseline 3: Reflection - agent_notes mechanism
        # This needs a separate adapter (ReflectionAdapter)
        "reflection": None,  # Handled separately
        # Baseline 4: Reflection + GRPO
        # This needs a separate adapter (ReflectionGRPOAdapter)
        "reflection_grpo": None,  # Handled separately
        # Baseline 5: All memory in context (no retrieval)
        "all_memory": AdapterConfig(
            mode="nopersonal",  # Uses all memories, no policy selection
            enable_preference_extraction=True,
            enable_rl_updates=False,
            use_user_vector=False,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
        ),
        # Baseline 6: Extractor + RAG (no user vector)
        # Use "nopersonal" mode for pure dense+rerank retrieval without user vector influence
        # Device assignment: GPUs 2,3 for HF models (8B vLLM uses 40% memory, leaving room)
        "rag": AdapterConfig(
            mode="nopersonal",
            enable_preference_extraction=True,
            enable_rl_updates=False,  # No RL updates
            use_user_vector=False,  # No user vector in policy
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            enable_query_transform=True,
            enable_global_preferences=True,
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Baseline 6b: RAG with dynamic topk (min=3, max=8, ratio=0.5)
        "rag_dynamic": AdapterConfig(
            mode="nopersonal",
            enable_preference_extraction=True,
            enable_rl_updates=False,
            use_user_vector=False,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            enable_query_transform=True,
            enable_global_preferences=True,
            dynamic_topk=True,
            dynamic_min_k=3,
            dynamic_max_k=8,
            dynamic_score_ratio=0.5,
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Baseline 6c: RAG with preference rewrite (LLM merges preferences)
        "rag_rewrite": AdapterConfig(
            mode="nopersonal",
            enable_preference_extraction=True,
            enable_rl_updates=False,
            use_user_vector=False,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            enable_query_transform=True,
            enable_global_preferences=True,
            enable_preference_rewrite=True,  # NEW: Use LLM to merge preferences
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Baseline 7: Full - Extractor + RAG + User Vector (proposed method)
        # Device assignment: GPUs 2,3 for HF models (8B vLLM uses 40% memory, leaving room)
        "rag_vector": AdapterConfig(
            mode="full",
            enable_preference_extraction=True,
            enable_rl_updates=True,
            use_user_vector=True,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            enable_query_transform=True,
            enable_global_preferences=True,
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Ablation: RAG + Vector without z_short (only z_long, no within-session adaptation)
        "rag_vector_no_short": AdapterConfig(
            mode="full",
            enable_preference_extraction=True,
            enable_rl_updates=True,
            use_user_vector=True,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            enable_query_transform=True,
            enable_global_preferences=True,
            eta_short=0.0,  # Disable z_short learning
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Ablation: RAG + Vector without z_long (only z_short, no cross-session learning)
        "rag_vector_no_long": AdapterConfig(
            mode="full",
            enable_preference_extraction=True,
            enable_rl_updates=True,
            use_user_vector=True,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            enable_query_transform=True,
            enable_global_preferences=True,
            eta_long=0.0,  # Disable z_long learning
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Baseline 7a: RAG + Vector + Preference Rewrite (combines best of both)
        "rag_rewrite_vector": AdapterConfig(
            mode="full",
            enable_preference_extraction=True,
            enable_rl_updates=True,
            use_user_vector=True,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            enable_query_transform=True,
            enable_global_preferences=True,
            enable_preference_rewrite=True,  # LLM merges preferences
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Baseline 7b: RAG + Vector with higher learning rate (10x)
        "rag_vector_fast": AdapterConfig(
            mode="full",
            enable_preference_extraction=True,
            enable_rl_updates=True,
            use_user_vector=True,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            enable_query_transform=True,
            enable_global_preferences=True,
            eta_long=0.1,   # 10x default (0.01)
            eta_short=0.5,  # 10x default (0.05)
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Baseline 7c: RAG + Vector with session-level preference consolidation
        "rag_vector_consolidate": AdapterConfig(
            mode="full",
            enable_preference_extraction=True,
            enable_rl_updates=True,
            use_user_vector=True,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            enable_query_transform=True,
            enable_global_preferences=True,
            enable_preference_consolidation=True,
            consolidation_threshold=5,
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Baseline 7d: RAG + Vector with balanced rewards (10x LR + no_enforcement_reward)
        # Key improvements:
        # - 10x learning rate for faster adaptation
        # - Small positive reward for turns without enforcement (+0.1)
        # - Disappointment detection enabled
        # - Balanced reward signal for proper REINFORCE learning
        "rag_vector_balanced": AdapterConfig(
            mode="full",
            enable_preference_extraction=True,
            enable_rl_updates=True,
            use_user_vector=True,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            enable_query_transform=True,
            enable_global_preferences=True,
            eta_long=0.1,   # 10x default
            eta_short=0.5,  # 10x default
            # Balanced reward structure
            preference_enforcement_reward=-0.8,
            disappointment_expression_reward=-0.4,
            positive_feedback_reward=0.5,
            no_enforcement_reward=0.1,  # Key: positive signal for good turns
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Baseline 8: RAG with BGE reranker (278M instead of 8B)
        "rag_bge": AdapterConfig(
            mode="nopersonal",
            enable_preference_extraction=True,
            enable_rl_updates=False,
            use_user_vector=False,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            reranker_type="bge",
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Baseline 9: RAG + Vector with BGE reranker (278M instead of 8B)
        "rag_vector_bge": AdapterConfig(
            mode="full",
            enable_preference_extraction=True,
            enable_rl_updates=True,
            use_user_vector=True,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            reranker_type="bge",
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Baseline 10: RAG + Vector with best-of-3 sampling
        "rag_vector_best3": AdapterConfig(
            mode="full",
            enable_preference_extraction=True,
            enable_rl_updates=True,
            use_user_vector=True,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
            best_of_n=3,
            device_assignment={
                "embed": "cuda:2",
                "reranker": "cuda:3",
                "extractor": "cuda:2",
            },
        ),
        # Legacy aliases
        "nopersonal": AdapterConfig(
            mode="nopersonal",
            enable_preference_extraction=True,
            enable_rl_updates=False,
            use_user_vector=False,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
        ),
        "full": AdapterConfig(
            mode="full",
            enable_preference_extraction=True,
            enable_rl_updates=True,
            use_user_vector=True,
            llm_name=llm_name,
            use_shared_models=use_shared_models,
        ),
    }

    if baseline_name not in configs:
        raise ValueError(f"Unknown baseline: {baseline_name}. Choose from {list(configs.keys())}")

    config = configs[baseline_name]

    # Handle baselines that need separate adapters
    if config is None:
        if baseline_name == "contextual":
            from .contextual_adapter import ContextualAdapter
            return ContextualAdapter(device_assignment=device_assignment)
        elif baseline_name == "reflection":
            from .reflection_adapter import ReflectionAdapter
            return ReflectionAdapter(device_assignment=device_assignment)
        elif baseline_name == "reflection_grpo":
            from .reflection_grpo_adapter import ReflectionGRPOAdapter
            return ReflectionGRPOAdapter(device_assignment=device_assignment)
        else:
            raise ValueError(f"Baseline {baseline_name} not implemented yet")

    if device_assignment:
        config.device_assignment = device_assignment

    # Apply global reward settings to all methods (overrides per-method defaults)
    config.reward_mode = reward_mode
    config.reward_vllm_url = reward_vllm_url

    return PersonalizedLLMAdapter(config)
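
# Example (sketch): the factory returns a PersonalizedLLMAdapter for the
# PersonalizedLLM-backed baselines and a dedicated adapter class for the
# others:
#
#   rag = create_baseline_adapter("rag", use_vllm=True)   # PersonalizedLLMAdapter
#   ctx = create_baseline_adapter("contextual")           # ContextualAdapter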


# =============================================================================
# Integration with CollaborativeAgents ConversationGenerator
# =============================================================================

class PersonalizedCollaborator:
    """
    Drop-in replacement for CollaboratorAgent that uses PersonalizedLLM.

    Compatible with ConversationGenerator.generate_conversation()
    """

    def __init__(
        self,
        adapter: PersonalizedLLMAdapter,
        user_id: str,
        user_profile: dict = None,
        max_new_tokens: int = 1024
    ):
        self.adapter = adapter
        self.user_id = user_id
        self.user_profile = user_profile
        self.max_new_tokens = max_new_tokens

        # Start session
        self.adapter.start_session(user_id, user_profile)

    def generate(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """
        Generate response in CollaborativeAgents format.

        Returns dict with 'reasoning' and 'response' keys.
        """
        # Extract last user message
        last_user_msg = ""
        for msg in reversed(messages):
            if msg["role"] == "user":
                last_user_msg = msg["content"]
                break

        # Check for preference enforcement in the user message
        enforce_detected = self._detect_enforcement(last_user_msg)
        disappointment_detected = self._detect_disappointment(last_user_msg)
        satisfaction_detected = self._detect_satisfaction(last_user_msg)

        # Process the previous turn's feedback (if any)
        if len(messages) > 2:  # Not the first turn
            self.adapter.process_user_turn(
                last_user_msg,
                enforce_preferences=enforce_detected,
                express_disappointment=disappointment_detected,
                express_satisfaction=satisfaction_detected,
            )

        # Generate response
        result = self.adapter.generate_response(last_user_msg, messages)

        return {
            "reasoning": result["reasoning"],
            "response": result["response"],
            "debug": result.get("debug", {})
        }

    def _detect_enforcement(self, text: str) -> bool:
        """Detect if user is enforcing preferences."""
        enforcement_phrases = [
            "please use", "i asked for", "i prefer", "can you",
            "instead of", "not what i wanted", "i said", "remember that",
            "you should", "don't", "avoid", "stop"
        ]
        text_lower = text.lower()
        return any(phrase in text_lower for phrase in enforcement_phrases)

    def _detect_disappointment(self, text: str) -> bool:
        """Detect expressions of disappointment."""
        disappointment_phrases = [
            "not quite", "that's not", "hmm", "not really",
            "i was hoping", "could be better", "not exactly"
        ]
        text_lower = text.lower()
        return any(phrase in text_lower for phrase in disappointment_phrases)

    def _detect_satisfaction(self, text: str) -> bool:
        """Detect expressions of satisfaction."""
        satisfaction_phrases = [
            "thanks", "perfect", "great", "exactly", "that's what i",
            "helpful", "makes sense", "got it", "understand now"
        ]
        text_lower = text.lower()
        return any(phrase in text_lower for phrase in satisfaction_phrases)

    def end_session(self, task_success: bool) -> Dict[str, Any]:
        """End session and get metrics."""
        return self.adapter.end_session(task_success)


# =============================================================================
# Usage Example
# =============================================================================

if __name__ == "__main__":
    # Example usage
    adapter = create_baseline_adapter("full")
    adapter.initialize()

    # Simulate a session
    user_id = "test_user_001"
    adapter.start_session(user_id)

    # First turn
    response = adapter.generate_response("How do I implement quicksort?")
    print(f"Response: {response['response'][:200]}...")

    # User provides feedback (simulating enforcement)
    adapter.process_user_turn(
        "Can you use bullet points instead?",
        enforce_preferences=True
    )

    # Second turn
    response = adapter.generate_response("Can you use bullet points instead?")
    print(f"Response: {response['response'][:200]}...")

    # End session
    metrics = adapter.end_session(task_success=True)
    print(f"Session metrics: {metrics}")

    # Get user vector for analysis
    z_long = adapter.get_user_vector(user_id)
    print(f"User vector norm: {np.linalg.norm(z_long):.4f}")

    adapter.persist()