diff options
| author | YurenHao0426 <blackhao0426@gmail.com> | 2026-01-27 12:15:45 -0600 |
|---|---|---|
| committer | YurenHao0426 <blackhao0426@gmail.com> | 2026-01-27 12:15:45 -0600 |
| commit | 680513b7771a29f27cbbb3ffb009a69a913de6f9 (patch) | |
| tree | a0d60aef9ade1b2953b915f535b990c0de95e493 /scripts | |
| parent | c06ec2f3b80f8968f09eb801b69237495b055ec1 (diff) | |
local reward model
Diffstat (limited to 'scripts')
| -rw-r--r-- | scripts/test_armo_15666925.err | 31 | ||||
| -rw-r--r-- | scripts/test_armo_15666925.out | 13 | ||||
| -rw-r--r-- | scripts/test_armo_reward.py | 216 | ||||
| -rw-r--r-- | scripts/test_armo_reward.sh | 31 | ||||
| -rw-r--r-- | scripts/test_local_reward_15667278.err | 6 | ||||
| -rw-r--r-- | scripts/test_local_reward_15667278.out | 17 | ||||
| -rw-r--r-- | scripts/test_local_reward_15667317.err | 25 | ||||
| -rw-r--r-- | scripts/test_local_reward_15667317.out | 87 | ||||
| -rw-r--r-- | scripts/test_local_reward_batch.py | 206 | ||||
| -rwxr-xr-x | scripts/test_local_reward_batch.sh | 71 | ||||
| -rw-r--r-- | scripts/test_reward_cmp_15667024.err | 32 | ||||
| -rw-r--r-- | scripts/test_reward_cmp_15667024.out | 16 | ||||
| -rw-r--r-- | scripts/test_reward_cmp_15667063.err | 32 | ||||
| -rw-r--r-- | scripts/test_reward_cmp_15667063.out | 16 | ||||
| -rw-r--r-- | scripts/test_reward_cmp_15667076.err | 3 | ||||
| -rw-r--r-- | scripts/test_reward_cmp_15667076.out | 126 | ||||
| -rw-r--r-- | scripts/test_reward_cmp_15667126.err | 4 | ||||
| -rw-r--r-- | scripts/test_reward_cmp_15667126.out | 104 | ||||
| -rw-r--r-- | scripts/test_reward_comparison.py | 382 | ||||
| -rw-r--r-- | scripts/test_reward_comparison.sh | 39 |
20 files changed, 1457 insertions, 0 deletions
diff --git a/scripts/test_armo_15666925.err b/scripts/test_armo_15666925.err new file mode 100644 index 0000000..4c98df7 --- /dev/null +++ b/scripts/test_armo_15666925.err @@ -0,0 +1,31 @@ +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead. + warnings.warn( +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/huggingface_hub/file_download.py:798: UserWarning: Not enough free disk space to download the file. The expected file size is: 0.00 MB. The target location /work/hdd/bfqt/yurenh2/huggingface_cache/transformers/models--RLHFlow--ArmoRM-Llama3-8B-v0.1/blobs only has 0.00 MB free disk space. + warnings.warn( +`torch_dtype` is deprecated! Use `dtype` instead! +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/huggingface_hub/file_download.py:798: UserWarning: Not enough free disk space to download the file. The expected file size is: 0.01 MB. The target location /work/hdd/bfqt/yurenh2/huggingface_cache/transformers/models--RLHFlow--ArmoRM-Llama3-8B-v0.1/blobs only has 0.00 MB free disk space. + warnings.warn( +A new version of the following files was downloaded from https://huggingface.co/RLHFlow/ArmoRM-Llama3-8B-v0.1: +- modeling_custom.py +. Make sure to double-check they do not contain any added malicious code. To avoid downloading new versions of the code file, you can pin a revision. 
+Traceback (most recent call last): + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_armo_reward.py", line 216, in <module> + main() + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_armo_reward.py", line 42, in main + model.load() + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/../src/personalization/feedback/armo_reward.py", line 81, in load + self._model = AutoModelForSequenceClassification.from_pretrained( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py", line 586, in from_pretrained + model_class = get_class_from_dynamic_module( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/dynamic_module_utils.py", line 616, in get_class_from_dynamic_module + return get_class_in_module(class_name, final_module, force_reload=force_download) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/dynamic_module_utils.py", line 311, in get_class_in_module + module_spec.loader.exec_module(module) + File "<frozen importlib._bootstrap_external>", line 940, in exec_module + File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed + File "/projects/bfqt/users/yurenh2/hf_cache/huggingface/modules/transformers_modules/RLHFlow/ArmoRM_hyphen_Llama3_hyphen_8B_hyphen_v0_dot_1/eb2676d20da2f2d41082289d23c59b9f7427f955/modeling_custom.py", line 9, in <module> + from transformers.models.llama.modeling_llama import LLAMA_INPUTS_DOCSTRING +ImportError: cannot import name 'LLAMA_INPUTS_DOCSTRING' from 'transformers.models.llama.modeling_llama' (/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py) diff --git 
a/scripts/test_armo_15666925.out b/scripts/test_armo_15666925.out new file mode 100644 index 0000000..7b47c6c --- /dev/null +++ b/scripts/test_armo_15666925.out @@ -0,0 +1,13 @@ +=== Testing ArmoRM Reward Model === +GPU: NVIDIA A100-SXM4-40GB + +====================================================================== +ArmoRM Reward Model Test +====================================================================== +Device: cuda +Model: RLHFlow/ArmoRM-Llama3-8B-v0.1 + +Loading model... +[ArmoRM] Loading model RLHFlow/ArmoRM-Llama3-8B-v0.1 on cuda... + +=== Test Complete === diff --git a/scripts/test_armo_reward.py b/scripts/test_armo_reward.py new file mode 100644 index 0000000..534ab84 --- /dev/null +++ b/scripts/test_armo_reward.py @@ -0,0 +1,216 @@ +#!/usr/bin/env python3 +""" +Test script for ArmoRM reward model. + +Usage: + python scripts/test_armo_reward.py [--device cuda:0] +""" +import argparse +import sys +import os + +# Add src to path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src')) + +from personalization.feedback.armo_reward import ( + ArmoRMRewardModel, + ArmoRewardConfig, + create_armo_reward_model, +) + + +def main(): + parser = argparse.ArgumentParser(description="Test ArmoRM reward model") + parser.add_argument("--device", type=str, default="cuda", help="Device to use") + parser.add_argument("--model-id", type=str, default="RLHFlow/ArmoRM-Llama3-8B-v0.1") + args = parser.parse_args() + + print("=" * 70) + print("ArmoRM Reward Model Test") + print("=" * 70) + print(f"Device: {args.device}") + print(f"Model: {args.model_id}") + print() + + # Create model + print("Loading model...") + config = ArmoRewardConfig( + model_id=args.model_id, + device=args.device, + ) + model = ArmoRMRewardModel(config) + model.load() + print("Model loaded!\n") + + # Test cases + test_cases = [ + { + "name": "Good factual answer", + "messages": [ + {"role": "user", "content": "What is the capital of France?"}, + {"role": "assistant", "content": "The 
capital of France is Paris. Paris is not only the capital but also the largest city in France, located in the north-central part of the country along the Seine River."}, + ], + "expected": "high score (>0.7)", + }, + { + "name": "Minimal answer", + "messages": [ + {"role": "user", "content": "What is the capital of France?"}, + {"role": "assistant", "content": "Paris."}, + ], + "expected": "medium score (0.4-0.7)", + }, + { + "name": "Wrong answer", + "messages": [ + {"role": "user", "content": "What is the capital of France?"}, + {"role": "assistant", "content": "The capital of France is London."}, + ], + "expected": "low score (<0.4)", + }, + { + "name": "Helpful detailed explanation", + "messages": [ + {"role": "user", "content": "Explain how to sort a list in Python"}, + {"role": "assistant", "content": """There are several ways to sort a list in Python: + +1. **Using sorted()** - Returns a new sorted list: + ```python + my_list = [3, 1, 4, 1, 5] + sorted_list = sorted(my_list) # [1, 1, 3, 4, 5] + ``` + +2. **Using list.sort()** - Sorts in place: + ```python + my_list = [3, 1, 4, 1, 5] + my_list.sort() # my_list is now [1, 1, 3, 4, 5] + ``` + +3. **Reverse sorting**: + ```python + sorted_list = sorted(my_list, reverse=True) + ``` + +4. 
**Custom key function**: + ```python + words = ['apple', 'Banana', 'cherry'] + sorted_words = sorted(words, key=str.lower) + ```"""}, + ], + "expected": "high score (>0.7)", + }, + { + "name": "Unhelpful response", + "messages": [ + {"role": "user", "content": "Explain how to sort a list in Python"}, + {"role": "assistant", "content": "Just use sort."}, + ], + "expected": "low score (<0.4)", + }, + ] + + print("Running test cases...\n") + for i, tc in enumerate(test_cases, 1): + print(f"--- Test {i}: {tc['name']} ---") + print(f"Expected: {tc['expected']}") + + result = model.score_response(tc["messages"]) + + print(f"Score: {result.score:.4f}") + print(f"Reward: {result.reward:.2f}") + print(f"Should Update: {result.should_update}") + print() + + # Test preference compliance + print("=" * 70) + print("Testing Preference Compliance Scenarios") + print("=" * 70) + print() + + compliance_tests = [ + { + "name": "User satisfied (preference followed)", + "query": "Can you explain recursion? I prefer examples with code.", + "response": """Recursion is when a function calls itself. Here's a classic example - calculating factorial: + +```python +def factorial(n): + if n <= 1: + return 1 + return n * factorial(n - 1) + +print(factorial(5)) # Output: 120 +``` + +The function calls itself with a smaller value until it reaches the base case (n <= 1).""", + "followup": "Perfect! That's exactly what I needed. Can you show me another example with Fibonacci?", + }, + { + "name": "User dissatisfied (preference NOT followed)", + "query": "Can you explain recursion? I prefer examples with code.", + "response": "Recursion is a programming concept where a function calls itself to solve smaller instances of the same problem.", + "followup": "I specifically asked for code examples. Please show me some actual code demonstrating recursion.", + }, + { + "name": "User correcting format preference", + "query": "List 5 benefits of meditation. 
Use bullet points please.", + "response": "Meditation has many benefits. First, it reduces stress. Second, it improves focus. Third, it promotes emotional health. Fourth, it enhances self-awareness. Fifth, it can reduce anxiety.", + "followup": "I asked for bullet points, not numbered sentences. Can you reformat that?", + }, + ] + + for i, tc in enumerate(compliance_tests, 1): + print(f"--- Compliance Test {i}: {tc['name']} ---") + print(f"Query: {tc['query'][:60]}...") + print(f"Followup: {tc['followup'][:60]}...") + + result = model.estimate_preference_compliance( + query=tc["query"], + response=tc["response"], + user_followup=tc["followup"], + ) + + print(f"Score: {result.score:.4f}") + print(f"Reward: {result.reward:.2f}") + print(f"Should Update: {result.should_update}") + print() + + # Test response comparison + print("=" * 70) + print("Testing Response Comparison") + print("=" * 70) + print() + + query = "What are the health benefits of drinking water?" + response_a = "Water is good for health." + response_b = """Drinking adequate water provides numerous health benefits: + +1. **Hydration**: Maintains fluid balance for bodily functions +2. **Digestion**: Aids in breaking down food and nutrient absorption +3. **Skin Health**: Keeps skin moisturized and may reduce wrinkles +4. **Kidney Function**: Helps flush out toxins and prevents kidney stones +5. **Energy**: Prevents fatigue caused by dehydration +6. **Weight Management**: Can reduce appetite when consumed before meals +7. 
**Joint Health**: Lubricates and cushions joints + +The general recommendation is 8 glasses (64 oz) per day, though needs vary by individual.""" + + print(f"Query: {query}") + print(f"Response A: {response_a}") + print(f"Response B: {response_b[:100]}...") + + score_a, score_b, winner = model.compare_responses(query, response_a, response_b) + print(f"\nScore A: {score_a:.4f}") + print(f"Score B: {score_b:.4f}") + print(f"Winner: {winner.upper()}") + + print("\n" + "=" * 70) + print("All tests complete!") + print("=" * 70) + + # Cleanup + model.cleanup() + + +if __name__ == "__main__": + main() diff --git a/scripts/test_armo_reward.sh b/scripts/test_armo_reward.sh new file mode 100644 index 0000000..18004c7 --- /dev/null +++ b/scripts/test_armo_reward.sh @@ -0,0 +1,31 @@ +#!/bin/bash +#SBATCH --job-name=test_armo +#SBATCH --account=bfqt-delta-gpu +#SBATCH --partition=gpuA100x4 +#SBATCH --nodes=1 +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=8 +#SBATCH --gres=gpu:nvidia_a100:1 +#SBATCH --mem=64G +#SBATCH --time=0:15:00 +#SBATCH --output=test_armo_%j.out +#SBATCH --error=test_armo_%j.err + +# Test ArmoRM reward model +# Requires ~16GB GPU memory for 8B model + +cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model +source /u/yurenh2/miniforge3/etc/profile.d/conda.sh +conda activate eval + +export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface +export PYTHONPATH="${PWD}/src:${PYTHONPATH}" + +echo "=== Testing ArmoRM Reward Model ===" +echo "GPU: $(nvidia-smi --query-gpu=name --format=csv,noheader)" +echo "" + +python scripts/test_armo_reward.py --device cuda + +echo "" +echo "=== Test Complete ===" diff --git a/scripts/test_local_reward_15667278.err b/scripts/test_local_reward_15667278.err new file mode 100644 index 0000000..8ef137a --- /dev/null +++ b/scripts/test_local_reward_15667278.err @@ -0,0 +1,6 @@ +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is 
deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead. + warnings.warn( +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead. + warnings.warn( +[0;36m(EngineCore_DP0 pid=2802146)[0;0m
Loading safetensors checkpoint shards: 0% Completed | 0/4 [00:00<?, ?it/s] +[0;36m(EngineCore_DP0 pid=2802146)[0;0m
Loading safetensors checkpoint shards: 25% Completed | 1/4 [00:03<00:11, 3.73s/it] diff --git a/scripts/test_local_reward_15667278.out b/scripts/test_local_reward_15667278.out new file mode 100644 index 0000000..9108abd --- /dev/null +++ b/scripts/test_local_reward_15667278.out @@ -0,0 +1,17 @@ +=== Local LLM Reward Model Batch Test === +Model: models/llama-3.1-8b-instruct +GPU: NVIDIA A100-SXM4-40GB + +Starting vLLM server on port 8005... +Waiting for vLLM server to start... +[0;36m(APIServer pid=2801991)[0;0m INFO 01-27 11:59:38 [api_server.py:1351] vLLM API server version 0.13.0 +[0;36m(APIServer pid=2801991)[0;0m INFO 01-27 11:59:38 [utils.py:253] non-default args: {'port': 8005, 'model': 'models/llama-3.1-8b-instruct', 'dtype': 'bfloat16', 'max_model_len': 4096, 'gpu_memory_utilization': 0.85} +[0;36m(APIServer pid=2801991)[0;0m INFO 01-27 11:59:39 [model.py:514] Resolved architecture: LlamaForCausalLM +[0;36m(APIServer pid=2801991)[0;0m INFO 01-27 11:59:39 [model.py:1661] Using max model len 4096 +[0;36m(APIServer pid=2801991)[0;0m INFO 01-27 11:59:40 [scheduler.py:230] Chunked prefill is enabled with max_num_batched_tokens=2048. 
+[0;36m(EngineCore_DP0 pid=2802146)[0;0m INFO 01-27 11:59:52 [core.py:93] Initializing a V1 LLM engine (v0.13.0) with config: model='models/llama-3.1-8b-instruct', speculative_config=None, tokenizer='models/llama-3.1-8b-instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, data_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, structured_outputs_config=StructuredOutputsConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_parser='', reasoning_parser_plugin='', enable_in_reasoning=False), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None, kv_cache_metrics=False, kv_cache_metrics_sample=0.01, cudagraph_metrics=False, enable_layerwise_nvtx_tracing=False), seed=0, served_model_name=models/llama-3.1-8b-instruct, enable_prefix_caching=True, enable_chunked_prefill=True, pooler_config=None, compilation_config={'level': None, 'mode': <CompilationMode.VLLM_COMPILE: 3>, 'debug_dump_path': None, 'cache_dir': '', 'compile_cache_save_format': 'binary', 'backend': 'inductor', 'custom_ops': ['none'], 'splitting_ops': ['vllm::unified_attention', 'vllm::unified_attention_with_output', 'vllm::unified_mla_attention', 'vllm::unified_mla_attention_with_output', 'vllm::mamba_mixer2', 'vllm::mamba_mixer', 'vllm::short_conv', 'vllm::linear_attention', 'vllm::plamo2_mamba_mixer', 'vllm::gdn_attention_core', 'vllm::kda_attention', 'vllm::sparse_attn_indexer'], 'compile_mm_encoder': False, 'compile_sizes': [], 'compile_ranges_split_points': [2048], 'inductor_compile_config': {'enable_auto_functionalized_v2': False, 'combo_kernels': True, 'benchmark_combo_kernel': 
True}, 'inductor_passes': {}, 'cudagraph_mode': <CUDAGraphMode.FULL_AND_PIECEWISE: (2, 1)>, 'cudagraph_num_of_warmups': 1, 'cudagraph_capture_sizes': [1, 2, 4, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224, 232, 240, 248, 256, 272, 288, 304, 320, 336, 352, 368, 384, 400, 416, 432, 448, 464, 480, 496, 512], 'cudagraph_copy_inputs': False, 'cudagraph_specialize_lora': True, 'use_inductor_graph_partition': False, 'pass_config': {'fuse_norm_quant': False, 'fuse_act_quant': False, 'fuse_attn_quant': False, 'eliminate_noops': True, 'enable_sp': False, 'fuse_gemm_comms': False, 'fuse_allreduce_rms': False}, 'max_cudagraph_capture_size': 512, 'dynamic_shapes_config': {'type': <DynamicShapesType.BACKED: 'backed'>, 'evaluate_guards': False}, 'local_cache_dir': None} +[0;36m(EngineCore_DP0 pid=2802146)[0;0m INFO 01-27 11:59:54 [parallel_state.py:1203] world_size=1 rank=0 local_rank=0 distributed_init_method=tcp://141.142.254.34:42557 backend=nccl +[0;36m(EngineCore_DP0 pid=2802146)[0;0m INFO 01-27 11:59:54 [parallel_state.py:1411] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, PCP rank 0, TP rank 0, EP rank 0 +[0;36m(EngineCore_DP0 pid=2802146)[0;0m INFO 01-27 11:59:56 [gpu_model_runner.py:3562] Starting to load model models/llama-3.1-8b-instruct... +[0;36m(EngineCore_DP0 pid=2802146)[0;0m INFO 01-27 11:59:57 [cuda.py:351] Using FLASH_ATTN attention backend out of potential backends: ('FLASH_ATTN', 'FLASHINFER', 'TRITON_ATTN', 'FLEX_ATTENTION') +ERROR: vLLM server failed to start diff --git a/scripts/test_local_reward_15667317.err b/scripts/test_local_reward_15667317.err new file mode 100644 index 0000000..819b108 --- /dev/null +++ b/scripts/test_local_reward_15667317.err @@ -0,0 +1,25 @@ +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. 
Use `HF_HOME` instead. + warnings.warn( +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead. + warnings.warn( +[0;36m(EngineCore_DP0 pid=2400286)[0;0m
Loading safetensors checkpoint shards: 0% Completed | 0/4 [00:00<?, ?it/s] +[0;36m(EngineCore_DP0 pid=2400286)[0;0m
Loading safetensors checkpoint shards: 25% Completed | 1/4 [00:03<00:11, 3.75s/it] +[0;36m(EngineCore_DP0 pid=2400286)[0;0m
Loading safetensors checkpoint shards: 50% Completed | 2/4 [00:05<00:05, 2.54s/it] +[0;36m(EngineCore_DP0 pid=2400286)[0;0m
Loading safetensors checkpoint shards: 75% Completed | 3/4 [00:12<00:04, 4.60s/it] +[0;36m(EngineCore_DP0 pid=2400286)[0;0m
Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:16<00:00, 4.28s/it] +[0;36m(EngineCore_DP0 pid=2400286)[0;0m
Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:16<00:00, 4.07s/it] +[0;36m(EngineCore_DP0 pid=2400286)[0;0m +[0;36m(EngineCore_DP0 pid=2400286)[0;0m
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 0%| | 0/51 [00:00<?, ?it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 4%|▍ | 2/51 [00:00<00:03, 14.89it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 8%|▊ | 4/51 [00:00<00:03, 15.63it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 12%|█▏ | 6/51 [00:00<00:02, 16.10it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 16%|█▌ | 8/51 [00:00<00:02, 16.66it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 22%|██▏ | 11/51 [00:00<00:02, 18.08it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 27%|██▋ | 14/51 [00:00<00:01, 18.87it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 33%|███▎ | 17/51 [00:00<00:01, 19.62it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 39%|███▉ | 20/51 [00:01<00:01, 20.60it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 45%|████▌ | 23/51 [00:01<00:01, 21.25it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 51%|█████ | 26/51 [00:01<00:01, 21.79it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 57%|█████▋ | 29/51 [00:01<00:01, 20.08it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 63%|██████▎ | 32/51 [00:01<00:00, 21.01it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 69%|██████▊ | 35/51 [00:01<00:00, 21.87it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 75%|███████▍ | 38/51 [00:01<00:00, 22.54it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 80%|████████ | 41/51 [00:01<00:00, 23.14it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 86%|████████▋ | 44/51 [00:02<00:00, 23.73it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 92%|█████████▏| 47/51 [00:02<00:00, 23.48it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 98%|█████████▊| 50/51 [00:02<00:00, 24.09it/s]
Capturing CUDA graphs (mixed prefill-decode, PIECEWISE): 100%|██████████| 51/51 [00:02<00:00, 20.82it/s] +[0;36m(EngineCore_DP0 pid=2400286)[0;0m
Capturing CUDA graphs (decode, FULL): 0%| | 0/35 [00:00<?, ?it/s]
Capturing CUDA graphs (decode, FULL): 3%|▎ | 1/35 [00:00<00:05, 6.28it/s]
Capturing CUDA graphs (decode, FULL): 11%|█▏ | 4/35 [00:00<00:02, 15.17it/s]
Capturing CUDA graphs (decode, FULL): 20%|██ | 7/35 [00:00<00:01, 18.56it/s]
Capturing CUDA graphs (decode, FULL): 29%|██▊ | 10/35 [00:00<00:01, 19.82it/s]
Capturing CUDA graphs (decode, FULL): 37%|███▋ | 13/35 [00:00<00:01, 20.21it/s]
Capturing CUDA graphs (decode, FULL): 46%|████▌ | 16/35 [00:00<00:00, 21.27it/s]
Capturing CUDA graphs (decode, FULL): 54%|█████▍ | 19/35 [00:00<00:00, 21.99it/s]
Capturing CUDA graphs (decode, FULL): 63%|██████▎ | 22/35 [00:01<00:00, 22.54it/s]
Capturing CUDA graphs (decode, FULL): 71%|███████▏ | 25/35 [00:01<00:00, 22.97it/s]
Capturing CUDA graphs (decode, FULL): 80%|████████ | 28/35 [00:01<00:00, 22.23it/s]
Capturing CUDA graphs (decode, FULL): 89%|████████▊ | 31/35 [00:01<00:00, 22.90it/s]
Capturing CUDA graphs (decode, FULL): 97%|█████████▋| 34/35 [00:01<00:00, 23.36it/s]
Capturing CUDA graphs (decode, FULL): 100%|██████████| 35/35 [00:01<00:00, 21.34it/s] +[0;36m(APIServer pid=2399803)[0;0m INFO: Started server process [2399803] +[0;36m(APIServer pid=2399803)[0;0m INFO: Waiting for application startup. +[0;36m(APIServer pid=2399803)[0;0m INFO: Application startup complete. +Traceback (most recent call last): + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_local_reward_batch.py", line 202, in <module> + main() + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_local_reward_batch.py", line 147, in main + samples = [ + ^ + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_local_reward_batch.py", line 148, in <listcomp> + TurnSample( +TypeError: TurnSample.__init__() missing 4 required positional arguments: 'user_id', 'session_id', 'turn_id', and 'memories' diff --git a/scripts/test_local_reward_15667317.out b/scripts/test_local_reward_15667317.out new file mode 100644 index 0000000..6e45a21 --- /dev/null +++ b/scripts/test_local_reward_15667317.out @@ -0,0 +1,87 @@ +=== Local LLM Reward Model Batch Test === +Model: models/llama-3.1-8b-instruct +GPU: NVIDIA A100-SXM4-40GB + +Starting vLLM server on port 8005... +Waiting for vLLM server to start... +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:03:02 [api_server.py:1351] vLLM API server version 0.13.0 +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:03:02 [utils.py:253] non-default args: {'port': 8005, 'model': 'models/llama-3.1-8b-instruct', 'dtype': 'bfloat16', 'max_model_len': 4096, 'gpu_memory_utilization': 0.85} +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:03:02 [model.py:514] Resolved architecture: LlamaForCausalLM +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:03:02 [model.py:1661] Using max model len 4096 +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:03:03 [scheduler.py:230] Chunked prefill is enabled with max_num_batched_tokens=2048. 
+[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:15 [core.py:93] Initializing a V1 LLM engine (v0.13.0) with config: model='models/llama-3.1-8b-instruct', speculative_config=None, tokenizer='models/llama-3.1-8b-instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, data_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, structured_outputs_config=StructuredOutputsConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_parser='', reasoning_parser_plugin='', enable_in_reasoning=False), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None, kv_cache_metrics=False, kv_cache_metrics_sample=0.01, cudagraph_metrics=False, enable_layerwise_nvtx_tracing=False), seed=0, served_model_name=models/llama-3.1-8b-instruct, enable_prefix_caching=True, enable_chunked_prefill=True, pooler_config=None, compilation_config={'level': None, 'mode': <CompilationMode.VLLM_COMPILE: 3>, 'debug_dump_path': None, 'cache_dir': '', 'compile_cache_save_format': 'binary', 'backend': 'inductor', 'custom_ops': ['none'], 'splitting_ops': ['vllm::unified_attention', 'vllm::unified_attention_with_output', 'vllm::unified_mla_attention', 'vllm::unified_mla_attention_with_output', 'vllm::mamba_mixer2', 'vllm::mamba_mixer', 'vllm::short_conv', 'vllm::linear_attention', 'vllm::plamo2_mamba_mixer', 'vllm::gdn_attention_core', 'vllm::kda_attention', 'vllm::sparse_attn_indexer'], 'compile_mm_encoder': False, 'compile_sizes': [], 'compile_ranges_split_points': [2048], 'inductor_compile_config': {'enable_auto_functionalized_v2': False, 'combo_kernels': True, 'benchmark_combo_kernel': 
True}, 'inductor_passes': {}, 'cudagraph_mode': <CUDAGraphMode.FULL_AND_PIECEWISE: (2, 1)>, 'cudagraph_num_of_warmups': 1, 'cudagraph_capture_sizes': [1, 2, 4, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224, 232, 240, 248, 256, 272, 288, 304, 320, 336, 352, 368, 384, 400, 416, 432, 448, 464, 480, 496, 512], 'cudagraph_copy_inputs': False, 'cudagraph_specialize_lora': True, 'use_inductor_graph_partition': False, 'pass_config': {'fuse_norm_quant': False, 'fuse_act_quant': False, 'fuse_attn_quant': False, 'eliminate_noops': True, 'enable_sp': False, 'fuse_gemm_comms': False, 'fuse_allreduce_rms': False}, 'max_cudagraph_capture_size': 512, 'dynamic_shapes_config': {'type': <DynamicShapesType.BACKED: 'backed'>, 'evaluate_guards': False}, 'local_cache_dir': None} +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:17 [parallel_state.py:1203] world_size=1 rank=0 local_rank=0 distributed_init_method=tcp://141.142.254.16:34265 backend=nccl +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:17 [parallel_state.py:1411] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, PCP rank 0, TP rank 0, EP rank 0 +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:19 [gpu_model_runner.py:3562] Starting to load model models/llama-3.1-8b-instruct... 
+[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:20 [cuda.py:351] Using FLASH_ATTN attention backend out of potential backends: ('FLASH_ATTN', 'FLASHINFER', 'TRITON_ATTN', 'FLEX_ATTENTION') +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:37 [default_loader.py:308] Loading weights took 16.45 seconds +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:37 [gpu_model_runner.py:3659] Model loading took 14.9889 GiB memory and 17.567555 seconds +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:47 [backends.py:643] Using cache directory: /u/yurenh2/.cache/vllm/torch_compile_cache/1c763cd906/rank_0_0/backbone for vLLM's torch.compile +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:47 [backends.py:703] Dynamo bytecode transform time: 9.88 s +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:54 [backends.py:261] Cache the graph of compile range (1, 2048) for later use +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:59 [backends.py:278] Compiling a graph for compile range (1, 2048) takes 8.04 s +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:03:59 [monitor.py:34] torch.compile takes 17.92 s in total +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:04:00 [gpu_worker.py:375] Available KV cache memory: 17.35 GiB +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:04:00 [kv_cache_utils.py:1291] GPU KV cache size: 142,160 tokens +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:04:00 [kv_cache_utils.py:1296] Maximum concurrency for 4,096 tokens per request: 34.71x +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:04:05 [gpu_model_runner.py:4587] Graph capturing finished in 5 secs, took 0.56 GiB +[0;36m(EngineCore_DP0 pid=2400286)[0;0m INFO 01-27 12:04:05 [core.py:259] init engine (profile, create kv cache, warmup model) took 27.83 seconds +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [api_server.py:1099] Supported tasks: ['generate'] +[0;36m(APIServer pid=2399803)[0;0m WARNING 01-27 
12:04:06 [model.py:1487] Default sampling parameters have been overridden by the model's Hugging Face generation config recommended from the model creator. If this is not intended, please relaunch vLLM instance with `--generation-config vllm`. +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [serving_responses.py:201] Using default chat sampling params from model: {'temperature': 0.6, 'top_p': 0.9} +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [serving_chat.py:137] Using default chat sampling params from model: {'temperature': 0.6, 'top_p': 0.9} +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [serving_completion.py:77] Using default completion sampling params from model: {'temperature': 0.6, 'top_p': 0.9} +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [serving_chat.py:137] Using default chat sampling params from model: {'temperature': 0.6, 'top_p': 0.9} +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [api_server.py:1425] Starting vLLM API server 0 on http://0.0.0.0:8005 +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:38] Available routes are: +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /openapi.json, Methods: GET, HEAD +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /docs, Methods: GET, HEAD +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /docs/oauth2-redirect, Methods: GET, HEAD +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /redoc, Methods: GET, HEAD +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /scale_elastic_ep, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /is_scaling_elastic_ep, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /tokenize, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /detokenize, 
Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /inference/v1/generate, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /pause, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /resume, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /is_paused, Methods: GET +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /metrics, Methods: GET +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /health, Methods: GET +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /load, Methods: GET +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/models, Methods: GET +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /version, Methods: GET +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/responses, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/responses/{response_id}, Methods: GET +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/responses/{response_id}/cancel, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/messages, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/chat/completions, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/completions, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/audio/transcriptions, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/audio/translations, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /ping, Methods: GET 
+[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /ping, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /invocations, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /classify, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/embeddings, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /score, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/score, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /rerank, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v1/rerank, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /v2/rerank, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO 01-27 12:04:06 [launcher.py:46] Route: /pooling, Methods: POST +[0;36m(APIServer pid=2399803)[0;0m INFO: 127.0.0.1:32858 - "GET /health HTTP/1.1" 200 OK +vLLM server ready after 100s +[0;36m(APIServer pid=2399803)[0;0m INFO: 127.0.0.1:32866 - "GET /health HTTP/1.1" 200 OK + +Running batch test... +[0;36m(APIServer pid=2399803)[0;0m INFO: 127.0.0.1:32870 - "GET /v1/models HTTP/1.1" 200 OK +====================================================================== +Local LLM Reward Model Batch Test +====================================================================== +vLLM URL: http://localhost:8005/v1 + +Model: models/llama-3.1-8b-instruct + + +=== Test Complete === diff --git a/scripts/test_local_reward_batch.py b/scripts/test_local_reward_batch.py new file mode 100644 index 0000000..7afb834 --- /dev/null +++ b/scripts/test_local_reward_batch.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python3 +""" +Test batch reward estimation with local LLM via vLLM. + +Verifies that: +1. 
LocalLLMRewardClient works with vLLM server +2. Batch processing is efficient (concurrent, not sequential) +3. Results match expected labels +""" +import argparse +import asyncio +import sys +import os +import time + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src')) + +from personalization.feedback.schemas import TurnSample +from personalization.feedback.local_llm_reward import ( + LocalLLMRewardClient, + LocalLLMRewardConfig, +) + +# Test cases with expected labels +TEST_CASES = [ + { + "name": "neg_constraint_restate - format preference", + "query_t": "Explain how sorting works in Python. Please use bullet points.", + "answer_t": "Sorting in Python can be done using the sorted() function or the list.sort() method. The sorted() function returns a new sorted list, while sort() modifies the list in place. Both accept a key parameter for custom sorting and a reverse parameter for descending order.", + "query_t1": "I asked for bullet points. Can you reformat that with bullet points please?", + "expected": "neg_constraint_restate", + }, + { + "name": "neg_constraint_restate - step by step", + "query_t": "Solve x^2 - 5x + 6 = 0. Show step by step.", + "answer_t": "The solutions are x = 2 and x = 3.", + "query_t1": "As I said, I need to see the step-by-step solution, not just the answer.", + "expected": "neg_constraint_restate", + }, + { + "name": "neg_correction - wrong answer", + "query_t": "What is the capital of Australia?", + "answer_t": "The capital of Australia is Sydney.", + "query_t1": "That's incorrect. Sydney is not the capital of Australia.", + "expected": "neg_correction", + }, + { + "name": "neg_confusion - unclear explanation", + "query_t": "What is recursion in programming?", + "answer_t": "Recursion is when a function calls itself in a self-similar way to solve problems.", + "query_t1": "I'm confused. What do you mean by 'self-similar way'? 
Can you explain more clearly?", + "expected": "neg_confusion", + }, + { + "name": "pos_praise - explicit thanks", + "query_t": "How do I center a div in CSS?", + "answer_t": "You can center a div using flexbox: set the parent to `display: flex; justify-content: center; align-items: center;`. Alternatively, use `margin: 0 auto;` for horizontal centering with a defined width.", + "query_t1": "Perfect, thank you! That's exactly what I needed.", + "expected": "pos_praise", + }, + { + "name": "pos_praise - great explanation", + "query_t": "Explain how photosynthesis works.", + "answer_t": "Photosynthesis is the process by which plants convert sunlight, water, and CO2 into glucose and oxygen. It occurs in chloroplasts, with light-dependent reactions in the thylakoid membrane and the Calvin cycle in the stroma.", + "query_t1": "Great explanation! This really helped me understand the concept.", + "expected": "pos_praise", + }, + { + "name": "pos_progress - follow-up question", + "query_t": "What is a binary search tree?", + "answer_t": "A binary search tree (BST) is a data structure where each node has at most two children. The left subtree contains only nodes with values less than the parent, and the right subtree only nodes with values greater than the parent.", + "query_t1": "Interesting! How would I implement insertion into a BST?", + "expected": "pos_progress", + }, + { + "name": "pos_progress - extension", + "query_t": "How do I read a file in Python?", + "answer_t": "Use `with open('file.txt', 'r') as f: content = f.read()`. The 'with' statement ensures the file is properly closed.", + "query_t1": "Got it. 
What if I want to read it line by line instead?", + "expected": "pos_progress", + }, + { + "name": "neutral - minimal response", + "query_t": "What's 2 + 2?", + "answer_t": "2 + 2 = 4", + "query_t1": "Ok.", + "expected": "neutral", + }, + { + "name": "topic_shift - new topic", + "query_t": "What is the Pythagorean theorem?", + "answer_t": "The Pythagorean theorem states that in a right triangle, a² + b² = c², where c is the hypotenuse.", + "query_t1": "By the way, can you help me write a poem about nature?", + "expected": "topic_shift", + }, + { + "name": "neg_constraint_restate - language preference", + "query_t": "Explain machine learning in simple terms.", + "answer_t": "Machine learning is a subset of artificial intelligence that uses statistical techniques to enable computers to learn from data. It involves training models on datasets to make predictions or decisions without being explicitly programmed for specific tasks.", + "query_t1": "Remember I asked for simple terms? That's too technical. Can you explain like I'm 5?", + "expected": "neg_constraint_restate", + }, + { + "name": "neg_correction - incomplete answer", + "query_t": "List all the planets in our solar system.", + "answer_t": "The planets are Mercury, Venus, Earth, Mars, Jupiter, and Saturn.", + "query_t1": "You're missing Uranus and Neptune. 
There are 8 planets, not 6.", + "expected": "neg_correction", + }, +] + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--vllm-url", + type=str, + default="http://localhost:8005/v1", + help="vLLM server URL for reward model", + ) + parser.add_argument( + "--batch-size", + type=int, + default=12, + help="Batch size (default: all 12 test cases)", + ) + args = parser.parse_args() + + print("=" * 70) + print("Local LLM Reward Model Batch Test") + print("=" * 70) + print(f"vLLM URL: {args.vllm_url}") + print() + + # Create client + config = LocalLLMRewardConfig( + vllm_url=args.vllm_url, + max_tokens=256, + temperature=0.1, + max_concurrent=50, + ) + client = LocalLLMRewardClient(config) + print(f"Model: {client._model_name}") + print() + + # Convert test cases to TurnSamples + samples = [ + TurnSample( + user_id="test_user", + session_id="test_session", + turn_id=i, + query_t=tc["query_t"], + answer_t=tc["answer_t"], + query_t1=tc["query_t1"], + memories=[], # Not needed for reward classification + ) + for i, tc in enumerate(TEST_CASES[:args.batch_size]) + ] + + # Run batch inference + print(f"Running batch inference on {len(samples)} samples...") + t0 = time.time() + results = client.judge_batch(samples) + elapsed = time.time() - t0 + + print(f"Completed in {elapsed:.2f}s ({len(samples)/elapsed:.1f} samples/sec)") + print() + + # Analyze results + correct = 0 + for i, (tc, result) in enumerate(zip(TEST_CASES[:args.batch_size], results)): + is_correct = result.label == tc["expected"] + if is_correct: + correct += 1 + status = "OK" if is_correct else "WRONG" + + print(f"[{i+1:2d}] {tc['name'][:45]:45s}") + print(f" Expected: {tc['expected']:25s} Got: {result.label:25s} [{status}]") + print(f" Confidence: {result.confidence:.2f}, Reward: {result.reward:+.1f}, Update: {result.should_update}") + print() + + # Summary + print("=" * 70) + print("SUMMARY") + print("=" * 70) + accuracy = correct / len(samples) * 100 + print(f"Accuracy: 
{accuracy:.1f}% ({correct}/{len(samples)})") + print(f"Time: {elapsed:.2f}s") + print(f"Throughput: {len(samples)/elapsed:.1f} samples/sec") + print(f"Avg latency: {elapsed/len(samples)*1000:.0f}ms per sample (batched)") + print() + + # Errors + errors = [ + (tc, result) + for tc, result in zip(TEST_CASES[:args.batch_size], results) + if result.label != tc["expected"] + ] + if errors: + print(f"Errors ({len(errors)}):") + for tc, result in errors: + print(f" - {tc['name']}: Got {result.label}, Expected {tc['expected']}") + + +if __name__ == "__main__": + main() diff --git a/scripts/test_local_reward_batch.sh b/scripts/test_local_reward_batch.sh new file mode 100755 index 0000000..675ab76 --- /dev/null +++ b/scripts/test_local_reward_batch.sh @@ -0,0 +1,71 @@ +#!/bin/bash +#SBATCH --job-name=test_local_reward +#SBATCH --account=bfqt-delta-gpu +#SBATCH --partition=gpuA100x4 +#SBATCH --nodes=1 +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=8 +#SBATCH --gres=gpu:nvidia_a100:1 +#SBATCH --mem=48G +#SBATCH --time=0:30:00 +#SBATCH --output=test_local_reward_%j.out +#SBATCH --error=test_local_reward_%j.err + +# Test LocalLLMRewardClient with vLLM server + +cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model +source /u/yurenh2/miniforge3/etc/profile.d/conda.sh +conda activate eval + +export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface +export PYTHONPATH="${PWD}/src:${PYTHONPATH}" + +REWARD_MODEL="models/llama-3.1-8b-instruct" +REWARD_PORT=8005 + +echo "=== Local LLM Reward Model Batch Test ===" +echo "Model: $REWARD_MODEL" +echo "GPU: $(nvidia-smi --query-gpu=name --format=csv,noheader | head -1)" +echo "" + +# Start vLLM server for reward model +echo "Starting vLLM server on port $REWARD_PORT..." +python -m vllm.entrypoints.openai.api_server \ + --model $REWARD_MODEL \ + --port $REWARD_PORT \ + --tensor-parallel-size 1 \ + --dtype bfloat16 \ + --max-model-len 4096 \ + --gpu-memory-utilization 0.85 \ + --disable-log-requests \ + & +VLLM_PID=$! 
+ +# Wait for server to be ready (model loading can take 2-3 minutes) +echo "Waiting for vLLM server to start..." +for i in {1..180}; do + if curl -s http://localhost:$REWARD_PORT/health > /dev/null 2>&1; then + echo "vLLM server ready after ${i}s" + break + fi + sleep 1 +done + +# Check if server started +if ! curl -s http://localhost:$REWARD_PORT/health > /dev/null 2>&1; then + echo "ERROR: vLLM server failed to start" + kill $VLLM_PID 2>/dev/null + exit 1 +fi + +echo "" +echo "Running batch test..." +python scripts/test_local_reward_batch.py \ + --vllm-url http://localhost:$REWARD_PORT/v1 \ + --batch-size 12 + +echo "" +echo "=== Test Complete ===" + +# Cleanup +kill $VLLM_PID 2>/dev/null diff --git a/scripts/test_reward_cmp_15667024.err b/scripts/test_reward_cmp_15667024.err new file mode 100644 index 0000000..3fc1f6a --- /dev/null +++ b/scripts/test_reward_cmp_15667024.err @@ -0,0 +1,32 @@ +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead. + warnings.warn( +`torch_dtype` is deprecated! Use `dtype` instead! 
+Traceback (most recent call last): + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 382, in <module> + main() + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 378, in main + asyncio.run(run_comparison(args.local_model, args.device)) + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/asyncio/runners.py", line 190, in run + return runner.run(main) + ^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/asyncio/runners.py", line 118, in run + return self._loop.run_until_complete(task) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/asyncio/base_events.py", line 654, in run_until_complete + return future.result() + ^^^^^^^^^^^^^^^ + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 283, in run_comparison + gpt_result, gpt_raw = await gpt_judge.judge( + ^^^^^^^^^^^^^^^^^^^^^^ + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 242, in judge + response = await self.client.chat.completions.create( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/openai/resources/chat/completions/completions.py", line 2678, in create + return await self._post( + ^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/openai/_base_client.py", line 1797, in post + return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/openai/_base_client.py", line 1597, in request + raise self._make_status_error_from_response(err.response) from None +openai.BadRequestError: Error code: 400 - {'error': {'message': 
"Unsupported parameter: 'max_tokens' is not supported with this model. Use 'max_completion_tokens' instead.", 'type': 'invalid_request_error', 'param': 'max_tokens', 'code': 'unsupported_parameter'}} diff --git a/scripts/test_reward_cmp_15667024.out b/scripts/test_reward_cmp_15667024.out new file mode 100644 index 0000000..1a38997 --- /dev/null +++ b/scripts/test_reward_cmp_15667024.out @@ -0,0 +1,16 @@ +=== Reward Model Comparison Test === +Local: Qwen2.5-1.5B-Instruct +API: GPT-5-nano + +================================================================================ +Reward Model Comparison: Qwen2.5-1.5B vs GPT-5-nano +================================================================================ + +Loading models/qwen2.5-1.5b-instruct... +Model loaded. +Running 12 test cases... + +--- Test 1/12: neg_constraint_restate - format preference --- +Expected: neg_constraint_restate + +=== Test Complete === diff --git a/scripts/test_reward_cmp_15667063.err b/scripts/test_reward_cmp_15667063.err new file mode 100644 index 0000000..42c8029 --- /dev/null +++ b/scripts/test_reward_cmp_15667063.err @@ -0,0 +1,32 @@ +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead. + warnings.warn( +`torch_dtype` is deprecated! Use `dtype` instead! 
+Traceback (most recent call last): + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 382, in <module> + main() + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 378, in main + asyncio.run(run_comparison(args.local_model, args.device)) + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/asyncio/runners.py", line 190, in run + return runner.run(main) + ^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/asyncio/runners.py", line 118, in run + return self._loop.run_until_complete(task) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/asyncio/base_events.py", line 654, in run_until_complete + return future.result() + ^^^^^^^^^^^^^^^ + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 283, in run_comparison + gpt_result, gpt_raw = await gpt_judge.judge( + ^^^^^^^^^^^^^^^^^^^^^^ + File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 242, in judge + response = await self.client.chat.completions.create( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/openai/resources/chat/completions/completions.py", line 2678, in create + return await self._post( + ^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/openai/_base_client.py", line 1797, in post + return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/openai/_base_client.py", line 1597, in request + raise self._make_status_error_from_response(err.response) from None +openai.BadRequestError: Error code: 400 - {'error': {'message': 
"Unsupported value: 'temperature' does not support 0.1 with this model. Only the default (1) value is supported.", 'type': 'invalid_request_error', 'param': 'temperature', 'code': 'unsupported_value'}} diff --git a/scripts/test_reward_cmp_15667063.out b/scripts/test_reward_cmp_15667063.out new file mode 100644 index 0000000..1a38997 --- /dev/null +++ b/scripts/test_reward_cmp_15667063.out @@ -0,0 +1,16 @@ +=== Reward Model Comparison Test === +Local: Qwen2.5-1.5B-Instruct +API: GPT-5-nano + +================================================================================ +Reward Model Comparison: Qwen2.5-1.5B vs GPT-5-nano +================================================================================ + +Loading models/qwen2.5-1.5b-instruct... +Model loaded. +Running 12 test cases... + +--- Test 1/12: neg_constraint_restate - format preference --- +Expected: neg_constraint_restate + +=== Test Complete === diff --git a/scripts/test_reward_cmp_15667076.err b/scripts/test_reward_cmp_15667076.err new file mode 100644 index 0000000..1e27a36 --- /dev/null +++ b/scripts/test_reward_cmp_15667076.err @@ -0,0 +1,3 @@ +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead. + warnings.warn( +`torch_dtype` is deprecated! Use `dtype` instead! diff --git a/scripts/test_reward_cmp_15667076.out b/scripts/test_reward_cmp_15667076.out new file mode 100644 index 0000000..67693b1 --- /dev/null +++ b/scripts/test_reward_cmp_15667076.out @@ -0,0 +1,126 @@ +=== Reward Model Comparison Test === +Local: Qwen2.5-1.5B-Instruct +API: GPT-5-nano + +================================================================================ +Reward Model Comparison: Qwen2.5-1.5B vs GPT-5-nano +================================================================================ + +Loading models/qwen2.5-1.5b-instruct... +Model loaded. 
+Running 12 test cases... + +--- Test 1/12: neg_constraint_restate - format preference --- +Expected: neg_constraint_restate + Local (Qwen): neg_constraint_restate (conf=0.95) [2.42s] ✓ + GPT-5-nano: neutral (conf=0.00) [4.97s] ✗ + Agreement: NO + +--- Test 2/12: neg_constraint_restate - step by step --- +Expected: neg_constraint_restate + Local (Qwen): neutral (conf=0.00) [1.04s] ✗ + GPT-5-nano: neutral (conf=0.00) [4.43s] ✗ + Agreement: Yes + +--- Test 3/12: neg_correction - wrong answer --- +Expected: neg_correction + Local (Qwen): neutral (conf=0.00) [0.93s] ✗ + GPT-5-nano: neutral (conf=0.00) [3.53s] ✗ + Agreement: Yes + +--- Test 4/12: neg_confusion - unclear explanation --- +Expected: neg_confusion + Local (Qwen): neutral (conf=0.00) [0.89s] ✗ + GPT-5-nano: neutral (conf=0.00) [2.52s] ✗ + Agreement: Yes + +--- Test 5/12: pos_praise - explicit thanks --- +Expected: pos_praise + Local (Qwen): neutral (conf=0.00) [0.88s] ✗ + GPT-5-nano: pos_praise (conf=0.92) [3.57s] ✓ + Agreement: NO + +--- Test 6/12: pos_praise - great explanation --- +Expected: pos_praise + Local (Qwen): neutral (conf=0.00) [0.92s] ✗ + GPT-5-nano: pos_praise (conf=0.85) [2.19s] ✓ + Agreement: NO + +--- Test 7/12: pos_progress - follow-up question --- +Expected: pos_progress + Local (Qwen): neutral (conf=0.00) [0.95s] ✗ + GPT-5-nano: neutral (conf=0.00) [3.76s] ✗ + Agreement: Yes + +--- Test 8/12: pos_progress - extension --- +Expected: pos_progress + Local (Qwen): neutral (conf=0.00) [1.20s] ✗ + GPT-5-nano: neutral (conf=0.00) [3.37s] ✗ + Agreement: Yes + +--- Test 9/12: neutral - minimal response --- +Expected: neutral + Local (Qwen): neutral (conf=0.00) [0.78s] ✓ + GPT-5-nano: neutral (conf=0.00) [4.21s] ✓ + Agreement: Yes + +--- Test 10/12: topic_shift - new topic --- +Expected: topic_shift + Local (Qwen): neutral (conf=0.00) [1.02s] ✗ + GPT-5-nano: neutral (conf=0.00) [3.86s] ✗ + Agreement: Yes + +--- Test 11/12: neg_constraint_restate - language preference --- +Expected: 
neg_constraint_restate + Local (Qwen): neutral (conf=0.00) [0.94s] ✗ + GPT-5-nano: neutral (conf=0.00) [2.31s] ✗ + Agreement: Yes + +--- Test 12/12: neg_correction - incomplete answer --- +Expected: neg_correction + Local (Qwen): neutral (conf=0.00) [0.90s] ✗ + GPT-5-nano: neutral (conf=0.00) [4.49s] ✗ + Agreement: Yes + +================================================================================ +SUMMARY +================================================================================ +Local (Qwen2.5-1.5B) Accuracy: 16.7% (2/12) +GPT-5-nano Accuracy: 25.0% (3/12) +Agreement Rate: 75.0% + +Local Avg Time: 1.07s +GPT Avg Time: 3.60s +Speedup: 3.4x faster (local) + +Disagreements (3): + - neg_constraint_restate - format preference: Local=neg_constraint_restate, GPT=neutral, Expected=neg_constraint_restate + - pos_praise - explicit thanks: Local=neutral, GPT=pos_praise, Expected=pos_praise + - pos_praise - great explanation: Local=neutral, GPT=pos_praise, Expected=pos_praise + +Local Model Errors (10): + - neg_constraint_restate - step by step: Got neutral, Expected neg_constraint_restate + - neg_correction - wrong answer: Got neutral, Expected neg_correction + - neg_confusion - unclear explanation: Got neutral, Expected neg_confusion + - pos_praise - explicit thanks: Got neutral, Expected pos_praise + - pos_praise - great explanation: Got neutral, Expected pos_praise + - pos_progress - follow-up question: Got neutral, Expected pos_progress + - pos_progress - extension: Got neutral, Expected pos_progress + - topic_shift - new topic: Got neutral, Expected topic_shift + - neg_constraint_restate - language preference: Got neutral, Expected neg_constraint_restate + - neg_correction - incomplete answer: Got neutral, Expected neg_correction + +GPT Model Errors (9): + - neg_constraint_restate - format preference: Got neutral, Expected neg_constraint_restate + - neg_constraint_restate - step by step: Got neutral, Expected neg_constraint_restate + - neg_correction - wrong 
answer: Got neutral, Expected neg_correction + - neg_confusion - unclear explanation: Got neutral, Expected neg_confusion + - pos_progress - follow-up question: Got neutral, Expected pos_progress + - pos_progress - extension: Got neutral, Expected pos_progress + - topic_shift - new topic: Got neutral, Expected topic_shift + - neg_constraint_restate - language preference: Got neutral, Expected neg_constraint_restate + - neg_correction - incomplete answer: Got neutral, Expected neg_correction + +================================================================================ + +=== Test Complete === diff --git a/scripts/test_reward_cmp_15667126.err b/scripts/test_reward_cmp_15667126.err new file mode 100644 index 0000000..91beab2 --- /dev/null +++ b/scripts/test_reward_cmp_15667126.err @@ -0,0 +1,4 @@ +/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead. + warnings.warn( +`torch_dtype` is deprecated! Use `dtype` instead! +
Loading checkpoint shards: 0%| | 0/4 [00:00<?, ?it/s]
Loading checkpoint shards: 25%|██▌ | 1/4 [00:07<00:21, 7.15s/it]
Loading checkpoint shards: 50%|█████ | 2/4 [00:14<00:14, 7.10s/it]
Loading checkpoint shards: 75%|███████▌ | 3/4 [00:21<00:06, 6.97s/it]
Loading checkpoint shards: 100%|██████████| 4/4 [00:22<00:00, 4.91s/it]
Loading checkpoint shards: 100%|██████████| 4/4 [00:22<00:00, 5.70s/it] diff --git a/scripts/test_reward_cmp_15667126.out b/scripts/test_reward_cmp_15667126.out new file mode 100644 index 0000000..1e10566 --- /dev/null +++ b/scripts/test_reward_cmp_15667126.out @@ -0,0 +1,104 @@ +=== Reward Model Comparison Test === +Local: Llama-3.1-8B-Instruct +API: GPT-4o-mini + +================================================================================ +Reward Model Comparison: Llama-3.1-8B vs GPT-4o-mini +================================================================================ + +Loading models/llama-3.1-8b-instruct... +Model loaded. +Running 12 test cases... + +--- Test 1/12: neg_constraint_restate - format preference --- +Expected: neg_constraint_restate + Local (Llama): neg_correction (conf=0.80) [1.48s] ✗ + GPT-4o-mini: neg_correction (conf=0.90) [1.72s] ✗ + Agreement: Yes + +--- Test 2/12: neg_constraint_restate - step by step --- +Expected: neg_constraint_restate + Local (Llama): neg_constraint_restate (conf=0.90) [1.16s] ✓ + GPT-4o-mini: neg_constraint_restate (conf=0.90) [0.95s] ✓ + Agreement: Yes + +--- Test 3/12: neg_correction - wrong answer --- +Expected: neg_correction + Local (Llama): neg_correction (conf=0.90) [1.03s] ✓ + GPT-4o-mini: neg_correction (conf=0.90) [1.01s] ✓ + Agreement: Yes + +--- Test 4/12: neg_confusion - unclear explanation --- +Expected: neg_confusion + Local (Llama): neg_confusion (conf=0.80) [1.20s] ✓ + GPT-4o-mini: neg_confusion (conf=0.90) [1.14s] ✓ + Agreement: Yes + +--- Test 5/12: pos_praise - explicit thanks --- +Expected: pos_praise + Local (Llama): pos_praise (conf=1.00) [0.97s] ✓ + GPT-4o-mini: pos_praise (conf=0.95) [1.32s] ✓ + Agreement: Yes + +--- Test 6/12: pos_praise - great explanation --- +Expected: pos_praise + Local (Llama): pos_praise (conf=1.00) [0.97s] ✓ + GPT-4o-mini: pos_praise (conf=0.95) [1.02s] ✓ + Agreement: Yes + +--- Test 7/12: pos_progress - follow-up question --- +Expected: pos_progress + Local 
(Llama): pos_progress (conf=0.90) [1.35s] ✓ + GPT-4o-mini: pos_progress (conf=0.90) [1.15s] ✓ + Agreement: Yes + +--- Test 8/12: pos_progress - extension --- +Expected: pos_progress + Local (Llama): pos_progress (conf=0.90) [1.33s] ✓ + GPT-4o-mini: pos_progress (conf=0.90) [1.25s] ✓ + Agreement: Yes + +--- Test 9/12: neutral - minimal response --- +Expected: neutral + Local (Llama): neutral (conf=0.80) [1.19s] ✓ + GPT-4o-mini: neutral (conf=0.90) [1.24s] ✓ + Agreement: Yes + +--- Test 10/12: topic_shift - new topic --- +Expected: topic_shift + Local (Llama): topic_shift (conf=0.90) [1.21s] ✓ + GPT-4o-mini: topic_shift (conf=0.90) [1.61s] ✓ + Agreement: Yes + +--- Test 11/12: neg_constraint_restate - language preference --- +Expected: neg_constraint_restate + Local (Llama): neg_constraint_restate (conf=0.80) [1.38s] ✓ + GPT-4o-mini: neg_constraint_restate (conf=0.90) [2.55s] ✓ + Agreement: Yes + +--- Test 12/12: neg_correction - incomplete answer --- +Expected: neg_correction + Local (Llama): neg_correction (conf=0.80) [1.00s] ✓ + GPT-4o-mini: neg_correction (conf=0.90) [2.35s] ✓ + Agreement: Yes + +================================================================================ +SUMMARY +================================================================================ +Local (Llama-3.1-8B) Accuracy: 91.7% (11/12) +GPT-4o-mini Accuracy: 91.7% (11/12) +Agreement Rate: 100.0% + +Local Avg Time: 1.19s +GPT Avg Time: 1.44s +Speedup: 1.2x faster (local) + +Local Model Errors (1): + - neg_constraint_restate - format preference: Got neg_correction, Expected neg_constraint_restate + +GPT Model Errors (1): + - neg_constraint_restate - format preference: Got neg_correction, Expected neg_constraint_restate + +================================================================================ + +=== Test Complete === diff --git a/scripts/test_reward_comparison.py b/scripts/test_reward_comparison.py new file mode 100644 index 0000000..baddbdf --- /dev/null +++ 
#!/usr/bin/env python3
"""
Compare GPT-4o-mini vs Llama-3.1-8B for reward classification.

Tests both models on the same scenarios using the same prompt.
"""
import argparse
import asyncio
import json
import os
import re
import sys
import time
from dataclasses import dataclass
from typing import List, Dict, Optional, Tuple

# Add src to path so sibling project modules (e.g. llm_reward) are importable.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from openai import AsyncOpenAI

# Same prompt as llm_reward.py -- keep byte-identical so results are comparable.
JUDGE_SYSTEM_PROMPT = """\
You are a feedback classifier. Given a user query (q_t), the assistant's response (a_t), \
and the user's next message (q_{t+1}), classify the user's follow-up into exactly one label.

Labels (mutually exclusive):
- neg_constraint_restate: User reasserts constraints/preferences as correction (e.g., "as I said…", "remember…", "按我说的…").
- neg_correction: User indicates the content is wrong or the assistant failed to answer.
- neg_confusion: User indicates confusion or requests re-explanation.
- pos_praise: Explicit praise or satisfaction with the response.
- pos_progress: Constructive continuation (examples, extensions, what-if, next steps) without complaint.
- neutral: Ambiguous or minimal feedback, not clearly positive or negative.
- topic_shift: User switches to a new unrelated task/topic.

Output a JSON object with fields: label, confidence (0-1), rationale (one short sentence)."""

JUDGE_USER_TEMPLATE = """\
q_t: {query_t}

a_t: {answer_t}

q_{{t+1}}: {query_t1}"""

# Scalar reward assigned to each label (used by the training pipeline).
REWARD_MAP = {
    "neg_constraint_restate": -1.0,
    "neg_correction": -0.8,
    "neg_confusion": -0.6,
    "pos_praise": +0.8,
    "pos_progress": +0.1,
    "neutral": 0.0,
    "topic_shift": 0.0,
}

VALID_LABELS = set(REWARD_MAP.keys())

# Test cases with expected labels.
TEST_CASES = [
    {
        "name": "neg_constraint_restate - format preference",
        "query_t": "Explain how sorting works in Python. Please use bullet points.",
        "answer_t": "Sorting in Python can be done using the sorted() function or the list.sort() method. The sorted() function returns a new sorted list, while sort() modifies the list in place. Both accept a key parameter for custom sorting and a reverse parameter for descending order.",
        "query_t1": "I asked for bullet points. Can you reformat that with bullet points please?",
        "expected": "neg_constraint_restate",
    },
    {
        "name": "neg_constraint_restate - step by step",
        "query_t": "Solve x^2 - 5x + 6 = 0. Show step by step.",
        "answer_t": "The solutions are x = 2 and x = 3.",
        "query_t1": "As I said, I need to see the step-by-step solution, not just the answer.",
        "expected": "neg_constraint_restate",
    },
    {
        "name": "neg_correction - wrong answer",
        "query_t": "What is the capital of Australia?",
        "answer_t": "The capital of Australia is Sydney.",
        "query_t1": "That's incorrect. Sydney is not the capital of Australia.",
        "expected": "neg_correction",
    },
    {
        "name": "neg_confusion - unclear explanation",
        "query_t": "What is recursion in programming?",
        "answer_t": "Recursion is when a function calls itself in a self-similar way to solve problems.",
        "query_t1": "I'm confused. What do you mean by 'self-similar way'? Can you explain more clearly?",
        "expected": "neg_confusion",
    },
    {
        "name": "pos_praise - explicit thanks",
        "query_t": "How do I center a div in CSS?",
        "answer_t": "You can center a div using flexbox: set the parent to `display: flex; justify-content: center; align-items: center;`. Alternatively, use `margin: 0 auto;` for horizontal centering with a defined width.",
        "query_t1": "Perfect, thank you! That's exactly what I needed.",
        "expected": "pos_praise",
    },
    {
        "name": "pos_praise - great explanation",
        "query_t": "Explain how photosynthesis works.",
        "answer_t": "Photosynthesis is the process by which plants convert sunlight, water, and CO2 into glucose and oxygen. It occurs in chloroplasts, with light-dependent reactions in the thylakoid membrane and the Calvin cycle in the stroma.",
        "query_t1": "Great explanation! This really helped me understand the concept.",
        "expected": "pos_praise",
    },
    {
        "name": "pos_progress - follow-up question",
        "query_t": "What is a binary search tree?",
        "answer_t": "A binary search tree (BST) is a data structure where each node has at most two children. The left subtree contains only nodes with values less than the parent, and the right subtree only nodes with values greater than the parent.",
        "query_t1": "Interesting! How would I implement insertion into a BST?",
        "expected": "pos_progress",
    },
    {
        "name": "pos_progress - extension",
        "query_t": "How do I read a file in Python?",
        "answer_t": "Use `with open('file.txt', 'r') as f: content = f.read()`. The 'with' statement ensures the file is properly closed.",
        "query_t1": "Got it. What if I want to read it line by line instead?",
        "expected": "pos_progress",
    },
    {
        "name": "neutral - minimal response",
        "query_t": "What's 2 + 2?",
        "answer_t": "2 + 2 = 4",
        "query_t1": "Ok.",
        "expected": "neutral",
    },
    {
        "name": "topic_shift - new topic",
        "query_t": "What is the Pythagorean theorem?",
        "answer_t": "The Pythagorean theorem states that in a right triangle, a² + b² = c², where c is the hypotenuse.",
        "query_t1": "By the way, can you help me write a poem about nature?",
        "expected": "topic_shift",
    },
    {
        "name": "neg_constraint_restate - language preference",
        "query_t": "Explain machine learning in simple terms.",
        "answer_t": "Machine learning is a subset of artificial intelligence that uses statistical techniques to enable computers to learn from data. It involves training models on datasets to make predictions or decisions without being explicitly programmed for specific tasks.",
        "query_t1": "Remember I asked for simple terms? That's too technical. Can you explain like I'm 5?",
        "expected": "neg_constraint_restate",
    },
    {
        "name": "neg_correction - incomplete answer",
        "query_t": "List all the planets in our solar system.",
        "answer_t": "The planets are Mercury, Venus, Earth, Mars, Jupiter, and Saturn.",
        "query_t1": "You're missing Uranus and Neptune. There are 8 planets, not 6.",
        "expected": "neg_correction",
    },
]


def parse_json_response(text: str) -> Dict:
    """Parse a JSON object out of a model response.

    Handles responses wrapped in markdown code fences, then falls back to a
    regex scan for a flat brace-delimited object. On total failure, returns a
    neutral label with zero confidence so callers never crash on bad output.
    """
    text = text.strip()
    # Remove markdown code blocks if present (closing fence may be missing).
    if text.startswith("```"):
        lines = text.split("\n")
        text = "\n".join(lines[1:-1] if lines[-1].strip() == "```" else lines[1:])
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        # Fall back: grab the first {...} span without nested braces.
        # ([^}] already matches newlines, so no DOTALL flag is needed.)
        match = re.search(r'\{[^}]+\}', text)
        if match:
            try:
                return json.loads(match.group())
            except json.JSONDecodeError:
                pass
    return {"label": "neutral", "confidence": 0.0, "rationale": "parse_error"}


class LocalLLMJudge:
    """Feedback-label judge backed by a local causal LM (Llama-3.1-8B by default)."""

    def __init__(self, model_path: str, device: str = "cuda"):
        self.device = device
        print(f"Loading {model_path}...")
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        # NOTE(review): `torch_dtype` is deprecated in newer transformers in
        # favor of `dtype` (the job log shows the warning) -- switch once the
        # pinned transformers version supports it.
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            torch_dtype=torch.bfloat16,
            device_map=device,
        )
        self.model.eval()
        print("Model loaded.")

    def judge(self, query_t: str, answer_t: str, query_t1: str) -> Tuple[Dict, str]:
        """Classify a single turn.

        Returns (parsed_result, raw_model_text).
        """
        user_content = JUDGE_USER_TEMPLATE.format(
            query_t=query_t,
            answer_t=answer_t,
            query_t1=query_t1,
        )

        messages = [
            {"role": "system", "content": JUDGE_SYSTEM_PROMPT},
            {"role": "user", "content": user_content},
        ]

        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )

        inputs = self.tokenizer(text, return_tensors="pt").to(self.device)

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=256,
                temperature=0.1,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
            )

        # Decode only the newly generated tokens (strip the prompt prefix).
        response = self.tokenizer.decode(
            outputs[0][inputs.input_ids.shape[1]:],
            skip_special_tokens=True,
        )

        return parse_json_response(response), response


class GPTJudge:
    """Feedback-label judge backed by gpt-4o-mini via the OpenAI API."""

    def __init__(self):
        self.client = AsyncOpenAI(
            api_key=os.getenv("OPENAI_API_KEY"),
        )

    async def judge(self, query_t: str, answer_t: str, query_t1: str) -> Tuple[Dict, str]:
        """Classify a single turn.

        Returns (parsed_result, raw_model_text).
        """
        user_content = JUDGE_USER_TEMPLATE.format(
            query_t=query_t,
            answer_t=answer_t,
            query_t1=query_t1,
        )

        messages = [
            {"role": "system", "content": JUDGE_SYSTEM_PROMPT},
            {"role": "user", "content": user_content},
        ]

        # json_object mode forces the model to emit valid JSON.
        response = await self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            max_tokens=256,
            temperature=0.1,
            response_format={"type": "json_object"},
        )

        raw = response.choices[0].message.content
        return parse_json_response(raw), raw


async def run_comparison(local_model_path: str, device: str = "cuda"):
    """Run comparison between the local LLM judge and gpt-4o-mini.

    Runs every TEST_CASE through both judges, prints per-case results and a
    summary (accuracy, agreement, timing, per-model errors), and returns the
    per-case result dicts.
    """

    print("=" * 80)
    print("Reward Model Comparison: Llama-3.1-8B vs GPT-4o-mini")
    print("=" * 80)
    print()

    # Initialize models
    local_judge = LocalLLMJudge(local_model_path, device)
    gpt_judge = GPTJudge()

    results = []

    print(f"Running {len(TEST_CASES)} test cases...\n")

    for i, tc in enumerate(TEST_CASES, 1):
        print(f"--- Test {i}/{len(TEST_CASES)}: {tc['name']} ---")
        print(f"Expected: {tc['expected']}")

        # Local model
        t0 = time.time()
        local_result, local_raw = local_judge.judge(
            tc["query_t"], tc["answer_t"], tc["query_t1"]
        )
        local_time = time.time() - t0

        # GPT model
        t0 = time.time()
        gpt_result, gpt_raw = await gpt_judge.judge(
            tc["query_t"], tc["answer_t"], tc["query_t1"]
        )
        gpt_time = time.time() - t0

        local_label = local_result.get("label", "unknown")
        local_conf = local_result.get("confidence", 0.0)
        gpt_label = gpt_result.get("label", "unknown")
        gpt_conf = gpt_result.get("confidence", 0.0)

        # Check correctness against the expected label, and cross-model agreement.
        local_correct = local_label == tc["expected"]
        gpt_correct = gpt_label == tc["expected"]
        agreement = local_label == gpt_label

        print(f"  Local (Llama): {local_label} (conf={local_conf:.2f}) [{local_time:.2f}s] {'✓' if local_correct else '✗'}")
        print(f"  GPT-4o-mini:   {gpt_label} (conf={gpt_conf:.2f}) [{gpt_time:.2f}s] {'✓' if gpt_correct else '✗'}")
        print(f"  Agreement: {'Yes' if agreement else 'NO'}")
        print()

        results.append({
            "name": tc["name"],
            "expected": tc["expected"],
            "local_label": local_label,
            "local_conf": local_conf,
            "local_time": local_time,
            "local_correct": local_correct,
            "gpt_label": gpt_label,
            "gpt_conf": gpt_conf,
            "gpt_time": gpt_time,
            "gpt_correct": gpt_correct,
            "agreement": agreement,
        })

    # Summary
    print("=" * 80)
    print("SUMMARY")
    print("=" * 80)

    local_accuracy = sum(r["local_correct"] for r in results) / len(results)
    gpt_accuracy = sum(r["gpt_correct"] for r in results) / len(results)
    agreement_rate = sum(r["agreement"] for r in results) / len(results)

    local_avg_time = sum(r["local_time"] for r in results) / len(results)
    gpt_avg_time = sum(r["gpt_time"] for r in results) / len(results)

    print(f"Local (Llama-3.1-8B) Accuracy: {local_accuracy*100:.1f}% ({sum(r['local_correct'] for r in results)}/{len(results)})")
    print(f"GPT-4o-mini Accuracy:          {gpt_accuracy*100:.1f}% ({sum(r['gpt_correct'] for r in results)}/{len(results)})")
    print(f"Agreement Rate: {agreement_rate*100:.1f}%")
    print()
    print(f"Local Avg Time: {local_avg_time:.2f}s")
    print(f"GPT Avg Time:   {gpt_avg_time:.2f}s")
    # Report which side was actually faster (the old code always claimed
    # "local" regardless of the ratio) and guard against a zero denominator.
    if local_avg_time > 0 and gpt_avg_time > 0:
        ratio = gpt_avg_time / local_avg_time
        if ratio >= 1.0:
            print(f"Speedup: {ratio:.1f}x faster (local)")
        else:
            print(f"Speedup: {1.0 / ratio:.1f}x faster (GPT)")

    # Show disagreements
    disagreements = [r for r in results if not r["agreement"]]
    if disagreements:
        print()
        print(f"Disagreements ({len(disagreements)}):")
        for r in disagreements:
            print(f"  - {r['name']}: Local={r['local_label']}, GPT={r['gpt_label']}, Expected={r['expected']}")

    # Show errors by model
    local_errors = [r for r in results if not r["local_correct"]]
    gpt_errors = [r for r in results if not r["gpt_correct"]]

    if local_errors:
        print()
        print(f"Local Model Errors ({len(local_errors)}):")
        for r in local_errors:
            print(f"  - {r['name']}: Got {r['local_label']}, Expected {r['expected']}")

    if gpt_errors:
        print()
        print(f"GPT Model Errors ({len(gpt_errors)}):")
        for r in gpt_errors:
            print(f"  - {r['name']}: Got {r['gpt_label']}, Expected {r['expected']}")

    print()
    print("=" * 80)

    return results


def main():
    """CLI entry point: parse args and run the async comparison."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--local-model",
        type=str,
        default="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct",
        help="Path to local model",
    )
    parser.add_argument("--device", type=str, default="cuda")
    args = parser.parse_args()

    asyncio.run(run_comparison(args.local_model, args.device))


if __name__ == "__main__":
    main()
#!/bin/bash
#SBATCH --job-name=test_reward_cmp
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA100x4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH --gres=gpu:nvidia_a100:1
#SBATCH --mem=32G
#SBATCH --time=0:30:00
#SBATCH --output=test_reward_cmp_%j.out
#SBATCH --error=test_reward_cmp_%j.err

# Compare Llama-3.1-8B vs GPT-4o-mini for reward classification.
# Tests 12 scenarios with expected labels.

# Bail out if the project directory is missing; otherwise `source .env` and
# the relative --local-model path below would silently resolve elsewhere.
cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model || exit 1
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="${PWD}/src:${PYTHONPATH}"

# Load OpenAI API key: auto-export every variable defined in .env.
set -a
source .env
set +a

echo "=== Reward Model Comparison Test ==="
echo "Local: Llama-3.1-8B-Instruct"
echo "API: GPT-4o-mini"
echo ""

python scripts/test_reward_comparison.py \
    --local-model models/llama-3.1-8b-instruct \
    --device cuda

echo ""
echo "=== Test Complete ==="
