Diffstat (limited to 'collaborativeagents/scripts/controlled_test.sbatch')
-rw-r--r--  collaborativeagents/scripts/controlled_test.sbatch | 173
1 file changed, 173 insertions(+), 0 deletions(-)
diff --git a/collaborativeagents/scripts/controlled_test.sbatch b/collaborativeagents/scripts/controlled_test.sbatch
new file mode 100644
index 0000000..607b93b
--- /dev/null
+++ b/collaborativeagents/scripts/controlled_test.sbatch
@@ -0,0 +1,173 @@
+#!/bin/bash
+#SBATCH --job-name=ctrl_test
+#SBATCH --account=bfqt-delta-gpu
+#SBATCH --partition=gpuH200x8-interactive
+#SBATCH --nodes=1
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=32
+#SBATCH --gres=gpu:4
+#SBATCH --mem=200G
+#SBATCH --time=00:45:00
+#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/ctrl_test-%j.out
+#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/ctrl_test-%j.err
+
+# Controlled Test: Same user profile, same questions, 3 methods
+# Tests:
+# 1. Stronger user enforcement prompts
+# 2. Memory retrieval debug output
+# 3. Comparison across vanilla/rag/rag_vector
+
+cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents
+source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
+conda activate eval
+export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
+export PYTHONPATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/src:$PYTHONPATH"
+
+# Use first profile only for controlled comparison
+PROFILE_PATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/data/complex_profiles_v2/profiles_200.jsonl"
+AGENT_MODEL="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
+USER_MODEL="meta-llama/Llama-3.1-70B-Instruct"
+MEMORY_STORE="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/data/corpora/empty_store"
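+
+# (added) Fail fast on missing site-specific paths; a minimal sanity check,
+# assuming the profile file and the local agent checkpoint must exist up front.
+for p in "$PROFILE_PATH" "$AGENT_MODEL"; do
+    if [ ! -e "$p" ]; then
+        echo "Missing required path: $p" >&2
+        exit 1
+    fi
+done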
+
+echo "=== Controlled Comparison Test ==="
+echo "Same user profile (1st), same 15 questions, 3 methods"
+echo "Testing: stronger enforcement + retrieval debug"
+date
+nvidia-smi --query-gpu=index,name,memory.total --format=csv
+
+# Start vLLM servers
+CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
+ --model $USER_MODEL \
+ --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
+ --max-model-len 16384 --dtype bfloat16 --download-dir $HF_HOME &
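+USER_VLLM_PID=$!  # (added) remember the server PID for targeted cleanup at the end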
+
+CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
+ --model $AGENT_MODEL \
+ --port 8003 --tensor-parallel-size 2 --gpu-memory-utilization 0.45 \
+ --max-model-len 16384 --dtype bfloat16 &
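+AGENT_VLLM_PID=$!  # (added) likewise for the agent server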
+
+echo "Waiting for vLLM servers..."
+for i in {1..200}; do
+ if curl -s http://localhost:8004/health > /dev/null 2>&1; then
+ echo "User simulator ready after $((i*5))s"
+ break
+ fi
+ sleep 5
+done
+for i in {1..60}; do
+ if curl -s http://localhost:8003/health > /dev/null 2>&1; then
+ echo "Agent ready after $((i*5))s"
+ break
+ fi
+ sleep 5
+done
+sleep 5
+
+OUTPUT_DIR="../results/controlled_test_$(date +%Y%m%d_%H%M%S)"
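+mkdir -p "$OUTPUT_DIR"  # (added) harmless guard; run_experiments.py may create it as well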
+
+# Run each method with SAME user (1 profile, 15 sessions)
+for METHOD in vanilla rag rag_vector; do
+ echo ""
+ echo "============================================"
+ echo "Testing: $METHOD"
+ echo "============================================"
+
+ # Clear memory store before each method (fresh start)
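+    mkdir -p "$MEMORY_STORE"  # (added) guard: ensure the store dir exists before truncating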
+    > "${MEMORY_STORE}/memory_cards.jsonl"
+    rm -f "${MEMORY_STORE}/memory_embeddings.npy"
+ echo "Memory store cleared"
+
+ date
+ python scripts/run_experiments.py --methods $METHOD \
+ --datasets math-hard --n-profiles 1 --n-sessions 15 --max-turns 15 \
+ --use-vllm --no-batch-processing --parallel-profiles 1 \
+ --output-dir $OUTPUT_DIR --profile-path $PROFILE_PATH
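+
+    # (added) a minimal check, assuming run_experiments.py exits non-zero on
+    # failure; warn but continue so the remaining methods still run
+    if [ $? -ne 0 ]; then
+        echo "WARNING: $METHOD run exited with non-zero status" >&2
+    fi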
+
+ echo "Method $METHOD completed"
+
+ # Show memory count for rag methods
+ if [ "$METHOD" != "vanilla" ]; then
+ echo "Final memory cards: $(wc -l < ${MEMORY_STORE}/memory_cards.jsonl)"
+ fi
+done
+
+echo ""
+echo "=== Done ==="
+date
+
+# Generate comparison summary
+python3 << 'EOF'
+import json
+import os
+from pathlib import Path
+
+output_base = sorted(Path("../results").glob("controlled_test_*"))[-1]
+print(f"\n=== Comparison Summary ===\n")
+print(f"Results dir: {output_base}")
+
+methods = ["vanilla", "rag", "rag_vector"]
+results = {}
+
+# Results are expected under <run_dir>/<dataset>/<method>/results.json;
+# only the first dataset directory is summarized (a single dataset runs above).
+for subdir in output_base.iterdir():
+    if subdir.is_dir():
+        for method in methods:
+            result_file = subdir / method / "results.json"
+            if result_file.exists():
+                with open(result_file) as f:
+                    results[method] = json.load(f)
+        break
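+# (added) flag any method that produced no results.json (e.g. a crashed run)
+missing = [m for m in methods if m not in results]
+if missing:
+    print(f"WARNING: no results found for: {', '.join(missing)}")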
+
+if results:
+    print(f"\n{'Metric':<25} {'vanilla':<12} {'rag':<12} {'rag_vector':<12}")
+    print("-" * 60)
+
+    def avg(data, key):
+        return sum(r['metrics'][key] for r in data) / len(data)
+
+    def print_row(label, key, fmt):
+        # One row per metric; '-' fills the column for any method without
+        # results so the header above stays aligned.
+        print(f"{label:<25} ", end="")
+        for method in methods:
+            if method in results:
+                print(f"{avg(results[method], key):<12{fmt}} ", end="")
+            else:
+                print(f"{'-':<12} ", end="")
+        print()
+
+    print_row("Task Success", "task_success", ".1%")
+    print_row("Avg Turns", "total_turns", ".1f")
+    print_row("Avg Enforcement", "enforcement_count", ".1f")
+
+
+    # Session-by-session comparison; sized to the longest run, not just
+    # vanilla's, and '-' marks sessions a method is missing.
+    print("\n=== Session-by-Session Turns ===")
+    print(f"{'Session':<10} {'vanilla':<12} {'rag':<12} {'rag_vector':<12}")
+    print("-" * 50)
+    n_sessions = max(len(v) for v in results.values())
+    for i in range(min(15, n_sessions)):
+        print(f"{i+1:<10} ", end="")
+        for method in methods:
+            if method in results and i < len(results[method]):
+                turns = results[method][i]['metrics']['total_turns']
+                print(f"{turns:<12} ", end="")
+            else:
+                print(f"{'-':<12} ", end="")
+        print()
+EOF
+
+# Shut down the vLLM servers: try the tracked PIDs first, then fall back to
+# a pattern match in case the PIDs were lost.
+kill "$USER_VLLM_PID" "$AGENT_VLLM_PID" 2>/dev/null || true
+pkill -f "vllm.entrypoints" 2>/dev/null || true