#!/bin/bash
#SBATCH --job-name=test_all
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuH200x8
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:4
#SBATCH --mem=200G
#SBATCH --time=02:00:00
#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/test_all_methods_%j.out
#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/test_all_methods_%j.err
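# GPU layout on this node: GPUs 0-1 host the 70B user simulator,
# GPUs 2-3 host the 8B agent (see the CUDA_VISIBLE_DEVICES lines below).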
cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents || exit 1
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval
export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/src:$PYTHONPATH"
PROFILE_PATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/data/complex_profiles_v2/profiles_200.jsonl"
AGENT_MODEL="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
USER_MODEL="meta-llama/Llama-3.1-70B-Instruct"
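# Sketch (optional): fail fast if the job-specific profile file or local
# checkpoint is missing, rather than erroring midway through the run.
for p in "$PROFILE_PATH" "$AGENT_MODEL"; do
    [ -e "$p" ] || { echo "ERROR: missing path: $p" >&2; exit 1; }
done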
echo "=== Small-scale test: ALL methods with 70B user sim ==="
echo "Scale: 5 profiles × 3 sessions = 15 sessions per method"
date
nvidia-smi --query-gpu=index,name,memory.total --format=csv
# Start 70B user simulator on GPUs 0,1
echo ""
echo "Starting 70B user simulator..."
CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
    --model "$USER_MODEL" \
    --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
    --max-model-len 8192 --dtype bfloat16 --download-dir "$HF_HOME" &
USER_PID=$!
# Start 8B agent on GPUs 2,3 (0.45 GPU memory utilization leaves headroom for the RAG embedding/reranker models)
echo "Starting 8B agent (0.45 memory for embedding/reranker)..."
CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
    --model "$AGENT_MODEL" \
    --port 8003 --tensor-parallel-size 2 --gpu-memory-utilization 0.45 \
    --max-model-len 8192 --dtype bfloat16 &
AGENT_PID=$!
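# Sketch (optional): safety net so both vLLM servers are killed even if the
# job aborts early; the pkill at the end still catches any stragglers.
trap 'kill $USER_PID $AGENT_PID 2>/dev/null' EXIT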
# Wait for servers
echo "Waiting for vLLM servers (70B takes ~8 min)..."
for i in {1..200}; do
    if curl -s http://localhost:8004/health > /dev/null 2>&1; then
        echo "70B user simulator ready after $((i*5))s"
        break
    fi
    sleep 5
done
for i in {1..60}; do
    if curl -s http://localhost:8003/health > /dev/null 2>&1; then
        echo "8B agent ready after $((i*5))s"
        break
    fi
    sleep 5
done
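# Sketch: abort if either server never reported healthy within the wait
# loops above (same /health endpoints and ports as used there).
if ! curl -s http://localhost:8004/health > /dev/null 2>&1 || \
   ! curl -s http://localhost:8003/health > /dev/null 2>&1; then
    echo "ERROR: a vLLM server failed to start; aborting." >&2
    exit 1
fi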
echo ""
echo "=== GPU Memory after vLLM servers ==="
nvidia-smi --query-gpu=index,memory.used,memory.total --format=csv
# Test each method sequentially
for METHOD in vanilla contextual reflection all_memory rag rag_vector; do
    echo ""
    echo "=============================================="
    echo "Testing method: $METHOD"
    echo "=============================================="
    date
    python scripts/run_experiments.py --methods "$METHOD" \
        --datasets math-hard --n-profiles 5 --n-sessions 3 --max-turns 10 \
        --use-vllm --no-batch-processing --parallel-profiles 5 \
        --output-dir ../results/test_all_methods --profile-path "$PROFILE_PATH"
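    # Sketch: capture the exit status so a failing method is flagged in the
    # log rather than silently skipped (status comes from the command above).
    RC=$?
    [ "$RC" -ne 0 ] && echo "WARNING: $METHOD exited with status $RC" >&2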
echo ""
echo "=== GPU Memory after $METHOD ==="
nvidia-smi --query-gpu=index,memory.used,memory.total --format=csv
done
echo ""
echo "=============================================="
echo "ALL METHODS TESTED"
echo "=============================================="
date
pkill -f "vllm.entrypoints" 2>/dev/null || true