#!/bin/bash
#SBATCH --job-name=exp_contextual
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuH200x8
#SBATCH --gres=gpu:4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=32
#SBATCH --mem=256G
#SBATCH --time=24:00:00
#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/exp_contextual-%j.out
#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/exp_contextual-%j.err

set -eo pipefail

# Clean up the background vLLM servers on every exit path: with set -e
# alone, a failed experiment would abort before the final pkill runs.
trap 'pkill -f "vllm.entrypoints" 2>/dev/null || true' EXIT
cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="${PWD}:${PWD}/scripts:${PWD}/../src${PYTHONPATH:+:$PYTHONPATH}"
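
# Optional: log the vLLM version so a run is reproducible from the job
# output alone (assumes the eval env can import vllm, which the servers
# below require anyway).
python -c "import vllm; print('vLLM version:', vllm.__version__)" || true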

MODEL_8B="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
PROFILE_PATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/data/complex_profiles_v2/profiles_200.jsonl"

echo "=== contextual (vLLM-based) ==="
date
nvidia-smi --query-gpu=index,name,memory.total --format=csv

pkill -f "vllm.entrypoints" 2>/dev/null || true
sleep 2
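
# Optional sanity check: warn if ports 8003/8004 are still bound after
# the pkill above (ss is standard on modern Linux; skip if unavailable).
ss -tln 2>/dev/null | grep -E ':800[34]\b' \
    && echo "WARNING: a vLLM port is still in use" || true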

# GPU 0,1: vLLM server for user simulator (port 8004)
echo "Starting user simulator vLLM server on GPU 0,1 (port 8004)..."
CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
    --model "$MODEL_8B" --port 8004 --tensor-parallel-size 2 \
    --gpu-memory-utilization 0.90 --max-model-len 8192 \
    --disable-log-requests --dtype bfloat16 &

# GPU 2,3: vLLM server for agent (port 8003)
echo "Starting agent vLLM server on GPU 2,3 (port 8003)..."
CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
    --model "$MODEL_8B" --port 8003 --tensor-parallel-size 2 \
    --gpu-memory-utilization 0.90 --max-model-len 8192 \
    --disable-log-requests --dtype bfloat16 &

echo "Waiting for vLLM servers..."
for i in $(seq 1 200); do
    user_ready=$(curl -s http://localhost:8004/health > /dev/null 2>&1 && echo 1 || echo 0)
    agent_ready=$(curl -s http://localhost:8003/health > /dev/null 2>&1 && echo 1 || echo 0)
    if [ "$user_ready" = "1" ] && [ "$agent_ready" = "1" ]; then
        echo "Both servers ready after $((i*3))s"; break
    fi
    sleep 3
done
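
# Optional: record which model each server actually loaded; /v1/models
# is part of the OpenAI-compatible API that vLLM serves.
echo "User simulator models:"; curl -s http://localhost:8004/v1/models || true
echo "Agent models:";          curl -s http://localhost:8003/v1/models || true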

# Run experiment (uses vLLM HTTP API, no local GPU needed)
python scripts/run_experiments.py \
    --methods contextual \
    --datasets math-hard,math-500,bigcodebench \
    --n-profiles 200 --n-sessions 30 --max-turns 15 \
    --use-vllm --parallel-profiles 50 \
    --output-dir ../results/full_h200 \
    --profile-path "$PROFILE_PATH"
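
# Optional: list what the run produced (path matches --output-dir above).
ls -lh ../results/full_h200 2>/dev/null | head -n 20 || true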

pkill -f "vllm.entrypoints" 2>/dev/null || true
echo "Done: $(date)"