path: root/collaborativeagents/scripts/fullscale_vanilla.sbatch
#!/bin/bash
#SBATCH --job-name=fs_vanilla
#SBATCH --partition=gpuH200x8
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:4
#SBATCH --mem=200G
#SBATCH --time=8:00:00
#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/fs_vanilla-%j.out
#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/fs_vanilla-%j.err
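# Resources: one node with 4 GPUs (split 2+2 across the two vLLM servers started below),
# 32 CPUs, 200 GB of RAM, and an 8-hour wall-time limit.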

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval
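# HF_HOME redirects the Hugging Face cache to project storage; PYTHONPATH is assumed
# to expose the repo's src/ package to run_experiments.py.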
export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/src:$PYTHONPATH"

PROFILE_PATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/data/user_profiles.jsonl"

# Start vLLM servers
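# Llama-3.1-70B-Instruct (AWQ INT4) on GPUs 0-1 with tensor parallelism 2,
# exposed as an OpenAI-compatible API on port 8004.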
CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
    --model hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4 \
    --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
    --max-model-len 8192 --dtype float16 --download-dir "$HF_HOME" &

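# Locally stored Llama-3.1-8B-Instruct checkpoint on GPUs 2-3 with tensor parallelism 2,
# exposed on port 8003.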
CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
    --model /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct \
    --port 8003 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
    --max-model-len 8192 --dtype bfloat16 &

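# Poll both servers' /health endpoints for up to 5 minutes (60 attempts x 5 s); the loop
# falls through after the last attempt even if a server is still down. Then allow a 30 s
# grace period before launching the experiment.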
for _ in {1..60}; do
    if curl -sf http://localhost:8004/health > /dev/null && curl -sf http://localhost:8003/health > /dev/null; then
        break
    fi
    sleep 5
done
sleep 30

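# Run the vanilla baseline on the math-hard dataset: 200 profiles x 30 sessions,
# up to 15 turns each, batched (presumably against the two local vLLM endpoints).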
python scripts/run_experiments.py --methods vanilla \
    --datasets math-hard --n-profiles 200 --n-sessions 30 --max-turns 15 \
    --use-vllm --use-batch-processing --batch-size 100 --parallel-profiles 50 \
    --output-dir ../results/fullscale --profile-path "$PROFILE_PATH"

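# Shut down the background vLLM servers so the job exits cleanly.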
pkill -f "vllm.entrypoints" 2>/dev/null || true