summaryrefslogtreecommitdiff
path: root/collaborativeagents/slurm/run_grpo_training.sh
blob: 4f9e3f1b93391a7606816a138b83d92a4195fc78 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
#!/bin/bash
# SLURM batch script: GRPO training of the reflection model with an
# LLM-as-judge served by vLLM on the same node (judge on GPUs 2-3,
# trainer on GPUs 0-1 — see CUDA_VISIBLE_DEVICES splits below).
#SBATCH --job-name=grpo_refl
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA100x4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=16
#SBATCH --gres=gpu:4
# 200G host RAM to cover model loading + dataloader workers.
#SBATCH --mem=200G
#SBATCH --time=48:00:00
# %j expands to the SLURM job id.
# NOTE(review): these paths are relative to the SUBMISSION directory and the
# file is opened at job start — the mkdir inside the script runs too late to
# create it. Presumably jobs are submitted from collaborativeagents/slurm/
# where logs/ already exists; confirm, or the job will fail to write output.
#SBATCH --output=logs/grpo_reflection_%j.out
#SBATCH --error=logs/grpo_reflection_%j.err

# --- Environment setup ----------------------------------------------------
# Strict mode: exit on error, error on unset variables, and fail a pipeline
# if any stage fails (plain `set -e` misses pipeline failures).
set -euo pipefail

# Work from the project root; die loudly if the path is wrong rather than
# relying on -e semantics for `cd`.
cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model || exit 1
mkdir -p collaborativeagents/slurm/logs collaborativeagents/training/outputs

# Activate the conda env used for training/eval.
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

# Shared HF cache on the project filesystem (avoids per-job model downloads).
export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
# ${PYTHONPATH:-} guards against an unset PYTHONPATH: the original bare
# expansion would trip `set -u`, and even without -u it left a trailing ':'
# (an empty PYTHONPATH entry implicitly adds the current directory).
export PYTHONPATH="${PWD}/src:${PWD}/collaborativeagents:${PYTHONPATH:-}"
# NOTE(review): presumably a workaround for NCCL P2P hangs on this cluster's
# A100 nodes — confirm it is still needed.
export NCCL_P2P_DISABLE=1

# --- Judge model served via vLLM -------------------------------------------
# AWQ INT4 quantized 70B judge: fits on 2 A100s with tensor parallelism.
JUDGE_MODEL="hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4"
JUDGE_PORT=8000

# Start the OpenAI-compatible vLLM server in the background on GPUs 2,3;
# GPUs 0,1 are reserved for the GRPO trainer launched below.
echo "=== Starting vLLM judge server ==="
CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
    --model "$JUDGE_MODEL" \
    --port "$JUDGE_PORT" \
    --tensor-parallel-size 2 \
    --max-model-len 8192 \
    --dtype auto \
    --trust-remote-code &

VLLM_PID=$!
echo "vLLM server PID: $VLLM_PID"

# Poll the health endpoint for up to 10 minutes (60 probes x 10s).
# BUG FIX: the original loop fell through after 60 failed probes and the
# script went on to train against a dead judge; now we abort instead.
echo "Waiting for vLLM server to start..."
server_ready=0
for _ in {1..60}; do
    if curl -sf "http://localhost:$JUDGE_PORT/health" > /dev/null 2>&1; then
        echo "vLLM server is ready!"
        server_ready=1
        break
    fi
    # Bail out early if the server process already died (e.g. OOM at load);
    # kill -0 only tests existence, it sends no signal.
    if ! kill -0 "$VLLM_PID" 2>/dev/null; then
        echo "ERROR: vLLM server exited before becoming healthy" >&2
        exit 1
    fi
    sleep 10
done

if [[ "$server_ready" -ne 1 ]]; then
    echo "ERROR: vLLM server not healthy after 10 minutes; aborting" >&2
    kill "$VLLM_PID" 2>/dev/null || true
    exit 1
fi

# --- GRPO training ----------------------------------------------------------
# BUG FIX: under `set -e`, a training failure exited the script before the
# `kill $VLLM_PID` below ever ran, orphaning a 2-GPU vLLM server for the rest
# of the allocation. The EXIT trap fires on every exit path (success, error,
# or signal); VLLM_PID is set when the server is launched above.
trap 'kill $VLLM_PID 2>/dev/null || true' EXIT

# Train on GPUs 0,1; the judge is queried over the local OpenAI-compatible API.
echo "=== Starting GRPO training ==="
CUDA_VISIBLE_DEVICES=0,1 python collaborativeagents/training/train_grpo.py \
    --model-path collaborativeagents/training/outputs/sft_reflection \
    --data-path collaborativeagents/training/training_data/grpo_training_data.json \
    --output-dir collaborativeagents/training/outputs/grpo_reflection \
    --judge-url "http://localhost:$JUDGE_PORT/v1" \
    --judge-model "$JUDGE_MODEL" \
    --max-steps 200 \
    --learning-rate 1e-6 \
    --num-generations 8

# Explicit cleanup kept for clarity on the success path; the EXIT trap is the
# safety net for all other paths.
echo "=== Cleanup ==="
kill "$VLLM_PID" 2>/dev/null || true

echo "=== GRPO Training Complete ==="
echo "Model saved to: collaborativeagents/training/outputs/grpo_reflection/final"