#!/bin/bash
#SBATCH --job-name=grpo_train
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuH200x8
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:4
#SBATCH --mem=256G
#SBATCH --time=12:00:00
#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/grpo_%j.out
#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/grpo_%j.err

echo "=== GRPO Training with VERL ==="
date
nvidia-smi --query-gpu=index,name,memory.total --format=csv

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training/grpo_verl
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval
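
# Optional sanity check (a sketch, not part of the original submission):
# confirm verl and vllm are importable in the "eval" env before burning
# queue time on a misconfigured environment.
python -c "import verl, vllm; print('verl', getattr(verl, '__version__', '?'), '| vllm', vllm.__version__)" \
    || { echo "conda env 'eval' is missing verl/vllm" >&2; exit 1; }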

export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model:$PYTHONPATH"
export WANDB_MODE=offline

# Paths
TRAIN_DATA="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training/grpo_verl/data/session_level_reflection_grpo_train.parquet"
MODEL_PATH="/work/nvme/bfqt/yurenh2/sft_checkpoints/checkpoint-200"
REWARD_FN="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training/grpo_verl/verl_reward_functions.py"
OUTPUT_DIR="/scratch/bfqt/yurenh2/grpo_outputs"

mkdir -p "$OUTPUT_DIR"
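
# Fail fast if any required input is missing (an added guard, not in the
# original run; it uses only the paths defined above):
for p in "$TRAIN_DATA" "$MODEL_PATH" "$REWARD_FN"; do
    [ -e "$p" ] || { echo "Missing required path: $p" >&2; exit 1; }
done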

# Start 70B judge model for reward evaluation on GPUs 0,1
echo "Starting 70B judge model on GPUs 0,1..."
CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
    --model meta-llama/Llama-3.1-70B-Instruct \
    --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.85 \
    --max-model-len 4096 --dtype bfloat16 --download-dir "$HF_HOME" &
JUDGE_PID=$!

# Wait for judge model
echo "Waiting for judge model..."
for i in {1..200}; do
    if curl -s http://localhost:8004/health > /dev/null 2>&1; then
        echo "Judge model ready after $((i*5)) seconds"
        break
    fi
    sleep 5
done
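
# Optional smoke test (a sketch): /v1/models is the standard route exposed
# by vllm.entrypoints.openai.api_server and lists the served model.
curl -s http://localhost:8004/v1/models | head -c 300; echo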

echo ""
echo "Starting GRPO training..."
echo "Model: $MODEL_PATH"
echo "Data: $TRAIN_DATA"
echo "Output: $OUTPUT_DIR"

# GRPO training with VERL
CUDA_VISIBLE_DEVICES=2,3 python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=grpo \
    data.train_files="$TRAIN_DATA" \
    data.val_files="$TRAIN_DATA" \
    data.train_batch_size=64 \
    data.max_prompt_length=2048 \
    data.max_response_length=1024 \
    data.filter_overlong_prompts=True \
    data.truncation='error' \
    data.prompt_key=prompt \
    data.reward_fn_key=data_source \
    actor_rollout_ref.model.path=$MODEL_PATH \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=8 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.kl_loss_coef=0.003 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.model_dtype=bfloat16 \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
    actor_rollout_ref.rollout.n=8 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.ref.fsdp_config.model_dtype=bfloat16 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    actor_rollout_ref.rollout.temperature=0.9 \
    actor_rollout_ref.rollout.top_p=0.9 \
    custom_reward_function.path=$REWARD_FN \
    custom_reward_function.name=compute_score \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.val_before_train=False \
    trainer.logger='["console"]' \
    trainer.project_name='collaborative-agent-reflection-grpo' \
    trainer.experiment_name='llama3.1-8b-grpo' \
    trainer.n_gpus_per_node=2 \
    trainer.nnodes=1 \
    trainer.save_freq=50 \
    trainer.test_freq=100 \
    trainer.total_epochs=1 \
    trainer.default_local_dir=$OUTPUT_DIR
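
# Batch arithmetic, for reference (a hedged reading of the VERL config, not
# authoritative): each step samples data.train_batch_size (64) prompts x
# rollout.n (8) responses = 512 rollouts, with GRPO advantages computed
# within each group of 8; the actor then updates over mini-batches of
# ppo_mini_batch_size (8), using micro-batches of 4 per GPU on the 2
# training GPUs, with gradient accumulation covering the difference.
#
# custom_reward_function: VERL imports compute_score from
# verl_reward_functions.py; in the VERL releases I've seen, the expected
# signature is compute_score(data_source, solution_str, ground_truth,
# extra_info=None) -> float. Verify against the installed version.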

kill "$JUDGE_PID" 2>/dev/null || pkill -f "vllm.entrypoints" 2>/dev/null || true

echo ""
echo "GRPO Training complete!"
date