Diffstat (limited to 'collaborativeagents/training/grpo_verl/run_grpo.sbatch')
-rw-r--r--  collaborativeagents/training/grpo_verl/run_grpo.sbatch  121
1 file changed, 121 insertions(+), 0 deletions(-)
diff --git a/collaborativeagents/training/grpo_verl/run_grpo.sbatch b/collaborativeagents/training/grpo_verl/run_grpo.sbatch
new file mode 100644
index 0000000..e22b221
--- /dev/null
+++ b/collaborativeagents/training/grpo_verl/run_grpo.sbatch
@@ -0,0 +1,121 @@
+#!/bin/bash
+#SBATCH --job-name=grpo_train
+#SBATCH --account=bfqt-delta-gpu
+#SBATCH --partition=gpuH200x8
+#SBATCH --nodes=1
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=32
+#SBATCH --gres=gpu:4
+#SBATCH --mem=256G
+#SBATCH --time=12:00:00
+#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/grpo_%j.out
+#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/grpo_%j.err
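+# GPU split across the 4 allocated GPUs: 0,1 serve the 70B judge; 2,3 run GRPO training.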
+
+echo "=== GRPO Training with VERL ==="
+date
+nvidia-smi --query-gpu=index,name,memory.total --format=csv
+
+cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training/grpo_verl || exit 1
+source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
+conda activate eval
+
+export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
+export PYTHONPATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model:$PYTHONPATH"
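+# W&B kept offline as a precaution; the trainer below logs to console only.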
+export WANDB_MODE=offline
+
+# Paths
+TRAIN_DATA="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training/grpo_verl/data/session_level_reflection_grpo_train.parquet"
+MODEL_PATH="/work/nvme/bfqt/yurenh2/sft_checkpoints/checkpoint-200"
+REWARD_FN="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training/grpo_verl/verl_reward_functions.py"
+OUTPUT_DIR="/scratch/bfqt/yurenh2/grpo_outputs"
+
+mkdir -p "$OUTPUT_DIR"
+
+# Start 70B judge model for reward evaluation on GPUs 0,1
+echo "Starting 70B judge model on GPUs 0,1..."
+CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
+ --model meta-llama/Llama-3.1-70B-Instruct \
+ --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.85 \
+    --max-model-len 4096 --dtype bfloat16 --download-dir "$HF_HOME" &
+
+# Wait for judge model
+echo "Waiting for judge model..."
+for i in {1..200}; do
+    if curl -s http://localhost:8004/health > /dev/null 2>&1; then
+        echo "Judge model ready after $((i*5)) seconds"
+        break
+    fi
+    if [ "$i" -eq 200 ]; then
+        echo "Judge model not ready after $((i*5)) seconds; aborting." >&2
+        exit 1
+    fi
+    sleep 5
+done
+
+echo ""
+echo "Starting GRPO training..."
+echo "Model: $MODEL_PATH"
+echo "Data: $TRAIN_DATA"
+echo "Output: $OUTPUT_DIR"
+
+# GRPO training with VERL
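+# rollout.n=8 samples per prompt form each GRPO group; the policy is kept close
+# to the frozen reference via kl_loss_coef=0.003 (low-variance KL estimator).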
+CUDA_VISIBLE_DEVICES=2,3 python3 -m verl.trainer.main_ppo \
+ algorithm.adv_estimator=grpo \
+ data.train_files="$TRAIN_DATA" \
+ data.val_files="$TRAIN_DATA" \
+ data.train_batch_size=64 \
+ data.max_prompt_length=2048 \
+ data.max_response_length=1024 \
+ data.filter_overlong_prompts=True \
+ data.truncation='error' \
+ data.prompt_key=prompt \
+ data.reward_fn_key=data_source \
+    actor_rollout_ref.model.path="$MODEL_PATH" \
+ actor_rollout_ref.actor.optim.lr=1e-6 \
+ actor_rollout_ref.model.use_remove_padding=True \
+ actor_rollout_ref.actor.ppo_mini_batch_size=8 \
+ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
+ actor_rollout_ref.actor.use_kl_loss=True \
+ actor_rollout_ref.actor.kl_loss_coef=0.003 \
+ actor_rollout_ref.actor.kl_loss_type=low_var_kl \
+ actor_rollout_ref.actor.entropy_coeff=0 \
+ actor_rollout_ref.model.enable_gradient_checkpointing=True \
+ actor_rollout_ref.actor.fsdp_config.model_dtype=bfloat16 \
+ actor_rollout_ref.actor.fsdp_config.param_offload=False \
+ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
+ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
+ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
+ actor_rollout_ref.rollout.name=vllm \
+ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
+ actor_rollout_ref.rollout.n=8 \
+ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
+ actor_rollout_ref.ref.fsdp_config.model_dtype=bfloat16 \
+ actor_rollout_ref.ref.fsdp_config.param_offload=True \
+ actor_rollout_ref.rollout.temperature=0.9 \
+ actor_rollout_ref.rollout.top_p=0.9 \
+    custom_reward_function.path="$REWARD_FN" \
+ custom_reward_function.name=compute_score \
+ algorithm.use_kl_in_reward=False \
+ trainer.critic_warmup=0 \
+ trainer.val_before_train=False \
+ trainer.logger='["console"]' \
+ trainer.project_name='collaborative-agent-reflection-grpo' \
+ trainer.experiment_name='llama3.1-8b-grpo' \
+ trainer.n_gpus_per_node=2 \
+ trainer.nnodes=1 \
+ trainer.save_freq=50 \
+ trainer.test_freq=100 \
+ trainer.total_epochs=1 \
+    trainer.default_local_dir="$OUTPUT_DIR"
+
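+# Shut down the background judge server.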
+pkill -f "vllm.entrypoints" 2>/dev/null || true
+
+echo ""
+echo "GRPO Training complete!"
+date