#!/bin/bash
#SBATCH --job-name=grpo_test
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA100x4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=16
#SBATCH --gres=gpu:4
#SBATCH --mem=200G
#SBATCH --time=1:00:00
#SBATCH --output=grpo_test_%j.out
#SBATCH --error=grpo_test_%j.err

set -e

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="${PWD}/src:${PWD}/collaborativeagents:${PYTHONPATH}"

# Install required packages (ensure they're in the conda env)
echo "Installing required packages..."
pip install --quiet json-repair tenacity

# Test: verify imports work
echo "Testing imports..."
python3 -c "from json_repair import repair_json; from tenacity import retry; print('Imports OK')"

# Start judge model (70B) on GPUs 2,3; training uses GPUs 0,1
MODEL_70B="meta-llama/Llama-3.1-70B-Instruct"
echo "Starting judge model..."
CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
    --model $MODEL_70B --port 8004 --tensor-parallel-size 2 \
    --gpu-memory-utilization 0.95 --max-model-len 8192 \
    --download-dir $HF_HOME --dtype bfloat16 --disable-log-requests &

# Ensure the background server is killed on any exit path (set -e would
# otherwise skip the pkill at the bottom if training fails)
trap 'pkill -f "vllm.entrypoints" 2>/dev/null || true' EXIT

# Wait for the server to come up, failing fast if it never does
for i in $(seq 1 60); do
    curl -s http://localhost:8004/health > /dev/null 2>&1 && break
    sleep 3
done
if ! curl -s http://localhost:8004/health > /dev/null 2>&1; then
    echo "Judge model failed to start within 3 minutes" >&2
    exit 1
fi
echo "Judge model ready"

# Run GRPO with minimal steps for testing
echo "Starting GRPO test (10 steps only)..."
cd collaborativeagents/training/grpo_verl

python -m verl.trainer.main_ppo \
    algorithm.adv_estimator=grpo \
    data.train_files=${PWD}/data/session_level_reflection_grpo_train.parquet \
    data.val_files=${PWD}/data/session_level_reflection_grpo_train.parquet \
    data.train_batch_size=8 \
    data.max_prompt_length=2048 \
    data.max_response_length=1024 \
    data.filter_overlong_prompts=True \
    data.truncation=error \
    data.prompt_key=prompt \
    data.reward_fn_key=data_source \
    actor_rollout_ref.model.path=/work/nvme/bfqt/yurenh2/sft_checkpoints/checkpoint-200 \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=4 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.kl_loss_coef=0.003 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.model_dtype=bfloat16 \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.rollout.n=4 \
    actor_rollout_ref.rollout.temperature=0.9 \
    actor_rollout_ref.rollout.top_p=0.9 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \
    actor_rollout_ref.ref.fsdp_config.model_dtype=bfloat16 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    custom_reward_function.path=${PWD}/verl_reward_functions.py \
    custom_reward_function.name=compute_score \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.val_before_train=False \
    trainer.logger='["console"]' \
    trainer.project_name=grpo-test \
    trainer.experiment_name=llama3.1-8b-grpo-test \
    trainer.n_gpus_per_node=2 \
    trainer.nnodes=1 \
    trainer.save_freq=100 \
    trainer.test_freq=100 \
    trainer.total_training_steps=10 \
    trainer.default_local_dir=/scratch/bfqt/yurenh2/grpo_test_outputs

echo "GRPO test complete!"
pkill -f "vllm.entrypoints" 2>/dev/null || true
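
# ---------------------------------------------------------------------------
# Optional manual smoke test (a sketch, not part of the job above): while the
# judge server is still up, you can query its OpenAI-compatible API directly.
# /v1/models and /v1/completions are standard vLLM server routes; the "model"
# field must match MODEL_70B exactly.
#
# curl -s http://localhost:8004/v1/models | python3 -m json.tool
# curl -s http://localhost:8004/v1/completions \
#   -H "Content-Type: application/json" \
#   -d '{"model": "meta-llama/Llama-3.1-70B-Instruct", "prompt": "Say OK.", "max_tokens": 8}'
# ---------------------------------------------------------------------------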