#!/bin/bash
#SBATCH --job-name=sft_refl
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA100x4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=16
#SBATCH --gres=gpu:4
#SBATCH --mem=200G
#SBATCH --time=24:00:00
#SBATCH --output=logs/sft_reflection_%j.out
#SBATCH --error=logs/sft_reflection_%j.err
#
# SFT training pipeline for the collaborative-agent reflection model:
#   1. generate SFT training data from completed experiment results
#   2. fine-tune Llama-3.1-8B-Instruct on that data via TRL
#
# Requires: conda env "eval" and the project checkout at the path below.
# NOTE(review): --output/--error point at logs/ relative to the submission
# directory, while mkdir below creates collaborativeagents/slurm/logs —
# confirm these are meant to match.

# Fail fast: abort on any error, on unset variables, and on failures in
# any stage of a pipeline.
set -euo pipefail

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model

# Make sure log/output/data directories exist before anything writes to them.
mkdir -p collaborativeagents/slurm/logs \
         collaborativeagents/training/outputs \
         collaborativeagents/training/training_data

# Activate the conda environment used for training.
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

# Shared HF cache on the project filesystem (keeps model downloads out of $HOME).
export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
# ${PYTHONPATH:-} guards against an unset PYTHONPATH under 'set -u'.
export PYTHONPATH="${PWD}/src:${PWD}/collaborativeagents:${PYTHONPATH:-}"
export WANDB_PROJECT="collaborative-agent-reflection-sft"

# Step 1: Generate training data from completed experiments
echo "=== Step 1: Generating training data ==="
python collaborativeagents/training/generate_training_data.py \
  --results-dir collaborativeagents/results \
  --output-dir collaborativeagents/training/training_data

# Step 2: Run SFT training using TRL
echo "=== Step 2: Running SFT training ==="
python collaborativeagents/training/train_sft.py \
  --model-path /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct \
  --data-path collaborativeagents/training/training_data/sft_training_data.json \
  --output-dir collaborativeagents/training/outputs/sft_reflection \
  --num-epochs 4 \
  --learning-rate 1e-6 \
  --batch-size 1 \
  --gradient-accumulation 64

echo "=== SFT Training Complete ==="
echo "Model saved to: collaborativeagents/training/outputs/sft_reflection"