#!/bin/bash
#SBATCH --job-name=sft_lora
#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/sft_lora_%j.out
#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/sft_lora_%j.err
#SBATCH --time=12:00:00
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA100x4
#SBATCH --gres=gpu:1
#SBATCH --cpus-per-task=8
#SBATCH --mem=64G

# LoRA SFT training job - minimal hardware (1 GPU).
# Expected: ~2-4 hours for 4 epochs on a single A100 (partition gpuA100x4).
#
# NOTE: each #SBATCH directive must be on its own line at the top of the
# file, before any executable command, or SLURM silently ignores it.

# Fail fast: abort on any command error, unset variable, or pipeline failure,
# so we never run training in the wrong directory or environment.
set -euo pipefail

# Work from the training directory; abort with a clear message if it is missing.
cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training || {
  echo "ERROR: training directory not found" >&2
  exit 1
}

# Activate the 'eval' conda environment (provides llamafactory-cli).
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

echo "=== Starting LoRA SFT Training ==="
echo "GPU: $(nvidia-smi --query-gpu=name --format=csv,noheader | head -1)"
echo "Config: llama_factory_lora_config.yaml"
echo "Start time: $(date)"

# Run training with LLaMA-Factory; capture the exit status explicitly so the
# completion banner is only printed on success (set -e would otherwise skip it,
# but this makes the failure path and message explicit in the job log).
if ! llamafactory-cli train llama_factory_lora_config.yaml; then
  echo "=== Training FAILED at $(date) ===" >&2
  exit 1
fi

echo "=== Training Complete ==="
echo "End time: $(date)"
echo "Output: outputs/sft_reflection_lora"