#!/bin/bash
#SBATCH --job-name=sft_qlora
#SBATCH --output=sft_qlora_%j.out
#SBATCH --error=sft_qlora_%j.err
#SBATCH --time=12:00:00
#SBATCH --partition=a100
#SBATCH --gres=gpu:1
#SBATCH --cpus-per-task=8
#SBATCH --mem=64G

# QLoRA SFT Training - Ultra Minimal Hardware (1 GPU, ~12GB VRAM)
# Can run on A100 40GB, V100, or even consumer GPUs
#
# Submits via: sbatch <this-script>
# Requires: conda env "eval" with llamafactory-cli installed, and
# llama_factory_qlora_config.yaml present in the training directory.

# Strict mode: abort on any command failure, and fail pipelines if any
# stage fails. -u is deliberately omitted — conda's activation scripts
# reference unset variables and would trip it.
set -eo pipefail

# Fail fast if the training directory is missing rather than running
# llamafactory-cli from the wrong cwd.
cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training \
  || { echo "ERROR: training directory not found" >&2; exit 1; }

# Initialize conda for non-interactive shells, then activate the env.
# Under set -e a failed activation aborts the job before training starts.
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

echo "=== Starting QLoRA SFT Training ==="
echo "GPU: $(nvidia-smi --query-gpu=name --format=csv,noheader | head -1)"
echo "Config: llama_factory_qlora_config.yaml"
echo "Start time: $(date)"

# Run training with LLaMA-Factory
llamafactory-cli train llama_factory_qlora_config.yaml

echo "=== Training Complete ==="
echo "End time: $(date)"
echo "Output: outputs/sft_reflection_qlora"