summaryrefslogtreecommitdiff
path: root/collaborativeagents/training/train_sft_lora.sbatch
blob: 9781dd0c7491552290a8bbe568ca4b07edc7f6ab (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
#!/bin/bash
#SBATCH --job-name=sft_lora
#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/sft_lora_%j.out
#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/sft_lora_%j.err
#SBATCH --time=12:00:00
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA100x4
#SBATCH --gres=gpu:1
#SBATCH --cpus-per-task=8
#SBATCH --mem=64G

# LoRA SFT training — minimal hardware (1 GPU).
# Expected: ~2-4 hours for 4 epochs on a single A100 (partition gpuA100x4).
# NOTE(review): an earlier comment said "H200", which contradicts the
# partition above — confirm the intended hardware.
# Requires: conda env "eval" with llamafactory-cli installed, and
# llama_factory_lora_config.yaml in the training directory.

# Fail fast: abort the job if any command or pipeline stage fails, so a
# broken environment setup cannot silently fall through to training.
# (set -u is deferred until after conda activation — conda's shell hooks
# reference unset variables on some versions and would trip it.)
set -eo pipefail

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training \
  || { echo "ERROR: training directory not found" >&2; exit 1; }

source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval
set -u  # safe to enable now that the environment is initialized

echo "=== Starting LoRA SFT Training ==="
echo "GPU: $(nvidia-smi --query-gpu=name --format=csv,noheader | head -1)"
echo "Config: llama_factory_lora_config.yaml"
echo "Start time: $(date)"

# Run training with LLaMA-Factory; under set -e a non-zero exit here
# terminates the job with a failure status visible to SLURM.
llamafactory-cli train llama_factory_lora_config.yaml

echo "=== Training Complete ==="
echo "End time: $(date)"
echo "Output: outputs/sft_reflection_lora"