From dc801c07cf38b0c495686463e6ca6f871a64440e Mon Sep 17 00:00:00 2001
From: YurenHao0426
Date: Tue, 27 Jan 2026 09:57:37 -0600
Subject: Add collaborativeagents module and update gitignore

- Add collaborativeagents subproject with adapters, agents, and evaluation modules
- Update .gitignore to exclude large binary files (.whl, .tar), wandb logs, and results

Co-Authored-By: Claude Opus 4.5
---
 collaborativeagents/training/train_sft_lora.sbatch | 30 ++++++++++++++++++++++
 1 file changed, 30 insertions(+)
 create mode 100644 collaborativeagents/training/train_sft_lora.sbatch

(limited to 'collaborativeagents/training/train_sft_lora.sbatch')

diff --git a/collaborativeagents/training/train_sft_lora.sbatch b/collaborativeagents/training/train_sft_lora.sbatch
new file mode 100644
index 0000000..9781dd0
--- /dev/null
+++ b/collaborativeagents/training/train_sft_lora.sbatch
@@ -0,0 +1,30 @@
+#!/bin/bash
+#SBATCH --job-name=sft_lora
+#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/sft_lora_%j.out
+#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/sft_lora_%j.err
+#SBATCH --time=12:00:00
+#SBATCH --account=bfqt-delta-gpu
+#SBATCH --partition=gpuA100x4
+#SBATCH --gres=gpu:1
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=64G
+
+# LoRA SFT Training - Minimal Hardware (1 GPU)
+# Expected: ~2-4 hours for 4 epochs on a single A100
+
+cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training
+
+source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
+conda activate eval
+
+echo "=== Starting LoRA SFT Training ==="
+echo "GPU: $(nvidia-smi --query-gpu=name --format=csv,noheader | head -1)"
+echo "Config: llama_factory_lora_config.yaml"
+echo "Start time: $(date)"
+
+# Run training with LLaMA-Factory
+llamafactory-cli train llama_factory_lora_config.yaml
+
+echo "=== Training Complete ==="
+echo "End time: $(date)"
+echo "Output: outputs/sft_reflection_lora"
--
cgit v1.2.3
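
The job script runs `llamafactory-cli train llama_factory_lora_config.yaml`, but the config file itself is not part of this diff. Below is a minimal sketch of what such a LLaMA-Factory LoRA SFT config could look like; only the output directory (outputs/sft_reflection_lora) and the 4-epoch schedule are taken from the script above, while the base model, dataset name, template, and remaining hyperparameters are illustrative assumptions, not values confirmed by this patch.

### model (assumed; the actual base model is not named in this patch)
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct

### method: single-GPU LoRA supervised fine-tuning
stage: sft
do_train: true
finetuning_type: lora
lora_rank: 8
lora_target: all

### dataset (hypothetical name; the real dataset is defined elsewhere in the repo)
dataset: reflection_sft
template: llama3
cutoff_len: 2048

### output (directory matches the job script's final echo)
output_dir: outputs/sft_reflection_lora
logging_steps: 10
save_steps: 500
overwrite_output_dir: true

### train (epoch count from the script comment; other values illustrative)
per_device_train_batch_size: 2
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 4.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true

With such a config saved next to the sbatch file, the job would be submitted from the training directory with: sbatch train_sft_lora.sbatch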