blob: 8cc9f78d1c57fe43da36ed95fe612a5d3826e41a (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
|
#!/bin/bash
#SBATCH --job-name=sft_refl
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA100x4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=16
#SBATCH --gres=gpu:4
#SBATCH --mem=200G
#SBATCH --time=24:00:00
#SBATCH --output=logs/sft_reflection_%j.out
#SBATCH --error=logs/sft_reflection_%j.err
#
# SFT training pipeline for the collaborative-agent reflection model:
#   1. build SFT training data from completed experiment results,
#   2. fine-tune a Llama-3.1-8B-Instruct checkpoint with TRL.
#
# NOTE(review): the #SBATCH --output/--error path `logs/` must already exist
# at submission time (SLURM opens the log files before this script runs);
# the mkdir below only covers directories used by the job body itself.

# Fail fast: abort on command errors (-e), unset variables (-u),
# and failures anywhere in a pipeline (-o pipefail).
set -euo pipefail

readonly PROJECT_ROOT=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model
readonly CONDA_SH=/u/yurenh2/miniforge3/etc/profile.d/conda.sh

cd "${PROJECT_ROOT}" || { echo "cannot cd to ${PROJECT_ROOT}" >&2; exit 1; }

# Output directories for the job body (training data, checkpoints, job logs).
mkdir -p collaborativeagents/slurm/logs collaborativeagents/training/outputs collaborativeagents/training/training_data

# Activate the evaluation conda environment; fail loudly if the init script
# is missing rather than running `conda` from an undefined state.
[ -f "${CONDA_SH}" ] || { echo "conda init script not found: ${CONDA_SH}" >&2; exit 1; }
source "${CONDA_SH}"
conda activate eval

export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
# ${PYTHONPATH:-} — default to empty: under `set -u` a bare ${PYTHONPATH}
# aborts the job when the batch environment does not define it.
export PYTHONPATH="${PWD}/src:${PWD}/collaborativeagents:${PYTHONPATH:-}"
export WANDB_PROJECT="collaborative-agent-reflection-sft"

# Step 1: Generate training data from completed experiments
echo "=== Step 1: Generating training data ==="
python collaborativeagents/training/generate_training_data.py \
  --results-dir collaborativeagents/results \
  --output-dir collaborativeagents/training/training_data

# Step 2: Run SFT training using TRL
echo "=== Step 2: Running SFT training ==="
python collaborativeagents/training/train_sft.py \
  --model-path "${PROJECT_ROOT}/models/llama-3.1-8b-instruct" \
  --data-path collaborativeagents/training/training_data/sft_training_data.json \
  --output-dir collaborativeagents/training/outputs/sft_reflection \
  --num-epochs 4 \
  --learning-rate 1e-6 \
  --batch-size 1 \
  --gradient-accumulation 64

echo "=== SFT Training Complete ==="
echo "Model saved to: collaborativeagents/training/outputs/sft_reflection"
|