### LLaMA-Factory SFT Training Config - Resume from checkpoint ###
### For session-level reflection training ###
### Model
model_name_or_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct
### Method
stage: sft
do_train: true
finetuning_type: full
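# Note: ds_z3_config.json is assumed to be a DeepSpeed ZeRO Stage 3 config
# resolvable from the launch directory (as in the LLaMA-Factory examples/deepspeed layout).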
deepspeed: ds_z3_config.json
### Dataset
dataset: sft_reflection
dataset_dir: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training
template: llama3
cutoff_len: 4096
### Output - Using NVMe storage for checkpoints
output_dir: /work/nvme/bfqt/yurenh2/sft_checkpoints
### Resume from checkpoint
resume_from_checkpoint: /work/nvme/bfqt/yurenh2/sft_checkpoints/checkpoint-100
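# Note: resuming restores model weights plus optimizer and LR-scheduler state from
# checkpoint-100, so training continues from global step 100 rather than restarting.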
### Training
per_device_train_batch_size: 1
gradient_accumulation_steps: 16
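# Effective batch size per optimizer step: 1 x 16 = 16 samples per GPU,
# times the DeepSpeed/DDP world size for the global batch.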
learning_rate: 1.0e-6
num_train_epochs: 4.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 3600
### Logging
logging_steps: 10
save_steps: 300
save_total_limit: 2
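### Usage (sketch): configs like this one are launched via the LLaMA-Factory CLI, e.g.
###   llamafactory-cli train <path/to/this_config>.yaml
### For multi-GPU DeepSpeed runs, torchrun can be forced with:
###   FORCE_TORCHRUN=1 llamafactory-cli train <path/to/this_config>.yaml
### (the config file name above is a placeholder; adjust to the actual path)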