### LLaMA-Factory SFT Training Config - LoRA (Minimal Hardware) ###
### For session-level reflection training ###
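### Launch (assumes a standard LLaMA-Factory install): a config like this is
### typically run with
###   llamafactory-cli train llama_factory_lora_config.yaml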

### Model
model_name_or_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct
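# A local model snapshot is used here; a Hugging Face hub ID
# (e.g. meta-llama/Llama-3.1-8B-Instruct) should also work if accessible.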

### Method - LoRA instead of full fine-tuning
stage: sft
do_train: true
finetuning_type: lora

### LoRA Config
lora_rank: 64
lora_alpha: 128
lora_dropout: 0.05
lora_target: all  # Target all linear layers
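# With lora_alpha / lora_rank = 128 / 64, adapter updates are scaled by a
# factor of 2 (the common alpha = 2 * rank pairing).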

### Dataset
dataset: sft_reflection
dataset_dir: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training
template: llama3
cutoff_len: 4096
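# Note: `sft_reflection` must be registered in a dataset_info.json entry under
# dataset_dir; tokenized sequences longer than cutoff_len are truncated.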

### Output - Using work HDD to avoid quota issues
output_dir: /work/hdd/bfqt/yurenh2/training_outputs/sft_reflection_lora

### Training - Can run on a single GPU
per_device_train_batch_size: 2
gradient_accumulation_steps: 32
learning_rate: 2.0e-5
num_train_epochs: 4.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
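# Effective batch size per device: 2 (per_device_train_batch_size)
# x 32 (gradient_accumulation_steps) = 64 samples per optimizer step.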

### Logging
logging_steps: 10
save_steps: 100
save_total_limit: 3
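
### Post-training merge (a hedged sketch; the file name and export_dir below
### are illustrative, not from this config): the adapter saved in output_dir
### can be merged into the base model with
###   llamafactory-cli export merge_lora.yaml
### where merge_lora.yaml looks roughly like:
# model_name_or_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct
# adapter_name_or_path: /work/hdd/bfqt/yurenh2/training_outputs/sft_reflection_lora
# template: llama3
# finetuning_type: lora
# export_dir: /work/hdd/bfqt/yurenh2/training_outputs/sft_reflection_lora_merged  # illustrative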