Diffstat (limited to 'collaborativeagents/training/sft_config.yaml')
 collaborativeagents/training/sft_config.yaml | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
diff --git a/collaborativeagents/training/sft_config.yaml b/collaborativeagents/training/sft_config.yaml
new file mode 100644
index 0000000..52a52d5
--- /dev/null
+++ b/collaborativeagents/training/sft_config.yaml
@@ -0,0 +1,40 @@
+### LLaMA-Factory SFT Configuration for Session-Level Reflection
+### Based on the paper's training setup (Table 4)
+
+### Model
+model_name_or_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct
+
+### Method
+stage: sft
+do_train: true
+finetuning_type: full
+
+### Dataset
+dataset: session_level_reflection
+dataset_dir: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training/training_data
+template: llama3
+cutoff_len: 32768
+preprocessing_num_workers: 16
+
+### Output
+output_dir: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training/outputs/sft_reflection
+logging_steps: 10
+save_steps: 100
+save_total_limit: 3
+
+### Training hyperparameters (from paper Table 4)
+per_device_train_batch_size: 1
+gradient_accumulation_steps: 64
+learning_rate: 1.0e-6
+num_train_epochs: 4
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+
+### Optimization
+optim: adamw_torch
+weight_decay: 0.01
+max_grad_norm: 1.0
+
+### Report
+report_to: none
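
For the dataset name above to resolve, LLaMA-Factory looks up 'session_level_reflection' in a dataset_info.json file inside dataset_dir. A minimal sketch of that entry, assuming the data is stored as ShareGPT-style conversations in session_level_reflection.json (the file name, formatting, and column mapping are assumptions, not taken from this commit):

{
  "session_level_reflection": {
    "file_name": "session_level_reflection.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations"
    }
  }
}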
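
To run the fine-tune, LLaMA-Factory's CLI takes this YAML file directly; setting FORCE_TORCHRUN=1 forces a torchrun launch, which full fine-tuning of an 8B model will typically need across multiple GPUs. A sketch, assuming LLaMA-Factory is installed and the paths above exist:

# effective batch size = per_device_train_batch_size (1)
#   x gradient_accumulation_steps (64) x number of GPUs
FORCE_TORCHRUN=1 llamafactory-cli train collaborativeagents/training/sft_config.yaml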