summaryrefslogtreecommitdiff
path: root/collaborativeagents/training/llama_factory_resume_config.yaml
diff options
context:
space:
mode:
Diffstat (limited to 'collaborativeagents/training/llama_factory_resume_config.yaml')
-rw-r--r-- collaborativeagents/training/llama_factory_resume_config.yaml 38
1 file changed, 38 insertions, 0 deletions
diff --git a/collaborativeagents/training/llama_factory_resume_config.yaml b/collaborativeagents/training/llama_factory_resume_config.yaml
new file mode 100644
index 0000000..627d5a4
--- /dev/null
+++ b/collaborativeagents/training/llama_factory_resume_config.yaml
@@ -0,0 +1,38 @@
+### LLaMA-Factory SFT Training Config - Resume from checkpoint ###
+### For session-level reflection training ###
+
+### Model
+model_name_or_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct
+
+### Method
+stage: sft
+do_train: true
+finetuning_type: full
+deepspeed: ds_z3_config.json
+
+### Dataset
+dataset: sft_reflection
+dataset_dir: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training
+template: llama3
+cutoff_len: 4096
+
+### Output - Using NVMe storage for checkpoints
+output_dir: /work/nvme/bfqt/yurenh2/sft_checkpoints
+
+### Resume from checkpoint
+resume_from_checkpoint: /work/nvme/bfqt/yurenh2/sft_checkpoints/checkpoint-100
+
+### Training
+per_device_train_batch_size: 1
+gradient_accumulation_steps: 16
+learning_rate: 1.0e-6
+num_train_epochs: 4.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+ddp_timeout: 3600
+
+### Logging
+logging_steps: 10
+save_steps: 300
+save_total_limit: 2