summaryrefslogtreecommitdiff
path: root/collaborativeagents/training/llama_factory_lora_config.yaml
diff options
context:
space:
mode:
author: YurenHao0426 <blackhao0426@gmail.com> 2026-01-27 09:57:37 -0600
committer: YurenHao0426 <blackhao0426@gmail.com> 2026-01-27 09:57:37 -0600
commit: dc801c07cf38b0c495686463e6ca6f871a64440e (patch)
tree: 599f03114775921dbc472403c701f4a3a8ea188a /collaborativeagents/training/llama_factory_lora_config.yaml
parent: e43b3f8aa36c198b95c1e46bea2eaf3893b13dc3 (diff)
Add collaborativeagents module and update gitignore
- Add collaborativeagents subproject with adapters, agents, and evaluation modules - Update .gitignore to exclude large binary files (.whl, .tar), wandb logs, and results Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Diffstat (limited to 'collaborativeagents/training/llama_factory_lora_config.yaml')
-rw-r--r-- collaborativeagents/training/llama_factory_lora_config.yaml (39 lines)
1 files changed, 39 insertions, 0 deletions
diff --git a/collaborativeagents/training/llama_factory_lora_config.yaml b/collaborativeagents/training/llama_factory_lora_config.yaml
new file mode 100644
index 0000000..31b23bd
--- /dev/null
+++ b/collaborativeagents/training/llama_factory_lora_config.yaml
@@ -0,0 +1,39 @@
---
### LLaMA-Factory SFT Training Config - LoRA (Minimal Hardware) ###
### For session-level reflection training ###

### Model
model_name_or_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct

### Method - LoRA instead of full fine-tuning
stage: sft
do_train: true
finetuning_type: lora

### LoRA Config
lora_rank: 64
# alpha = 2 * rank (common LoRA scaling heuristic)
lora_alpha: 128
lora_dropout: 0.05
lora_target: all  # target all linear layers

### Dataset
# dataset name must match an entry in dataset_info.json under dataset_dir
dataset: sft_reflection
dataset_dir: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training
template: llama3
cutoff_len: 4096

### Output - Using work HDD to avoid quota issues
output_dir: /work/hdd/bfqt/yurenh2/training_outputs/sft_reflection_lora

### Training - Can run on single GPU
# effective batch size = 2 * 32 = 64 samples per optimizer step
per_device_train_batch_size: 2
gradient_accumulation_steps: 32
learning_rate: 2.0e-5
num_train_epochs: 4.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true

### Logging
logging_steps: 10
save_steps: 100
save_total_limit: 3