From dc801c07cf38b0c495686463e6ca6f871a64440e Mon Sep 17 00:00:00 2001
From: YurenHao0426
Date: Tue, 27 Jan 2026 09:57:37 -0600
Subject: Add collaborativeagents module and update gitignore

- Add collaborativeagents subproject with adapters, agents, and evaluation modules
- Update .gitignore to exclude large binary files (.whl, .tar), wandb logs, and results

Co-Authored-By: Claude Opus 4.5
---
 .../training/llama_factory_lora_config.yaml | 39 ++++++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 collaborativeagents/training/llama_factory_lora_config.yaml

(limited to 'collaborativeagents/training/llama_factory_lora_config.yaml')

diff --git a/collaborativeagents/training/llama_factory_lora_config.yaml b/collaborativeagents/training/llama_factory_lora_config.yaml
new file mode 100644
index 0000000..31b23bd
--- /dev/null
+++ b/collaborativeagents/training/llama_factory_lora_config.yaml
@@ -0,0 +1,39 @@
+### LLaMA-Factory SFT Training Config - LoRA (Minimal Hardware) ###
+### For session-level reflection training ###
+
+### Model
+model_name_or_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct
+
+### Method - LoRA instead of full fine-tuning
+stage: sft
+do_train: true
+finetuning_type: lora
+
+### LoRA Config
+lora_rank: 64
+lora_alpha: 128
+lora_dropout: 0.05
+lora_target: all  # Target all linear layers
+
+### Dataset
+dataset: sft_reflection
+dataset_dir: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/training
+template: llama3
+cutoff_len: 4096
+
+### Output - Using work HDD to avoid quota issues
+output_dir: /work/hdd/bfqt/yurenh2/training_outputs/sft_reflection_lora
+
+### Training - Can run on single GPU
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 32
+learning_rate: 2.0e-5
+num_train_epochs: 4.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+
+### Logging
+logging_steps: 10
+save_steps: 100
+save_total_limit: 3
--
cgit v1.2.3
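
Usage note (not part of the patch itself): with LLaMA-Factory installed, a YAML config like the one added above is normally passed to its CLI, e.g. "llamafactory-cli train llama_factory_lora_config.yaml"; the exact invocation is not shown in this commit and is assumed from LLaMA-Factory's documented entry point. Below is a minimal sketch, assuming PyYAML is available and the config file sits in the working directory, for sanity-checking the key values before submitting a job; the script name and variable names are illustrative, not from the repository.

    # sanity_check_config.py - illustrative helper, not part of the repository
    import yaml

    # Load the LoRA training config added in this commit.
    with open("llama_factory_lora_config.yaml") as f:
        cfg = yaml.safe_load(f)

    # Effective per-GPU batch = micro-batch size * gradient accumulation steps.
    effective_batch = cfg["per_device_train_batch_size"] * cfg["gradient_accumulation_steps"]
    print("effective batch size per GPU:", effective_batch)             # 2 * 32 = 64
    print("LoRA rank / alpha:", cfg["lora_rank"], "/", cfg["lora_alpha"])  # 64 / 128
    print("output dir:", cfg["output_dir"])

With these settings the effective per-GPU batch size is 2 * 32 = 64: a small micro-batch keeps memory low enough for the single-GPU run noted in the Training section, while gradient accumulation preserves a reasonably large optimization batch.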