| author | YurenHao0426 <blackhao0426@gmail.com> | 2025-12-17 04:29:37 -0600 |
|---|---|---|
| committer | YurenHao0426 <blackhao0426@gmail.com> | 2025-12-17 04:29:37 -0600 |
| commit | e43b3f8aa36c198b95c1e46bea2eaf3893b13dc3 (patch) | |
| tree | 6ce8a00d2f8b9ebd83c894a27ea01ac50cfb2ff5 /configs | |
Diffstat (limited to 'configs')
| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | configs/base.yaml | 0 |
| -rw-r--r-- | configs/local_models.yaml | 50 |
| -rw-r--r-- | configs/qwen2.5_0.5b_full_sft.yaml | 34 |
| -rw-r--r-- | configs/qwen2.5_1.5b_full_sft.yaml | 33 |
| -rw-r--r-- | configs/qwen3_0.6b_full_sft.yaml | 35 |
| -rw-r--r-- | configs/qwen3_1.7b_full_sft.yaml | 34 |
| -rw-r--r-- | configs/reranker.yaml | 3 |
| -rw-r--r-- | configs/retrieval.yaml | 5 |
| -rw-r--r-- | configs/user_model.yaml | 14 |
9 files changed, 208 insertions, 0 deletions
```diff
diff --git a/configs/base.yaml b/configs/base.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/configs/base.yaml
diff --git a/configs/local_models.yaml b/configs/local_models.yaml
new file mode 100644
index 0000000..13c7fcc
--- /dev/null
+++ b/configs/local_models.yaml
@@ -0,0 +1,50 @@
+models:
+  llm:
+    # New Multi-Backend Config
+    qwen_1_5b:
+      backend: qwen
+      path: models/qwen2.5-1.5b-instruct
+      device: auto
+      dtype: bfloat16
+      max_context_length: 4096
+
+    llama_8b:
+      backend: llama
+      path: models/llama-3.1-8b-instruct
+      device: auto
+      dtype: bfloat16
+      max_context_length: 8192
+
+    # Legacy fallback (needed if from_config is called directly without name)
+    hf_id: Qwen/Qwen2.5-1.5B-Instruct
+    local_path: models/qwen2.5-1.5b-instruct
+    dtype: bfloat16
+    device_map: auto
+
+  preference_extractor:
+    # Default/Legacy
+    default:
+      hf_id: Qwen/Qwen2.5-0.5B-Instruct
+      local_path: models/qwen2.5-0.5b-instruct
+      dtype: bfloat16
+      device_map: auto
+    # New SFT Extractor
+    qwen3_0_6b_sft:
+      path: saves/qwen3-0.6b-full-sft-h200/checkpoint-4358
+      prompt_template_path: fine_tuning_prompt_template.txt
+      device: auto
+      dtype: bfloat16
+      max_new_tokens: 512
+  embedding:
+    qwen3:
+      hf_id: Qwen/Qwen3-Embedding-8B
+      local_path: models/qwen3-embedding-8b
+    nemotron:
+      hf_id: nvidia/llama-embed-nemotron-8b
+      local_path: models/llama-embed-nemotron-8b
+  reranker:
+    qwen3_8b:
+      hf_id: Qwen/Qwen3-Reranker-8B
+      local_path: models/rerankers/qwen3-reranker-8b
+      dtype: bfloat16
+      device_map: auto
diff --git a/configs/qwen2.5_0.5b_full_sft.yaml b/configs/qwen2.5_0.5b_full_sft.yaml
new file mode 100644
index 0000000..ca1cca2
--- /dev/null
+++ b/configs/qwen2.5_0.5b_full_sft.yaml
@@ -0,0 +1,34 @@
+### Qwen2.5-0.5B Full SFT Config
+model_name_or_path: Qwen/Qwen2.5-0.5B-Instruct
+stage: sft
+do_train: true
+finetuning_type: full
+freeze_trainable_layers: 0
+
+dataset: preference_extractor_train
+template: qwen
+cutoff_len: 1024
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+output_dir: saves/qwen2.5-0.5b-full-sft
+logging_steps: 10
+save_strategy: steps
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+
+per_device_train_batch_size: 16
+gradient_accumulation_steps: 8
+learning_rate: 2.0e-5
+num_train_epochs: 1.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.05
+bf16: true
+flash_attn: fa2
+
+val_size: 0.01
+per_device_eval_batch_size: 16
+eval_strategy: steps
+eval_steps: 500
+
diff --git a/configs/qwen2.5_1.5b_full_sft.yaml b/configs/qwen2.5_1.5b_full_sft.yaml
new file mode 100644
index 0000000..e91176b
--- /dev/null
+++ b/configs/qwen2.5_1.5b_full_sft.yaml
@@ -0,0 +1,33 @@
+### Qwen2.5-1.5B Full SFT Config
+model_name_or_path: Qwen/Qwen2.5-1.5B-Instruct
+stage: sft
+do_train: true
+finetuning_type: full
+
+dataset: preference_extractor_train
+template: qwen
+cutoff_len: 1024
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+output_dir: saves/qwen2.5-1.5b-full-sft
+logging_steps: 10
+save_strategy: steps
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+
+per_device_train_batch_size: 8
+gradient_accumulation_steps: 16
+learning_rate: 2.0e-5
+num_train_epochs: 1.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.05
+bf16: true
+flash_attn: fa2
+
+val_size: 0.01
+per_device_eval_batch_size: 8
+eval_strategy: steps
+eval_steps: 500
+
diff --git a/configs/qwen3_0.6b_full_sft.yaml b/configs/qwen3_0.6b_full_sft.yaml
new file mode 100644
index 0000000..e41a419
--- /dev/null
+++ b/configs/qwen3_0.6b_full_sft.yaml
@@ -0,0 +1,35 @@
+### Qwen3-0.6B Full SFT Config (H200x4 Optimized)
+model_name_or_path: Qwen/Qwen3-0.6B
+stage: sft
+do_train: true
+finetuning_type: full
+freeze_trainable_layers: 0
+
+dataset: preference_extractor_train
+template: qwen
+cutoff_len: 1024
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+output_dir: saves/qwen3-0.6b-full-sft-h200
+logging_steps: 5
+save_strategy: steps
+save_steps: 200
+plot_loss: true
+overwrite_output_dir: true
+
+# H200x4 Configuration
+# Total Batch Size = 32 * 4 * 1 = 128
+per_device_train_batch_size: 32
+gradient_accumulation_steps: 1
+learning_rate: 2.0e-5
+num_train_epochs: 1.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.05
+bf16: true
+flash_attn: fa2
+
+val_size: 0.01
+per_device_eval_batch_size: 32
+eval_strategy: steps
+eval_steps: 200
diff --git a/configs/qwen3_1.7b_full_sft.yaml b/configs/qwen3_1.7b_full_sft.yaml
new file mode 100644
index 0000000..069c53d
--- /dev/null
+++ b/configs/qwen3_1.7b_full_sft.yaml
@@ -0,0 +1,34 @@
+### Qwen3-1.7B Full SFT Config (H200x4 Optimized)
+model_name_or_path: models/Qwen3-1.7B
+stage: sft
+do_train: true
+finetuning_type: full
+
+dataset: preference_extractor_train
+template: qwen
+cutoff_len: 1024
+overwrite_cache: true
+preprocessing_num_workers: 4
+
+output_dir: saves/qwen3-1.7b-full-sft-h200
+logging_steps: 5
+save_strategy: steps
+save_steps: 200
+plot_loss: true
+overwrite_output_dir: true
+
+# H200x4 Configuration
+# Total Batch Size = 32 * 4 * 1 = 128
+per_device_train_batch_size: 32
+gradient_accumulation_steps: 1
+learning_rate: 2.0e-5
+num_train_epochs: 1.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.05
+bf16: true
+flash_attn: fa2
+
+val_size: 0.01
+per_device_eval_batch_size: 32
+eval_strategy: steps
+eval_steps: 200
diff --git a/configs/reranker.yaml b/configs/reranker.yaml
new file mode 100644
index 0000000..c376fc7
--- /dev/null
+++ b/configs/reranker.yaml
@@ -0,0 +1,3 @@
+reranker:
+  default: qwen3_8b
+
diff --git a/configs/retrieval.yaml b/configs/retrieval.yaml
new file mode 100644
index 0000000..d2e100e
--- /dev/null
+++ b/configs/retrieval.yaml
@@ -0,0 +1,5 @@
+retrieval:
+  dense_topk: 64   # Initial recall count
+  rerank_topk: 8   # Count fed to LLM after rerank
+  pca_dim: 256
+
diff --git a/configs/user_model.yaml b/configs/user_model.yaml
new file mode 100644
index 0000000..7b8e230
--- /dev/null
+++ b/configs/user_model.yaml
@@ -0,0 +1,14 @@
+user_model:
+  item_dim: 256
+  user_dim: 256
+  beta_long: 0.1   # Enable personalization for Day 4
+  beta_short: 0.3
+  tau: 1.0
+  preference_extractor_name: qwen3_0_6b_sft  # Switch to new extractor
+  rl:
+    eta_long: 1.0e-3
+    eta_short: 5.0e-3
+    ema_alpha: 0.05
+    short_decay: 0.1
+
+llm_name: llama_8b  # Switch backend to Llama 3.1
```
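To make the intent of the last two files concrete, here is a minimal sketch of how a loader could resolve them. The helper names (`load_yaml`, `resolve_llm`) are hypothetical and not part of this commit: `llm_name` from `user_model.yaml` selects a named backend under `models.llm` in `local_models.yaml`, and the flat legacy keys are used when no name is supplied (the case the "Legacy fallback" comment mentions).

```python
# Hypothetical illustration only; function names and call sites are assumptions,
# not code from this commit. Requires PyYAML.
import yaml

def load_yaml(path: str) -> dict:
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)

def resolve_llm(name: str | None = None) -> dict:
    """Return the config block for the requested LLM backend.

    Falls back to the flat legacy keys (hf_id, local_path, ...) when no
    name is given, mirroring the "Legacy fallback" comment in the YAML.
    """
    llm_cfg = load_yaml("configs/local_models.yaml")["models"]["llm"]
    if name is None:
        # Legacy path: the flat keys live directly under models.llm
        return {k: v for k, v in llm_cfg.items() if not isinstance(v, dict)}
    return llm_cfg[name]

if __name__ == "__main__":
    llm_name = load_yaml("configs/user_model.yaml").get("llm_name")  # "llama_8b"
    print(resolve_llm(llm_name))  # backend: llama, path: models/llama-3.1-8b-instruct, ...
    print(resolve_llm())          # legacy Qwen2.5-1.5B fallback keys
```

Keeping the named backends and the flat legacy keys in the same file lets new callers opt into the multi-backend path while older call sites keep working unchanged.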
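The `retrieval.yaml` knobs imply a two-stage pipeline: recall `dense_topk` candidates with the embedding model, then keep only the top `rerank_topk` after reranking. Below is a sketch under that assumption; `dense_search` and `rerank` are placeholder callables, not functions from this repository.

```python
# Sketch of the two-stage retrieval implied by configs/retrieval.yaml.
# dense_search and rerank are placeholder callables, not part of this commit.
from typing import Callable
import yaml

def retrieve(
    query: str,
    dense_search: Callable[[str, int], list[str]],
    rerank: Callable[[str, list[str]], list[str]],
    cfg_path: str = "configs/retrieval.yaml",
) -> list[str]:
    with open(cfg_path, "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f)["retrieval"]
    # Stage 1: dense recall of the initial candidate pool (dense_topk: 64)
    candidates = dense_search(query, cfg["dense_topk"])
    # Stage 2: rerank, then keep only what is fed to the LLM (rerank_topk: 8)
    return rerank(query, candidates)[: cfg["rerank_topk"]]
```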
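As for the four `*_full_sft.yaml` files: their key set (`stage`, `finetuning_type`, `template`, `cutoff_len`, `val_size`, and so on) appears to follow the LLaMA-Factory training-config schema, which suggests each file is passed whole to that framework's training entry point rather than parsed by this repository. In the two H200-tagged configs the arithmetic in the comment holds: a per-device batch of 32 on 4 GPUs with no gradient accumulation gives an effective batch of 128.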
