diff options
| author | YurenHao0426 <blackhao0426@gmail.com> | 2025-12-17 04:29:37 -0600 |
|---|---|---|
| committer | YurenHao0426 <blackhao0426@gmail.com> | 2025-12-17 04:29:37 -0600 |
| commit | e43b3f8aa36c198b95c1e46bea2eaf3893b13dc3 (patch) | |
| tree | 6ce8a00d2f8b9ebd83c894a27ea01ac50cfb2ff5 /configs/qwen3_0.6b_full_sft.yaml | |
Diffstat (limited to 'configs/qwen3_0.6b_full_sft.yaml')
| -rw-r--r-- | configs/qwen3_0.6b_full_sft.yaml | 35 |
1 file changed, 35 insertions, 0 deletions
diff --git a/configs/qwen3_0.6b_full_sft.yaml b/configs/qwen3_0.6b_full_sft.yaml
new file mode 100644
index 0000000..e41a419
--- /dev/null
+++ b/configs/qwen3_0.6b_full_sft.yaml
@@ -0,0 +1,35 @@
+### Qwen3-0.6B Full SFT Config (H200x4 Optimized)
+model_name_or_path: Qwen/Qwen3-0.6B
+stage: sft
+do_train: true
+finetuning_type: full
+freeze_trainable_layers: 0
+
+dataset: preference_extractor_train
+template: qwen
+cutoff_len: 1024
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+output_dir: saves/qwen3-0.6b-full-sft-h200
+logging_steps: 5
+save_strategy: steps
+save_steps: 200
+plot_loss: true
+overwrite_output_dir: true
+
+# H200x4 Configuration
+# Total Batch Size = 32 * 4 * 1 = 128
+per_device_train_batch_size: 32
+gradient_accumulation_steps: 1
+learning_rate: 2.0e-5
+num_train_epochs: 1.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.05
+bf16: true
+flash_attn: fa2
+
+val_size: 0.01
+per_device_eval_batch_size: 32
+eval_strategy: steps
+eval_steps: 200
