| | | |
|---|---|---|
| author | YurenHao0426 <Blackhao0426@gmail.com> | 2026-04-05 17:41:30 -0500 |
| committer | YurenHao0426 <Blackhao0426@gmail.com> | 2026-04-05 17:41:30 -0500 |
| commit | ab5fae5decb7d24aafd16d855885c1c99e51cf7f (patch) | |
| tree | fe17a62d77c151f7cd5ba174f76dd766ac6bf1e1 /scripts | |
| parent | 6139a848c3b9d5d6c1322cf8acadf2baacee9e8a (diff) | |
Fix PromptTuning/PrefixTuning cleanup crash and tune learning rates
- peft_baseline.py: fix cleanup() to handle PromptTuning/PrefixTuning,
  which don't support unload(); fall back to accessing the base model
  directly (see the sketch below).
- run_all_methods.py: reduce lr from 0.3 to 0.01 for PromptTuning and
  from 0.01 to 0.001 for PrefixTuning; the previous rates produced
  ROUGE-L = 0.03 (broken runs).
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
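For context, here is a minimal sketch of the cleanup() fallback described above. It assumes the runner holds a PEFT-wrapped model in `self.model`; the class name `PeftRunner` and the attribute name are assumptions, and the actual peft_baseline.py may differ. It relies only on PEFT's public `PeftModel.unload()` and `PeftModel.get_base_model()` methods.

```python
import gc
import torch

class PeftRunner:  # hypothetical stand-in for the class in peft_baseline.py
    def cleanup(self):
        """Release the PEFT wrapper and return GPU memory between runs."""
        try:
            # LoRA/VeRA-style adapters implement unload(), which strips the
            # injected adapter layers and returns the original base model.
            self.model = self.model.unload()
        except AttributeError:
            # PromptTuning/PrefixTuning wrap the base model without injecting
            # layers, so unload() does not exist on the wrapped model; fetch
            # the underlying base model directly instead.
            self.model = self.model.get_base_model()
        # Drop the last reference so the CUDA allocator can free the weights.
        self.model = None
        gc.collect()
        torch.cuda.empty_cache()
```

Catching AttributeError (rather than checking the config type) keeps the fallback robust if further prompt-learning methods are added to the dispatch table later.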
Diffstat (limited to 'scripts')
 scripts/run_all_methods.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
```diff
diff --git a/scripts/run_all_methods.py b/scripts/run_all_methods.py
index 1502ff3..db31a56 100644
--- a/scripts/run_all_methods.py
+++ b/scripts/run_all_methods.py
@@ -100,11 +100,11 @@ class MethodRunner:
             'lora': lambda *a: self._run_peft(*a, config=get_lora_config(rank=8), lr=1e-4, desc='LoRA r=8'),
             'tiny_lora': lambda *a: self._run_peft(*a, config=get_tiny_lora_config(rank=1), lr=1e-4, desc='Tiny LoRA r=1'),
             'vera': lambda *a: self._run_peft(*a, config=get_vera_config(rank=256), lr=1e-3, desc='VeRA r=256'),
-            'prompt_tuning_5': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(5), lr=3e-1, desc='PromptTuning L=5'),
-            'prompt_tuning_10': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(10), lr=3e-1, desc='PromptTuning L=10'),
-            'prompt_tuning_20': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(20), lr=3e-1, desc='PromptTuning L=20'),
-            'prefix_tuning_5': lambda *a: self._run_peft(*a, config=get_prefix_tuning_config(5), lr=1e-2, desc='PrefixTuning L=5'),
-            'prefix_tuning_10': lambda *a: self._run_peft(*a, config=get_prefix_tuning_config(10), lr=1e-2, desc='PrefixTuning L=10'),
+            'prompt_tuning_5': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(5), lr=1e-2, desc='PromptTuning L=5'),
+            'prompt_tuning_10': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(10), lr=1e-2, desc='PromptTuning L=10'),
+            'prompt_tuning_20': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(20), lr=1e-2, desc='PromptTuning L=20'),
+            'prefix_tuning_5': lambda *a: self._run_peft(*a, config=get_prefix_tuning_config(5), lr=1e-3, desc='PrefixTuning L=5'),
+            'prefix_tuning_10': lambda *a: self._run_peft(*a, config=get_prefix_tuning_config(10), lr=1e-3, desc='PrefixTuning L=10'),
         }

         if method_name not in dispatch:
```
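The `get_prompt_tuning_config` / `get_prefix_tuning_config` helpers referenced in the dispatch table are defined elsewhere in the repo. A plausible sketch using PEFT's public config classes follows; the task type is an assumption inferred from the ROUGE-L metric mentioned in the commit message, and the real helpers may set additional fields.

```python
from peft import PrefixTuningConfig, PromptTuningConfig, TaskType

def get_prompt_tuning_config(num_virtual_tokens: int) -> PromptTuningConfig:
    # L=5/10/20 in the dispatch table is the number of trainable soft tokens
    # prepended to the input embeddings.
    return PromptTuningConfig(
        task_type=TaskType.SEQ_2_SEQ_LM,  # assumption: seq2seq, given ROUGE-L
        num_virtual_tokens=num_virtual_tokens,
    )

def get_prefix_tuning_config(num_virtual_tokens: int) -> PrefixTuningConfig:
    # Prefix tuning trains key/value prefixes at every attention layer, so it
    # has more trainable parameters per token than prompt tuning, which is
    # consistent with its smaller post-fix learning rate (1e-3 vs. 1e-2).
    return PrefixTuningConfig(
        task_type=TaskType.SEQ_2_SEQ_LM,  # assumption, as above
        num_virtual_tokens=num_virtual_tokens,
    )
```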
