author     YurenHao0426 <Blackhao0426@gmail.com>  2026-04-05 17:41:30 -0500
committer  YurenHao0426 <Blackhao0426@gmail.com>  2026-04-05 17:41:30 -0500
commit     ab5fae5decb7d24aafd16d855885c1c99e51cf7f (patch)
tree       fe17a62d77c151f7cd5ba174f76dd766ac6bf1e1 /baselines/peft_baseline.py
parent     6139a848c3b9d5d6c1322cf8acadf2baacee9e8a (diff)
Fix PromptTuning/PrefixTuning cleanup crash and tune learning rates
- peft_baseline.py: Fix cleanup() to handle PromptTuning/PrefixTuning, which
  don't support unload(). Falls back to base_model access.
- run_all_methods.py: Reduce lr from 0.3 to 0.01 for PromptTuning and from
  0.01 to 0.001 for PrefixTuning. The previous lrs caused R-L = 0.03 (broken).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
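For context on the crash: in Hugging Face peft, unload() is implemented by
LoRA-style tuner wrappers, while prompt-learning PeftModels keep the raw
transformers model as base_model, so the call falls through to a model that
has no such method. A minimal sketch of the failure mode, assuming the
peft/transformers APIs and using "gpt2" purely as an illustrative model:

    from transformers import AutoModelForCausalLM
    from peft import get_peft_model, LoraConfig, PromptTuningConfig, TaskType

    # LoRA: the PeftModel wraps the model in a tuner that implements unload().
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    lora = get_peft_model(model, LoraConfig(task_type=TaskType.CAUSAL_LM))
    lora.unload()  # OK: returns the restored base model

    # Prompt tuning: base_model is the raw transformers model, so unload()
    # resolves to an attribute that does not exist.
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    pt = get_peft_model(
        model,
        PromptTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=8),
    )
    try:
        pt.unload()
    except AttributeError:
        model = pt.base_model  # the fallback this commit adds to cleanup()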
Diffstat (limited to 'baselines/peft_baseline.py')
-rw-r--r--  baselines/peft_baseline.py  11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/baselines/peft_baseline.py b/baselines/peft_baseline.py
index 246385f..af1b71c 100644
--- a/baselines/peft_baseline.py
+++ b/baselines/peft_baseline.py
@@ -214,7 +214,14 @@ class PEFTBaseline:
 
     def cleanup(self):
         """Remove adapter and restore wrapper.model to the original base model."""
-        base_model = self.peft_model.unload()
-        self.wrapper.model = base_model
+        try:
+            base_model = self.peft_model.unload()
+            self.wrapper.model = base_model
+        except (AttributeError, NotImplementedError):
+            # unload() not supported for PromptTuning/PrefixTuning
+            # Access base model directly
+            self.wrapper.model = self.peft_model.base_model
+            if hasattr(self.wrapper.model, 'model'):
+                self.wrapper.model = self.wrapper.model.model
         del self.peft_model
         torch.cuda.empty_cache()
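With the fix in place, callers can treat cleanup() uniformly across adapter
types. A hedged usage sketch: PEFTBaseline, wrapper, and the train()/evaluate()
signatures below are assumptions based on this file, and the lrs mirror the
run_all_methods.py change in this commit:

    results = {}
    for method, lr in [("prompt_tuning", 0.01), ("prefix_tuning", 0.001)]:
        baseline = PEFTBaseline(wrapper, method=method)  # assumed constructor
        baseline.train(lr=lr)                  # tuned lrs from run_all_methods.py
        results[method] = baseline.evaluate()  # assumed method
        baseline.cleanup()  # restores wrapper.model even without unload()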