summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--scripts/run_all_methods.py16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/scripts/run_all_methods.py b/scripts/run_all_methods.py
index db31a56..66b9cb7 100644
--- a/scripts/run_all_methods.py
+++ b/scripts/run_all_methods.py
@@ -100,11 +100,11 @@ class MethodRunner:
'lora': lambda *a: self._run_peft(*a, config=get_lora_config(rank=8), lr=1e-4, desc='LoRA r=8'),
'tiny_lora': lambda *a: self._run_peft(*a, config=get_tiny_lora_config(rank=1), lr=1e-4, desc='Tiny LoRA r=1'),
'vera': lambda *a: self._run_peft(*a, config=get_vera_config(rank=256), lr=1e-3, desc='VeRA r=256'),
- 'prompt_tuning_5': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(5), lr=1e-2, desc='PromptTuning L=5'),
- 'prompt_tuning_10': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(10), lr=1e-2, desc='PromptTuning L=10'),
- 'prompt_tuning_20': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(20), lr=1e-2, desc='PromptTuning L=20'),
- 'prefix_tuning_5': lambda *a: self._run_peft(*a, config=get_prefix_tuning_config(5), lr=1e-3, desc='PrefixTuning L=5'),
- 'prefix_tuning_10': lambda *a: self._run_peft(*a, config=get_prefix_tuning_config(10), lr=1e-3, desc='PrefixTuning L=10'),
+ 'prompt_tuning_5': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(5), lr=1e-3, desc='PromptTuning L=5', steps=100),
+ 'prompt_tuning_10': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(10), lr=1e-3, desc='PromptTuning L=10', steps=100),
+ 'prompt_tuning_20': lambda *a: self._run_peft(*a, config=get_prompt_tuning_config(20), lr=1e-3, desc='PromptTuning L=20', steps=100),
+ 'prefix_tuning_5': lambda *a: self._run_peft(*a, config=get_prefix_tuning_config(5), lr=5e-4, desc='PrefixTuning L=5', steps=100),
+ 'prefix_tuning_10': lambda *a: self._run_peft(*a, config=get_prefix_tuning_config(10), lr=5e-4, desc='PrefixTuning L=10', steps=100),
}
if method_name not in dispatch:
@@ -261,9 +261,9 @@ class MethodRunner:
return per_user
def _run_peft(self, examples, support_sets, references, support_texts, N,
- config, lr, desc):
+ config, lr, desc, steps=30):
baseline = PEFTBaseline(self.wrapper, config)
- print(f" Trainable params: {baseline.n_params:,} ({baseline.n_bytes:,} bytes)")
+ print(f" Trainable params: {baseline.n_params:,} ({baseline.n_bytes:,} bytes), steps={steps}, lr={lr}")
per_user = []
for i, (ex, support) in enumerate(zip(examples, support_sets)):
@@ -272,7 +272,7 @@ class MethodRunner:
support_items=support,
query_input=ex['query_input'],
task=ex['task'],
- lr=lr, steps=30,
+ lr=lr, steps=steps,
max_new_tokens=512, min_new_tokens=128,
)
entry = self._make_per_user_entry(