path: root/scripts/run_significance.py
author    YurenHao0426 <Blackhao0426@gmail.com>  2026-04-05 10:31:36 -0500
committer YurenHao0426 <Blackhao0426@gmail.com>  2026-04-05 10:31:36 -0500
commit    ea4a8f837e81b5e5fab6086cb3014c711c5e58e9 (patch)
tree      11638546dc91c97815e5bdab8fa0b587481d0a3c /scripts/run_significance.py
parent    8fe28101366dd32562b8c5534d7fe359b252bdf3 (diff)
Add PEFT baselines, ICL baselines, profile-based, and unified pipeline
New baselines:
- baselines/peft_baseline.py: LoRA, Tiny LoRA, VeRA (per-user PEFT adaptation)
- baselines/dense_retrieval.py: Dense retrieval ICL (sentence-transformers)
- baselines/profile_based.py: LLM-generated user profile conditioned generation

New scripts:
- scripts/run_all_methods.py: Unified pipeline running all 9 methods with per-method directory output structure (method/per_user.json)
- scripts/run_peft_baselines.py: PEFT-only evaluation (legacy)
- scripts/run_significance.py: Significance tests (UPH+Base per-user)
- scripts/run_uph_base_per_user.py: UPH+Base with full per-user data
- scripts/compute_bertscore.py: BERTScore from saved predictions
- scripts/significance_test.py: Standalone significance test framework

Updated .gitignore to exclude outputs/ directory.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Diffstat (limited to 'scripts/run_significance.py')
-rw-r--r--  scripts/run_significance.py  265
1 file changed, 265 insertions, 0 deletions
diff --git a/scripts/run_significance.py b/scripts/run_significance.py
new file mode 100644
index 0000000..c8b2392
--- /dev/null
+++ b/scripts/run_significance.py
@@ -0,0 +1,265 @@
+"""Run UPH+Base with per-user scores, then compute significance tests vs PEFT baselines.
+
+Loads PEFT per-user data from run_peft_baselines.py output, runs UPH and Base
+to get per-user ROUGE-L and METEOR scores, then computes paired significance tests.
+
+Usage:
+ python scripts/run_significance.py --task review --setting user --device cuda:0
+"""
+
+import sys
+import os
+import json
+import time
+import numpy as np
+import torch
+from scipy import stats
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from data.longlamp import load_longlamp, select_k_profile_items
+from data.templates import build_query_prompt
+from models.qwen_wrapper import QwenWrapper
+from models.cvh import UnconditionalHead
+from adapt.cache_hidden import cache_support_hidden_states
+from adapt.fit_theta import fit_theta
+from eval.metrics import compute_rouge, compute_meteor
+
+
+def per_user_scores(predictions, references):
+ """Compute per-example ROUGE-L and METEOR."""
+ rl_scores = []
+ meteor_scores = []
+ for pred, ref in zip(predictions, references):
+ r = compute_rouge([pred], [ref])
+ m = compute_meteor([pred], [ref])
+ rl_scores.append(r['rougeL'])
+ meteor_scores.append(m)
+ return rl_scores, meteor_scores
+
+
+def generate_base(wrapper, prompt, max_new_tokens=512, min_new_tokens=128):
+ chat_messages = [
+ {"role": "system", "content": "You are a helpful writing assistant."},
+ {"role": "user", "content": prompt},
+ ]
+ prompt_text = wrapper.tokenizer.apply_chat_template(
+ chat_messages, tokenize=False, add_generation_prompt=True
+ )
+ input_ids = wrapper.tokenizer.encode(prompt_text, return_tensors="pt").to(wrapper.device)
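+    # Greedy decoding: do_sample=False, with temperature/top_p passed as None
+    # (presumably to silence HF warnings about unused sampling parameters);
+    # min_new_tokens keeps long-form outputs from collapsing to a short reply.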
+ with torch.no_grad():
+ outputs = wrapper.model.generate(
+ input_ids,
+ max_new_tokens=max_new_tokens,
+ min_new_tokens=min_new_tokens,
+ temperature=None, top_p=None, do_sample=False,
+ pad_token_id=wrapper.tokenizer.pad_token_id,
+ )
+ return wrapper.tokenizer.decode(outputs[0, input_ids.shape[1]:], skip_special_tokens=True)
+
+
+def run_base(wrapper, examples, N):
+ preds = []
+ for i, ex in enumerate(examples):
+ prompt = build_query_prompt(ex['query_input'], ex['task'])
+ pred = generate_base(wrapper, prompt)
+ preds.append(pred)
+ if (i + 1) % 40 == 0:
+ print(f" Base: {i+1}/{N}")
+ return preds
+
+
+def run_uph(wrapper, examples, support_sets, N, device):
+ H = wrapper.hidden_size
+ uncond = UnconditionalHead(H, d=64, alpha=0.1, basis_seed=42).to(device)
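+    # Pass the LM head bias through to fit_theta when the model has one,
+    # presumably so logits recomputed from cached hidden states match the
+    # base model's; Qwen2.5's lm_head is typically bias-free, leaving None.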
+ lm_head_bias = None
+ if hasattr(wrapper.model.lm_head, 'bias') and wrapper.model.lm_head.bias is not None:
+ lm_head_bias = wrapper.model.lm_head.bias.data
+
+ preds = []
+ for i, (ex, support) in enumerate(zip(examples, support_sets)):
+ cached_h = cache_support_hidden_states(wrapper, support, ex['task'])
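+        # Nothing could be cached for this user (e.g., an empty support
+        # set): fall back to plain base generation.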
+ if not cached_h:
+ prompt = build_query_prompt(ex['query_input'], ex['task'])
+ pred = generate_base(wrapper, prompt)
+ preds.append(pred)
+ continue
+
+ theta = fit_theta(
+ cached_h=cached_h,
+ lm_head_weight=wrapper.lm_head_weight,
+ lm_head_bias=lm_head_bias,
+ head_module=uncond,
+ d=64, lr=0.05, steps=30, beta=0.05, lam=1e-4,
+ max_grad_norm=5.0, device=device,
+ )
+
+ prompt = build_query_prompt(ex['query_input'], ex['task'])
+ pred = wrapper.generate_with_head_blended(
+ prompt, theta, uncond.forward_fn,
+ blend_gamma=0.5, max_new_tokens=512,
+ min_new_tokens=128, temperature=0.0,
+ )
+ preds.append(pred)
+ del cached_h, theta
+ torch.cuda.empty_cache()
+
+ if (i + 1) % 40 == 0:
+ print(f" UPH: {i+1}/{N}")
+ return preds
+
+
+def paired_tests(scores_a, scores_b, name_a, name_b):
+ a = np.array(scores_a)
+ b = np.array(scores_b)
+ diff = a - b
+
+ mean_a, mean_b = np.mean(a), np.mean(b)
+ mean_diff = np.mean(diff)
+
+ t_stat, t_pval = stats.ttest_rel(a, b)
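+    # scipy's wilcoxon raises ValueError in degenerate cases (e.g., all
+    # paired differences are zero); report NaN rather than crashing.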
+ try:
+ w_stat, w_pval = stats.wilcoxon(a, b)
+ except ValueError:
+ w_stat, w_pval = float('nan'), float('nan')
+
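+    # Normal-approximation 95% CI on the mean paired difference
+    # (mean ± 1.96 * SEM).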
+ se = stats.sem(diff)
+ ci_low = mean_diff - 1.96 * se
+ ci_high = mean_diff + 1.96 * se
+
+ print(f"\n {name_a} vs {name_b}:")
+ print(f" Mean {name_a}: {mean_a:.4f}, Mean {name_b}: {mean_b:.4f}, Diff: {mean_diff:+.4f}")
+ print(f" 95% CI: [{ci_low:+.4f}, {ci_high:+.4f}]")
+ print(f" Paired t-test: t={t_stat:.3f}, p={t_pval:.2e}")
+ print(f" Wilcoxon: W={w_stat:.0f}, p={w_pval:.2e}")
+
+ return {
+ 'mean_a': float(mean_a), 'mean_b': float(mean_b),
+ 'mean_diff': float(mean_diff),
+ 'ci_low': float(ci_low), 'ci_high': float(ci_high),
+ 't_stat': float(t_stat), 't_pval': float(t_pval),
+ 'w_stat': float(w_stat), 'w_pval': float(w_pval),
+ }
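+
+# Example call (hypothetical scores, for illustration only):
+#   paired_tests([0.21, 0.25, 0.19], [0.18, 0.24, 0.17], 'UPH', 'Base')
+# prints the comparison and returns the stats as a JSON-serializable dict.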
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--num_eval', type=int, default=200)
+ parser.add_argument('--task', type=str, default='review', choices=['review', 'topic'])
+ parser.add_argument('--setting', type=str, default='user', choices=['user', 'temporal'])
+ parser.add_argument('--device', type=str, default='cuda:0')
+ parser.add_argument('--peft_dir', type=str, default='outputs/peft_baselines')
+ parser.add_argument('--output_dir', type=str, default='outputs/significance')
+ args = parser.parse_args()
+
+ N = args.num_eval
+ device = args.device
+ task = args.task
+ setting = args.setting
+
+ config_map = {
+ ('review', 'user'): 'product_review_user',
+ ('review', 'temporal'): 'product_review_temporal',
+ ('topic', 'user'): 'topic_writing_user',
+ ('topic', 'temporal'): 'topic_writing_temporal',
+ }
+ config_name = config_map[(task, setting)]
+
+ # Load PEFT per-user data
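+    # (the K4/N filename convention is assumed to match what
+    # run_peft_baselines.py writes)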
+ peft_path = os.path.join(args.peft_dir, f"{task}_{setting}_K4_N{N}_peft_per_user.json")
+ if not os.path.exists(peft_path):
+ print(f"PEFT per-user data not found: {peft_path}")
+ print("Run run_peft_baselines.py first.")
+ return
+
+ with open(peft_path) as f:
+ peft_data = json.load(f)
+
+ # Extract PEFT per-user R-L scores
+ peft_rl = {}
+ peft_meteor = {}
+ for method, users in peft_data['per_user'].items():
+ peft_rl[method] = [u['metrics']['rougeL'] for u in users]
+ peft_meteor[method] = [u['metrics']['meteor'] for u in users]
+
+ print(f"=== Significance Tests: {task}_{setting}, N={N} ===")
+ print(f"Loaded PEFT per-user data: {list(peft_rl.keys())}")
+
+ # Load data and run UPH + Base
+ print("\nLoading data...")
+ examples = load_longlamp(config_name, split='val')[:N]
+ K = 4
+ support_sets = [select_k_profile_items(ex['profile_items'], K, seed=0) for ex in examples]
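+    # Fixed seed makes support selection deterministic; the paired tests
+    # below assume the PEFT run scored the same users in the same order.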
+ references = [ex['target_output'] for ex in examples]
+
+ print(f"Loading model on {device}...")
+ wrapper = QwenWrapper('Qwen/Qwen2.5-1.5B-Instruct', device=device)
+
+ # Run Base
+ print("\n--- Base ---")
+ base_preds = run_base(wrapper, examples, N)
+ base_rl, base_meteor = per_user_scores(base_preds, references)
+ print(f" Mean R-L: {np.mean(base_rl):.4f}, METEOR: {np.mean(base_meteor):.4f}")
+
+ # Run UPH
+ print("\n--- UPH ---")
+ uph_preds = run_uph(wrapper, examples, support_sets, N, device)
+ uph_rl, uph_meteor = per_user_scores(uph_preds, references)
+ print(f" Mean R-L: {np.mean(uph_rl):.4f}, METEOR: {np.mean(uph_meteor):.4f}")
+
+ # Significance tests
+ all_rl = {'Base': base_rl, 'UPH': uph_rl}
+ all_rl.update(peft_rl)
+
+ all_meteor = {'Base': base_meteor, 'UPH': uph_meteor}
+ all_meteor.update(peft_meteor)
+
+ print("\n" + "=" * 80)
+ print("SIGNIFICANCE TESTS — ROUGE-L (paired)")
+ print("=" * 80)
+
+ rl_tests = {}
+ comparisons = [
+ ('UPH', 'Base'),
+ ('UPH', 'lora'),
+ ('UPH', 'tiny_lora'),
+ ('UPH', 'vera'),
+ ]
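+    # 'lora'/'tiny_lora'/'vera' are the method keys saved by
+    # run_peft_baselines.py; a comparison is skipped if its key is absent.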
+ for name_a, name_b in comparisons:
+ if name_b in all_rl:
+ r = paired_tests(all_rl[name_a], all_rl[name_b], name_a, name_b)
+ rl_tests[f'{name_a}_vs_{name_b}'] = r
+
+ print("\n" + "=" * 80)
+ print("SIGNIFICANCE TESTS — METEOR (paired)")
+ print("=" * 80)
+
+ meteor_tests = {}
+ for name_a, name_b in comparisons:
+ if name_b in all_meteor:
+ r = paired_tests(all_meteor[name_a], all_meteor[name_b], name_a, name_b)
+ meteor_tests[f'{name_a}_vs_{name_b}'] = r
+
+ # Save
+ os.makedirs(args.output_dir, exist_ok=True)
+ output_path = os.path.join(args.output_dir, f'{task}_{setting}_significance.json')
+
+ save_data = {
+ 'per_user_rougeL': {k: [float(x) for x in v] for k, v in all_rl.items()},
+ 'per_user_meteor': {k: [float(x) for x in v] for k, v in all_meteor.items()},
+ 'significance_rougeL': rl_tests,
+ 'significance_meteor': meteor_tests,
+ 'num_examples': N,
+ 'task': task,
+ 'setting': setting,
+ 'base_predictions': base_preds,
+ 'uph_predictions': uph_preds,
+ }
+ with open(output_path, 'w') as f:
+ json.dump(save_data, f, indent=2, default=str)
+ print(f"\nResults saved to {output_path}")
+
+
+if __name__ == '__main__':
+ main()