path: root/scripts/run_all_methods.py
author     YurenHao0426 <Blackhao0426@gmail.com>    2026-04-05 10:31:36 -0500
committer  YurenHao0426 <Blackhao0426@gmail.com>    2026-04-05 10:31:36 -0500
commit     ea4a8f837e81b5e5fab6086cb3014c711c5e58e9 (patch)
tree       11638546dc91c97815e5bdab8fa0b587481d0a3c /scripts/run_all_methods.py
parent     8fe28101366dd32562b8c5534d7fe359b252bdf3 (diff)
Add PEFT baselines, ICL baselines, profile-based, and unified pipeline
New baselines:
- baselines/peft_baseline.py: LoRA, Tiny LoRA, VeRA (per-user PEFT adaptation)
- baselines/dense_retrieval.py: Dense retrieval ICL (sentence-transformers)
- baselines/profile_based.py: LLM-generated user profile conditioned generation

New scripts:
- scripts/run_all_methods.py: Unified pipeline running all 9 methods with per-method directory output structure (method/per_user.json)
- scripts/run_peft_baselines.py: PEFT-only evaluation (legacy)
- scripts/run_significance.py: Significance tests (UPH+Base per-user)
- scripts/run_uph_base_per_user.py: UPH+Base with full per-user data
- scripts/compute_bertscore.py: BERTScore from saved predictions
- scripts/significance_test.py: Standalone significance test framework

Updated .gitignore to exclude outputs/ directory.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Diffstat (limited to 'scripts/run_all_methods.py')
-rw-r--r--  scripts/run_all_methods.py  438
1 file changed, 438 insertions, 0 deletions
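
For context on the per-method output structure described in the commit message, a minimal sketch (not part of the commit) of how the saved results can be inspected, assuming the script's default arguments (--output_dir outputs/unified, --task review, --setting user, --K 4) and that the uph method was among those run; adjust the paths for other runs.

    import json, os

    exp_dir = "outputs/unified/review_user_K4"   # {output_dir}/{task}_{setting}_K{K}

    # Aggregate-only summary across all methods run in this experiment
    with open(os.path.join(exp_dir, "summary.json")) as f:
        summary = json.load(f)
    print(summary["aggregate"])   # method -> rougeL / meteor / sfd_nolen / avg_len

    # Full per-user data for a single method (predictions, references, scores, metadata)
    with open(os.path.join(exp_dir, "uph", "per_user.json")) as f:
        uph = json.load(f)
    print(len(uph["per_user"]), uph["aggregate"]["rougeL"])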
diff --git a/scripts/run_all_methods.py b/scripts/run_all_methods.py
new file mode 100644
index 0000000..c5eb523
--- /dev/null
+++ b/scripts/run_all_methods.py
@@ -0,0 +1,438 @@
+"""Unified evaluation pipeline: all methods, all per-user data saved.
+
+Runs Base, UPH, PEFT baselines, and ICL baselines in one script.
+Saves complete per-user data (predictions, references, scores, metadata) for ALL methods.
+
+Usage:
+ python scripts/run_all_methods.py --task review --setting user --device cuda:0
+ python scripts/run_all_methods.py --task review --setting user --methods base,uph,lora
+"""
+
+import sys
+import os
+import json
+import time
+import numpy as np
+import torch
+from scipy import stats
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from data.longlamp import load_longlamp, select_k_profile_items
+from data.templates import build_query_prompt, build_prompt_with_examples
+from data.style_features import compute_sfd, compute_feature_deltas
+from models.qwen_wrapper import QwenWrapper
+from models.cvh import UnconditionalHead
+from adapt.cache_hidden import cache_support_hidden_states
+from adapt.fit_theta import fit_theta
+from baselines.peft_baseline import (
+ PEFTBaseline, get_lora_config, get_tiny_lora_config, get_vera_config,
+)
+from baselines.bm25_top1 import bm25_select_top1
+from baselines.dense_retrieval import DenseRetriever
+from baselines.profile_based import generate_profile, build_profile_conditioned_prompt
+from eval.metrics import compute_rouge, compute_meteor
+
+
+ALL_METHODS = [
+ 'base', 'uph',
+ 'prompt_all_k', 'bm25_top1', 'dense_top1', 'profile_based',
+ 'lora', 'tiny_lora', 'vera',
+]
+
+
+def compute_per_user_metrics(pred, ref, support_texts):
+ r = compute_rouge([pred], [ref])
+ m = compute_meteor([pred], [ref])
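+    # Fall back to a placeholder for blank predictions so the style-feature computations below receive non-empty text.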
+ p = pred if pred.strip() else "empty"
+ sfd_all = compute_sfd(p, support_texts, exclude_length=False)
+ sfd_nolen = compute_sfd(p, support_texts, exclude_length=True)
+ deltas = compute_feature_deltas(p, support_texts)
+ return {
+ 'rouge1': r['rouge1'],
+ 'rougeL': r['rougeL'],
+ 'meteor': m,
+ 'sfd_all': sfd_all,
+ 'sfd_nolen': sfd_nolen,
+ 'length': len(pred.split()),
+ 'feature_deltas': {k: v['delta'] for k, v in deltas.items()},
+ }
+
+
+def generate_greedy(wrapper, prompt, max_new_tokens=512, min_new_tokens=128):
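+    # Greedy decoding (do_sample=False) on a chat-templated prompt; min/max_new_tokens bound the output length.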
+ chat_messages = [
+ {"role": "system", "content": "You are a helpful writing assistant."},
+ {"role": "user", "content": prompt},
+ ]
+ prompt_text = wrapper.tokenizer.apply_chat_template(
+ chat_messages, tokenize=False, add_generation_prompt=True
+ )
+ input_ids = wrapper.tokenizer.encode(prompt_text, return_tensors="pt").to(wrapper.device)
+ with torch.no_grad():
+ outputs = wrapper.model.generate(
+ input_ids,
+ max_new_tokens=max_new_tokens, min_new_tokens=min_new_tokens,
+ temperature=None, top_p=None, do_sample=False,
+ pad_token_id=wrapper.tokenizer.pad_token_id,
+ )
+ return wrapper.tokenizer.decode(outputs[0, input_ids.shape[1]:], skip_special_tokens=True)
+
+
+class MethodRunner:
+ """Encapsulates running a single method across all examples."""
+
+ def __init__(self, wrapper, device, dense_retriever=None):
+ self.wrapper = wrapper
+ self.device = device
+ self.dense_retriever = dense_retriever
+
+ def run(self, method_name, examples, support_sets, references, support_texts, N):
+ dispatch = {
+ 'base': self._run_base,
+ 'uph': self._run_uph,
+ 'prompt_all_k': self._run_prompt_all_k,
+ 'bm25_top1': self._run_bm25_top1,
+ 'dense_top1': self._run_dense_top1,
+ 'profile_based': self._run_profile_based,
+ 'lora': lambda *a: self._run_peft(*a, config=get_lora_config(rank=8), lr=1e-4, desc='LoRA r=8'),
+ 'tiny_lora': lambda *a: self._run_peft(*a, config=get_tiny_lora_config(rank=1), lr=1e-4, desc='Tiny LoRA r=1'),
+ 'vera': lambda *a: self._run_peft(*a, config=get_vera_config(rank=256), lr=1e-3, desc='VeRA r=256'),
+ }
+
+ if method_name not in dispatch:
+ print(f"Unknown method: {method_name}")
+ return []
+
+ print(f"\n--- {method_name} ---")
+ per_user = dispatch[method_name](examples, support_sets, references, support_texts, N)
+
+ avg_rl = np.mean([u['metrics']['rougeL'] for u in per_user])
+ avg_sfd = np.mean([u['metrics']['sfd_nolen'] for u in per_user])
+ print(f" Mean R-L: {avg_rl:.4f}, SFD_-len: {avg_sfd:.4f}")
+ return per_user
+
+ def _make_per_user_entry(self, ex, ref, stexts, K, pred, timing, extra=None):
+ metrics = compute_per_user_metrics(pred, ref, stexts)
+ entry = {
+ 'example_id': ex['example_id'],
+ 'user_id': ex['user_id'],
+ 'prediction': pred,
+ 'reference': ref,
+ 'support_texts': stexts,
+ 'K': K,
+ 'metrics': metrics,
+ **timing,
+ }
+ if extra:
+ entry.update(extra)
+ return entry
+
+ def _run_base(self, examples, support_sets, references, support_texts, N):
+ per_user = []
+ for i, ex in enumerate(examples):
+ t0 = time.time()
+ prompt = build_query_prompt(ex['query_input'], ex['task'])
+ pred = generate_greedy(self.wrapper, prompt)
+ entry = self._make_per_user_entry(
+ ex, references[i], support_texts[i], len(support_sets[i]),
+ pred, {'gen_time': time.time() - t0}
+ )
+ per_user.append(entry)
+ if (i + 1) % 40 == 0:
+ print(f" {i+1}/{N}")
+ return per_user
+
+ def _run_prompt_all_k(self, examples, support_sets, references, support_texts, N):
+ per_user = []
+ for i, (ex, support) in enumerate(zip(examples, support_sets)):
+ t0 = time.time()
+ prompt = build_prompt_with_examples(ex['query_input'], support, ex['task'])
+ pred = generate_greedy(self.wrapper, prompt)
+ entry = self._make_per_user_entry(
+ ex, references[i], support_texts[i], len(support),
+ pred, {'gen_time': time.time() - t0}
+ )
+ per_user.append(entry)
+ if (i + 1) % 40 == 0:
+ print(f" {i+1}/{N}")
+ return per_user
+
+ def _run_bm25_top1(self, examples, support_sets, references, support_texts, N):
+ per_user = []
+ for i, (ex, support) in enumerate(zip(examples, support_sets)):
+ t0 = time.time()
+ selected = bm25_select_top1(ex['query_input'], support)
+ prompt = build_prompt_with_examples(ex['query_input'], selected, ex['task'])
+ pred = generate_greedy(self.wrapper, prompt)
+ entry = self._make_per_user_entry(
+ ex, references[i], support_texts[i], len(support),
+ pred, {'gen_time': time.time() - t0}
+ )
+ per_user.append(entry)
+ if (i + 1) % 40 == 0:
+ print(f" {i+1}/{N}")
+ return per_user
+
+ def _run_dense_top1(self, examples, support_sets, references, support_texts, N):
+ if self.dense_retriever is None:
+ self.dense_retriever = DenseRetriever(device='cpu')
+ per_user = []
+ for i, (ex, support) in enumerate(zip(examples, support_sets)):
+ t0 = time.time()
+ selected = self.dense_retriever.retrieve_top_k(ex['query_input'], support, k=1)
+ prompt = build_prompt_with_examples(ex['query_input'], selected, ex['task'])
+ pred = generate_greedy(self.wrapper, prompt)
+ entry = self._make_per_user_entry(
+ ex, references[i], support_texts[i], len(support),
+ pred, {'gen_time': time.time() - t0}
+ )
+ per_user.append(entry)
+ if (i + 1) % 40 == 0:
+ print(f" {i+1}/{N}")
+ return per_user
+
+ def _run_profile_based(self, examples, support_sets, references, support_texts, N):
+ per_user = []
+ for i, (ex, support) in enumerate(zip(examples, support_sets)):
+ t0 = time.time()
+ # Step 1: Generate user profile summary from support examples
+ profile = generate_profile(self.wrapper, support, ex['task'])
+ # Step 2: Generate conditioned on profile
+ prompt = build_profile_conditioned_prompt(ex['query_input'], profile, ex['task'])
+ pred = generate_greedy(self.wrapper, prompt)
+ entry = self._make_per_user_entry(
+ ex, references[i], support_texts[i], len(support),
+ pred, {'gen_time': time.time() - t0},
+ extra={'profile_summary': profile},
+ )
+ per_user.append(entry)
+ if (i + 1) % 40 == 0:
+ print(f" {i+1}/{N}")
+ return per_user
+
+ def _run_uph(self, examples, support_sets, references, support_texts, N):
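+        # Per-user adaptation: cache hidden states over the support set, fit theta for the
+        # unconditional head against the frozen LM head, then decode with the blended head.
+        # Falls back to plain greedy decoding when no hidden states could be cached.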
+ H = self.wrapper.hidden_size
+ uncond = UnconditionalHead(H, d=64, alpha=0.1, basis_seed=42).to(self.device)
+ lm_head_bias = None
+ if hasattr(self.wrapper.model.lm_head, 'bias') and self.wrapper.model.lm_head.bias is not None:
+ lm_head_bias = self.wrapper.model.lm_head.bias.data
+
+ per_user = []
+ for i, (ex, support) in enumerate(zip(examples, support_sets)):
+ t0 = time.time()
+ cached_h = cache_support_hidden_states(self.wrapper, support, ex['task'])
+ if not cached_h:
+ prompt = build_query_prompt(ex['query_input'], ex['task'])
+ pred = generate_greedy(self.wrapper, prompt)
+ else:
+ theta = fit_theta(
+ cached_h=cached_h,
+ lm_head_weight=self.wrapper.lm_head_weight,
+ lm_head_bias=lm_head_bias,
+ head_module=uncond,
+ d=64, lr=0.05, steps=30, beta=0.05, lam=1e-4,
+ max_grad_norm=5.0, device=self.device,
+ )
+ prompt = build_query_prompt(ex['query_input'], ex['task'])
+ pred = self.wrapper.generate_with_head_blended(
+ prompt, theta, uncond.forward_fn,
+ blend_gamma=0.5, max_new_tokens=512,
+ min_new_tokens=128, temperature=0.0,
+ )
+ del cached_h, theta
+ torch.cuda.empty_cache()
+
+ entry = self._make_per_user_entry(
+ ex, references[i], support_texts[i], len(support),
+ pred, {'adapt_time': time.time() - t0}
+ )
+ per_user.append(entry)
+ if (i + 1) % 40 == 0:
+ avg_rl = np.mean([u['metrics']['rougeL'] for u in per_user])
+ print(f" {i+1}/{N} (avg R-L: {avg_rl:.4f})")
+ return per_user
+
+ def _run_peft(self, examples, support_sets, references, support_texts, N,
+ config, lr, desc):
+ baseline = PEFTBaseline(self.wrapper, config)
+ print(f" Trainable params: {baseline.n_params:,} ({baseline.n_bytes:,} bytes)")
+
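+        # Per-user PEFT adaptation: the adapter is trained on each user's support items (30 steps)
+        # before generating the prediction for that user's query.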
+ per_user = []
+ for i, (ex, support) in enumerate(zip(examples, support_sets)):
+ t0 = time.time()
+ pred = baseline.adapt_and_generate(
+ support_items=support,
+ query_input=ex['query_input'],
+ task=ex['task'],
+ lr=lr, steps=30,
+ max_new_tokens=512, min_new_tokens=128,
+ )
+ entry = self._make_per_user_entry(
+ ex, references[i], support_texts[i], len(support),
+ pred, {'adapt_time': time.time() - t0},
+ extra={'n_params': baseline.n_params, 'n_bytes': baseline.n_bytes},
+ )
+ per_user.append(entry)
+ if (i + 1) % 20 == 0:
+ avg_rl = np.mean([u['metrics']['rougeL'] for u in per_user])
+ avg_t = np.mean([u['adapt_time'] for u in per_user])
+ print(f" {i+1}/{N} (avg R-L: {avg_rl:.4f}, avg time: {avg_t:.1f}s)")
+
+ baseline.cleanup()
+ return per_user
+
+
+def paired_test(scores_a, scores_b, name_a, name_b, metric_name):
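+    # Paired t-test and Wilcoxon signed-rank test on per-user score differences,
+    # plus a normal-approximation 95% CI on the mean difference.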
+ a, b = np.array(scores_a), np.array(scores_b)
+ diff = a - b
+ mean_diff = np.mean(diff)
+ t_stat, t_pval = stats.ttest_rel(a, b)
+ try:
+ w_stat, w_pval = stats.wilcoxon(a, b)
+ except ValueError:
+ w_stat, w_pval = float('nan'), float('nan')
+ se = stats.sem(diff)
+ ci_low, ci_high = mean_diff - 1.96 * se, mean_diff + 1.96 * se
+ return {
+ 'mean_a': float(np.mean(a)), 'mean_b': float(np.mean(b)),
+ 'mean_diff': float(mean_diff),
+ 'ci_low': float(ci_low), 'ci_high': float(ci_high),
+ 't_pval': float(t_pval), 'w_pval': float(w_pval),
+ }
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--num_eval', type=int, default=200)
+ parser.add_argument('--task', type=str, default='review', choices=['review', 'topic'])
+ parser.add_argument('--setting', type=str, default='user', choices=['user', 'temporal'])
+ parser.add_argument('--methods', type=str, default='all',
+ help='Comma-separated methods or "all"')
+ parser.add_argument('--device', type=str, default='cuda:0')
+ parser.add_argument('--K', type=int, default=4)
+ parser.add_argument('--output_dir', type=str, default='outputs/unified')
+ args = parser.parse_args()
+
+ N = args.num_eval
+ task = args.task
+ setting = args.setting
+ K = args.K
+
+ config_map = {
+ ('review', 'user'): 'product_review_user',
+ ('review', 'temporal'): 'product_review_temporal',
+ ('topic', 'user'): 'topic_writing_user',
+ ('topic', 'temporal'): 'topic_writing_temporal',
+ }
+ config_name = config_map[(task, setting)]
+
+ if args.methods == 'all':
+ methods = ALL_METHODS
+ else:
+ methods = [m.strip() for m in args.methods.split(',')]
+
+ print(f"=== Unified Eval: {task}_{setting}, N={N}, K={K} ===")
+ print(f"Methods: {methods}")
+ print(f"Decode: greedy, min=128, max=512")
+
+ print("\nLoading data...")
+ examples = load_longlamp(config_name, split='val')[:N]
+ support_sets = [select_k_profile_items(ex['profile_items'], K, seed=0) for ex in examples]
+ references = [ex['target_output'] for ex in examples]
+ support_texts = [[s['support_output'] for s in ss] for ss in support_sets]
+
+ print(f"Loading model on {args.device}...")
+ wrapper = QwenWrapper('Qwen/Qwen2.5-1.5B-Instruct', device=args.device)
+
+ runner = MethodRunner(wrapper, args.device)
+ all_per_user = {}
+
+ for method in methods:
+ per_user = runner.run(method, examples, support_sets, references, support_texts, N)
+ all_per_user[method] = per_user
+
+ # Summary table
+ print("\n" + "=" * 90)
+ print(f"{'Method':<15} {'R-L':<8} {'METEOR':<8} {'SFD_-len':<9} {'Len':<6}")
+ print("-" * 90)
+ for method in methods:
+ pu = all_per_user[method]
+ rl = np.mean([u['metrics']['rougeL'] for u in pu])
+ mt = np.mean([u['metrics']['meteor'] for u in pu])
+ sf = np.mean([u['metrics']['sfd_nolen'] for u in pu])
+ ln = np.mean([u['metrics']['length'] for u in pu])
+ print(f"{method:<15} {rl:<8.4f} {mt:<8.4f} {sf:<9.4f} {ln:<6.0f}")
+
+ # Significance tests (UPH vs all others)
+ if 'uph' in all_per_user:
+ print("\n" + "=" * 90)
+ print("Significance (UPH vs each, paired t-test p-value)")
+ print("=" * 90)
+ uph_rl = [u['metrics']['rougeL'] for u in all_per_user['uph']]
+ uph_sf = [u['metrics']['sfd_nolen'] for u in all_per_user['uph']]
+ sig_results = {}
+ for method in methods:
+ if method == 'uph':
+ continue
+ other_rl = [u['metrics']['rougeL'] for u in all_per_user[method]]
+ other_sf = [u['metrics']['sfd_nolen'] for u in all_per_user[method]]
+ rl_t = paired_test(uph_rl, other_rl, 'uph', method, 'R-L')
+ sf_t = paired_test(uph_sf, other_sf, 'uph', method, 'SFD')
+ sig_results[method] = {'rougeL': rl_t, 'sfd_nolen': sf_t}
+ print(f" vs {method:<12} R-L: diff={rl_t['mean_diff']:+.4f} p={rl_t['t_pval']:.2e} "
+ f"SFD: diff={sf_t['mean_diff']:+.4f} p={sf_t['t_pval']:.2e}")
+
+ # Save per-method data in separate directories
+ # Structure: output_dir/task_setting_K{K}/{method}/per_user.json
+ exp_dir = os.path.join(args.output_dir, f"{task}_{setting}_K{K}")
+ os.makedirs(exp_dir, exist_ok=True)
+
+ for method in methods:
+ method_dir = os.path.join(exp_dir, method)
+ os.makedirs(method_dir, exist_ok=True)
+
+ pu = all_per_user[method]
+ agg_m = {
+ 'rougeL': float(np.mean([u['metrics']['rougeL'] for u in pu])),
+ 'meteor': float(np.mean([u['metrics']['meteor'] for u in pu])),
+ 'sfd_nolen': float(np.mean([u['metrics']['sfd_nolen'] for u in pu])),
+ 'avg_len': float(np.mean([u['metrics']['length'] for u in pu])),
+ }
+
+ with open(os.path.join(method_dir, 'per_user.json'), 'w') as f:
+ json.dump({
+ 'per_user': pu,
+ 'aggregate': agg_m,
+ 'num_examples': N, 'task': task, 'setting': setting, 'K': K,
+ 'method': method,
+ 'decode_policy': 'greedy, min=128, max=512',
+ }, f, indent=2, default=str)
+
+ print(f" Saved: {method_dir}/per_user.json")
+
+ # Also save a combined summary (aggregate only, no per-user data)
+ summary = {}
+ for method in methods:
+ pu = all_per_user[method]
+ summary[method] = {
+ 'rougeL': float(np.mean([u['metrics']['rougeL'] for u in pu])),
+ 'meteor': float(np.mean([u['metrics']['meteor'] for u in pu])),
+ 'sfd_nolen': float(np.mean([u['metrics']['sfd_nolen'] for u in pu])),
+ 'avg_len': float(np.mean([u['metrics']['length'] for u in pu])),
+ }
+ summary_path = os.path.join(exp_dir, 'summary.json')
+ with open(summary_path, 'w') as f:
+ json.dump({
+ 'aggregate': summary,
+ 'significance': sig_results if 'uph' in all_per_user else {},
+ 'num_examples': N, 'task': task, 'setting': setting, 'K': K,
+ 'methods': methods,
+ }, f, indent=2, default=str)
+
+ print(f"\nPer-method data: {exp_dir}/{{method}}/per_user.json")
+ print(f"Summary: {summary_path}")
+
+
+if __name__ == '__main__':
+ main()