#!/usr/bin/env python3
"""E1: DBLP depth scaling — upgrade depth_stress 3-seed to 20 seeds on DBLP, extend to L={8,12,16,20,24,32}. Goal: confirm (or falsify) the preliminary finding that KAFT > ResGCN at L=16 (3-seed: 69.9 vs 63.7) and scales to L=32. BP vs ResGCN vs KAFT vs KAFT+ResGCN, GCN backbone, lr=0.01, 200 epochs."""
import torch
import numpy as np
import json
import os
from scipy import stats as scipy_stats

from src.trainers import BPTrainer, KAFTTrainer
from run_deep_baselines import ResGCNTrainer
from run_combo_20seeds import GRAFTResGCN
from run_dblp_depth import load_dblp

device = 'cuda:0'  # selected via CUDA_VISIBLE_DEVICES
SEEDS = list(range(20))
EPOCHS = 200
DEPTHS = [8, 12, 16, 20, 24, 32]
OUT_DIR = 'results/dblp_depth_scaling_20seeds'

# Extra hyperparameters shared by the two KAFT-style trainers.
grape_extra = dict(diffusion_alpha=0.5, diffusion_iters=10, lr_feedback=0.5,
                   num_probes=64, topo_mode='fixed_A')

# method name -> (trainer class, extra kwargs merged on top of `common`)
METHODS = {
    'BP': (BPTrainer, {}),
    'ResGCN': (ResGCNTrainer, {}),
    'KAFT': (KAFTTrainer, grape_extra),
    'KAFT+ResGCN': (GRAFTResGCN, grape_extra),
}


def train_one(cls, common, extra, seed):
    """Train one model for EPOCHS epochs under a fixed seed.

    Returns the test accuracy at the best-validation checkpoint.
    Validation/test are evaluated every 5 epochs AND at the final epoch.

    FIX: the original condition was only `ep % 5 == 0`, so with EPOCHS=200
    the last evaluated checkpoint was epoch 195 — epochs 196-199 were
    silently ignored for model selection.
    """
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed_all(seed)
    t = cls(**common, **extra)
    if hasattr(t, 'align_mode'):
        t.align_mode = 'chain_norm'
    bv, bt = 0, 0  # best val acc, and test acc at that checkpoint
    for ep in range(EPOCHS):
        t.train_step()
        if ep % 5 == 0 or ep == EPOCHS - 1:
            v = t.evaluate('val_mask')
            te = t.evaluate('test_mask')
            if v > bv:
                bv, bt = v, te
    # Free GPU memory before the next run (480 runs total across the sweep).
    del t
    torch.cuda.empty_cache()
    return bt


def main():
    os.makedirs(OUT_DIR, exist_ok=True)

    # Per-seed cache: restartable — completed (depth, method, seed) runs are
    # skipped on rerun, and the file is rewritten after every new seed.
    per_seed_file = os.path.join(OUT_DIR, 'per_seed_data.json')
    if os.path.exists(per_seed_file):
        with open(per_seed_file) as f:
            per_seed_data = json.load(f)
    else:
        per_seed_data = {}

    data = load_dblp()

    for L in DEPTHS:
        print(f"\n{'=' * 70}\nDepth L={L}\n{'=' * 70}", flush=True)
        common = dict(data=data, hidden_dim=64, lr=0.01, weight_decay=5e-4,
                      num_layers=L, residual_alpha=0.0, backbone='gcn')
        for mname, (cls, extra) in METHODS.items():
            key = f"DBLP_L{L}_{mname}"
            if key not in per_seed_data:
                per_seed_data[key] = {}
            print(f"\n--- {key} ({len(SEEDS)} seeds) ---", flush=True)
            for seed in SEEDS:
                sk = str(seed)  # JSON keys are strings
                if sk in per_seed_data[key]:
                    print(f"  seed {seed}: cached ({per_seed_data[key][sk]*100:.1f}%)", flush=True)
                    continue
                try:
                    acc = train_one(cls, common, extra, seed)
                    per_seed_data[key][sk] = acc
                    print(f"  seed {seed}: {acc*100:.1f}%", flush=True)
                except Exception as e:
                    # Record failures as 0.0 so the sweep continues; they are
                    # visible in the log and drag the mean down conspicuously.
                    print(f"  seed {seed}: FAILED - {e}", flush=True)
                    per_seed_data[key][sk] = 0.0
                with open(per_seed_file, 'w') as f:
                    json.dump(per_seed_data, f, indent=2)

    # Summary
    print(f"\n{'=' * 70}\nDBLP depth scaling summary\n{'=' * 70}")
    results = {}
    for L in DEPTHS:
        print(f"\nL={L}:")
        method_means = {}
        for mname in METHODS:
            key = f"DBLP_L{L}_{mname}"
            vals = np.array([per_seed_data[key][str(s)] for s in SEEDS]) * 100
            method_means[mname] = (vals.mean(), vals.std())
            results[key] = {'mean': float(vals.mean()), 'std': float(vals.std()),
                            'per_seed': vals.tolist()}
            print(f"  {mname:<15} {vals.mean():5.1f} ± {vals.std():4.1f}")
        # KAFT vs ResGCN (paired t-test over the shared seeds).
        # FIX: the lookup key was f"DBLP_L{L}_GRAFT", but no such key is ever
        # written — the method is registered as 'KAFT' — so the summary died
        # with a KeyError after all runs had finished.
        g_accs = np.array([per_seed_data[f"DBLP_L{L}_KAFT"][str(s)] for s in SEEDS]) * 100
        r_accs = np.array([per_seed_data[f"DBLP_L{L}_ResGCN"][str(s)] for s in SEEDS]) * 100
        t_gr, p_gr = scipy_stats.ttest_rel(g_accs, r_accs)
        print(f"  KAFT vs ResGCN: Δ={g_accs.mean() - r_accs.mean():+.1f}, p={p_gr:.4f}")

    with open(os.path.join(OUT_DIR, 'results.json'), 'w') as f:
        json.dump(results, f, indent=2)
    print(f"\nSaved to {OUT_DIR}/results.json")


if __name__ == '__main__':
    main()