diff options
Diffstat (limited to 'experiments/run_dfagnn_depth.py')
| -rw-r--r-- | experiments/run_dfagnn_depth.py | 101 |
1 file changed, 101 insertions, 0 deletions
#!/usr/bin/env python3
"""H7: DFA-GNN depth sweep for Figure 4(a)-style plot.

Runs DFA-GNN at L ∈ {4, 8, 10, 12, 16, 20} × {Cora, CiteSeer, PubMed, DBLP} × 20 seeds.
L=6 data already exists from prior experiments; L=2/3 skipped (CiteSeer L=2 GRAFT soft spot).

Combined with existing BP and GRAFT depth data, produces 3-method depth curves for Figure 4(a).
"""

import json
import os

import numpy as np
import torch

from src.data import load_dataset
from src.trainers import DFAGNNTrainer
from run_dblp_depth import load_dblp

device = 'cuda:0'
SEEDS = list(range(20))          # 20 seeds per (dataset, depth) cell
EPOCHS = 200
DEPTHS = [4, 8, 10, 12, 16, 20]  # L=6 reused from prior runs; L=2/3 deliberately skipped
OUT_DIR = 'results/dfagnn_depth_20seeds'

# DFA-GNN hyperparameters shared by every run in the sweep.
dfagnn_extra = dict(diffusion_alpha=0.5, diffusion_iters=10, max_topo_power=3)


def train_one(data, L, seed):
    """Train one depth-``L`` DFA-GNN on ``data`` with the given seed.

    Evaluates every 5 epochs and returns the test accuracy recorded at the
    epoch with the best validation accuracy (model selection by val acc).
    """
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed_all(seed)
    t = DFAGNNTrainer(data=data, hidden_dim=64, lr=0.01, weight_decay=5e-4,
                      num_layers=L, residual_alpha=0.0, backbone='gcn', **dfagnn_extra)
    best_val, best_test = 0, 0
    for ep in range(EPOCHS):
        t.train_step()
        if ep % 5 == 0:
            v = t.evaluate('val_mask')
            # Only run the test-set forward pass when val improves -- the
            # original evaluated test every 5 epochs even when the result
            # could not affect the selection.
            if v > best_val:
                best_val, best_test = v, t.evaluate('test_mask')
    del t
    torch.cuda.empty_cache()
    return best_test


def main():
    """Run the depth sweep, checkpointing per-seed results after every cell."""
    os.makedirs(OUT_DIR, exist_ok=True)
    per_seed_file = os.path.join(OUT_DIR, 'per_seed_data.json')
    if os.path.exists(per_seed_file):
        with open(per_seed_file) as f:
            per_seed_data = json.load(f)
    else:
        per_seed_data = {}

    def checkpoint():
        # Persist progress so an interrupted sweep resumes from the cache.
        with open(per_seed_file, 'w') as f:
            json.dump(per_seed_data, f, indent=2)

    datasets_cfg = {
        'Cora': lambda: load_dataset('Cora', device=device),
        'CiteSeer': lambda: load_dataset('CiteSeer', device=device),
        'PubMed': lambda: load_dataset('PubMed', device=device),
        'DBLP': lambda: load_dblp(),
    }

    for ds_name, loader in datasets_cfg.items():
        data = loader()
        for L in DEPTHS:
            key = f"{ds_name}_L{L}_DFA-GNN"
            if key not in per_seed_data:
                per_seed_data[key] = {}

            print(f"\n=== {key} (20 seeds) ===", flush=True)
            for seed in SEEDS:
                sk = str(seed)
                if sk in per_seed_data[key]:
                    print(f"  seed {seed}: cached ({per_seed_data[key][sk]*100:.1f}%)", flush=True)
                    continue
                try:
                    acc = train_one(data, L, seed)
                    per_seed_data[key][sk] = acc
                    print(f"  seed {seed}: {acc*100:.1f}%", flush=True)
                except Exception as e:
                    # Record failures as 0.0 so the sweep keeps going; the
                    # summary below will show a visibly depressed mean.
                    print(f"  seed {seed}: FAILED - {e}", flush=True)
                    per_seed_data[key][sk] = 0.0

            # BUGFIX: the cache was previously written only once per dataset,
            # so a crash could lose up to len(DEPTHS) * len(SEEDS) finished
            # training runs. Checkpoint after every (dataset, depth) cell.
            checkpoint()
        del data
        torch.cuda.empty_cache()

    # Summary
    print(f"\n{'=' * 70}\nDFA-GNN depth sweep summary (20 seeds)\n{'=' * 70}")
    results = {}
    for ds in datasets_cfg:
        print(f"\n{ds}:")
        for L in DEPTHS:
            key = f"{ds}_L{L}_DFA-GNN"
            # Tolerate a partially-completed cache (e.g. interrupted run or a
            # resume with a different DEPTHS list) instead of raising KeyError.
            if key not in per_seed_data:
                continue
            vals = np.array([per_seed_data[key][str(s)] for s in SEEDS]) * 100
            results[key] = {'mean': float(vals.mean()), 'std': float(vals.std()),
                            'per_seed': vals.tolist()}
            print(f"  L={L:2d}  DFA-GNN  {vals.mean():5.1f} ± {vals.std():4.1f}")

    with open(os.path.join(OUT_DIR, 'results.json'), 'w') as f:
        json.dump(results, f, indent=2)
    print(f"\nSaved to {OUT_DIR}/results.json")


if __name__ == '__main__':
    main()
