summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorYurenHao0426 <blackhao0426@gmail.com>2026-05-04 23:10:10 -0500
committerYurenHao0426 <blackhao0426@gmail.com>2026-05-04 23:10:10 -0500
commitba6ead6d7a41b7ed78bb228181b7262d0c75d2eb (patch)
tree726171fb4b0c536d9287a15daf52929ec65fa3d0 /src
parent37ba0f83e3652a215680fd8515af9c14fc02e21c (diff)
Global rename GRAFT → KAFT (incl. internal class + filenames)
- src/trainers.py: GraphGrAPETrainer → KAFTTrainer; module docstring + comments updated. VanillaGrAPETrainer kept as-is (it is a separate control method, not KAFT).
- experiments/: all 19 runners pick up the new class name; result keys ('Cora_GRAFT' etc.) become 'Cora_KAFT'; OUT_DIRs renamed (e.g. bp_graft_depth_20seeds → bp_kaft_depth_20seeds).
- figures/: data-lookup keys and display labels both 'KAFT'; output filename graft_depth_sweep.{pdf,png} → kaft_depth_sweep.{pdf,png}.
- File rename: experiments/run_bp_graft_depth.py → run_bp_kaft_depth.py; figures/graft_depth_sweep.pdf → kaft_depth_sweep.pdf.
- README aligned. Imports verified: `from src.trainers import KAFTTrainer` succeeds.
Diffstat (limited to 'src')
-rw-r--r--src/data.py2
-rw-r--r--src/trainers.py10
2 files changed, 6 insertions, 6 deletions
diff --git a/src/data.py b/src/data.py
index 6e80285..baf5a8b 100644
--- a/src/data.py
+++ b/src/data.py
@@ -1,4 +1,4 @@
-"""Data loading and preprocessing for Graph-GrAPE experiments."""
+"""Data loading and preprocessing for KAFT experiments."""
import torch
import torch.nn.functional as F
diff --git a/src/trainers.py b/src/trainers.py
index 651dffc..7589fc9 100644
--- a/src/trainers.py
+++ b/src/trainers.py
@@ -1,5 +1,5 @@
"""
-Training methods for Graph-GrAPE experiments.
+Training methods for KAFT experiments.
Generalized to L-layer residual GCN.
Methods compared:
@@ -7,7 +7,7 @@ Methods compared:
DFA — Fixed random R, P=I
DFA-GNN — Fixed random R, P=Â^{L-l}
VanillaGrAPE — Aligned R (per layer), P=I
- GraphGrAPE — Aligned R (per layer) + topology P=Â^{L-l}
+ KAFT — Aligned R (per layer) + topology P=Â^{L-l}
"""
import torch
@@ -179,7 +179,7 @@ class BPTrainer:
# ---------------------------------------------------------------------------
class _FeedbackTrainerBase:
- """Shared logic for DFA / GrAPE variants, generalized to L layers."""
+ """Shared logic for DFA / KAFT variants, generalized to L layers."""
def __init__(self, data, hidden_dim, lr, weight_decay,
diffusion_alpha, diffusion_iters,
@@ -608,10 +608,10 @@ class VanillaGrAPETrainer(_FeedbackTrainerBase):
# ---------------------------------------------------------------------------
-# Graph-GrAPE Trainer
+# KAFT Trainer
# ---------------------------------------------------------------------------
-class GraphGrAPETrainer(_FeedbackTrainerBase):
+class KAFTTrainer(_FeedbackTrainerBase):
"""Aligned R per layer + topology P = Â^{min(L-l, max_power)}."""
def __init__(self, data, hidden_dim, lr, weight_decay,