Diffstat (limited to 'src')
-rw-r--r--  src/data.py     |  2 +-
-rw-r--r--  src/trainers.py | 10 +++++-----
2 files changed, 6 insertions, 6 deletions
diff --git a/src/data.py b/src/data.py
index 6e80285..baf5a8b 100644
--- a/src/data.py
+++ b/src/data.py
@@ -1,4 +1,4 @@
-"""Data loading and preprocessing for Graph-GrAPE experiments."""
+"""Data loading and preprocessing for KAFT experiments."""
 
 import torch
 import torch.nn.functional as F
diff --git a/src/trainers.py b/src/trainers.py
index 651dffc..7589fc9 100644
--- a/src/trainers.py
+++ b/src/trainers.py
@@ -1,5 +1,5 @@
 """
-Training methods for Graph-GrAPE experiments.
+Training methods for KAFT experiments.
 Generalized to L-layer residual GCN.
 
 Methods compared:
@@ -7,7 +7,7 @@ Methods compared:
   DFA          — Fixed random R, P=I
   DFA-GNN      — Fixed random R, P=Â^{L-l}
   VanillaGrAPE — Aligned R (per layer), P=I
-  GraphGrAPE   — Aligned R (per layer) + topology P=Â^{L-l}
+  KAFT         — Aligned R (per layer) + topology P=Â^{L-l}
 """
 
 import torch
@@ -179,7 +179,7 @@ class BPTrainer:
 # ---------------------------------------------------------------------------
 
 class _FeedbackTrainerBase:
-    """Shared logic for DFA / GrAPE variants, generalized to L layers."""
+    """Shared logic for DFA / KAFT variants, generalized to L layers."""
 
     def __init__(self, data, hidden_dim, lr, weight_decay,
                  diffusion_alpha, diffusion_iters,
@@ -608,10 +608,10 @@ class VanillaGrAPETrainer(_FeedbackTrainerBase):
 # ---------------------------------------------------------------------------
-# Graph-GrAPE Trainer
+# KAFT Trainer
 # ---------------------------------------------------------------------------
 
-class GraphGrAPETrainer(_FeedbackTrainerBase):
+class KAFTTrainer(_FeedbackTrainerBase):
     """Aligned R per layer + topology P = Â^{min(L-l, max_power)}."""
 
     def __init__(self, data, hidden_dim, lr, weight_decay,
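The diff only touches names and docstrings, but the methods table it renames encodes the actual update rule: each layer l receives a feedback signal delta_l = P (e R^T), where e is the output error, R is the feedback matrix (fixed random for DFA, aligned per layer for the GrAPE/KAFT variants), and P is either the identity or a power of the normalized adjacency Â. Below is a minimal sketch of that rule, assuming conventional shapes; the function name `topology_feedback_error`, the (H, C) layout of R, and the dense Â are all illustrative assumptions, not the repository's actual API (the trainer internals are not shown in this diff).

```python
import torch

def topology_feedback_error(e, R, A_hat, L, l, max_power=None):
    """Sketch of the per-layer feedback signal from the docstring table:
    delta_l = P @ (e @ R^T), with P = A_hat^(L - l) for the topology-aware
    variants (DFA-GNN, KAFT) and P = I (power 0) otherwise.

    e     : (N, C) output error at the N nodes
    R     : (H, C) feedback matrix for layer l (assumed hidden-by-class layout)
    A_hat : (N, N) normalized adjacency with self-loops
    """
    power = L - l
    if max_power is not None:      # KAFT caps the exponent: min(L - l, max_power)
        power = min(power, max_power)
    delta = e @ R.T                # project the output error into layer l's space
    for _ in range(power):         # apply P = A_hat^power via repeated products,
        delta = A_hat @ delta      # never materializing the matrix power itself
    return delta
```

Applying Â as repeated matrix-vector products rather than forming Â^{L-l} explicitly keeps the cost at O(power) sparse-dense multiplies, which is presumably why the KAFT docstring caps the exponent with max_power.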
