From c69a4c6e3596f75bd392c27d3c072adc825ce497 Mon Sep 17 00:00:00 2001 From: YurenHao0426 Date: Wed, 11 Feb 2026 14:21:11 -0600 Subject: A12-A14 init_logit ablation: confirm frozen OLMo cannot benefit from sparse topology - A12 (logit=3): NLL 2.76, A13 (logit=0): NLL 3.51, A14 (logit=1): NLL 3.26 - All worse than baseline (2.46). Lower init_logit = more deviation = worse NLL - Confirms: gradient flows (gates move), but A=1 is global optimum for frozen model - Added Dolma streaming retry logic (max 10 retries, 30s wait) - Phase 1 frozen approach has fundamental limitation; Phase 2 (unfreeze) needed Co-Authored-By: Claude Opus 4.6 --- configs/a13_init_logit_0.yaml | 53 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 configs/a13_init_logit_0.yaml (limited to 'configs/a13_init_logit_0.yaml') diff --git a/configs/a13_init_logit_0.yaml b/configs/a13_init_logit_0.yaml new file mode 100644 index 0000000..5bd356d --- /dev/null +++ b/configs/a13_init_logit_0.yaml @@ -0,0 +1,53 @@ +# A13 — Init logit = 0.0 (maximum gradient, A starts at 0.5) +# Purpose: σ(0)=0.5, maximum gradient signal. Init NLL will be bad +# but learning space is large. Tests if predictor can learn from scratch. 
# Run: python scripts/train.py --config configs/a13_init_logit_0.yaml

# Model — frozen OLMo backbone; Qwen embedding model feeds the gate predictor.
olmo_model_id: "allenai/OLMo-2-0425-1B"
qwen_model_id: "Qwen/Qwen3-Embedding-0.6B"

# Predictor
predictor_hidden_dim: 1024
predictor_rank: 32
cascading_gate_k: 5.0
input_norm: "none"
# The A13 ablation variable: sigmoid(0) = 0.5, so gates start half-open for
# maximum gradient signal (vs. logit 3 in A12 / logit 1 in A14).
init_logit: 0.0

# Data
dataset: "allenai/dolma"
dataset_name: "v1_7"
seq_len: 1024
batch_size: 4
# NOTE(review): presumably micro_batch_size accumulates up to batch_size — confirm in train.py.
micro_batch_size: 2
qwen_input_prefix: ""

# Eval
# NOTE(review): eval_skip looks like "skip N streamed examples before taking the
# eval slice" — verify against the data-loading code.
eval_skip: 10000
eval_size: 50

# Training — ~50M tokens = 12500 steps
total_steps: 12500
# Written as 3.0e-4, not 3e-4: PyYAML's YAML 1.1 resolver only recognizes
# exponent notation with a decimal point as a float; a bare "3e-4" silently
# loads as the string "3e-4".
lr: 3.0e-4
weight_decay: 0.01
optimizer: "adamw"

# Schedules — constant tau=2 (same as S2), no sparsity (lambda_max: 0.0, so the
# warmup fraction is inert). tau_init == tau_final makes the "cosine" schedule
# a constant; the value is kept for consistency with sibling configs.
tau_init: 2.0
tau_final: 2.0
tau_schedule: "cosine"
lambda_max: 0.0
lambda_warmup_frac: 0.2

# Logging
wandb_project: "dagformer"
wandb_run_name: "a13-init-logit-0"
log_every: 10
eval_every: 500

# Checkpointing
save_every: 2500
save_dir: "checkpoints/a13/"

# Hardware
num_gpus: 1