From 039c12d3cf7178db6a7d80b02cf022d67231014e Mon Sep 17 00:00:00 2001
From: YurenHao0426
Date: Tue, 10 Feb 2026 09:50:33 -0600
Subject: Add auto-resume checkpointing, S1/S2 configs, and experiment results

- Auto-resume: find latest checkpoint in save_dir on startup
- SIGUSR1 handler: save checkpoint before SLURM timeout
- S1 config (constant tau=5, identity init verification)
- S2 config (constant tau=2, gradient flow check)
- Experiment results tracker with S0/S1 data
- Speed estimates and experiment plan

Co-Authored-By: Claude Opus 4.6
---
 src/training/checkpointing.py | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

(limited to 'src/training/checkpointing.py')

diff --git a/src/training/checkpointing.py b/src/training/checkpointing.py
index 9ff02df..b53ce4f 100644
--- a/src/training/checkpointing.py
+++ b/src/training/checkpointing.py
@@ -6,7 +6,9 @@ Frozen models (OLMo, Qwen) are not checkpointed — they load from HuggingFace.
 
 from __future__ import annotations
 
+import glob
 import os
+import re
 from typing import Any, Optional
 
 import torch
@@ -55,6 +57,35 @@ def save_checkpoint(
     return path
 
 
+def find_latest_checkpoint(save_dir: str) -> Optional[str]:
+    """Find the latest checkpoint in save_dir by step number.
+
+    Returns:
+        Path to latest checkpoint, or None if no checkpoints found.
+    """
+    if not os.path.isdir(save_dir):
+        return None
+
+    pattern = os.path.join(save_dir, "checkpoint_step*.pt")
+    files = glob.glob(pattern)
+    if not files:
+        return None
+
+    # Extract step numbers and find max
+    step_re = re.compile(r"checkpoint_step(\d+)\.pt$")
+    best_step = -1
+    best_path = None
+    for f in files:
+        m = step_re.search(f)
+        if m:
+            step = int(m.group(1))
+            if step > best_step:
+                best_step = step
+                best_path = f
+
+    return best_path
+
+
 def load_checkpoint(
     path: str,
     predictor: nn.Module,
--
cgit v1.2.3
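
The commit message mentions an auto-resume check and a SIGUSR1 handler, but this diff is limited to checkpointing.py, so that wiring is not shown here. A minimal sketch of how a SLURM training entrypoint might use find_latest_checkpoint together with a SIGUSR1 handler, assuming the job is submitted with --signal=USR1@<seconds> so SLURM delivers SIGUSR1 before the time limit; the import path and the names install_sigusr1_checkpointing, resume_path, and save_fn are hypothetical and are not code from this repository:

# Sketch only: assumes the module import path below; only find_latest_checkpoint
# is taken from the diff above, everything else is illustrative.
import signal

from src.training.checkpointing import find_latest_checkpoint  # import path assumed


def install_sigusr1_checkpointing(save_fn) -> None:
    """Save a checkpoint when SLURM delivers SIGUSR1 shortly before the time limit.

    save_fn is any zero-argument callable that writes checkpoint_step<N>.pt,
    e.g. a closure around save_checkpoint with the current step bound in.
    """
    def _handler(signum, frame):
        save_fn()  # write a checkpoint before the job is killed
    signal.signal(signal.SIGUSR1, _handler)


def resume_path(save_dir: str):
    """Return the newest checkpoint_step*.pt in save_dir, or None to start fresh."""
    latest = find_latest_checkpoint(save_dir)
    if latest is not None:
        print(f"[auto-resume] resuming from {latest}")
    return latest

The returned path would then be passed to load_checkpoint (whose full argument list is truncated in this hunk) before the training loop starts.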