#!/bin/bash
#SBATCH --job-name=lyap_speedup
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA40x4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --gpus-per-node=1
#SBATCH --mem=32G
#SBATCH --time=01:00:00
#SBATCH --output=runs/slurm_logs/%j_speedup.out
#SBATCH --error=runs/slurm_logs/%j_speedup.err

# ============================================================
# Lyapunov Computation Speedup Benchmark
# ============================================================
# Tests different optimization approaches:
#   - Baseline:   Current sequential implementation
#   - Approach A: Trajectory-as-batch (P=2)
#   - Approach B: Global-norm divergence + single-scale renorm
#   - Approach C: torch.compile
#   - Combined:   All optimizations together
# ============================================================

set -e

PROJECT_DIR="/projects/bfqt/users/yurenh2/ml-projects/snn-training"
cd "$PROJECT_DIR"
mkdir -p runs/slurm_logs

echo "============================================================"
echo "Lyapunov Speedup Benchmark"
echo "Job ID: $SLURM_JOB_ID | Node: $SLURM_NODELIST"
echo "Start: $(date)"
echo "============================================================"
nvidia-smi --query-gpu=name,memory.total --format=csv,noheader
echo "============================================================"

# Run main benchmark + scaling tests
python files/experiments/lyapunov_speedup_benchmark.py \
    --batch_size 64 \
    --T 4 \
    --hidden_dims 64 128 256 \
    --scaling

echo "============================================================"
echo "Finished: $(date)"
echo "============================================================"
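
# ============================================================
# Optional illustration (not part of the benchmark): a minimal,
# self-contained toy sketch of what Approaches A-C above could
# look like. The step function, weight matrix, and names
# (`step`, `h_base`, d0=1e-3) are stand-ins invented for this
# sketch, not taken from lyapunov_speedup_benchmark.py.
# Safe to delete.
# ============================================================
python - <<'PY'
import torch

torch.manual_seed(0)
B, H, T = 64, 128, 4            # toy batch, hidden size, time steps
d0 = 1e-3                       # perturbation scale (assumed value)
W = torch.randn(H, H) / H**0.5  # fixed toy recurrent weights

def step(h):
    # Stand-in dynamics; the real benchmark would step an SNN here.
    return torch.tanh(h @ W)

# Approach C would wrap the step function once, e.g.:
# step = torch.compile(step)

h_base = torch.randn(B, H)
u = torch.randn(B, H)
u = d0 * u / u.norm()                        # perturbation with global norm d0
h = torch.cat([h_base, h_base + u], dim=0)   # Approach A: base + perturbed as one P=2 batch

log_sum = 0.0
for _ in range(T):
    h = step(h)                              # one forward pass covers both copies
    delta = h[B:] - h[:B]
    d = delta.norm()                         # Approach B: one global norm, not per-sample norms
    log_sum += torch.log(d / d0).item()      # accumulate log divergence
    h = torch.cat([h[:B], h[:B] + delta * (d0 / d)], dim=0)  # single-scalar renorm
print(f"toy per-step Lyapunov estimate: {log_sum / T:.4f}")
PY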