#!/bin/bash
#SBATCH --job-name=snn_grid_search
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA40x4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH --gpus-per-node=1
#SBATCH --mem=32G
#SBATCH --time=48:00:00
#SBATCH --output=runs/slurm_logs/%j_grid_search.out
#SBATCH --error=runs/slurm_logs/%j_grid_search.err

# ============================================================
# Hyperparameter Grid Search for Lyapunov-Regularized SNN
# ============================================================
# Uses CIFAR-10 (real data) with rate encoding.
# Includes a warmup for λ_reg to avoid killing learning early.
#
# Grid:
#   - Depths:    4, 6, 8, 10
#   - λ_reg:     0.01, 0.05, 0.1, 0.2, 0.3
#   - λ_target:  0.0, -0.05, -0.1, -0.2
#
# Total: 4 depths × 5 λ_reg × 4 λ_target = 80 configurations
# Estimated time: ~4 min/config × 80 ≈ 5-6 hours
# ============================================================

set -e

PROJECT_DIR="/projects/bfqt/users/yurenh2/ml-projects/snn-training"
cd "$PROJECT_DIR"

# NOTE: SLURM opens the --output/--error files at job start, relative to
# the submission directory, so runs/slurm_logs must already exist when
# the job is submitted; this mkdir only covers future runs from a fresh
# checkout.
mkdir -p runs/slurm_logs

echo "============================================================"
echo "Hyperparameter Grid Search"
echo "Job ID: $SLURM_JOB_ID | Node: $SLURM_NODELIST"
echo "Start: $(date)"
echo "============================================================"
nvidia-smi --query-gpu=name,memory.total --format=csv,noheader
echo "============================================================"

# Full grid as described in the header: CIFAR-10 (real data, not
# synthetic), 80 configs, with λ_reg warmup handled by the training
# script.
python files/experiments/hyperparameter_grid_search.py \
    --depths 4 6 8 10 \
    --lambda_regs 0.01 0.05 0.1 0.2 0.3 \
    --lambda_targets 0.0 -0.05 -0.1 -0.2 \
    --hidden_dim 256 \
    --epochs 15 \
    --batch_size 128 \
    --T 8 \
    --lr 0.001 \
    --data_dir ./data \
    --out_dir runs/grid_search_cifar10 \
    --device cuda

echo "============================================================"
echo "Finished: $(date)"
echo "============================================================"
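
# ------------------------------------------------------------
# Usage sketch (assumptions: the filename grid_search.sbatch is
# hypothetical and not given in the original; adjust to wherever
# this file actually lives). A typical submit-and-monitor
# sequence from the project root would look like:
#
#   cd /projects/bfqt/users/yurenh2/ml-projects/snn-training
#   mkdir -p runs/slurm_logs          # must exist BEFORE sbatch;
#                                     # SLURM opens %j_*.out/.err at job start
#   sbatch grid_search.sbatch
#   squeue -u "$USER"                 # check queue state
#   tail -f runs/slurm_logs/<jobid>_grid_search.out
# ------------------------------------------------------------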