#!/bin/bash
#SBATCH --job-name=snn_asymm
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA40x4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH --gpus-per-node=1
#SBATCH --mem=64G
#SBATCH --time=48:00:00
#SBATCH --output=runs/slurm_logs/%j_asymm.out
#SBATCH --error=runs/slurm_logs/%j_asymm.err

# ============================================================
# Asymmetric Lyapunov Regularization Experiment
# ============================================================
# Hypothesis: an asymmetric penalty strikes a balance between
# preventing chaos and allowing learning.
#
# Asymmetric loss:
#   - Strong penalty for chaos (lambda > 0):     relu(lambda)^2
#   - Weak penalty for collapse (lambda < -1):   0.1 * relu(-lambda - 1)^2
#
# This leaves dynamics unpenalized in the "sweet spot" of
# slightly negative Lyapunov exponents (stable but not dead).
# ============================================================

set -e

PROJECT_DIR="/projects/bfqt/users/yurenh2/ml-projects/snn-training"
cd "$PROJECT_DIR"
mkdir -p runs/slurm_logs data

echo "============================================================"
echo "ASYMMETRIC Lyapunov Regularization"
echo "Job ID: $SLURM_JOB_ID | Node: $SLURM_NODELIST"
echo "Start: $(date)"
echo "============================================================"
nvidia-smi --query-gpu=name,memory.total --format=csv,noheader
echo "============================================================"

# Test depths: 4, 8, 12, 16 conv layers
# Using asymmetric loss + 20-epoch warmup
python files/experiments/depth_scaling_benchmark.py \
    --dataset cifar100 \
    --depths 4 8 12 16 \
    --T 4 \
    --epochs 150 \
    --batch_size 128 \
    --lr 0.001 \
    --lambda_reg 0.3 \
    --lambda_target -0.1 \
    --reg_type asymmetric \
    --warmup_epochs 20 \
    --data_dir ./data \
    --out_dir runs/depth_scaling_asymm \
    --device cuda

echo "============================================================"
echo "Finished: $(date)"
echo "============================================================"
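
# ------------------------------------------------------------
# Reference sketch (not executed by this job script): one way
# the asymmetric regularizer described in the header could be
# written in PyTorch. The names (asymmetric_lyapunov_penalty,
# lam_est) are illustrative assumptions, not taken from
# depth_scaling_benchmark.py.
#
#   import torch
#   import torch.nn.functional as F
#
#   def asymmetric_lyapunov_penalty(lam_est: torch.Tensor) -> torch.Tensor:
#       # Strong quadratic penalty once the estimated exponent turns chaotic (> 0)
#       chaos = F.relu(lam_est).pow(2)
#       # Weak quadratic penalty once dynamics over-contract (< -1)
#       collapse = 0.1 * F.relu(-lam_est - 1.0).pow(2)
#       # Penalty is zero in the slightly negative "sweet spot" (-1, 0)
#       return (chaos + collapse).mean()
# ------------------------------------------------------------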