| author | YurenHao0426 <blackhao0426@gmail.com> | 2026-01-13 23:50:59 -0600 |
|---|---|---|
| committer | YurenHao0426 <blackhao0426@gmail.com> | 2026-01-13 23:50:59 -0600 |
| commit | 00cf667cee7ffacb144d5805fc7e0ef443f3583a (patch) | |
| tree | 77d20a3adaecf96bf3aff0612bdd3b5fa1a7dc7e /scripts/run_depth_scaling_hinge.sbatch | |
| parent | c53c04aa1d6ff75cb478a9498c370baa929c74b6 (diff) | |
| parent | cd99d6b874d9d09b3bb87b8485cc787885af71f1 (diff) | |
Merge master into main
Diffstat (limited to 'scripts/run_depth_scaling_hinge.sbatch')
| -rw-r--r-- | scripts/run_depth_scaling_hinge.sbatch | 61 |
1 files changed, 61 insertions, 0 deletions
diff --git a/scripts/run_depth_scaling_hinge.sbatch b/scripts/run_depth_scaling_hinge.sbatch
new file mode 100644
index 0000000..8ca3112
--- /dev/null
+++ b/scripts/run_depth_scaling_hinge.sbatch
@@ -0,0 +1,61 @@
+#!/bin/bash
+#SBATCH --job-name=snn_hinge
+#SBATCH --account=bfqt-delta-gpu
+#SBATCH --partition=gpuA40x4
+#SBATCH --nodes=1
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=8
+#SBATCH --gpus-per-node=1
+#SBATCH --mem=64G
+#SBATCH --time=48:00:00
+#SBATCH --output=runs/slurm_logs/%j_hinge.out
+#SBATCH --error=runs/slurm_logs/%j_hinge.err
+
+# ============================================================
+# Hinge Loss Lyapunov Regularization Experiment
+# ============================================================
+# Hypothesis: Using hinge loss (only penalize chaos, not stability)
+# will allow the network to learn while still preventing chaotic
+# dynamics.
+#
+# Hinge loss: max(0, lambda)^2
+# - Only penalizes positive Lyapunov (chaos)
+# - Allows negative Lyapunov (stable dynamics) without penalty
+# - Combined with warmup to let network start learning first
+# ============================================================
+
+set -e
+
+PROJECT_DIR="/projects/bfqt/users/yurenh2/ml-projects/snn-training"
+cd "$PROJECT_DIR"
+
+mkdir -p runs/slurm_logs data
+
+echo "============================================================"
+echo "HINGE LOSS Lyapunov Regularization"
+echo "Job ID: $SLURM_JOB_ID | Node: $SLURM_NODELIST"
+echo "Start: $(date)"
+echo "============================================================"
+nvidia-smi --query-gpu=name,memory.total --format=csv,noheader
+echo "============================================================"
+
+# Test depths: 4, 8, 12, 16 conv layers
+# Using hinge loss + 20 epoch warmup
+python files/experiments/depth_scaling_benchmark.py \
+    --dataset cifar100 \
+    --depths 4 8 12 16 \
+    --T 4 \
+    --epochs 150 \
+    --batch_size 128 \
+    --lr 0.001 \
+    --lambda_reg 0.3 \
+    --lambda_target -0.1 \
+    --reg_type hinge \
+    --warmup_epochs 20 \
+    --data_dir ./data \
+    --out_dir runs/depth_scaling_hinge \
+    --device cuda
+
+echo "============================================================"
+echo "Finished: $(date)"
+echo "============================================================"
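The hinge penalty described in the script's header comments can be written compactly. Below is a minimal PyTorch sketch of what such a regularizer and its warmup gating might look like; the helper names (`hinge_lyapunov_penalty`, `reg_weight_at_epoch`), the use of `--lambda_target` as the hinge offset, and the linear ramp shape are illustrative assumptions, not code taken from `depth_scaling_benchmark.py`.

```python
import torch


def hinge_lyapunov_penalty(lyap: torch.Tensor, target: float = 0.0) -> torch.Tensor:
    """Hinge penalty max(0, lambda - target)^2, averaged over the batch.

    With target = 0 this matches the comment in the sbatch script
    (max(0, lambda)^2): only positive (chaotic) exponents are penalized,
    while negative (stable) exponents contribute nothing. The script also
    passes --lambda_target -0.1; whether that value shifts the hinge point
    like this is an assumption.
    """
    return torch.clamp(lyap - target, min=0.0).pow(2).mean()


def reg_weight_at_epoch(epoch: int, lambda_reg: float = 0.3, warmup_epochs: int = 20) -> float:
    """Keep the penalty off during warmup so the network can start learning,
    then ramp it linearly up to its full weight (ramp shape is assumed)."""
    if epoch < warmup_epochs:
        return 0.0
    ramp = min(1.0, (epoch - warmup_epochs + 1) / max(1, warmup_epochs))
    return lambda_reg * ramp


# Example combination inside a training loop:
#   loss = task_loss + reg_weight_at_epoch(epoch) * hinge_lyapunov_penalty(lyap_estimates, target=-0.1)
```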

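For reference, the invocation in the script implies that `files/experiments/depth_scaling_benchmark.py` accepts roughly the interface below. This is a hypothetical argparse reconstruction based only on the flags that appear in the sbatch file; the real parser's defaults, help text, and any additional options are not shown in this diff.

```python
import argparse


def build_parser() -> argparse.ArgumentParser:
    # Hypothetical CLI reconstruction; types are inferred from the values
    # passed in run_depth_scaling_hinge.sbatch.
    p = argparse.ArgumentParser(description="SNN depth-scaling benchmark")
    p.add_argument("--dataset", type=str)            # e.g. cifar100
    p.add_argument("--depths", type=int, nargs="+")  # e.g. 4 8 12 16 conv layers
    p.add_argument("--T", type=int)                  # simulation time steps
    p.add_argument("--epochs", type=int)
    p.add_argument("--batch_size", type=int)
    p.add_argument("--lr", type=float)
    p.add_argument("--lambda_reg", type=float)       # weight of the Lyapunov penalty
    p.add_argument("--lambda_target", type=float)    # target Lyapunov exponent
    p.add_argument("--reg_type", type=str)           # e.g. "hinge"
    p.add_argument("--warmup_epochs", type=int)
    p.add_argument("--data_dir", type=str)
    p.add_argument("--out_dir", type=str)
    p.add_argument("--device", type=str)             # e.g. "cuda"
    return p


if __name__ == "__main__":
    args = build_parser().parse_args()
```

The job itself would typically be queued from the repository root with `sbatch scripts/run_depth_scaling_hinge.sbatch`, so the relative `runs/` and `data` paths created by the script resolve correctly.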