| | | |
|---|---|---|
| author | YurenHao0426 <blackhao0426@gmail.com> | 2026-01-13 23:49:05 -0600 |
| committer | YurenHao0426 <blackhao0426@gmail.com> | 2026-01-13 23:49:05 -0600 |
| commit | cd99d6b874d9d09b3bb87b8485cc787885af71f1 (patch) | |
| tree | 59a233959932ca0e4f12f196275e07fcf443b33f /scripts/run_posthoc.sbatch | |
init commit
Diffstat (limited to 'scripts/run_posthoc.sbatch')
| -rw-r--r-- | scripts/run_posthoc.sbatch | 59 |
1 file changed, 59 insertions, 0 deletions
```diff
diff --git a/scripts/run_posthoc.sbatch b/scripts/run_posthoc.sbatch
new file mode 100644
index 0000000..ce81918
--- /dev/null
+++ b/scripts/run_posthoc.sbatch
@@ -0,0 +1,59 @@
+#!/bin/bash
+#SBATCH --job-name=snn_posthoc
+#SBATCH --account=bfqt-delta-gpu
+#SBATCH --partition=gpuA40x4
+#SBATCH --nodes=1
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=8
+#SBATCH --gpus-per-node=1
+#SBATCH --mem=64G
+#SBATCH --time=48:00:00
+#SBATCH --output=runs/slurm_logs/%j_posthoc.out
+#SBATCH --error=runs/slurm_logs/%j_posthoc.err
+
+# ============================================================
+# Experiment: Post-hoc Lyapunov Fine-tuning
+# ============================================================
+# Strategy:
+#   1. Train vanilla network for 100 epochs (learn features)
+#   2. Fine-tune with Lyapunov regularization for 50 epochs
+#
+# This allows the network to learn first, then we stabilize
+# the dynamics without fighting chaotic initialization.
+# ============================================================
+
+set -e
+
+PROJECT_DIR="/projects/bfqt/users/yurenh2/ml-projects/snn-training"
+cd "$PROJECT_DIR"
+
+mkdir -p runs/slurm_logs data runs/posthoc_finetune
+
+echo "============================================================"
+echo "POST-HOC FINE-TUNING Experiment"
+echo "Job ID: $SLURM_JOB_ID | Node: $SLURM_NODELIST"
+echo "Start: $(date)"
+echo "============================================================"
+nvidia-smi --query-gpu=name,memory.total --format=csv,noheader
+echo "============================================================"
+
+python files/experiments/posthoc_finetune.py \
+    --dataset cifar100 \
+    --depths 4 8 12 16 \
+    --T 4 \
+    --pretrain_epochs 100 \
+    --finetune_epochs 50 \
+    --batch_size 128 \
+    --lr 0.001 \
+    --finetune_lr 0.0001 \
+    --lambda_reg 0.1 \
+    --lambda_target -0.1 \
+    --reg_type extreme \
+    --lyap_threshold 2.0 \
+    --data_dir ./data \
+    --out_dir runs/posthoc_finetune \
+    --device cuda
+
+echo "============================================================"
+echo "Finished: $(date)"
+echo "============================================================"
```
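The experiment driver `files/experiments/posthoc_finetune.py` is not part of this diff, so the sketch below is hypothetical: a local smoke test of the same invocation before committing a 48-hour GPU allocation. It only shrinks the epoch and batch budgets; every flag name comes from the sbatch script above, and none of the small values are tuned.

```bash
# Hypothetical smoke test: same driver, same flags, tiny budget.
# Run from the project root so ./data and runs/ resolve as in the script.
python files/experiments/posthoc_finetune.py \
    --dataset cifar100 \
    --depths 4 \
    --T 4 \
    --pretrain_epochs 1 \
    --finetune_epochs 1 \
    --batch_size 32 \
    --lr 0.001 \
    --finetune_lr 0.0001 \
    --lambda_reg 0.1 \
    --lambda_target -0.1 \
    --reg_type extreme \
    --lyap_threshold 2.0 \
    --data_dir ./data \
    --out_dir runs/posthoc_smoke \
    --device cuda  # assumes a local GPU; whether the driver accepts "cpu" is untested
```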
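Submitting and monitoring the job uses standard SLURM tooling. The `--account` and `--partition` values in the header are specific to this cluster, and the `#SBATCH --output`/`--error` paths are relative to the directory you submit from, even though the script `cd`s to `$PROJECT_DIR` on its own.

```bash
# Submit from the project root so the relative log paths resolve
cd /projects/bfqt/users/yurenh2/ml-projects/snn-training
sbatch scripts/run_posthoc.sbatch

# Watch the queue; the job appears under the name snn_posthoc
squeue -u "$USER"

# Once it starts, follow stdout (replace <jobid> with the ID sbatch printed)
tail -f runs/slurm_logs/<jobid>_posthoc.out
```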
