| author | YurenHao0426 <blackhao0426@gmail.com> | 2026-01-13 23:49:05 -0600 |
|---|---|---|
| committer | YurenHao0426 <blackhao0426@gmail.com> | 2026-01-13 23:49:05 -0600 |
| commit | cd99d6b874d9d09b3bb87b8485cc787885af71f1 (patch) | |
| tree | 59a233959932ca0e4f12f196275e07fcf443b33f /scripts/run_depth_experiment.sbatch | |
init commit
Diffstat (limited to 'scripts/run_depth_experiment.sbatch')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | scripts/run_depth_experiment.sbatch | 95 |

1 file changed, 95 insertions, 0 deletions
diff --git a/scripts/run_depth_experiment.sbatch b/scripts/run_depth_experiment.sbatch
new file mode 100644
index 0000000..0e03d82
--- /dev/null
+++ b/scripts/run_depth_experiment.sbatch
@@ -0,0 +1,95 @@
+#!/bin/bash
+#SBATCH --job-name=snn_depth_exp
+#SBATCH --account=bfqt-delta-gpu
+#SBATCH --partition=gpuA40x4
+#SBATCH --nodes=1
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=4
+#SBATCH --gpus-per-node=1
+#SBATCH --mem=32G
+#SBATCH --time=02:00:00
+#SBATCH --output=runs/slurm_logs/%j_depth_exp.out
+#SBATCH --error=runs/slurm_logs/%j_depth_exp.err
+
+# ============================================================
+# Depth Comparison Experiment: Vanilla vs Lyapunov SNN
+# ============================================================
+# Compares training stability across network depths (1-8 layers)
+# Hypothesis: Lyapunov regularization enables training of deeper networks
+#
+# Usage:
+#   sbatch scripts/run_depth_experiment.sbatch
+#
+# Or with custom parameters:
+#   sbatch --export=ALL,EPOCHS=50,DEPTHS="1 2 4 6 8" scripts/run_depth_experiment.sbatch
+# ============================================================
+
+set -e
+
+# Default parameters (can be overridden via --export)
+EPOCHS=${EPOCHS:-30}
+DEPTHS=${DEPTHS:-"1 2 3 4 6"}
+HIDDEN_DIM=${HIDDEN_DIM:-128}
+LAMBDA_REG=${LAMBDA_REG:-0.1}
+LR=${LR:-0.001}
+SEED=${SEED:-42}
+USE_SYNTHETIC=${USE_SYNTHETIC:-true}
+
+# Project directory
+PROJECT_DIR="/projects/bfqt/users/yurenh2/ml-projects/snn-training"
+cd "$PROJECT_DIR"
+
+# Create log directory
+mkdir -p runs/slurm_logs
+
+# Print job info
+echo "============================================================"
+echo "Job ID: $SLURM_JOB_ID"
+echo "Node: $SLURM_NODELIST"
+echo "Start time: $(date)"
+echo "============================================================"
+echo "Configuration:"
+echo "  EPOCHS: $EPOCHS"
+echo "  DEPTHS: $DEPTHS"
+echo "  HIDDEN_DIM: $HIDDEN_DIM"
+echo "  LAMBDA_REG: $LAMBDA_REG"
+echo "  LR: $LR"
+echo "  USE_SYNTHETIC: $USE_SYNTHETIC"
+echo "============================================================"
+
+# Check GPU
+nvidia-smi --query-gpu=name,memory.total --format=csv,noheader
+echo "============================================================"
+
+# Build command
+CMD="python files/experiments/depth_comparison.py"
+CMD="$CMD --epochs $EPOCHS"
+CMD="$CMD --depths $DEPTHS"
+CMD="$CMD --hidden_dim $HIDDEN_DIM"
+CMD="$CMD --lambda_reg $LAMBDA_REG"
+CMD="$CMD --lr $LR"
+CMD="$CMD --seed $SEED"
+CMD="$CMD --out_dir runs/depth_comparison"
+CMD="$CMD --device cuda"
+
+if [ "$USE_SYNTHETIC" = true ]; then
+    CMD="$CMD --synthetic"
+fi
+
+echo "Running: $CMD"
+echo "============================================================"
+
+# Run experiment
+$CMD
+
+# Generate plots if results exist
+LATEST_RUN=$(ls -td runs/depth_comparison/*/ 2>/dev/null | head -1)
+if [ -n "$LATEST_RUN" ]; then
+    echo "============================================================"
+    echo "Generating plots for: $LATEST_RUN"
+    python files/experiments/plot_depth_comparison.py --results_dir "$LATEST_RUN"
+fi
+
+echo "============================================================"
+echo "Job finished: $(date)"
+echo "============================================================"
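As a usage sketch (not part of the commit itself), the snippet below shows how this job would typically be submitted and watched on a Slurm cluster. The paths and variable names come from the script above; the job ID in the `tail` line is a hypothetical placeholder, and `ALL,` in `--export` keeps the rest of the submission environment alongside the overrides.

```bash
# Submit with the defaults baked into the script
sbatch scripts/run_depth_experiment.sbatch

# Submit with overridden parameters (example values)
sbatch --export=ALL,EPOCHS=50,DEPTHS="1 2 4 6 8" scripts/run_depth_experiment.sbatch

# Check the queue, then follow the job's log (1234567 stands in for the real job ID)
squeue -u "$USER"
tail -f runs/slurm_logs/1234567_depth_exp.out
```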
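Once the job finishes, here is a minimal sketch for inspecting the output interactively, reusing the same latest-run lookup the script performs; the files inside the run directory depend on whatever `depth_comparison.py` writes and are not assumed here.

```bash
# Find the most recent run directory (same lookup as in the sbatch script)
LATEST_RUN=$(ls -td runs/depth_comparison/*/ 2>/dev/null | head -1)
echo "Latest run: $LATEST_RUN"

# See what the experiment wrote there
ls -lh "$LATEST_RUN"

# Re-generate the plots by hand if needed
python files/experiments/plot_depth_comparison.py --results_dir "$LATEST_RUN"
```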
