diff options
| author | YurenHao0426 <blackhao0426@gmail.com> | 2026-02-11 14:21:11 -0600 |
|---|---|---|
| committer | YurenHao0426 <blackhao0426@gmail.com> | 2026-02-11 14:21:11 -0600 |
| commit | c69a4c6e3596f75bd392c27d3c072adc825ce497 (patch) | |
| tree | 3d5c836a7adbf7e5f51e945c576e074fc13fd4cb /scripts | |
| parent | 039c12d3cf7178db6a7d80b02cf022d67231014e (diff) | |
- A12 (logit=3): NLL 2.76, A13 (logit=0): NLL 3.51, A14 (logit=1): NLL 3.26
- All worse than baseline (2.46). Lower init_logit = more deviation = worse NLL
- Confirms: gradient flows (gates move), but A=1 is global optimum for frozen model
- Added Dolma streaming retry logic (max 10 retries, 30s wait)
- Phase 1 frozen approach has fundamental limitation; Phase 2 (unfreeze) needed
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Diffstat (limited to 'scripts')
| -rw-r--r-- | scripts/slurm_a12.sh | 22 | ||||
| -rw-r--r-- | scripts/slurm_a13.sh | 22 | ||||
| -rw-r--r-- | scripts/slurm_a14.sh | 22 |
3 files changed, 66 insertions, 0 deletions
diff --git a/scripts/slurm_a12.sh b/scripts/slurm_a12.sh
new file mode 100644
index 0000000..941caae
--- /dev/null
+++ b/scripts/slurm_a12.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#SBATCH --signal=SIGUSR1@120
+export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache
+export TRANSFORMERS_CACHE=/projects/bfqt/users/yurenh2/hf_cache/transformers
+export HF_HUB_CACHE=/projects/bfqt/users/yurenh2/hf_cache/hub
+export HF_DATASETS_CACHE=/projects/bfqt/users/yurenh2/hf_cache/datasets
+export TOKENIZERS_PARALLELISM=false
+
+export PYTHONPATH=/projects/bfqt/users/yurenh2/ml-projects/DAGFormer:$PYTHONPATH
+export PATH=$HOME/.local/bin:$PATH
+
+cd /projects/bfqt/users/yurenh2/ml-projects/DAGFormer
+mkdir -p logs checkpoints/a12
+
+echo "=== Job Info ==="
+echo "Job ID: $SLURM_JOB_ID"
+echo "Node: $SLURM_NODELIST"
+echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader)"
+echo ""
+
+echo "=== Starting A12: init_logit=3.0 ==="
+python3 -u scripts/train.py --config configs/a12_init_logit_3.yaml
diff --git a/scripts/slurm_a13.sh b/scripts/slurm_a13.sh
new file mode 100644
index 0000000..8b4e5c0
--- /dev/null
+++ b/scripts/slurm_a13.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#SBATCH --signal=SIGUSR1@120
+export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache
+export TRANSFORMERS_CACHE=/projects/bfqt/users/yurenh2/hf_cache/transformers
+export HF_HUB_CACHE=/projects/bfqt/users/yurenh2/hf_cache/hub
+export HF_DATASETS_CACHE=/projects/bfqt/users/yurenh2/hf_cache/datasets
+export TOKENIZERS_PARALLELISM=false
+
+export PYTHONPATH=/projects/bfqt/users/yurenh2/ml-projects/DAGFormer:$PYTHONPATH
+export PATH=$HOME/.local/bin:$PATH
+
+cd /projects/bfqt/users/yurenh2/ml-projects/DAGFormer
+mkdir -p logs checkpoints/a13
+
+echo "=== Job Info ==="
+echo "Job ID: $SLURM_JOB_ID"
+echo "Node: $SLURM_NODELIST"
+echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader)"
+echo ""
+
+echo "=== Starting A13: init_logit=0.0 ==="
+python3 -u scripts/train.py --config configs/a13_init_logit_0.yaml
diff --git a/scripts/slurm_a14.sh b/scripts/slurm_a14.sh
new file mode 100644
index 0000000..a5daabe
--- /dev/null
+++ b/scripts/slurm_a14.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#SBATCH --signal=SIGUSR1@120
+export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache
+export TRANSFORMERS_CACHE=/projects/bfqt/users/yurenh2/hf_cache/transformers
+export HF_HUB_CACHE=/projects/bfqt/users/yurenh2/hf_cache/hub
+export HF_DATASETS_CACHE=/projects/bfqt/users/yurenh2/hf_cache/datasets
+export TOKENIZERS_PARALLELISM=false
+
+export PYTHONPATH=/projects/bfqt/users/yurenh2/ml-projects/DAGFormer:$PYTHONPATH
+export PATH=$HOME/.local/bin:$PATH
+
+cd /projects/bfqt/users/yurenh2/ml-projects/DAGFormer
+mkdir -p logs checkpoints/a14
+
+echo "=== Job Info ==="
+echo "Job ID: $SLURM_JOB_ID"
+echo "Node: $SLURM_NODELIST"
+echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader)"
+echo ""
+
+echo "=== Starting A14: init_logit=1.0 ==="
+python3 -u scripts/train.py --config configs/a14_init_logit_1.yaml
