Diffstat (limited to 'collaborativeagents/scripts/exp_reflection.sbatch')
 collaborativeagents/scripts/exp_reflection.sbatch | 76 ++++++++++++++++++++++
 1 file changed, 76 insertions(+), 0 deletions(-)
diff --git a/collaborativeagents/scripts/exp_reflection.sbatch b/collaborativeagents/scripts/exp_reflection.sbatch
new file mode 100644
index 0000000..2c94495
--- /dev/null
+++ b/collaborativeagents/scripts/exp_reflection.sbatch
@@ -0,0 +1,76 @@
+#!/bin/bash
+#SBATCH --job-name=exp_reflection
+#SBATCH --account=bfqt-delta-gpu
+#SBATCH --partition=gpuH200x8
+#SBATCH --gres=gpu:4
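+# 4 GPUs total: two for the user-simulator vLLM server, two for the agent server.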
+#SBATCH --nodes=1
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=32
+#SBATCH --mem=256G
+#SBATCH --time=24:00:00
+#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/exp_reflection-%j.out
+#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/exp_reflection-%j.err
+
+set -e
+# Make sure the background vLLM servers are killed when the job exits,
+# even if the experiment fails partway through under `set -e`.
+trap 'pkill -f "vllm.entrypoints" 2>/dev/null || true' EXIT
+cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents
+source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
+conda activate eval
+
+export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
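+# Route the Hugging Face model cache to project storage instead of $HOME.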
+export PYTHONPATH="${PWD}:${PWD}/scripts:${PWD}/../src${PYTHONPATH:+:${PYTHONPATH}}"
+
+MODEL_8B="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
+PROFILE_PATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/data/complex_profiles_v2/profiles_200.jsonl"
+
+echo "=== reflection (vLLM-based) ==="
+date
+nvidia-smi --query-gpu=index,name,memory.total --format=csv
+
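+# Kill any stale vLLM servers left over from a previous run on this node.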
+pkill -f "vllm.entrypoints" 2>/dev/null || true
+sleep 2
+
+# GPU 0,1: vLLM server for user simulator (port 8004)
+echo "Starting user simulator vLLM server on GPU 0,1 (port 8004)..."
+CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
+  --model "$MODEL_8B" --port 8004 --tensor-parallel-size 2 \
+ --gpu-memory-utilization 0.90 --max-model-len 8192 \
+ --disable-log-requests --dtype bfloat16 &
+
+# GPU 2,3: vLLM server for agent (port 8003)
+echo "Starting agent vLLM server on GPU 2,3 (port 8003)..."
+CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
+  --model "$MODEL_8B" --port 8003 --tensor-parallel-size 2 \
+ --gpu-memory-utilization 0.90 --max-model-len 8192 \
+ --disable-log-requests --dtype bfloat16 &
+
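+# Poll /health every 3s for up to 10 minutes; loading the 8B model can take a few minutes.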
+echo "Waiting for vLLM servers..."
+for i in $(seq 1 200); do
+  user_ready=$(curl -sf http://localhost:8004/health > /dev/null 2>&1 && echo 1 || echo 0)
+  agent_ready=$(curl -sf http://localhost:8003/health > /dev/null 2>&1 && echo 1 || echo 0)
+  if [ "$user_ready" = "1" ] && [ "$agent_ready" = "1" ]; then
+    echo "Both servers ready after $((i*3))s"; break
+  fi
+  if [ "$i" = "200" ]; then
+    echo "ERROR: vLLM servers not ready after $((i*3))s" >&2; exit 1
+  fi
+  sleep 3
+done
+
+# Run experiment (uses vLLM HTTP API, no local GPU needed)
+python scripts/run_experiments.py \
+ --methods reflection \
+ --datasets math-hard,math-500,bigcodebench \
+ --n-profiles 200 --n-sessions 30 --max-turns 15 \
+ --use-vllm --parallel-profiles 50 \
+ --output-dir ../results/full_h200 \
+ --profile-path "$PROFILE_PATH"
+
+pkill -f "vllm.entrypoints" 2>/dev/null || true
+echo "Done: $(date)"