Diffstat (limited to 'collaborativeagents/scripts/contextual_test_small.sbatch')
| -rw-r--r-- | collaborativeagents/scripts/contextual_test_small.sbatch | 80 |
1 file changed, 80 insertions, 0 deletions
diff --git a/collaborativeagents/scripts/contextual_test_small.sbatch b/collaborativeagents/scripts/contextual_test_small.sbatch
new file mode 100644
index 0000000..83c20ef
--- /dev/null
+++ b/collaborativeagents/scripts/contextual_test_small.sbatch
@@ -0,0 +1,80 @@
+#!/bin/bash
+#SBATCH --job-name=ctx_test
+#SBATCH --account=bfqt-delta-gpu
+#SBATCH --partition=gpuH200x8-interactive
+#SBATCH --nodes=1
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=16
+#SBATCH --gres=gpu:4
+#SBATCH --mem=100G
+#SBATCH --time=00:20:00
+#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/ctx_test-%j.out
+#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/ctx_test-%j.err
+
+# Small-scale contextual test: 1 profile, 15 sessions
+# Testing fix: token estimation ratio changed from 4:1 to 2.5:1
+
+cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents
+source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
+conda activate eval
+export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
+export PYTHONPATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/src:$PYTHONPATH"
+
+PROFILE_PATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/data/complex_profiles_v2/profiles_200.jsonl"
+AGENT_MODEL="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
+USER_MODEL="meta-llama/Llama-3.1-70B-Instruct"
+
+echo "=== Contextual Test (Token Fix) ==="
+echo "Fix: token estimation 4:1 -> 2.5:1"
+echo "1 profile, 15 sessions"
+date
+nvidia-smi --query-gpu=index,name,memory.total --format=csv
+
+# Start vLLM servers
+# User simulator: GPUs 0,1
+CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
+    --model $USER_MODEL \
+    --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
+    --max-model-len 16384 --dtype bfloat16 --download-dir $HF_HOME &
+
+# Agent: GPUs 2,3
+CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
+    --model $AGENT_MODEL \
+    --port 8003 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
+    --max-model-len 16384 --dtype bfloat16 &
+
+echo "Waiting for vLLM servers..."
+for i in {1..200}; do
+    if curl -s http://localhost:8004/health > /dev/null 2>&1; then
+        echo "User simulator ready after $((i*5))s"
+        break
+    fi
+    sleep 5
+done
+for i in {1..60}; do
+    if curl -s http://localhost:8003/health > /dev/null 2>&1; then
+        echo "Agent ready after $((i*5))s"
+        break
+    fi
+    sleep 5
+done
+sleep 5
+
+OUTPUT_DIR="../results/contextual_test_$(date +%Y%m%d_%H%M%S)"
+
+echo ""
+echo "============================================"
+echo "Testing: contextual (with token fix)"
+echo "============================================"
+date
+
+python scripts/run_experiments.py --methods contextual \
+    --datasets math-hard --n-profiles 1 --n-sessions 15 --max-turns 15 \
+    --use-vllm --no-batch-processing --parallel-profiles 1 \
+    --output-dir $OUTPUT_DIR --profile-path $PROFILE_PATH
+
+echo ""
+echo "=== Done ==="
+date
+
+pkill -f "vllm.entrypoints" 2>/dev/null || true
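For reference, a minimal usage sketch, assuming the paths and ports defined in the script above; the <jobid> placeholder and the probe request below are illustrative, not part of the commit:

# Submit the batch job and follow its log (substitute the job ID sbatch prints):
sbatch collaborativeagents/scripts/contextual_test_small.sbatch
squeue -u "$USER"
tail -f /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/ctx_test-<jobid>.out

# Once the /health checks pass, the vLLM OpenAI-compatible endpoints can be
# exercised directly, e.g. listing served models and sending a tiny chat request:
curl -s http://localhost:8004/v1/models
curl -s http://localhost:8004/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{"model": "meta-llama/Llama-3.1-70B-Instruct", "messages": [{"role": "user", "content": "ping"}], "max_tokens": 8}'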
