#!/bin/bash
#SBATCH --job-name=fullscale        # overridden by --job-name on the sbatch command line
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuH200x8
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:4
#SBATCH --mem=200G
#SBATCH --time=30:00:00
#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/fs_%x-%j.out
#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/fs_%x-%j.err

# Usage: sbatch --job-name=vanilla fullscale_method.sbatch vanilla
METHOD=$1

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/src:$PYTHONPATH"

PROFILE_PATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/data/complex_profiles_v2/profiles_200.jsonl"
AGENT_MODEL="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"

# Full-precision 70B for the user simulator (H200 at 143GB/GPU can hold it with TP=2)
USER_MODEL="meta-llama/Llama-3.1-70B-Instruct"

# vLLM memory fraction and parallel workers for methods that need a preference
# extractor. These methods also need GPU memory for the embedding/reranker/extractor
# models on GPUs 2,3.
if [[ "$METHOD" == "all_memory" || "$METHOD" == "rag" || "$METHOD" == "rag_vector" ]]; then
    AGENT_MEM=0.40        # Leave 60% free for the embedding/reranker/extractor models
    PARALLEL_PROFILES=30  # With CUDA_VISIBLE_DEVICES=2,3 below, the extractor uses the correct GPUs
else
    AGENT_MEM=0.90
    PARALLEL_PROFILES=50
fi

echo "=== Starting vLLM servers ==="
echo "Method: $METHOD"
echo "User simulator: $USER_MODEL (70B full-precision)"
echo "Agent: $AGENT_MODEL (8B)"
echo "Agent memory: $AGENT_MEM"
date

# User simulator on GPUs 0,1 (70B full-precision, ~70GB/GPU with TP=2)
CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
    --model "$USER_MODEL" \
    --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
    --max-model-len 16384 --dtype bfloat16 --download-dir "$HF_HOME" &

# Agent on GPUs 2,3
CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
    --model "$AGENT_MODEL" \
    --port 8003 --tensor-parallel-size 2 --gpu-memory-utilization "$AGENT_MEM" \
    --max-model-len 16384 --dtype bfloat16 &

# Wait for the 70B model to load (typically 9-12 minutes)
echo "Waiting for vLLM servers to be ready (this may take 10-15 minutes for the 70B model)..."
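# Optional diagnostic (safe to remove): snapshot per-GPU memory into the job
# log so a failed or slow model load is easier to debug after the fact. These
# nvidia-smi query fields are standard on NVIDIA driver installs.
nvidia-smi --query-gpu=index,name,memory.used,memory.total --format=csv || true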
for i in {1..200}; do
    if curl -s http://localhost:8004/health > /dev/null 2>&1; then
        echo "User simulator (8004) ready after $((i * 5)) seconds"
        break
    fi
    sleep 5
done

for i in {1..60}; do
    if curl -s http://localhost:8003/health > /dev/null 2>&1; then
        echo "Agent (8003) ready after $((i * 5)) seconds"
        break
    fi
    sleep 5
done

# Abort if either server never came up; otherwise the experiment below would
# run for hours against a dead endpoint.
for port in 8004 8003; do
    if ! curl -s "http://localhost:${port}/health" > /dev/null 2>&1; then
        echo "vLLM server on port ${port} is not responding; aborting." >&2
        pkill -f "vllm.entrypoints" 2>/dev/null || true
        exit 1
    fi
done

echo "Both vLLM servers ready"
sleep 10

# Batch processing is enabled only for the vanilla method
if [[ "$METHOD" == "vanilla" ]]; then
    EXTRA_ARGS="--use-batch-processing --batch-size 100"
else
    EXTRA_ARGS="--no-batch-processing"
fi

echo "Parallel profiles: $PARALLEL_PROFILES"

# Run the experiment with CUDA_VISIBLE_DEVICES=2,3 so the preference extractor,
# embedding, and reranker models land on GPUs 2,3 (which have headroom) rather
# than GPUs 0,1 (saturated by the 70B user simulator).
CUDA_VISIBLE_DEVICES=2,3 python scripts/run_experiments.py --methods "$METHOD" \
    --datasets math-hard --n-profiles 200 --n-sessions 30 --max-turns 15 \
    --use-vllm $EXTRA_ARGS --parallel-profiles "$PARALLEL_PROFILES" \
    --output-dir ../results/fullscale --profile-path "$PROFILE_PATH"

# Shut down the vLLM servers started above
pkill -f "vllm.entrypoints" 2>/dev/null || true
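# Example submissions, one job per method (the submit-time job name feeds the
# fs_%x-%j log file pattern declared above):
#   sbatch --job-name=vanilla    fullscale_method.sbatch vanilla
#   sbatch --job-name=rag        fullscale_method.sbatch rag
#   sbatch --job-name=rag_vector fullscale_method.sbatch rag_vector
#   sbatch --job-name=all_memory fullscale_method.sbatch all_memory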