#!/bin/bash
#SBATCH --job-name=ctx_forget
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuH200x8-interactive
#SBATCH --gres=gpu:4
#SBATCH --time=01:00:00
#SBATCH --mem=200G
#SBATCH --cpus-per-task=32
#SBATCH --output=%x-%j.out
#SBATCH --error=%x-%j.err
#
# Contextual-forgetting experiment: launch two vLLM OpenAI-compatible servers
# (70B user simulator on GPUs 0,1; 8B agent on GPUs 2,3), then wait for their
# health endpoints and run the experiment driver (see below).
#
# NOTE: every #SBATCH directive must be on its own line immediately after the
# shebang, or SLURM silently ignores it.

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model || exit 1

# Activate the conda env that provides vLLM and the experiment dependencies.
source ~/miniforge3/etc/profile.d/conda.sh
conda activate eval

# Hugging Face cache on project storage (home quota too small for 70B weights).
export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export TRANSFORMERS_CACHE=/projects/bfqt/users/yurenh2/hf_cache/huggingface  # legacy alias of HF_HOME
# ${PYTHONPATH:-} avoids a dangling ':' (implicit cwd entry) when unset.
export PYTHONPATH=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model:${PYTHONPATH:-}

PROFILE_PATH="collaborativeagents/data/complex_profiles_v2/profiles_200.jsonl"
AGENT_MODEL="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
USER_MODEL="meta-llama/Llama-3.1-70B-Instruct"

echo "=== Starting vLLM servers ==="
date

# User simulator on GPUs 0,1 (70B, tensor-parallel across both GPUs).
CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
  --model "$USER_MODEL" \
  --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
  --max-model-len 16384 --dtype bfloat16 --download-dir "$HF_HOME" &

# Agent on GPUs 2,3 (8B; weights already on local disk, no download dir).
CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
  --model "$AGENT_MODEL" \
  --port 8003 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
  --max-model-len 16384 --dtype bfloat16 &

# Wait for servers
echo "Waiting for vLLM servers..."
# Poll the user simulator's health endpoint. 200 * 5s ≈ 17 min budget:
# the first boot may need to download and shard the 70B weights.
user_ready=0
for i in {1..200}; do
  if curl -s http://localhost:8004/health > /dev/null 2>&1; then
    echo "User simulator (8004) ready after $((i*5)) seconds"
    user_ready=1
    break
  fi
  sleep 5
done

# The 8B agent loads from local disk, so a 5-minute budget suffices.
agent_ready=0
for i in {1..60}; do
  if curl -s http://localhost:8003/health > /dev/null 2>&1; then
    echo "Agent (8003) ready after $((i*5)) seconds"
    agent_ready=1
    break
  fi
  sleep 5
done

# Fail fast rather than running the experiment against dead servers
# (previously the script fell through the timeout and ran anyway).
if [[ "$user_ready" -ne 1 || "$agent_ready" -ne 1 ]]; then
  echo "ERROR: vLLM server(s) did not become healthy in time; aborting." >&2
  pkill -f "vllm.entrypoints" 2>/dev/null || true
  exit 1
fi

echo "Both vLLM servers ready"
sleep 10

# Run contextual with reduced memory limits (4000 tokens, 15 turns)
CUDA_VISIBLE_DEVICES=2,3 python collaborativeagents/scripts/run_experiments.py \
  --methods contextual \
  --n-profiles 5 \
  --n-sessions 15 \
  --output-dir results/scale_test_contextual_forget \
  --profile-path "$PROFILE_PATH" \
  --datasets math-hard \
  --use-vllm --parallel-profiles 30 --no-batch-processing
status=$?

# Always tear down the servers so the allocation's GPUs are released, but
# report the experiment's exit status (not pkill's) as the job result.
pkill -f "vllm.entrypoints" 2>/dev/null || true
exit "$status"