#!/bin/bash
#SBATCH --job-name=gpt_user_test
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA100x4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=16
#SBATCH --gres=gpu:nvidia_a100:2
#SBATCH --mem=128G
#SBATCH --time=4:00:00
#SBATCH --output=gpt_user_test_%j.out
#SBATCH --error=gpt_user_test_%j.err

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="${PWD}/src:${PWD}/collaborativeagents:${PYTHONPATH}"
export NCCL_P2P_DISABLE=1

# Load OpenAI API key
set -a
source .env
set +a

pip install --quiet openai python-dotenv json-repair

MODEL_8B="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"

# Kill any stale vLLM servers left over from a previous run
pkill -f "vllm.entrypoints" 2>/dev/null || true
sleep 2

# GPU 0: vLLM 8B agent, GPU 1: adapter models (embedding/extractor/reranker)
CUDA_VISIBLE_DEVICES=0 python -m vllm.entrypoints.openai.api_server \
    --model "$MODEL_8B" --port 8003 --tensor-parallel-size 1 \
    --gpu-memory-utilization 0.5 --max-model-len 8192 \
    --dtype bfloat16 --disable-log-requests &

# Wait for server (poll the health endpoint for up to ~3 minutes)
for i in $(seq 1 90); do
    curl -s http://localhost:8003/health > /dev/null 2>&1 && break
    sleep 2
done
echo "vLLM 8B agent server ready."

cd collaborativeagents/scripts

# Scale test: 20 profiles in parallel, 5 sessions each, all 6 methods
python run_experiments.py \
    --methods vanilla,contextual,reflection,all_memory,rag,rag_vector \
    --datasets math-hard \
    --n-profiles 20 \
    --n-sessions 5 \
    --max-turns 8 \
    --use-vllm \
    --use-openai-user \
    --openai-user-model gpt-5 \
    --reward-mode llm \
    --vllm-agent-url http://localhost:8003/v1 \
    --parallel-profiles 20 \
    --profile-path ../data/complex_profiles_v2/profiles_200.jsonl \
    --output-dir ../results/gpt_user_scale_test_$(date +%Y%m%d_%H%M%S)

echo "All methods test complete!"

# Shut down the background vLLM server
pkill -f "vllm.entrypoints" 2>/dev/null || true
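
# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the script filename below is an assumption, not
# something defined in this repo):
#   sbatch run_gpt_user_test.slurm      # submit to the gpuA100x4 partition
#   squeue -u $USER                     # check queue / run status
#   tail -f gpt_user_test_<jobid>.out   # follow stdout once the job starts
# The %j in the #SBATCH --output/--error lines expands to the Slurm job ID,
# so each submission writes its own pair of log files.
# ---------------------------------------------------------------------------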