#!/bin/bash
#SBATCH --job-name=smalltest
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuH200x8
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:4
#SBATCH --mem=200G
#SBATCH --time=02:00:00
#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/smalltest-%j.out
#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/smalltest-%j.err

# Small-scale test: 5 profiles, 5 sessions, all 6 methods
# Full settings (70B user simulator, 8B agent) but fewer questions

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents || exit 1
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/src:$PYTHONPATH"

PROFILE_PATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/data/complex_profiles_v2/profiles_200.jsonl"
AGENT_MODEL="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
USER_MODEL="meta-llama/Llama-3.1-70B-Instruct"

echo "=== Small-scale Test: All 6 Methods ==="
echo "Settings: 5 profiles, 5 sessions each, max 15 turns"
echo "User simulator: $USER_MODEL (70B)"
echo "Agent: $AGENT_MODEL (8B)"
date

# Start vLLM servers
# User simulator on GPUs 0,1 (70B, TP=2)
CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
    --model "$USER_MODEL" \
    --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
    --max-model-len 16384 --dtype bfloat16 --download-dir "$HF_HOME" &

# Agent on GPUs 2,3 (8B, TP=2; the lower memory fraction leaves room for
# the embedding/reranker models on the same GPUs)
CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
    --model "$AGENT_MODEL" \
    --port 8003 --tensor-parallel-size 2 --gpu-memory-utilization 0.45 \
    --max-model-len 16384 --dtype bfloat16 &

# Wait for servers (poll /health every 5 s; elapsed time is (i-1)*5 because
# the sleep follows the check)
echo "Waiting for vLLM servers..."
for i in {1..200}; do
    if curl -s http://localhost:8004/health > /dev/null 2>&1; then
        echo "User simulator (8004) ready after $(( (i-1)*5 )) seconds"
        break
    fi
    sleep 5
done
for i in {1..60}; do
    if curl -s http://localhost:8003/health > /dev/null 2>&1; then
        echo "Agent (8003) ready after $(( (i-1)*5 )) seconds"
        break
    fi
    sleep 5
done
echo "Both vLLM servers ready"
sleep 10

# Run all 6 methods sequentially at small scale
for METHOD in vanilla contextual reflection all_memory rag rag_vector; do
    echo ""
    echo "=== Testing method: $METHOD ==="
    date
    if python scripts/run_experiments.py --methods "$METHOD" \
        --datasets math-hard --n-profiles 5 --n-sessions 5 --max-turns 15 \
        --use-vllm --no-batch-processing --parallel-profiles 5 \
        --output-dir ../results/smalltest --profile-path "$PROFILE_PATH"; then
        echo "Method $METHOD: SUCCESS"
    else
        echo "Method $METHOD: FAILED"
    fi
done

echo ""
echo "=== Small-scale test complete ==="
date
pkill -f "vllm.entrypoints" 2>/dev/null || true
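
# ---------------------------------------------------------------------------
# Optional hardening (sketch, not wired in above). The wait loops above fall
# through silently if a server never becomes healthy, and the trailing pkill
# is skipped if the job dies mid-run. A minimal sketch, assuming the same
# ports and 5-second polling interval; the helper name wait_for is
# illustrative, not part of the pipeline:
#
#   cleanup() { pkill -f "vllm.entrypoints" 2>/dev/null || true; }
#   trap cleanup EXIT          # kill servers even on early exit
#
#   wait_for() {               # usage: wait_for <port> <label> <max_tries>
#       local port=$1 label=$2 tries=$3 i
#       for ((i=1; i<=tries; i++)); do
#           if curl -s "http://localhost:${port}/health" > /dev/null 2>&1; then
#               echo "${label} (${port}) ready after $(( (i-1)*5 )) seconds"
#               return 0
#           fi
#           sleep 5
#       done
#       echo "${label} (${port}) never became healthy; aborting" >&2
#       exit 1
#   }
#
#   wait_for 8004 "User simulator" 200
#   wait_for 8003 "Agent" 60
# ---------------------------------------------------------------------------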
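
# ---------------------------------------------------------------------------
# Optional smoke test (sketch). Before the experiment loop, one could confirm
# the agent server answers an OpenAI-style request via vLLM's OpenAI-compatible
# API; the prompt and token budget here are arbitrary:
#
#   curl -s http://localhost:8003/v1/completions \
#       -H "Content-Type: application/json" \
#       -d "{\"model\": \"$AGENT_MODEL\", \"prompt\": \"2+2=\", \"max_tokens\": 4}"
# ---------------------------------------------------------------------------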