#!/bin/bash
#SBATCH --job-name=test_vllm_speed
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuH200x8
#SBATCH --gres=gpu:4
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=16
#SBATCH --mem=128G
#SBATCH --time=00:30:00
#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/test_vllm_speed-%j.out
#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/test_vllm_speed-%j.err
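
# Layout: 4 GPUs on one node, used below as two tensor-parallel pairs
# (GPU 0,1 and GPU 2,3), so gpu:4 on a single node is exactly what the
# two-server tests need.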

set -e
cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval

export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="${PWD}:${PWD}/scripts:${PWD}/../src:${PYTHONPATH}"
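# PYTHONPATH makes the project-local adapters package importable from the
# inline python -c blocks below (which also insert '.' on sys.path).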

# Local snapshot of Llama 3.1 8B Instruct, served by the vLLM servers below.
MODEL_8B="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"

echo "=== vLLM Speed Test ==="
date
nvidia-smi --query-gpu=index,name,memory.used,memory.total --format=csv

# Kill any vLLM servers left over from a previous run on this node before
# starting fresh ones.
pkill -f "vllm.entrypoints" 2>/dev/null || true
sleep 2

echo ""
echo "=== Test 1: ContextualAdapter with vLLM (2 servers) ==="
echo "Starting vLLM servers on GPU 0,1 (user) and GPU 2,3 (agent)..."

CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
    --model $MODEL_8B --port 8004 --tensor-parallel-size 2 \
    --gpu-memory-utilization 0.90 --max-model-len 8192 \
    --disable-log-requests --dtype bfloat16 &

CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
    --model $MODEL_8B --port 8003 --tensor-parallel-size 2 \
    --gpu-memory-utilization 0.90 --max-model-len 8192 \
    --disable-log-requests --dtype bfloat16 &

# Poll both health endpoints for up to ~240s; loading an 8B model across two
# tensor-parallel workers can take a few minutes.
for i in $(seq 1 120); do
    u=$(curl -s http://localhost:8004/health > /dev/null 2>&1 && echo 1 || echo 0)
    a=$(curl -s http://localhost:8003/health > /dev/null 2>&1 && echo 1 || echo 0)
    if [ "$u" = "1" ] && [ "$a" = "1" ]; then
        echo "Both servers ready after ~$((i*2))s"; break
    fi
    sleep 2
done
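
# Fail fast with a clear message if either server never came up, instead of
# letting the benchmark below die on a connection error.
curl -sf http://localhost:8004/health > /dev/null || { echo "user server on :8004 not ready"; exit 1; }
curl -sf http://localhost:8003/health > /dev/null || { echo "agent server on :8003 not ready"; exit 1; }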

# Benchmark the agent-side adapter against the server on :8003; the user
# server on :8004 is started as well, presumably to match the two-server
# footprint of a real run, but is not exercised here.
python -c "
import time
import sys
sys.path.insert(0, '.')
from adapters.contextual_adapter import ContextualAdapter

print('Testing ContextualAdapter with vLLM...')
adapter = ContextualAdapter(vllm_url='http://localhost:8003/v1')
adapter.initialize()
adapter.start_session('test_user')

# Warm up: one untimed request so model and cache initialization is
# excluded from the benchmark below.
adapter.generate_response('Hello')

# Benchmark
n_requests = 20
start = time.time()
for i in range(n_requests):
    resp = adapter.generate_response(f'Solve: What is {i*7} + {i*3}? Give a brief answer.')
elapsed = time.time() - start

print(f'ContextualAdapter (vLLM): {n_requests} requests in {elapsed:.2f}s')
print(f'Throughput: {n_requests/elapsed:.2f} req/s = {n_requests/elapsed*3600:.0f} requests/hr')
print(f'Estimated sessions/hr (assuming 5 turns/session): {n_requests/elapsed*3600/5:.0f}')
"

# Tear down the test-1 servers and give them a moment to release GPU memory
# before test 2 starts.
pkill -f "vllm.entrypoints" 2>/dev/null || true
sleep 5

echo ""
echo "=== Test 2: PersonalizedLLMAdapter (vLLM user + transformers adapter) ==="
echo "Starting vLLM on GPU 0,1 for user simulation..."

CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
    --model $MODEL_8B --port 8004 --tensor-parallel-size 2 \
    --gpu-memory-utilization 0.90 --max-model-len 8192 \
    --disable-log-requests --dtype bfloat16 &

# Poll the health endpoint for up to ~240s.
for i in $(seq 1 120); do
    if curl -s http://localhost:8004/health > /dev/null 2>&1; then
        echo "Server ready after ~$((i*2))s"; break
    fi
    sleep 2
done
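
# Fail fast if the server never came up.
curl -sf http://localhost:8004/health > /dev/null || { echo "vLLM server on :8004 not ready"; exit 1; }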

echo "Loading PersonalizedLLMAdapter on GPU 2,3..."
# The adapter loads its own model via transformers on GPU 2,3, while the vLLM
# server started above keeps GPU 0,1 for user simulation.
CUDA_VISIBLE_DEVICES=2,3 python -c "
import time
import sys
sys.path.insert(0, '.')
from adapters.personalized_llm_adapter import create_baseline_adapter

print('Testing PersonalizedLLMAdapter (all_memory mode)...')
adapter = create_baseline_adapter('all_memory')
adapter.initialize()
adapter.start_session('test_user')

# Warm up
adapter.generate_response('Hello')

# Benchmark
n_requests = 10  # fewer than test 1: transformers generation is expected to be slower
start = time.time()
for i in range(n_requests):
    resp = adapter.generate_response(f'Solve: What is {i*7} + {i*3}? Give a brief answer.')
elapsed = time.time() - start

print(f'PersonalizedLLMAdapter (transformers): {n_requests} requests in {elapsed:.2f}s')
print(f'Throughput: {n_requests/elapsed:.2f} req/s = {n_requests/elapsed*3600:.0f} requests/hr')
print(f'Estimated sessions/hr (assuming 5 turns/session): {n_requests/elapsed*3600/5:.0f}')
"

# Final cleanup so no servers outlive the job.
pkill -f "vllm.entrypoints" 2>/dev/null || true

echo ""
echo "=== Test Complete ==="
date