blob: d812b43ded7d5590ce530cc9f879c7783f0c75f0 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
|
#!/bin/bash
#SBATCH --job-name=vllm_bench
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuA100x4
#SBATCH --nodes=1
#SBATCH --gpus-per-node=1
#SBATCH --time=00:30:00
#SBATCH --output=slurm/logs/vllm_bench_%j.out
#SBATCH --error=slurm/logs/vllm_bench_%j.err
#
# Benchmark vLLM vs transformers inference speed.
#
# Runs three tests on one A100:
#   1.  transformers baseline (10 sequential requests)
#   2a. vLLM OpenAI-compatible server, sequential (20 requests)
#   2b. vLLM server, concurrent (50 requests)
#
# Requires: scripts/benchmark_inference.py in the project dir, and the
# local llama-3.1-8b-instruct checkpoint (see MODEL_8B below).

# -e: abort on error; -u: unset vars are errors; pipefail: pipelines fail
# if any stage fails (plain `set -e` missed the latter two).
set -euo pipefail

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents

export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export TRANSFORMERS_CACHE=/projects/bfqt/users/yurenh2/hf_cache/huggingface

echo "=== Job Info ==="
# Default to "unknown" so set -u doesn't kill the script when run outside SLURM.
echo "Job ID: ${SLURM_JOB_ID:-unknown}"
echo "Node: ${SLURM_NODELIST:-unknown}"
echo "GPUs: ${SLURM_GPUS_ON_NODE:-unknown}"
date
echo ""
echo "=== GPU Info ==="
nvidia-smi --query-gpu=index,name,memory.total,memory.free --format=csv

readonly MODEL_8B="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
readonly PORT=8003

# Always tear down the background vLLM server, even if a benchmark step
# fails under set -e. Without this trap a failed Test 2a/2b left the
# server process running and holding the GPU for the rest of the job.
SERVER_PID=""
cleanup() {
  if [[ -n "$SERVER_PID" ]]; then
    kill "$SERVER_PID" 2>/dev/null || true
    wait "$SERVER_PID" 2>/dev/null || true
  fi
}
trap cleanup EXIT

# ============================================
# Test 1: Transformers baseline
# ============================================
echo ""
echo "============================================"
echo "Test 1: Transformers Baseline (10 requests)"
echo "============================================"
python scripts/benchmark_inference.py --mode transformers --model "$MODEL_8B" -n 10

# ============================================
# Test 2: vLLM server
# ============================================
echo ""
echo "============================================"
echo "Test 2: Starting vLLM Server"
echo "============================================"
python -m vllm.entrypoints.openai.api_server \
    --model "$MODEL_8B" \
    --port "$PORT" \
    --gpu-memory-utilization 0.9 \
    --max-model-len 4096 \
    --disable-log-requests &
SERVER_PID=$!
echo "vLLM Server PID: $SERVER_PID"

# Poll /health for up to 60 * 5s = 300s. Also bail out immediately if the
# server process died (e.g. OOM / bad model path) instead of sleeping out
# the full timeout.
echo "Waiting for server to start..."
server_up=false
for i in {1..60}; do
  if ! kill -0 "$SERVER_PID" 2>/dev/null; then
    echo "ERROR: vLLM server process exited prematurely" >&2
    exit 1
  fi
  if curl -s "http://localhost:${PORT}/health" > /dev/null 2>&1; then
    # (i-1) sleeps have elapsed before a successful check on iteration i.
    echo "Server ready after $(( (i - 1) * 5 )) seconds"
    server_up=true
    break
  fi
  sleep 5
done

if [[ "$server_up" != true ]]; then
  echo "ERROR: vLLM server failed to start within 300s" >&2
  exit 1
fi

echo ""
echo "============================================"
echo "Test 2a: vLLM Sequential (20 requests)"
echo "============================================"
python scripts/benchmark_inference.py --mode vllm --url "http://localhost:${PORT}/v1" -n 20

echo ""
echo "============================================"
echo "Test 2b: vLLM Concurrent (50 requests)"
echo "============================================"
python scripts/benchmark_inference.py --mode vllm --url "http://localhost:${PORT}/v1" -n 50 --concurrent

# Server shutdown is handled by the EXIT trap; do it explicitly here too so
# the "Complete" banner prints after the server is gone.
echo ""
echo "Cleaning up..."
cleanup
SERVER_PID=""

echo ""
echo "============================================"
echo "Benchmark Complete!"
echo "============================================"
echo ""
echo "Target: 2000 conversations/hour (paper on H100x8)"
echo "Our A100x4 should achieve ~500-1000 conv/hr with vLLM"
echo ""
date
|