blob: af2266741e08514c4895c58dd81863c6920c9f74 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
|
#!/bin/bash
# Test vLLM with 45% GPU memory utilization + ContextualAdapter loading.
#
# Starts a 2-GPU vLLM OpenAI-compatible server capped at 45% GPU memory,
# waits for its /health endpoint, then verifies that ContextualAdapter can
# load its own model and generate while the server is running.
#
# Requires: conda env "eval", GPUs 0 and 1, nvidia-smi, curl.
set -euo pipefail

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval
export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
# ${PYTHONPATH:-} so set -u does not abort when PYTHONPATH is unset.
export PYTHONPATH="${PWD}:${PWD}/scripts:${PWD}/../src:${PYTHONPATH:-}"

readonly MODEL_8B="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
readonly PORT=8004

echo "=== Testing vLLM 45% memory + Adapter ==="
echo "GPUs available:"
nvidia-smi --query-gpu=index,name,memory.total --format=csv

# Kill any existing vLLM so the port and GPU memory are free.
pkill -f "vllm.entrypoints" 2>/dev/null || true
sleep 2

echo ""
echo "Memory before vLLM:"
nvidia-smi --query-gpu=index,memory.used --format=csv

echo ""
echo "Starting vLLM with 45% memory on GPU 0,1..."
CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
  --model "$MODEL_8B" --port "$PORT" --tensor-parallel-size 2 \
  --gpu-memory-utilization 0.45 --max-model-len 8192 \
  --disable-log-requests --dtype bfloat16 &
VLLM_PID=$!
echo "vLLM PID: $VLLM_PID"

# Tear the background server down on EVERY exit path (success, set -e
# abort, Ctrl-C) — the original only cleaned up on clean completion,
# leaving an orphaned server holding GPU memory after any failure.
cleanup() {
  kill "$VLLM_PID" 2>/dev/null || true
  pkill -f "vllm.entrypoints" 2>/dev/null || true
}
trap cleanup EXIT

# Poll the health endpoint; fail hard if the server never comes up.
# (The original loop fell through silently and ran the adapter test anyway.)
echo "Waiting for vLLM to start..."
ready=0
for i in {1..60}; do
  if curl -s "http://localhost:${PORT}/health" > /dev/null 2>&1; then
    echo "vLLM ready after $((i*2))s"
    ready=1
    break
  fi
  sleep 2
done
if (( ready == 0 )); then
  echo "ERROR: vLLM did not become healthy within 120s" >&2
  exit 1
fi

echo ""
echo "Memory after vLLM started:"
nvidia-smi --query-gpu=index,memory.used --format=csv

echo ""
echo "Testing ContextualAdapter loading..."
python -c "
import sys
sys.path.insert(0, 'collaborativeagents')
sys.path.insert(0, 'src')
from adapters.contextual_adapter import ContextualAdapter
print('Creating ContextualAdapter...')
adapter = ContextualAdapter()
print('Initializing (loading model)...')
adapter.initialize()
print('Testing generation...')
adapter.start_session('test')
result = adapter.generate_response('What is 2+2?')
print(f'Response: {result[\"response\"][:100]}')
print('SUCCESS: ContextualAdapter works with vLLM running!')
"

echo ""
echo "Final memory usage:"
nvidia-smi --query-gpu=index,memory.used --format=csv
echo "Test complete!"
|