path: root/collaborativeagents/scripts/scale_test_batch2.sbatch
blob: 6a1fb2739f0ba8bf411a237e77e4753ca8501676
#!/bin/bash
#SBATCH --job-name=scale_b2
#SBATCH --account=bfqt-delta-gpu
#SBATCH --partition=gpuH200x8-interactive
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:4
#SBATCH --mem=200G
#SBATCH --time=01:00:00
#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scale_b2-%j.out
#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scale_b2-%j.err

# Scale Test Batch 2: Users 6-10, 15 sessions each, 3 methods
# With CollaborativeAgents-style prompts

cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents
source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
conda activate eval
export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
export PYTHONPATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/src:$PYTHONPATH"

PROFILE_PATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/data/complex_profiles_v2/profiles_200.jsonl"
AGENT_MODEL="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
USER_MODEL="meta-llama/Llama-3.1-70B-Instruct"
MEMORY_STORE="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/data/corpora/empty_store_b2"

echo "=== Scale Test Batch 2: 5 users × 15 sessions × 3 methods ==="
date
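# Log the GPU inventory allocated to this job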
nvidia-smi --query-gpu=index,name,memory.total --format=csv

# Create separate memory store for batch 2
mkdir -p ${MEMORY_STORE}
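# Start from an empty card file (the bare `>` creates/truncates it)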
> ${MEMORY_STORE}/memory_cards.jsonl

# Start vLLM servers
CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
    --model $USER_MODEL \
    --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
    --max-model-len 16384 --dtype bfloat16 --download-dir $HF_HOME &
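# Sketch: record the server's PID ($! = most recent background job) so it
# can be shut down explicitly at the end of the run.
USER_SIM_PID=$!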

CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
    --model $AGENT_MODEL \
    --port 8003 --tensor-parallel-size 2 --gpu-memory-utilization 0.45 \
    --max-model-len 16384 --dtype bfloat16 &
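AGENT_PID=$!  # likewise, the agent server's PID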

echo "Waiting for vLLM servers..."
for i in {1..200}; do
    if curl -s http://localhost:8004/health > /dev/null 2>&1; then
        echo "User simulator ready after $((i*5))s"
        break
    fi
    sleep 5
done
for i in {1..60}; do
    if curl -s http://localhost:8003/health > /dev/null 2>&1; then
        echo "Agent ready after $((i*5))s"
        break
    fi
    sleep 5
done
sleep 5
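
# Fail fast (a sketch) if either server never reported healthy, instead of
# burning the GPU allocation; vLLM's OpenAI server exposes GET /health.
for PORT in 8004 8003; do
    if ! curl -s http://localhost:${PORT}/health > /dev/null 2>&1; then
        echo "vLLM server on port ${PORT} failed to start; aborting." >&2
        exit 1
    fi
done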

OUTPUT_DIR="../results/scale_test_b2_$(date +%Y%m%d_%H%M%S)"

# Run each method with profiles 6-10 (skip first 5)
for METHOD in vanilla rag rag_vector; do
    echo ""
    echo "============================================"
    echo "Testing: $METHOD (users 6-10 × 15 sessions)"
    echo "============================================"

    # Clear memory store before each method
    > ${MEMORY_STORE}/memory_cards.jsonl
    rm -f ${MEMORY_STORE}/memory_embeddings.npy

    date
    python scripts/run_experiments.py --methods $METHOD \
        --datasets math-hard --n-profiles 5 --n-sessions 15 --max-turns 15 \
        --use-vllm --no-batch-processing --parallel-profiles 1 \
        --profile-offset 5 \
        --output-dir $OUTPUT_DIR --profile-path $PROFILE_PATH

    echo "Method $METHOD completed"
    if [ "$METHOD" != "vanilla" ]; then
        echo "Final memory cards: $(wc -l < ${MEMORY_STORE}/memory_cards.jsonl 2>/dev/null || echo 0)"
    fi
done

echo ""
echo "=== Scale Test Batch 2 Complete ==="
date

# Generate comparison
python3 << 'PYEOF'
import json
from pathlib import Path

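# Pick the most recent scale_test_b2_* results directory; lexicographic
# sort is chronological because the suffix is a YYYYMMDD_HHMMSS timestamp.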
output_base = sorted(Path("../results").glob("scale_test_b2_*"))[-1]
print(f"\n=== Results Summary (Batch 2) ===\nDir: {output_base}\n")

methods = ["vanilla", "rag", "rag_vector"]
results = {}

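# Keep the first results.json found for each method across dataset subdirs.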
for subdir in output_base.iterdir():
    if subdir.is_dir():
        for method in methods:
            result_file = subdir / method / "results.json"
            if result_file.exists() and method not in results:
                with open(result_file) as f:
                    results[method] = json.load(f)

if results:
    print(f"{'Method':<12} {'Success':<10} {'Turns':<10} {'Enforce':<10} {'Sessions':<10}")
    print("-" * 55)
    for method in methods:
        if method in results and results[method]:
            data = results[method]
            n = len(data)
            succ = sum(r['metrics']['task_success'] for r in data) / n
            turns = sum(r['metrics']['total_turns'] for r in data) / n
            enf = sum(r['metrics']['enforcement_count'] for r in data) / n
            print(f"{method:<12} {succ:<10.1%} {turns:<10.1f} {enf:<10.1f} {n:<10}")
PYEOF
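
# Targeted shutdown using the PIDs captured at launch (a sketch; the pkill
# below remains as a catch-all for stray vLLM worker processes).
kill $USER_SIM_PID $AGENT_PID 2>/dev/null || true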

pkill -f "vllm.entrypoints" 2>/dev/null || true