From dc801c07cf38b0c495686463e6ca6f871a64440e Mon Sep 17 00:00:00 2001
From: YurenHao0426
Date: Tue, 27 Jan 2026 09:57:37 -0600
Subject: Add collaborativeagents module and update gitignore

- Add collaborativeagents subproject with adapters, agents, and evaluation modules
- Update .gitignore to exclude large binary files (.whl, .tar), wandb logs, and results

Co-Authored-By: Claude Opus 4.5
---
 .../scripts/scale_test_ctx_refl.sbatch | 114 +++++++++++++++++++++
 1 file changed, 114 insertions(+)
 create mode 100644 collaborativeagents/scripts/scale_test_ctx_refl.sbatch

(limited to 'collaborativeagents/scripts/scale_test_ctx_refl.sbatch')

diff --git a/collaborativeagents/scripts/scale_test_ctx_refl.sbatch b/collaborativeagents/scripts/scale_test_ctx_refl.sbatch
new file mode 100644
index 0000000..1055e16
--- /dev/null
+++ b/collaborativeagents/scripts/scale_test_ctx_refl.sbatch
@@ -0,0 +1,114 @@
+#!/bin/bash
+#SBATCH --job-name=scale_cr
+#SBATCH --account=bfqt-delta-gpu
+#SBATCH --partition=gpuH200x8-interactive
+#SBATCH --nodes=1
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=32
+#SBATCH --gres=gpu:4
+#SBATCH --mem=200G
+#SBATCH --time=01:00:00
+#SBATCH --output=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scale_cr-%j.out
+#SBATCH --error=/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scale_cr-%j.err
+
+# Scale Test: Contextual and Reflection methods
+# 5 users × 15 sessions × 2 methods = 150 sessions
+# With CollaborativeAgents-style prompts
+
+cd /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents
+source /u/yurenh2/miniforge3/etc/profile.d/conda.sh
+conda activate eval
+export HF_HOME=/projects/bfqt/users/yurenh2/hf_cache/huggingface
+export PYTHONPATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/src:$PYTHONPATH"
+
+PROFILE_PATH="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/collaborativeagents/data/complex_profiles_v2/profiles_200.jsonl"
+AGENT_MODEL="/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct"
+USER_MODEL="meta-llama/Llama-3.1-70B-Instruct"
+
+echo "=== Scale Test: Contextual & Reflection (5 users × 15 sessions × 2 methods) ==="
+date
+nvidia-smi --query-gpu=index,name,memory.total --format=csv
+
+# Start vLLM servers
+CUDA_VISIBLE_DEVICES=0,1 python -m vllm.entrypoints.openai.api_server \
+    --model $USER_MODEL \
+    --port 8004 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
+    --max-model-len 16384 --dtype bfloat16 --download-dir $HF_HOME &
+
+CUDA_VISIBLE_DEVICES=2,3 python -m vllm.entrypoints.openai.api_server \
+    --model $AGENT_MODEL \
+    --port 8003 --tensor-parallel-size 2 --gpu-memory-utilization 0.90 \
+    --max-model-len 16384 --dtype bfloat16 &
+
+echo "Waiting for vLLM servers..."
+for i in {1..200}; do
+    if curl -s http://localhost:8004/health > /dev/null 2>&1; then
+        echo "User simulator ready after $((i*5))s"
+        break
+    fi
+    sleep 5
+done
+for i in {1..60}; do
+    if curl -s http://localhost:8003/health > /dev/null 2>&1; then
+        echo "Agent ready after $((i*5))s"
+        break
+    fi
+    sleep 5
+done
+sleep 5
+
+OUTPUT_DIR="../results/scale_test_ctx_refl_$(date +%Y%m%d_%H%M%S)"
+
+# Run contextual and reflection methods
+for METHOD in contextual reflection; do
+    echo ""
+    echo "============================================"
+    echo "Testing: $METHOD (5 users × 15 sessions)"
+    echo "============================================"
+
+    date
+    python scripts/run_experiments.py --methods $METHOD \
+        --datasets math-hard --n-profiles 5 --n-sessions 15 --max-turns 15 \
+        --use-vllm --no-batch-processing --parallel-profiles 1 \
+        --output-dir $OUTPUT_DIR --profile-path $PROFILE_PATH
+
+    echo "Method $METHOD completed"
+done
+
+echo ""
+echo "=== Contextual & Reflection Test Complete ==="
+date
+
+# Generate comparison
+python3 << 'PYEOF'
+import json
+from pathlib import Path
+
+output_base = sorted(Path("../results").glob("scale_test_ctx_refl_*"))[-1]
+print(f"\n=== Results Summary (Contextual & Reflection) ===\nDir: {output_base}\n")
+
+methods = ["contextual", "reflection"]
+results = {}
+
+for subdir in output_base.iterdir():
+    if subdir.is_dir():
+        for method in methods:
+            result_file = subdir / method / "results.json"
+            if result_file.exists() and method not in results:
+                with open(result_file) as f:
+                    results[method] = json.load(f)
+
+if results:
+    print(f"{'Method':<12} {'Success':<10} {'Turns':<10} {'Enforce':<10} {'Sessions':<10}")
+    print("-" * 55)
+    for method in methods:
+        if method in results:
+            data = results[method]
+            n = len(data)
+            succ = sum(r['metrics']['task_success'] for r in data) / n
+            turns = sum(r['metrics']['total_turns'] for r in data) / n
+            enf = sum(r['metrics']['enforcement_count'] for r in data) / n
+            print(f"{method:<12} {succ:<10.1%} {turns:<10.1f} {enf:<10.1f} {n:<10}")
+PYEOF
+
+pkill -f "vllm.entrypoints" 2>/dev/null || true
--
cgit v1.2.3
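For reference, a minimal usage sketch, assuming the patch has been applied on a Slurm cluster with the repository checked out at the path in the script; the job ID in the tail command is a hypothetical placeholder for whatever ID sbatch prints:

# Submit from the repository root; Slurm replies "Submitted batch job <id>".
sbatch collaborativeagents/scripts/scale_test_ctx_refl.sbatch

# Check queue state for this job by its #SBATCH job name.
squeue -u $USER -n scale_cr

# Follow stdout once the job starts. The %j in the #SBATCH --output path
# expands to the job ID; 1234567 below is a hypothetical placeholder.
tail -f /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scale_cr-1234567.out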