# vllm serve meta-llama/Llama-3.3-70B-Instruct --port 8004 --tensor-parallel-size 4 --max-model-len 16384 --gpu-memory-utilization 0.9

BATCH_SIZE=100
# BATCH_SIZE=20

# Loop over eval sizes and datasets
for EVAL_SIZE in 20; do
    for DATASET in math-500; do
        # Convert dataset name for file paths (replace - with _)
        DATASET_FILE=$(echo ${DATASET} | tr '-' '_')

        echo "Running experiments for dataset: ${DATASET} with eval_size ${EVAL_SIZE}"

        # debug experiment
        python3 run.py --experiment_type debug --dataset ${DATASET} --eval_size ${EVAL_SIZE} --max_turns 10 --batch_size ${BATCH_SIZE} \
            --user_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --user_api_base http://localhost:8004/v1 --user_api_key EMPTY \
            --collaborator_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --collaborator_api_base http://localhost:8004/v1 --collaborator_api_key EMPTY \
            --judge_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --judge_api_base http://localhost:8004/v1 --judge_api_key EMPTY \
            --output_file /shared/storage-01/users/mehri2/mem/collaborativeagents/scripts/runs/llama70b/debug/${DATASET_FILE}_llama70b_user_llama70b_agent_debug_eval_size_${EVAL_SIZE}.jsonl \
            >> ./runs/llama70b/debug/${DATASET_FILE}_llama70b_user_llama70b_agent_debug_eval_size_${EVAL_SIZE}.out 2>&1
    done
done
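
# Setup sketch (assumptions, not part of the original run): the >> redirect in the
# loop above needs ./runs/llama70b/debug/ to exist, and run.py expects the vLLM
# server from the commented command at the top to already be listening on port 8004.
# A minimal preparation step, assuming the server is started separately and fully
# loaded before the loop runs:
#
#     mkdir -p ./runs/llama70b/debug
#     # start in another shell / job, then wait for it to finish loading:
#     # vllm serve meta-llama/Llama-3.3-70B-Instruct --port 8004 --tensor-parallel-size 4 \
#     #     --max-model-len 16384 --gpu-memory-utilization 0.9
#     # optional readiness check against the OpenAI-compatible API:
#     # curl -s http://localhost:8004/v1/models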