blob: a7f8595432117b98fc51dce6eb91fa6ccd8c412f (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
|
# Prerequisite: launch a model server with ONE of the commands below before running
# this script (the port must match the *_api_base flags passed to run.py further down):
# vllm serve meta-llama/Llama-3.3-70B-Instruct --port 8004 --tensor-parallel-size 4 --max-model-len 16384 --gpu-memory-utilization 0.9
# python -m sglang.launch_server --model-path meta-llama/Llama-3.3-70B-Instruct --port 8004 --tp-size 4 --context-length 16384
# python -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --port 8003 --tp-size 4 --context-length 16384
# Batch size handed to run.py.
# NOTE: the original assigned BATCH_SIZE twice (100, then 50); the first
# assignment was dead code, so only the effective value of 50 is kept.
BATCH_SIZE=50

# Loop over eval sizes and datasets
for EVAL_SIZE in 20; do
  for DATASET in math-hard math-500 logiqa mmlu medqa; do # humaneval bigcodebench
    # Convert dataset name for file paths (replace - with _).
    # Parameter expansion avoids the echo|tr subshell of the original.
    DATASET_FILE=${DATASET//-/_}
    echo "Running experiments for dataset: ${DATASET} with eval_size ${EVAL_SIZE}"
    # Ensure the log directory exists — otherwise the >> redirect below
    # fails before run.py is even started.
    mkdir -p ./runs/training_llama70b_temp_1
    # training_data_with_user_profiles_with_preferences
    python3 run.py --experiment_type training_data_with_user_profiles_with_preferences \
      --dataset "${DATASET}" --eval_size "${EVAL_SIZE}" --max_turns 10 --batch_size "${BATCH_SIZE}" \
      --user_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --user_api_base http://localhost:8004/v1 --user_api_key EMPTY \
      --collaborator_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --collaborator_api_base http://localhost:8004/v1 --collaborator_api_key EMPTY \
      --judge_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --judge_api_base http://localhost:8004/v1 --judge_api_key EMPTY \
      --output_file "/shared/storage-01/users/mehri2/mem/collaborativeagents/scripts/runs/training_llama70b_temp_1/${DATASET_FILE}_llama70b_user_llama70b_agent_training_data_with_reflection_eval_size_${EVAL_SIZE}.jsonl" \
      >> "./runs/training_llama70b_temp_1/${DATASET_FILE}_llama70b_user_llama70b_agent_training_data_with_reflection_eval_size_${EVAL_SIZE}.out" 2>&1
  done
done
|