Diffstat (limited to 'collaborativeagents/scripts/run_fp8.sh')
| -rw-r--r-- | collaborativeagents/scripts/run_fp8.sh | 65 |
1 files changed, 65 insertions, 0 deletions
diff --git a/collaborativeagents/scripts/run_fp8.sh b/collaborativeagents/scripts/run_fp8.sh
new file mode 100644
index 0000000..54537fa
--- /dev/null
+++ b/collaborativeagents/scripts/run_fp8.sh
@@ -0,0 +1,65 @@
+# vllm serve meta-llama/Llama-3.3-70B-Instruct --port 8004 --tensor-parallel-size 4 --max-model-len 16384 --gpu-memory-utilization 0.9 --quantization fp8 --enforce-eager
+# python -m sglang.launch_server --model-path meta-llama/Llama-3.3-70B-Instruct --port 8004 --tp-size 4 --context-length 16384 --mem-fraction-static 0.9 --quantization fp8
+
+
+BATCH_SIZE=1
+# BATCH_SIZE=20
+
+# Loop over eval sizes and datasets
+for EVAL_SIZE in 5; do
+  for DATASET in logiqa; do # mmlu medqa humaneval bigcodebench math-500 math-hard
+    # Convert dataset name for file paths (replace - with _)
+    DATASET_FILE=$(echo ${DATASET} | tr '-' '_')
+
+    echo "Running experiments for dataset: ${DATASET} with eval_size ${EVAL_SIZE}"
+
+    # # no_user experiment
+    # python3 run.py --experiment_type no_user --dataset ${DATASET} --eval_size ${EVAL_SIZE} --batch_size ${BATCH_SIZE} \
+    #   --collaborator_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --collaborator_api_base http://localhost:8004/v1 --collaborator_api_key EMPTY \
+    #   --judge_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --judge_api_base http://localhost:8004/v1 --judge_api_key EMPTY \
+    #   --output_file /shared/storage-01/users/mehri2/mem/collaborativeagents/scripts/runs/llama70b_fp8/no_user/${DATASET_FILE}_llama70b_user_llama70b_agent_no_user_eval_size_${EVAL_SIZE}.jsonl \
+    #   >> ./runs/llama70b_fp8/no_user/${DATASET_FILE}_llama70b_user_llama70b_agent_no_user_eval_size_${EVAL_SIZE}.out 2>&1
+
+    # # user_no_profile experiment
+    # python3 run.py --experiment_type user_no_profile --dataset ${DATASET} --eval_size ${EVAL_SIZE} --max_turns 10 --batch_size ${BATCH_SIZE} \
+    #   --user_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --user_api_base http://localhost:8004/v1 --user_api_key EMPTY \
+    #   --collaborator_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --collaborator_api_base http://localhost:8004/v1 --collaborator_api_key EMPTY \
+    #   --judge_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --judge_api_base http://localhost:8004/v1 --judge_api_key EMPTY \
+    #   --output_file /shared/storage-01/users/mehri2/mem/collaborativeagents/scripts/runs/llama70b_fp8/user_no_profile/${DATASET_FILE}_llama70b_user_llama70b_agent_user_no_profile_eval_size_${EVAL_SIZE}.jsonl \
+    #   >> ./runs/llama70b_fp8/user_no_profile/${DATASET_FILE}_llama70b_user_llama70b_agent_user_no_profile_eval_size_${EVAL_SIZE}.out 2>&1
+
+    # # user_profiles_without_preferences experiment
+    # python3 run.py --experiment_type user_profiles_without_preferences --dataset ${DATASET} --eval_size ${EVAL_SIZE} --max_turns 10 --batch_size ${BATCH_SIZE} \
+    #   --user_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --user_api_base http://localhost:8004/v1 --user_api_key EMPTY \
+    #   --collaborator_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --collaborator_api_base http://localhost:8004/v1 --collaborator_api_key EMPTY \
+    #   --judge_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --judge_api_base http://localhost:8004/v1 --judge_api_key EMPTY \
+    #   --output_file /shared/storage-01/users/mehri2/mem/collaborativeagents/scripts/runs/llama70b_fp8/user_profiles_without_preferences/${DATASET_FILE}_llama70b_user_llama70b_agent_user_profiles_without_preferences_eval_size_${EVAL_SIZE}.jsonl \
+    #   >> ./runs/llama70b_fp8/user_profiles_without_preferences/${DATASET_FILE}_llama70b_user_llama70b_agent_user_profiles_without_preferences_eval_size_${EVAL_SIZE}.out 2>&1
+
+    # # user_profiles_with_preferences experiment
+    # python3 run.py --experiment_type user_profiles_with_preferences --dataset ${DATASET} --eval_size ${EVAL_SIZE} --max_turns 10 --batch_size ${BATCH_SIZE} \
+    #   --user_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --user_api_base http://localhost:8004/v1 --user_api_key EMPTY \
+    #   --collaborator_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --collaborator_api_base http://localhost:8004/v1 --collaborator_api_key EMPTY \
+    #   --judge_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --judge_api_base http://localhost:8004/v1 --judge_api_key EMPTY \
+    #   --output_file /shared/storage-01/users/mehri2/mem/collaborativeagents/scripts/runs/llama70b_fp8/user_profiles_with_preferences/${DATASET_FILE}_llama70b_user_llama70b_agent_user_profiles_with_preferences_eval_size_${EVAL_SIZE}.jsonl \
+    #   >> ./runs/llama70b_fp8/user_profiles_with_preferences/${DATASET_FILE}_llama70b_user_llama70b_agent_user_profiles_with_preferences_eval_size_${EVAL_SIZE}.out 2>&1
+
+    # # agent_with_user_preferences experiment
+    # python3 run.py --experiment_type agent_with_user_preferences --dataset ${DATASET} --eval_size ${EVAL_SIZE} --max_turns 10 --batch_size ${BATCH_SIZE} \
+    #   --user_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --user_api_base http://localhost:8004/v1 --user_api_key EMPTY \
+    #   --collaborator_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --collaborator_api_base http://localhost:8004/v1 --collaborator_api_key EMPTY \
+    #   --judge_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --judge_api_base http://localhost:8004/v1 --judge_api_key EMPTY \
+    #   --output_file /shared/storage-01/users/mehri2/mem/collaborativeagents/scripts/runs/llama70b_fp8/agent_with_user_preferences/${DATASET_FILE}_llama70b_user_llama70b_agent_agent_with_user_preferences_eval_size_${EVAL_SIZE}_v2.jsonl \
+    #   >> ./runs/llama70b_fp8/agent_with_user_preferences/${DATASET_FILE}_llama70b_user_llama70b_agent_agent_with_user_preferences_eval_size_${EVAL_SIZE}_v2.out 2>&1
+
+    # agent_with_reflection experiment
+    python3 run.py --experiment_type agent_with_reflection --dataset ${DATASET} --eval_size ${EVAL_SIZE} --max_turns 10 --batch_size ${BATCH_SIZE} \
+      --user_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --user_api_base http://localhost:8004/v1 --user_api_key EMPTY \
+      --collaborator_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --collaborator_api_base http://localhost:8004/v1 --collaborator_api_key EMPTY \
+      --judge_model_name hosted_vllm/meta-llama/Llama-3.3-70B-Instruct --judge_api_base http://localhost:8004/v1 --judge_api_key EMPTY \
+      --output_file /shared/storage-01/users/mehri2/mem/collaborativeagents/scripts/runs/llama70b_fp8/agent_with_reflection/${DATASET_FILE}_llama70b_user_llama70b_agent_agent_with_reflection_eval_size_${EVAL_SIZE}.jsonl \
+      >> ./runs/llama70b_fp8/agent_with_reflection/${DATASET_FILE}_llama70b_user_llama70b_agent_agent_with_reflection_eval_size_${EVAL_SIZE}.out 2>&1
+
+
+  done
+done
\ No newline at end of file
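
All experiment blocks, including the only uncommented one (agent_with_reflection), append their console logs with >> into subdirectories of ./runs/llama70b_fp8/, and shell redirection does not create missing directories. A minimal usage sketch (not part of the commit; the working directory and invocation order are assumptions) for running the script against the FP8 server started by the commented serve command:

# assumes the vLLM or SGLang server from the commented commands is already listening on localhost:8004
cd collaborativeagents/scripts                       # assumed cwd so run.py and the relative ./runs paths resolve
mkdir -p ./runs/llama70b_fp8/agent_with_reflection   # the >> redirection will not create this directory
bash run_fp8.sh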
