summaryrefslogtreecommitdiff
path: root/collaborativeagents/scripts/run_fp8.sh
blob: 54537fa335eb49f4059f2ad6f4f93ece545d7b79 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
#!/usr/bin/env bash
#
# Run collaborative-agent experiments against an FP8-quantized
# Llama-3.3-70B-Instruct served locally on port 8004.
#
# Start ONE of these servers first:
# vllm serve meta-llama/Llama-3.3-70B-Instruct --port 8004 --tensor-parallel-size 4 --max-model-len 16384 --gpu-memory-utilization 0.9 --quantization fp8 --enforce-eager
# python -m sglang.launch_server --model-path meta-llama/Llama-3.3-70B-Instruct --port 8004 --tp-size 4 --context-length 16384 --mem-fraction-static 0.9 --quantization fp8

set -euo pipefail

# All six experiment variants talk to the same local endpoint with the
# same model for user, collaborator, and judge — keep it in one place.
readonly MODEL=hosted_vllm/meta-llama/Llama-3.3-70B-Instruct
readonly API_BASE=http://localhost:8004/v1
readonly API_KEY=EMPTY
readonly OUT_ROOT=/shared/storage-01/users/mehri2/mem/collaborativeagents/scripts/runs/llama70b_fp8
readonly LOG_ROOT=./runs/llama70b_fp8

BATCH_SIZE=1
# BATCH_SIZE=20

# Experiment types to run this invocation.  Previously-run, currently
# disabled types: no_user user_no_profile user_profiles_without_preferences
# user_profiles_with_preferences agent_with_user_preferences
# (NOTE: the old agent_with_user_preferences run used a "_v2" filename
# suffix; re-add it manually if resuming that series.)
EXPERIMENT_TYPES=(agent_with_reflection)

# Datasets to evaluate.  Other options:
# mmlu medqa humaneval bigcodebench math-500 math-hard
DATASETS=(logiqa)

EVAL_SIZES=(5)

# sanitize_dataset NAME — print the dataset name in file-path form
# (every '-' replaced with '_'), e.g. "math-500" -> "math_500".
sanitize_dataset() {
  printf '%s\n' "${1//-/_}"
}

# run_experiment TYPE DATASET EVAL_SIZE
#   Launch a single run.py experiment.  Results go under
#   $OUT_ROOT/TYPE/, combined stdout+stderr is appended under
#   $LOG_ROOT/TYPE/.  Exits non-zero (aborting the sweep, via set -e)
#   if run.py fails.
run_experiment() {
  local exp=$1 dataset=$2 eval_size=$3
  local dataset_file stem
  dataset_file=$(sanitize_dataset "$dataset")
  stem="${dataset_file}_llama70b_user_llama70b_agent_${exp}_eval_size_${eval_size}"

  # The >> redirection and --output_file both fail if these are missing.
  mkdir -p -- "${OUT_ROOT}/${exp}" "${LOG_ROOT}/${exp}"

  # no_user has no simulated user, so it takes neither --max_turns nor
  # the user-model connection flags (matches the original per-type
  # command lines).
  local -a user_args=()
  if [[ "$exp" != no_user ]]; then
    user_args=(--max_turns 10
               --user_model_name "$MODEL"
               --user_api_base "$API_BASE"
               --user_api_key "$API_KEY")
  fi

  python3 run.py --experiment_type "$exp" --dataset "$dataset" \
      --eval_size "$eval_size" --batch_size "$BATCH_SIZE" \
      ${user_args[@]+"${user_args[@]}"} \
      --collaborator_model_name "$MODEL" --collaborator_api_base "$API_BASE" --collaborator_api_key "$API_KEY" \
      --judge_model_name "$MODEL" --judge_api_base "$API_BASE" --judge_api_key "$API_KEY" \
      --output_file "${OUT_ROOT}/${exp}/${stem}.jsonl" \
      >> "${LOG_ROOT}/${exp}/${stem}.out" 2>&1
}

for eval_size in "${EVAL_SIZES[@]}"; do
  for dataset in "${DATASETS[@]}"; do
    echo "Running experiments for dataset: ${dataset} with eval_size ${eval_size}"
    for exp in "${EXPERIMENT_TYPES[@]}"; do
      run_experiment "$exp" "$dataset" "$eval_size"
    done
  done
done