summaryrefslogtreecommitdiff
path: root/collaborativeagents/scripts/generate_training_data.sh
blob: bdd5fbaf38793e56a8f80eeb01493d0c2142afb9 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
# Prerequisite: a model server must be listening on port 8004, e.g.:
# python -m sglang.launch_server --model-path meta-llama/Llama-3.3-70B-Instruct --port 8004 --tp-size 4 --context-length 16384

# Error on unset variables. Deliberately NOT using -e: one failed dataset run
# should be reported but must not abort the remaining sweep.
set -u

readonly BATCH_SIZE=100
# Shared endpoint/model settings — all three roles (user, collaborator, judge)
# use the same hosted model behind the same server.
readonly MODEL="hosted_vllm/meta-llama/Llama-3.3-70B-Instruct"
readonly API_BASE="http://localhost:8004/v1"
readonly OUT_DIR="/shared/storage-01/users/mehri2/mem/collaborativeagents/training/training_data"

# Loop over eval sizes and datasets
for EVAL_SIZE in 20; do
    for DATASET in math-hard math-500 logiqa mmlu medqa; do
        # Convert dataset name for file paths (replace - with _);
        # pure parameter expansion, no echo|tr subshell needed.
        DATASET_FILE=${DATASET//-/_}

        echo "Generating training data for dataset: ${DATASET} with eval_size ${EVAL_SIZE}"

        # Common path stem for the .jsonl output and the .out log.
        STEM="${OUT_DIR}/${DATASET_FILE}_llama70b_user_llama70b_agent_training_data_with_reflection_eval_size_${EVAL_SIZE}"

        # training_data_with_user_profiles_with_preferences
        if ! python3 run.py --experiment_type training_data_with_user_profiles_with_preferences \
            --dataset "${DATASET}" --eval_size "${EVAL_SIZE}" --max_turns 10 --batch_size "${BATCH_SIZE}" \
            --user_model_name "${MODEL}" --user_api_base "${API_BASE}" --user_api_key EMPTY \
            --collaborator_model_name "${MODEL}" --collaborator_api_base "${API_BASE}" --collaborator_api_key EMPTY \
            --judge_model_name "${MODEL}" --judge_api_base "${API_BASE}" --judge_api_key EMPTY \
            --output_file "${STEM}.jsonl" \
            >> "${STEM}.out" 2>&1; then
            # Surface the failure on stderr but keep iterating over the sweep.
            echo "WARNING: run failed for ${DATASET} (eval_size ${EVAL_SIZE}); see ${STEM}.out" >&2
        fi

    done
done