From e43b3f8aa36c198b95c1e46bea2eaf3893b13dc3 Mon Sep 17 00:00:00 2001
From: YurenHao0426
Date: Wed, 17 Dec 2025 04:29:37 -0600
Subject: Initial commit (clean history)

---
 configs/local_models.yaml | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)
 create mode 100644 configs/local_models.yaml

diff --git a/configs/local_models.yaml b/configs/local_models.yaml
new file mode 100644
index 0000000..13c7fcc
--- /dev/null
+++ b/configs/local_models.yaml
@@ -0,0 +1,50 @@
+models:
+  llm:
+    # New Multi-Backend Config
+    qwen_1_5b:
+      backend: qwen
+      path: models/qwen2.5-1.5b-instruct
+      device: auto
+      dtype: bfloat16
+      max_context_length: 4096
+
+    llama_8b:
+      backend: llama
+      path: models/llama-3.1-8b-instruct
+      device: auto
+      dtype: bfloat16
+      max_context_length: 8192
+
+    # Legacy fallback (needed if from_config is called directly without name)
+    hf_id: Qwen/Qwen2.5-1.5B-Instruct
+    local_path: models/qwen2.5-1.5b-instruct
+    dtype: bfloat16
+    device_map: auto
+
+  preference_extractor:
+    # Default/Legacy
+    default:
+      hf_id: Qwen/Qwen2.5-0.5B-Instruct
+      local_path: models/qwen2.5-0.5b-instruct
+      dtype: bfloat16
+      device_map: auto
+    # New SFT Extractor
+    qwen3_0_6b_sft:
+      path: saves/qwen3-0.6b-full-sft-h200/checkpoint-4358
+      prompt_template_path: fine_tuning_prompt_template.txt
+      device: auto
+      dtype: bfloat16
+      max_new_tokens: 512
+  embedding:
+    qwen3:
+      hf_id: Qwen/Qwen3-Embedding-8B
+      local_path: models/qwen3-embedding-8b
+    nemotron:
+      hf_id: nvidia/llama-embed-nemotron-8b
+      local_path: models/llama-embed-nemotron-8b
+  reranker:
+    qwen3_8b:
+      hf_id: Qwen/Qwen3-Reranker-8B
+      local_path: models/rerankers/qwen3-reranker-8b
+      dtype: bfloat16
+      device_map: auto
--
cgit v1.2.3
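
Usage note: the "Legacy fallback" comment in the file above implies a loader whose
from_config() can be called either with a named entry (e.g. qwen_1_5b) or with no name,
in which case the flat hf_id/local_path keys under llm apply. The sketch below is a
hypothetical illustration of that lookup, assuming PyYAML; the load_llm_config() helper
is not part of this commit, and only the YAML layout comes from the diff above.

    # Hypothetical helper; the repository's real from_config() is not shown in this commit.
    import yaml

    def load_llm_config(config_path="configs/local_models.yaml", name=None):
        """Return the named LLM entry, or the flat legacy keys when no name is given."""
        with open(config_path) as f:
            cfg = yaml.safe_load(f)
        llm_cfg = cfg["models"]["llm"]
        if name is None:
            # Legacy fallback: keep only scalar keys (hf_id, local_path, dtype,
            # device_map), skipping the nested per-model entries.
            return {k: v for k, v in llm_cfg.items() if not isinstance(v, dict)}
        return llm_cfg[name]

    # Example:
    #   load_llm_config(name="qwen_1_5b")["path"]  -> "models/qwen2.5-1.5b-instruct"
    #   load_llm_config()["hf_id"]                 -> "Qwen/Qwen2.5-1.5B-Instruct"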