models:
  llm:
    # New Multi-Backend Config
    qwen_1_5b:
      backend: qwen
      path: models/qwen2.5-1.5b-instruct
      device: auto
      dtype: bfloat16
      max_context_length: 4096
    llama_8b:
      backend: llama
      path: models/llama-3.1-8b-instruct
      device: auto
      dtype: bfloat16
      max_context_length: 8192
    # Legacy fallback (needed if from_config is called directly without a name)
    hf_id: Qwen/Qwen2.5-1.5B-Instruct
    local_path: models/qwen2.5-1.5b-instruct
    dtype: bfloat16
    device_map: auto
  preference_extractor:
    # Default/Legacy
    default:
      hf_id: Qwen/Qwen2.5-0.5B-Instruct
      local_path: models/qwen2.5-0.5b-instruct
      dtype: bfloat16
      device_map: auto
    # New SFT Extractor
    qwen3_0_6b_sft:
      path: saves/qwen3-0.6b-full-sft-h200/checkpoint-4358
      prompt_template_path: fine_tuning_prompt_template.txt
      device: auto
      dtype: bfloat16
      max_new_tokens: 512
  embedding:
    qwen3:
      hf_id: Qwen/Qwen3-Embedding-8B
      local_path: models/qwen3-embedding-8b
    nemotron:
      hf_id: nvidia/llama-embed-nemotron-8b
      local_path: models/llama-embed-nemotron-8b
  reranker:
    qwen3_8b:
      hf_id: Qwen/Qwen3-Reranker-8B
      local_path: models/rerankers/qwen3-reranker-8b
      dtype: bfloat16
      device_map: auto
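
# Usage sketch (hypothetical, not consumed by this file): a loader might resolve a
# named entry from a section, falling back to the legacy top-level keys when no
# name is given. The file path and variable names below are assumptions for
# illustration only, kept as comments so this YAML stays valid.
#
#   import yaml
#   with open("config.yaml") as f:          # assumed path of this file
#       models = yaml.safe_load(f)["models"]
#   # Named entry if present, otherwise the legacy hf_id/local_path keys on "llm"
#   llm_cfg = models["llm"].get("qwen_1_5b") or models["llm"]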