diff options
Diffstat (limited to 'configs/local_models.yaml')
-rw-r--r--  configs/local_models.yaml  22
1 file changed, 11 insertions, 11 deletions
diff --git a/configs/local_models.yaml b/configs/local_models.yaml
index 8f91955..9372f3d 100644
--- a/configs/local_models.yaml
+++ b/configs/local_models.yaml
@@ -1,19 +1,19 @@
 # Base path for all models
-_base_path: &base /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model
+_base_path: &base /workspace/personalization-user-model
 
 models:
   llm:
     # New Multi-Backend Config
     qwen_1_5b:
       backend: qwen
-      path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/qwen2.5-1.5b-instruct
+      path: /workspace/personalization-user-model/models/qwen2.5-1.5b-instruct
       device: auto
       dtype: bfloat16
       max_context_length: 4096
 
     llama_8b:
       backend: llama
-      path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct
+      path: /workspace/personalization-user-model/models/llama-3.1-8b-instruct
       device: auto
       dtype: bfloat16
       max_context_length: 8192
@@ -21,13 +21,13 @@ models:
     # vLLM backend for high-throughput experiments
     llama_8b_vllm:
       backend: vllm
-      path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-3.1-8b-instruct
+      path: /workspace/personalization-user-model/models/llama-3.1-8b-instruct
       vllm_url: http://localhost:8003/v1
       max_context_length: 8192
 
     # Legacy fallback (needed if from_config is called directly without name)
     hf_id: Qwen/Qwen2.5-1.5B-Instruct
-    local_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/qwen2.5-1.5b-instruct
+    local_path: /workspace/personalization-user-model/models/qwen2.5-1.5b-instruct
     dtype: bfloat16
     device_map: auto
@@ -35,12 +35,12 @@ models:
     # Default/Legacy
     default:
       hf_id: Qwen/Qwen2.5-0.5B-Instruct
-      local_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/qwen2.5-0.5b-instruct
+      local_path: /workspace/personalization-user-model/models/qwen2.5-0.5b-instruct
       dtype: bfloat16
       device_map: auto
     # New SFT Extractor
     qwen3_0_6b_sft:
-      path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/saves/qwen3-0.6b-full-sft-h200/checkpoint-4358
+      path: /workspace/personalization-user-model/models/pref-extractor-qwen3-0.6b-sft
       prompt_template_path: fine_tuning_prompt_template.txt
       device: auto
       dtype: bfloat16
@@ -48,18 +48,18 @@ models:
   embedding:
     qwen3:
       hf_id: Qwen/Qwen3-Embedding-8B
-      local_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/qwen3-embedding-8b
+      local_path: /workspace/personalization-user-model/models/qwen3-embedding-8b
     nemotron:
      hf_id: nvidia/llama-embed-nemotron-8b
-      local_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/llama-embed-nemotron-8b
+      local_path: /workspace/personalization-user-model/models/llama-embed-nemotron-8b
   reranker:
     qwen3_8b:
       hf_id: Qwen/Qwen3-Reranker-8B
-      local_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/rerankers/qwen3-reranker-8b
+      local_path: /workspace/personalization-user-model/models/rerankers/qwen3-reranker-8b
       dtype: bfloat16
       device_map: auto
     bge_base:
       hf_id: BAAI/bge-reranker-base
-      local_path: /projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/models/rerankers/bge-reranker-base
+      local_path: /workspace/personalization-user-model/models/rerankers/bge-reranker-base
       dtype: float16
       device_map: auto
