summaryrefslogtreecommitdiff
path: root/src/personalization/config
diff options
context:
space:
mode:
Diffstat (limited to 'src/personalization/config')
-rw-r--r--  src/personalization/config/registry.py  9
1 file changed, 9 insertions, 0 deletions
diff --git a/src/personalization/config/registry.py b/src/personalization/config/registry.py
index d825ad3..6048044 100644
--- a/src/personalization/config/registry.py
+++ b/src/personalization/config/registry.py
@@ -49,6 +49,7 @@ def get_chat_model(name: str, device_override: Optional[str] = None):
from personalization.models.llm.base import ChatModel
from personalization.models.llm.qwen_instruct import QwenInstruct
from personalization.models.llm.llama_instruct import LlamaChatModel
+ from personalization.models.llm.vllm_chat import VLLMChatModel
cfg = settings.load_local_models_config()
@@ -81,6 +82,14 @@ def get_chat_model(name: str, device_override: Optional[str] = None):
dtype=choose_dtype(dtype), # Converts string to torch.dtype
max_context_length=max_len
)
+ elif backend == "vllm":
+ # Use vLLM HTTP API for high-throughput inference
+ vllm_url = spec.get("vllm_url", "http://localhost:8003/v1")
+ return VLLMChatModel(
+ vllm_url=vllm_url,
+ model_name=spec.get("model_name"),
+ max_context_length=max_len
+ )
# Fallback to legacy single config
return QwenInstruct.from_config(cfg)