diff options
| author | YurenHao0426 <blackhao0426@gmail.com> | 2026-01-27 09:57:37 -0600 |
|---|---|---|
| committer | YurenHao0426 <blackhao0426@gmail.com> | 2026-01-27 09:57:37 -0600 |
| commit | dc801c07cf38b0c495686463e6ca6f871a64440e (patch) | |
| tree | 599f03114775921dbc472403c701f4a3a8ea188a /src/personalization/config | |
| parent | e43b3f8aa36c198b95c1e46bea2eaf3893b13dc3 (diff) | |
Add collaborativeagents module and update gitignore
- Add collaborativeagents subproject with adapters, agents, and evaluation modules
- Update .gitignore to exclude large binary files (.whl, .tar), wandb logs, and results
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Diffstat (limited to 'src/personalization/config')
| -rw-r--r-- | src/personalization/config/registry.py | 9 |
1 file changed, 9 insertions, 0 deletions
diff --git a/src/personalization/config/registry.py b/src/personalization/config/registry.py index d825ad3..6048044 100644 --- a/src/personalization/config/registry.py +++ b/src/personalization/config/registry.py @@ -49,6 +49,7 @@ def get_chat_model(name: str, device_override: Optional[str] = None): from personalization.models.llm.base import ChatModel from personalization.models.llm.qwen_instruct import QwenInstruct from personalization.models.llm.llama_instruct import LlamaChatModel + from personalization.models.llm.vllm_chat import VLLMChatModel cfg = settings.load_local_models_config() @@ -81,6 +82,14 @@ def get_chat_model(name: str, device_override: Optional[str] = None): dtype=choose_dtype(dtype), # Converts string to torch.dtype max_context_length=max_len ) + elif backend == "vllm": + # Use vLLM HTTP API for high-throughput inference + vllm_url = spec.get("vllm_url", "http://localhost:8003/v1") + return VLLMChatModel( + vllm_url=vllm_url, + model_name=spec.get("model_name"), + max_context_length=max_len + ) # Fallback to legacy single config return QwenInstruct.from_config(cfg) |
