summary refs log tree commit diff
path: root/configs
diff options
context:
space:
mode:
author	YurenHao0426 <Blackhao0426@gmail.com>	2026-03-18 18:25:09 -0500
committer	YurenHao0426 <Blackhao0426@gmail.com>	2026-03-18 18:25:09 -0500
commit	b6c3e4e51eeab703b40284459c6e9fff2151216c (patch)
tree	221410886f23214575f93b9ef44fa8431c9a6dfc /configs
Initial release: VARS - personalized LLM with RAG and user vector learning
Diffstat (limited to 'configs')
-rw-r--r--	configs/base.yaml	0
-rw-r--r--	configs/local_models.yaml	66
-rw-r--r--	configs/reranker.yaml	3
-rw-r--r--	configs/retrieval.yaml	5
-rw-r--r--	configs/user_model.yaml	14
5 files changed, 88 insertions, 0 deletions
diff --git a/configs/base.yaml b/configs/base.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/configs/base.yaml
diff --git a/configs/local_models.yaml b/configs/local_models.yaml
new file mode 100644
index 0000000..ea001ea
--- /dev/null
+++ b/configs/local_models.yaml
@@ -0,0 +1,66 @@
+# Base path for all models
+_base_path: &base ./
+
+models:
+ llm:
+ # New Multi-Backend Config
+ qwen_1_5b:
+ backend: qwen
+ path: .//models/qwen2.5-1.5b-instruct
+ device: auto
+ dtype: bfloat16
+ max_context_length: 4096
+
+ llama_8b:
+ backend: llama
+ path: .//models/llama-3.1-8b-instruct
+ device: auto
+ dtype: bfloat16
+ max_context_length: 8192
+
+ # vLLM backend for high-throughput experiments
+ llama_8b_vllm:
+ backend: vllm
+ path: .//models/llama-3.1-8b-instruct
+ vllm_url: http://localhost:8003/v1
+ model_name: meta-llama/Llama-3.1-8B-Instruct
+ max_context_length: 8192
+
+ # Legacy fallback (needed if from_config is called directly without name)
+ hf_id: Qwen/Qwen2.5-1.5B-Instruct
+ local_path: .//models/qwen2.5-1.5b-instruct
+ dtype: bfloat16
+ device_map: auto
+
+ preference_extractor:
+ # Default/Legacy
+ default:
+ hf_id: Qwen/Qwen2.5-0.5B-Instruct
+ local_path: .//models/qwen2.5-0.5b-instruct
+ dtype: bfloat16
+ device_map: auto
+ # New SFT Extractor
+ qwen3_0_6b_sft:
+ path: .//models/pref-extractor-qwen3-0.6b-sft
+ prompt_template_path: fine_tuning_prompt_template.txt
+ device: auto
+ dtype: bfloat16
+ max_new_tokens: 512
+ embedding:
+ qwen3:
+ hf_id: Qwen/Qwen3-Embedding-8B
+ local_path: .//models/qwen3-embedding-8b
+ nemotron:
+ hf_id: nvidia/llama-embed-nemotron-8b
+ local_path: .//models/llama-embed-nemotron-8b
+ reranker:
+ qwen3_8b:
+ hf_id: Qwen/Qwen3-Reranker-8B
+ local_path: .//models/rerankers/qwen3-reranker-8b
+ dtype: bfloat16
+ device_map: auto
+ bge_base:
+ hf_id: BAAI/bge-reranker-base
+ local_path: .//models/rerankers/bge-reranker-base
+ dtype: float16
+ device_map: auto
diff --git a/configs/reranker.yaml b/configs/reranker.yaml
new file mode 100644
index 0000000..c376fc7
--- /dev/null
+++ b/configs/reranker.yaml
@@ -0,0 +1,3 @@
+reranker:
+ default: qwen3_8b
+
diff --git a/configs/retrieval.yaml b/configs/retrieval.yaml
new file mode 100644
index 0000000..d2e100e
--- /dev/null
+++ b/configs/retrieval.yaml
@@ -0,0 +1,5 @@
+retrieval:
+ dense_topk: 64 # Initial recall count
+ rerank_topk: 8 # Count fed to LLM after rerank
+ pca_dim: 256
+
diff --git a/configs/user_model.yaml b/configs/user_model.yaml
new file mode 100644
index 0000000..7b8e230
--- /dev/null
+++ b/configs/user_model.yaml
@@ -0,0 +1,14 @@
+user_model:
+ item_dim: 256
+ user_dim: 256
+ beta_long: 0.1 # Enable personalization for Day 4
+ beta_short: 0.3
+ tau: 1.0
+ preference_extractor_name: qwen3_0_6b_sft # Switch to new extractor
+ rl:
+ eta_long: 1.0e-3
+ eta_short: 5.0e-3
+ ema_alpha: 0.05
+ short_decay: 0.1
+
+llm_name: llama_8b # Switch backend to Llama 3.1