diff options
| author | maszhongming <mingz5@illinois.edu> | 2025-09-16 15:15:29 -0500 |
|---|---|---|
| committer | maszhongming <mingz5@illinois.edu> | 2025-09-16 15:15:29 -0500 |
| commit | 73c194f304f827b55081b15524479f82a1b7d94c (patch) | |
| tree | 5e8660e421915420892c5eca18f1ad680f80a861 /kg_rag/prompt_based_generation/Llama/text_generation.py | |
Initial commit
Diffstat (limited to 'kg_rag/prompt_based_generation/Llama/text_generation.py')
| -rw-r--r-- | kg_rag/prompt_based_generation/Llama/text_generation.py | 36 |
1 file changed, 36 insertions, 0 deletions
"""Interactive prompt-based text generation with a Llama model.

Loads the Llama model named in the shared project configuration, wraps it
in a LangChain ``LLMChain`` using the prompt-based-generation system
prompt, and answers a single question typed by the user. The answer is
streamed to stdout by the model's streaming callback (``stream=True``).
"""
from langchain import PromptTemplate, LLMChain
# NOTE(review): wildcard import supplies system_prompts, config_data,
# llama_model and get_prompt — consider importing them explicitly.
from kg_rag.utility import *
import argparse

# -m selects which loading strategy llama_model() uses for the Llama model.
parser = argparse.ArgumentParser()
parser.add_argument('-m', type=str, default='method-1', help='Method to choose for Llama model')
args = parser.parse_args()

METHOD = args.m

# Prompt and model settings pulled from the shared project configuration.
SYSTEM_PROMPT = system_prompts["PROMPT_BASED_TEXT_GENERATION"]
MODEL_NAME = config_data["LLAMA_MODEL_NAME"]
BRANCH_NAME = config_data["LLAMA_MODEL_BRANCH"]
CACHE_DIR = config_data["LLM_CACHE_DIR"]

# The user's question is substituted into the {question} slot of the template.
INSTRUCTION = "Question: {question}"


def main():
    """Prompt the user for one question and stream the model's answer."""
    llm = llama_model(MODEL_NAME, BRANCH_NAME, CACHE_DIR, stream=True, method=METHOD)
    template = get_prompt(INSTRUCTION, SYSTEM_PROMPT)
    prompt = PromptTemplate(template=template, input_variables=["question"])
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    print(" ")
    question = input("Enter your question : ")
    print("Here is the prompt-based answer:")
    print("")
    # With stream=True the answer is emitted token-by-token by the LLM's
    # streaming callback, so the return value is not needed here; the
    # previous unused `output = ...` binding has been removed.
    llm_chain.run(question)


if __name__ == "__main__":
    main()
