| author | maszhongming <mingz5@illinois.edu> | 2025-09-16 15:15:29 -0500 |
|---|---|---|
| committer | maszhongming <mingz5@illinois.edu> | 2025-09-16 15:15:29 -0500 |
| commit | 73c194f304f827b55081b15524479f82a1b7d94c (patch) | |
| tree | 5e8660e421915420892c5eca18f1ad680f80a861 /kg_rag/run_setup.py | |
Initial commit
Diffstat (limited to 'kg_rag/run_setup.py')
| -rw-r--r-- | kg_rag/run_setup.py | 77 |
1 file changed, 77 insertions, 0 deletions
```diff
diff --git a/kg_rag/run_setup.py b/kg_rag/run_setup.py
new file mode 100644
index 0000000..04c856c
--- /dev/null
+++ b/kg_rag/run_setup.py
@@ -0,0 +1,77 @@
+import os
+from kg_rag.utility import config_data
+
+def download_llama(method):
+    from kg_rag.utility import llama_model
+    try:
+        llama_model(config_data["LLAMA_MODEL_NAME"], config_data["LLAMA_MODEL_BRANCH"], config_data["LLM_CACHE_DIR"], method=method)
+        print("Model is successfully downloaded to the provided cache directory!")
+    except:
+        print("Model is not downloaded! Make sure the above mentioned conditions are satisfied")
+
+
+print("")
+print("Starting to set up KG-RAG ...")
+print("")
+
+# user_input = input("Did you update the config.yaml file with all necessary configurations (such as GPT .env path, vectorDB file paths, other file paths)? Enter Y or N: ")
+# print("")
+# if user_input == "Y":
+if True:
+    print("Checking disease vectorDB ...")
+    print("The current VECTOR_DB_PATH is ", config_data["VECTOR_DB_PATH"])
+    try:
+        if os.path.exists(config_data["VECTOR_DB_PATH"]):
+            print("vectorDB already exists!")
+        else:
+            print("Creating vectorDB ...")
+            from kg_rag.vectorDB.create_vectordb import create_vectordb
+            create_vectordb()
+            print("Congratulations! The disease database is completed.")
+    except:
+        print("Double check the path that was given in VECTOR_DB_PATH of config.yaml file.")
+    '''
+    print("")
+    user_input_1 = input("Do you want to install Llama model? Enter Y or N: ")
+    if user_input_1 == "Y":
+        user_input_2 = input("Did you update the config.yaml file with proper configuration for downloading Llama model? Enter Y or N: ")
+        if user_input_2 == "Y":
+            user_input_3 = input("Are you using official Llama model from Meta? Enter Y or N: ")
+            if user_input_3 == "Y":
+                user_input_4 = input("Did you get access to use the model? Enter Y or N: ")
+                if user_input_4 == "Y":
+                    download_llama()
+                    print("Congratulations! Setup is completed.")
+                else:
+                    print("Aborting!")
+            else:
+                download_llama(method='method-1')
+                user_input_5 = input("Did you get a message like 'Model is not downloaded!'? Enter Y or N: ")
+                if user_input_5 == "N":
+                    print("Congratulations! Setup is completed.")
+                else:
+                    download_llama(method='method-2')
+                    user_input_6 = input("Did you get a message like 'Model is not downloaded!'? Enter Y or N: ")
+                    if user_input_6 == "N":
+                        print("""
+                        IMPORTANT :
+                        Llama model was downloaded using 'LlamaTokenizer' instead of 'AutoTokenizer' method.
+                        So, when you run text generation script, please provide an extra command line argument '-m method-2'.
+                        For example:
+                        python -m kg_rag.rag_based_generation.Llama.text_generation -m method-2
+                        """)
+                        print("Congratulations! Setup is completed.")
+                    else:
+                        print("We have now tried two methods to download Llama. If they both do not work, then please check the Llama configuration requirement in the huggingface model card page. Aborting!")
+        else:
+            print("Aborting!")
+    else:
+        print("No problem. Llama will get installed on-the-fly when you run the model for the first time.")
+    print("Congratulations! Setup is completed.")
+    '''
+else:
+    print("As the first step, update config.yaml file and then run this python script again.")
+
+
+
```
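For context, the committed script's vectorDB check uses a bare `except:`, which hides the cause of a failure. Below is a minimal sketch of the same check with the exception surfaced; it assumes `config_data` is the dict that `kg_rag.utility` loads from `config.yaml` (as the diff shows), and the helper name `ensure_vectordb` is hypothetical, not part of this commit:

```python
# Hypothetical variant (not part of this commit): the same vectorDB check
# as in run_setup.py, but reporting the underlying exception instead of
# silently swallowing it.
import os

from kg_rag.utility import config_data  # dict loaded from config.yaml


def ensure_vectordb():
    vdb_path = config_data["VECTOR_DB_PATH"]
    print("The current VECTOR_DB_PATH is", vdb_path)
    if os.path.exists(vdb_path):
        print("vectorDB already exists!")
        return
    print("Creating vectorDB ...")
    try:
        # Imported lazily, as in the commit, so setup still starts even if
        # the vectorDB dependencies are not installed.
        from kg_rag.vectorDB.create_vectordb import create_vectordb
        create_vectordb()
    except Exception as exc:
        print(f"vectorDB creation failed ({exc}); "
              "double-check VECTOR_DB_PATH in config.yaml.")
    else:
        print("Congratulations! The disease database is completed.")
```

Given the module-style invocation the script itself mentions (`python -m kg_rag.rag_based_generation.Llama.text_generation`), the setup script would presumably be run from the repository root as `python -m kg_rag.run_setup`, though this command is an assumption, not stated in the commit.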
