app_models_directory

This commit is contained in:
ahmed531998 2023-04-05 22:35:32 +02:00
parent 071738ee4e
commit 5566a89c3c
1 changed file with 9 additions and 9 deletions

18
main.py
View File

@ -113,20 +113,20 @@ if __name__ == "__main__":
device_flag = torch.cuda.current_device() if torch.cuda.is_available() else -1
query_rewriter = pipeline("text2text-generation", model="castorini/t5-base-canard")
intent_classifier = pipeline("sentiment-analysis", model='/models/intent_classifier', device=device_flag)
entity_extractor = spacy.load("/models/entity_extractor")
offensive_classifier = pipeline("sentiment-analysis", model='/models/offensive_classifier', device=device_flag)
ambig_classifier = pipeline("sentiment-analysis", model='/models/ambig_classifier', device=device_flag)
intent_classifier = pipeline("sentiment-analysis", model='/app/models/intent_classifier', device=device_flag)
entity_extractor = spacy.load("/app/models/entity_extractor")
offensive_classifier = pipeline("sentiment-analysis", model='/app/models/offensive_classifier', device=device_flag)
ambig_classifier = pipeline("sentiment-analysis", model='/app/models/ambig_classifier', device=device_flag)
coref_resolver = spacy.load("en_coreference_web_trf")
nlu = NLU(query_rewriter, coref_resolver, intent_classifier, offensive_classifier, entity_extractor, ambig_classifier)
#load retriever and generator
retriever = SentenceTransformer('/models/BigRetriever/').to(device)
qa_generator = pipeline("text2text-generation", model="/models/train_qa", device=device_flag)
summ_generator = pipeline("text2text-generation", model="/models/train_summ", device=device_flag)
chat_generator = pipeline("text2text-generation", model="/models/train_chat", device=device_flag)
amb_generator = pipeline("text2text-generation", model="/models/train_amb_gen", device=device_flag)
retriever = SentenceTransformer('/app/models/BigRetriever/').to(device)
qa_generator = pipeline("text2text-generation", model="/app/models/train_qa", device=device_flag)
summ_generator = pipeline("text2text-generation", model="/app/models/train_summ", device=device_flag)
chat_generator = pipeline("text2text-generation", model="/app/models/train_chat", device=device_flag)
amb_generator = pipeline("text2text-generation", model="/app/models/train_amb_gen", device=device_flag)
generators = {'qa': qa_generator,
'chat': chat_generator,
'amb': amb_generator,