enhancements

ahmed531998 2023-04-16 02:44:09 +02:00
parent 63368fc9aa
commit 4586b43a44
4 changed files with 43 additions and 9 deletions

DM.py (13 changes)

@@ -4,6 +4,7 @@ class DM:
     def __init__(self, max_history_length=3):
         self.working_history_sep = ""
         self.working_history_consec = ""
+        self.chitchat_history_consec = ""
         self.max_history_length = max_history_length
         self.chat_history = []
         self.curr_state = None
@@ -12,9 +13,21 @@ class DM:
         to_consider = [x['modified_query'] for x in self.chat_history[-self.max_history_length*2:]]
         self.working_history_consec = " . ".join(to_consider)
         self.working_history_sep = " ||| ".join(to_consider)
+        chat = []
+        for utt in self.chat_history:
+            if utt['intent'] == 'CHITCHAT':
+                if len(chat) == 8:
+                    chat = chat[1:]
+                chat.append(utt['modified_query'])
+        self.chitchat_history_consec = '. '.join(chat)

     def get_consec_history(self):
         return self.working_history_consec

+    def get_chitchat_history(self):
+        return self.chitchat_history_consec
+
     def get_sep_history(self):
         return self.working_history_sep
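The new bookkeeping keeps at most the last eight CHITCHAT turns, joins them with '. ' into chitchat_history_consec, and exposes that string through get_chitchat_history() for the response generator. A minimal standalone sketch of the same sliding-window idea, using collections.deque instead of the list slicing above (ChitchatHistory and max_turns are illustrative names, not part of the repository):

from collections import deque

class ChitchatHistory:
    def __init__(self, max_turns=8):
        # deque(maxlen=...) drops the oldest turn automatically,
        # mirroring the `chat = chat[1:]` trimming in DM.update.
        self.turns = deque(maxlen=max_turns)

    def add(self, turn):
        # Only CHITCHAT turns feed the small-talk history.
        if turn['intent'] == 'CHITCHAT':
            self.turns.append(turn['modified_query'])

    def as_string(self):
        # Same '. ' joining as self.chitchat_history_consec.
        return '. '.join(self.turns)

history = ChitchatHistory()
history.add({'intent': 'CHITCHAT', 'modified_query': 'hi Janet'})
history.add({'intent': 'QA', 'modified_query': 'what is this dataset about?'})
history.add({'intent': 'CHITCHAT', 'modified_query': 'how are you today?'})
print(history.as_string())  # -> "hi Janet. how are you today?"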

NLU.py (7 changes)

@@ -17,9 +17,14 @@ class NLU:
         doc = self.coref_resolver(to_resolve)
         token_mention_mapper = {}
         output_string = ""
-        clusters = [
+        cand_clusters = [
             val for key, val in doc.spans.items() if key.startswith("coref_cluster")
         ]
+        clusters = []
+        for cluster in cand_clusters:
+            if cluster[0].text == "I":
+                continue
+            clusters.append(cluster)

         # Iterate through every found cluster
         for cluster in clusters:
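The filter skips any coreference cluster whose first mention is the bare pronoun "I", so the resolver no longer rewrites first-person references. A standalone sketch of the same filtering, assuming (as the code above does) that the coreference component stores clusters in doc.spans under keys beginning with "coref_cluster"; the stand-in objects below exist only to make the snippet runnable without a model:

from types import SimpleNamespace

def keep_resolvable_clusters(spans):
    # Collect candidate clusters exactly as in NLU ...
    cand_clusters = [
        val for key, val in spans.items() if key.startswith("coref_cluster")
    ]
    # ... then drop clusters anchored on the first-person pronoun.
    return [cluster for cluster in cand_clusters if cluster[0].text != "I"]

# Minimal stand-in for doc.spans: each mention exposes .text like a spaCy span.
fake_spans = {
    "coref_cluster_1": [SimpleNamespace(text="I"), SimpleNamespace(text="me")],
    "coref_cluster_2": [SimpleNamespace(text="the paper"), SimpleNamespace(text="it")],
    "resolved_text": [SimpleNamespace(text="ignored")],
}
print(len(keep_resolvable_clusters(fake_spans)))  # -> 1, only the "the paper" cluster survives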


@@ -79,7 +79,7 @@ class ResponseGenerator:
         return self.db[db_type].iloc[[I[0]][0]].reset_index(drop=True).loc[0]

-    def gen_response(self, action, utterance=None, username=None, state=None, consec_history=None):
+    def gen_response(self, action, utterance=None, username=None, state=None, consec_history=None, chitchat_history=None):
         if action == "Help":
             return "Hey it's Janet! I am here to help you make use of the datasets and papers in the catalogue. I can answer questions whose answers may be inside the papers. I can summarize papers for you. I can also chat with you. So, whichever it is, I am ready to chat!"
         elif action == "Recommend":
@@ -107,12 +107,22 @@ class ResponseGenerator:
                 return str("Here is the most recent post by: " + self.post['author'] + ', which is about ' + ', '.join(self.post['tags']) + self.post['content'])
             else:
                 return str("Here is the most recent post by: " + self.post['author'] + ', ' + self.post['content'])
+            if len(self.post) > 0:
+                ev = self.post['content']
+                #generate the answer
+                gen_seq = 'question: '+utterance+" context: "+ev
+                gen_kwargs = {"length_penalty": 0.5, "num_beams":2, "max_length": 60, "repetition_penalty": 2.5, "temperature": 2}
+                answer = self.generators['qa'](gen_seq, **gen_kwargs)[0]['generated_text']
+                if len(self.post['tags']) > 0:
+                    return "The post is about: " + answer + " \n There is a special focus on " + ', '.join(self.post['tags'])
+                else:
+                    return "The post is about: " + answer
             return "I could not find the post you are looking for."

         elif action == "ConvGen":
             gen_kwargs = {"length_penalty": 2.5, "num_beams":2, "max_length": 30, "repetition_penalty": 2.5, "temperature": 2}
             #answer = self.generators['chat']('history: '+ consec_history + ' ' + utterance + ' persona: ' + 'I am Janet. My name is Janet. I am an AI developed by CNR to help VRE users.' , **gen_kwargs)[0]['generated_text']
-            answer = self.generators['chat']('question: ' + utterance + 'context: My name is Janet. I am an AI developed by CNR to help VRE users. ' + consec_history , **gen_kwargs)[0]['generated_text']
+            answer = self.generators['chat']('question: ' + utterance + 'context: My name is Janet. I am an AI developed by CNR to help VRE users. ' + chitchat_history , **gen_kwargs)[0]['generated_text']
             return answer

         elif action == "findPaper":
@@ -186,7 +196,7 @@ class ResponseGenerator:
             #handle return random 2 answers
             gen_kwargs = {"length_penalty": 0.5, "num_beams":2, "max_length": 60, "repetition_penalty": 2.5, "temperature": 2}
             answer = self.generators['qa'](gen_seq, **gen_kwargs)[0]['generated_text']
-            return "According to the following evidence: " + evidence + " \n ........" + "The answer is: " + answer
+            return "According to the following evidence: " + evidence + " \n _______ \n " + "The answer is: " + answer

         elif action == "sumPaper":
             if len(self.paper) == 0:
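The dotted filler becomes an underscore rule, and the exact substring "_______ \n The answer is: " is what predict() in main.py now splits on to keep only the answer in the dialogue history. Since the two literals must stay in sync, a shared constant is one way to express the same thing; ANSWER_SEPARATOR and both helpers below are illustrative names, not code from the repository:

ANSWER_SEPARATOR = " \n _______ \n The answer is: "

def format_qa_response(evidence, answer):
    # Response-generator side: evidence block, separator, then the answer.
    return "According to the following evidence: " + evidence + ANSWER_SEPARATOR + answer

def extract_answer(response):
    # main.py side: keep only the answer for the QA history entry.
    return response.split(ANSWER_SEPARATOR, 1)[1]

resp = format_qa_response("the dataset covers 2019-2021", "three years of data")
print(extract_answer(resp))  # -> "three years of data"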
@@ -213,8 +223,12 @@ class ResponseGenerator:
             if len(self.dataset) == 0:
                 return 'Please specify the title, the topic of the dataset of interest.'
+            elif state['intent'] == 'EXPLAINPOST' and len(state['entities']) == 0:
+                if len(self.post) != 0:
+                    return self.gen_response(action="findPost", utterance=utterance, username=username, state=state, consec_history=consec_history)
+                return 'Please specify the the topic or the author of the post.'
             else:
                 gen_kwargs = {"length_penalty": 2.5, "num_beams":8, "max_length": 120, "repetition_penalty": 2.5, "temperature": 2}
                 question = self.generators['amb']('question: '+ utterance + ' context: ' + consec_history , **gen_kwargs)[0]['generated_text']
                 return question

         return "I am unable to generate the response. Can you please provide me with a prefered response in the feedback form so I can learn?"

main.py (12 changes)

@@ -70,13 +70,13 @@ def predict():
     text = request.get_json().get("message")
     message = {}
     if text == "<HELP_ON_START>":
-        state = {'help': True, 'inactive': False, 'modified_query':""}
+        state = {'help': True, 'inactive': False, 'modified_query':"", 'intent':""}
         dm.update(state)
         action = dm.next_action()
         response = rg.gen_response(action)
         message = {"answer": response}
     elif text == "<RECOMMEND_ON_IDLE>":
-        state = {'help': False, 'inactive': True, 'modified_query':"recommed: "}
+        state = {'help': False, 'inactive': True, 'modified_query':"recommed: ", 'intent':""}
         dm.update(state)
         action = dm.next_action()
         response = rg.gen_response(action, username=user.username)
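Both synthetic states now carry an 'intent' key, so later reads of state['intent'] (the new chit-chat filter in DM.update and the QA check further down) cannot fail with a KeyError on these turns. A small illustrative helper that keeps every state dict shape-consistent; make_state is not part of the repository:

def make_state(modified_query, help_needed=False, inactive=False, intent=""):
    # Every state dict carries the same keys, so downstream readers never miss one.
    return {'help': help_needed, 'inactive': inactive,
            'modified_query': modified_query, 'intent': intent}

help_state = make_state("", help_needed=True)          # <HELP_ON_START>
idle_state = make_state("recommed: ", inactive=True)   # <RECOMMEND_ON_IDLE> (literal kept as in the diff)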
@@ -100,9 +100,11 @@ def predict():
         rec.generate_recommendations(new_user_interests, new_vre_material)
         dm.update(state)
         action = dm.next_action()
-        response = rg.gen_response(action=action, utterance=state['modified_query'], state=dm.get_recent_state(), consec_history=dm.get_consec_history())
+        response = rg.gen_response(action=action, utterance=state['modified_query'], state=dm.get_recent_state(), consec_history=dm.get_consec_history(), chitchat_history=dm.get_chitchat_history())
         message = {"answer": response, "query": text, "cand": "candidate", "history": dm.get_consec_history(), "modQuery": state['modified_query']}
-        new_state = {'modified_query': response}
+        if state['intent'] == "QA":
+            response = response.split("_______ \n The answer is: ")[1]
+        new_state = {'modified_query': response, 'intent': state['intent']}
         dm.update(new_state)
         reply = jsonify(message)
         return reply
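For QA turns only the answer, not the quoted evidence, is written back into the dialogue state, so the working history that DM builds from 'modified_query' stays short. The committed code indexes split(...)[1] directly, which raises IndexError if the marker is ever absent; a slightly more defensive sketch of the same step, reusing the separator idea from the response-generator note above (qa_history_entry is an illustrative name):

def qa_history_entry(response, intent, marker="_______ \n The answer is: "):
    # Keep only the answer portion for QA turns; other intents keep the full reply.
    if intent == "QA" and marker in response:
        response = response.split(marker, 1)[1]
    return {'modified_query': response, 'intent': intent}

print(qa_history_entry("According to the following evidence: ... \n _______ \n The answer is: 42", "QA"))
# -> {'modified_query': '42', 'intent': 'QA'}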
@@ -167,7 +169,7 @@ if __name__ == "__main__":
     rg = ResponseGenerator(index,db, rec, generators, retriever)

     cur.execute('CREATE TABLE IF NOT EXISTS feedback_experimental (id serial PRIMARY KEY,'
                 'query text NOT NULL,'
                 'history text NOT NULL,'