commit 0a95375a1a
parent f186889f03
This commit is contained in: janet
NLU.py

@@ -89,7 +89,7 @@ class NLU:
         Query -> coref resolution & intent extraction -> if intents are not confident or if query is ambig -> rewrite query and recheck -> if still ambig, ask a clarifying question
         """
         if utterance.lower() in ["help", "list resources", "list papers", "list datasets", "list topics"]:
-            return {"modified_query": utterance, "intent": "COMMAND", "entities": [], "is_offensive": False, "is_clear": True}
+            return {"modified_query": utterance.lower(), "intent": "COMMAND", "entities": [], "is_offensive": False, "is_clear": True}
 
         self.to_process = utterance
 
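The docstring in the hunk above describes the NLU flow: coreference resolution and intent extraction, a rewrite-and-recheck pass when the intents are not confident or the query is ambiguous, and a clarifying question as the last resort. The following is a minimal sketch of that control flow only; every helper and the confidence threshold are hypothetical stubs for illustration, not Janet's actual NLU code.

CONFIDENCE_THRESHOLD = 0.7  # assumed cut-off for a "confident" intent


def resolve_coreferences(query: str, history: list) -> str:
    return query  # stub: a real resolver would substitute pronouns using the history


def extract_intent(query: str):
    return "QA", 0.5, False  # stub: returns (intent, confidence, is_clear)


def rewrite_query(query: str, history: list) -> str:
    return query  # stub: a real rewriter would expand or paraphrase the query


def understand(query: str, history: list) -> dict:
    resolved = resolve_coreferences(query, history)
    intent, confidence, is_clear = extract_intent(resolved)

    if confidence < CONFIDENCE_THRESHOLD or not is_clear:
        # Intents not confident or query ambiguous: rewrite the query and recheck.
        resolved = rewrite_query(resolved, history)
        intent, confidence, is_clear = extract_intent(resolved)

    if confidence < CONFIDENCE_THRESHOLD or not is_clear:
        # Still ambiguous: ask a clarifying question instead of committing to an intent.
        return {"modified_query": resolved, "intent": "CLARIFY", "is_clear": False}

    return {"modified_query": resolved, "intent": intent, "is_clear": True}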
@@ -247,7 +247,7 @@ class ResponseGenerator:
 
         elif action == "command":
             if utterance == "help":
-                return self.gen_response(action="Help", name=name, vrename=vrename)
+                return self.gen_response(action="getHelp", vrename=vrename)
             elif utterance == "list resources":
                 return self.gen_response(action="listResources", vrename=vrename)
             elif utterance == "list papers":
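Read together, the two hunks above stay consistent (this is a reading of the diff, not something stated in the commit): NLU now stores the lowercased utterance in modified_query for recognized commands, so the exact string comparisons in ResponseGenerator's command branch still match when the user types "Help" or "LIST RESOURCES". A simplified, hypothetical stand-in for that dispatch, covering only the two actions visible in the diff:

# Simplified stand-in for the command dispatch shown above, not the real class.
COMMANDS = {"help": "getHelp", "list resources": "listResources"}


def dispatch(modified_query: str) -> str:
    # Relies on NLU having lowercased modified_query before it reaches this point.
    return COMMANDS.get(modified_query, "unknown")


state = {"modified_query": "LIST RESOURCES".lower()}  # what NLU now produces for a command
print(dispatch(state["modified_query"]))              # -> "listResources"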
main.py

@@ -130,7 +130,7 @@ def predict():
     rec.generate_recommendations(users[token]['username'], new_user_interests, new_vre_material)
     dm.update(state)
     action = dm.next_action()
-    response = rg.gen_response(action=action, utterance=state['modified_query'], state=dm.get_recent_state(), consec_history=dm.get_consec_history(), chitchat_history=dm.get_chitchat_history(), vrename=vre.name)
+    response = rg.gen_response(action=action, utterance=state['modified_query'], state=dm.get_recent_state(), consec_history=dm.get_consec_history(), chitchat_history=dm.get_chitchat_history(), vrename=vre.name, username=users[token]['username'], name=users[token]['name'].split()[0])
     message = {"answer": response, "query": text, "cand": "candidate", "history": dm.get_consec_history(), "modQuery": state['modified_query']}
     if state['intent'] == "QA":
         response = response.split("_______ \n The answer is: ")[1]
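The QA branch of predict() assumes gen_response returns the answer embedded in a fixed template and keeps only the part after the marker. A toy illustration with an invented response string (only the marker string is taken from the diff):

# Toy illustration of the QA post-processing: keep only what follows the marker.
response = "retrieved context goes here _______ \n The answer is: an example answer"
answer = response.split("_______ \n The answer is: ")[1]
print(answer)  # -> "an example answer"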