Increase readability of chatbot.py

This is an amazing script, and I thought I would do it justice by making it more readable for onlookers.
Josh Shepherd authored 2023-03-31 01:37:21 +01:00 (committed by GitHub)
parent d8cdb1c3d8
commit be9b3304ac


@@ -2,24 +2,25 @@ import openai
 from termcolor import colored
 import streamlit as st
-from database import get_redis_connection,get_redis_results
-from config import CHAT_MODEL,COMPLETIONS_MODEL, INDEX_NAME
+from database import get_redis_connection, get_redis_results
+from config import CHAT_MODEL, COMPLETIONS_MODEL, INDEX_NAME
 
 redis_client = get_redis_connection()
 
 # A basic class to create a message as a dict for chat
 class Message:
 
-    def __init__(self,role,content):
+    def __init__(self, role,content):
         self.role = role
         self.content = content
 
     def message(self):
-        return {"role": self.role,"content": self.content}
+        return {
+            "role": self.role,
+            "content": self.content
+        }
 
 # New Assistant class to add a vector database call to its responses
 class RetrievalAssistant:
@@ -28,7 +29,6 @@ class RetrievalAssistant:
         self.conversation_history = []
 
     def _get_assistant_response(self, prompt):
-
         try:
             completion = openai.ChatCompletion.create(
                 model=CHAT_MODEL,
@@ -36,7 +36,10 @@ class RetrievalAssistant:
                 temperature=0.1
             )
 
-            response_message = Message(completion['choices'][0]['message']['role'],completion['choices'][0]['message']['content'])
+            response_message = Message(
+                completion['choices'][0]['message']['role'],
+                completion['choices'][0]['message']['content']
+            )
             return response_message.message()
 
         except Exception as e:
@@ -44,11 +47,15 @@ class RetrievalAssistant:
             return f'Request failed with exception {e}'
 
     # The function to retrieve Redis search results
    def _get_search_results(self,prompt):
         latest_question = prompt
-        search_content = get_redis_results(redis_client,latest_question,INDEX_NAME)['result'][0]
-        return search_content
+        search_content = get_redis_results(
+            redis_client,latest_question,
+            INDEX_NAME
+        )['result'][0]
+        return search_content
 
     def ask_assistant(self, next_user_prompt):
         [self.conversation_history.append(x) for x in next_user_prompt]
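The reformatted _get_search_results still indexes the return value with ['result'][0], so it assumes get_redis_results (defined in database.py, not shown in this diff) returns something column-addressable, such as a pandas DataFrame with a 'result' column. A stubbed sketch of that contract, with no Redis required:

    # Stubbed sketch of the contract _get_search_results relies on.
    # The real get_redis_results lives in database.py and queries the Redis index;
    # this stand-in only mimics the assumed return shape.
    import pandas as pd

    def get_redis_results(client, query, index_name):
        # Pretend two documents matched, best match first (illustrative values).
        return pd.DataFrame({
            "id": ["doc:1", "doc:2"],
            "result": ["First matching passage.", "Second passage."],
        })

    search_content = get_redis_results(None, "latest question", "index-name")["result"][0]
    print(search_content)  # -> "First matching passage."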
@@ -56,14 +63,30 @@ class RetrievalAssistant:
 
         # Answer normally unless the trigger sequence is used "searching_for_answers"
         if 'searching for answers' in assistant_response['content'].lower():
-            question_extract = openai.Completion.create(model=COMPLETIONS_MODEL,prompt=f"Extract the user's latest question and the year for that question from this conversation: {self.conversation_history}. Extract it as a sentence stating the Question and Year")
+            question_extract = openai.Completion.create(
+                model = COMPLETIONS_MODEL,
+                prompt=f'''
+                Extract the user's latest question and the year for that question from this
+                conversation: {self.conversation_history}. Extract it as a sentence stating the Question and Year"
+                '''
+            )
             search_result = self._get_search_results(question_extract['choices'][0]['text'])
 
             # We insert an extra system prompt here to give fresh context to the Chatbot on how to use the Redis results
             # In this instance we add it to the conversation history, but in production it may be better to hide
-            self.conversation_history.insert(-1,{"role": 'system',"content": f"Answer the user's question using this content: {search_result}. If you cannot answer the question, say 'Sorry, I don't know the answer to this one'"})
+            self.conversation_history.insert(
+                -1,{
+                    "role": 'system',
+                    "content": f'''
+                    Answer the user's question using this content: {search_result}.
+                    If you cannot answer the question, say 'Sorry, I don't know the answer to this one'
+                    '''
+                }
+            )
 
-            assistant_response = self._get_assistant_response(self.conversation_history)
+            assistant_response = self._get_assistant_response(
+                self.conversation_history
+            )
 
             self.conversation_history.append(assistant_response)
             return assistant_response
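For readers tracing the restructured ask_assistant flow: next_user_prompt is appended to conversation_history element by element, so callers pass a list of chat-format dicts, and the Redis lookup only fires when the model's reply contains the 'searching for answers' trigger. A hedged usage sketch, assuming a no-argument RetrievalAssistant() constructor (as the diff suggests), a reachable Redis instance, and an OpenAI API key in the environment:

    # Hedged usage sketch; the system prompt wording is illustrative, not from this diff.
    from chatbot import RetrievalAssistant

    assistant = RetrievalAssistant()

    # ask_assistant expects a list of chat-format dicts, not a bare string.
    reply = assistant.ask_assistant([
        {"role": "system",
         "content": "If you need to look something up, say 'searching for answers'."},
        {"role": "user", "content": "Can you check the details for me?"},
    ])

    print(reply["content"])
    assistant.pretty_print_conversation_history()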
@@ -71,14 +94,18 @@ class RetrievalAssistant:
             self.conversation_history.append(assistant_response)
             return assistant_response
 
-    def pretty_print_conversation_history(self, colorize_assistant_replies=True):
+    def pretty_print_conversation_history(
+            self,
+            colorize_assistant_replies=True):
         for entry in self.conversation_history:
-            if entry['role'] == 'system':
+            if entry['role']=='system':
                 pass
             else:
                 prefix = entry['role']
                 content = entry['content']
-                output = colored(prefix +':\n' + content, 'green') if colorize_assistant_replies and entry['role'] == 'assistant' else prefix +':\n' + content
-                #prefix = entry['role']
+                if colorize_assistant_replies and entry['role'] == 'assistant':
+                    output = colored(f"{prefix}:\n{content}, green")
+                else:
+                    output = colored(f"{prefix}:\n{content}")
                 print(output)
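One caveat about the new colorizing branch: the added lines move ", green" inside the f-string, so termcolor never receives a color argument, and the else branch calls colored() with no color at all. termcolor's colored(text, color) takes the color as a separate parameter; a corrected sketch of those two branches (not what this commit adds verbatim):

    # Corrected sketch: pass the color to colored() as its own argument.
    from termcolor import colored

    prefix, content = "assistant", "Example reply"   # illustrative values
    colorize_assistant_replies = True

    if colorize_assistant_replies and prefix == "assistant":
        output = colored(f"{prefix}:\n{content}", "green")
    else:
        output = f"{prefix}:\n{content}"  # plain text needs no colored() call
    print(output)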