From 680e542607db5a16e491d0567a6313fb705b0193 Mon Sep 17 00:00:00 2001
From: sean1832
Date: Sat, 4 Mar 2023 01:47:28 +1100
Subject: [PATCH] feat: Add support for GPT-3.5-Turbo (ChatGPT) model

This commit adds support for the GPT-3.5-Turbo model in ChatGPT. This model
is an improved version of the existing GPT-3 model and offers better
performance and accuracy in language generation tasks.
---
 GPT/query.py               | 11 ++++-------
 streamlit_toolkit/tools.py | 20 +++++++++++++++++---
 2 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/GPT/query.py b/GPT/query.py
index 72e4acb..6cf8fea 100644
--- a/GPT/query.py
+++ b/GPT/query.py
@@ -79,11 +79,8 @@ def get_stream_prompt(query, prompt_file, isQuestion, info_file=None):
 
 def run_stream(query, model, prompt_file, isQuestion, params, info_file=None):
     prompt = get_stream_prompt(query, prompt_file, isQuestion, info_file)
-    client = GPT.gpt_tools.gpt3_stream(prompt, model, params)
-    return client
-
-
-def run_35_Stream(query, prompt_file, isQuestion, params, info_file=None):
-    prompt = get_stream_prompt(query, prompt_file, isQuestion, info_file)
-    client = GPT.gpt_tools.gpt35_stream(prompt, params)
+    if model == 'gpt-3.5-turbo':
+        client = GPT.gpt_tools.gpt35_stream(prompt, params)
+    else:
+        client = GPT.gpt_tools.gpt3_stream(prompt, model, params)
     return client
diff --git a/streamlit_toolkit/tools.py b/streamlit_toolkit/tools.py
index 3b69465..43d78cf 100644
--- a/streamlit_toolkit/tools.py
+++ b/streamlit_toolkit/tools.py
@@ -257,7 +257,15 @@ def process_response_stream(query, target_model, prompt_file: str, params: GPT.m
         if choice['finish_reason'] == 'length':
             st.warning("⚠️ " + _('Result cut off. max_tokens') + f' ({params.max_tokens}) ' + _('too small. Consider increasing max_tokens.'))
             break
-        char = choice['text']
+
+        if 'gpt-3.5-turbo' in target_model:
+            delta = choice['delta']
+            if "role" in delta or delta == {}:
+                char = ''
+            else:
+                char = delta['content']
+        else:
+            char = choice['text']
         response = previous_chars + char
         response_panel.info(f'{response}')
         previous_chars += char
@@ -312,8 +320,14 @@ def execute_brain(q, params: GPT.model.param,
             if choice['finish_reason'] == 'length':
                 st.warning("⚠️ " + _('Result cut off. max_tokens') + f' ({params.max_tokens}) ' + _('too small. Consider increasing max_tokens.'))
                 break
-
-            char = choice['text']
+            if 'gpt-3.5-turbo' in model.question_model:
+                delta = choice['delta']
+                if "role" in delta or delta == {}:
+                    char = ''
+                else:
+                    char = delta['content']
+            else:
+                char = choice['text']
             answer = previous_chars + char
             if is_question_selected:
                 answer_panel.info(f'{answer}')