Revert "Revert "Revert "fix: tokens not display if brain is not build yet"""

This reverts commit 9d24713faa.
pull/13/head
sean1832 1 year ago
parent 9d24713faa
commit 6d5f8bf313

@@ -6,7 +6,6 @@ import modules.utilities as util
 import modules.language as language
 import GPT
 import modules.INFO as INFO
-import streamlit_toolkit.tools as st_tools
 
 API_KEY = util.read_file(r'.user\API-KEYS.txt').strip()
@@ -66,15 +65,12 @@ def get_stream_prompt(query, prompt_file, isQuestion, info_file=None):
     openai.api_key = API_KEY
     if isQuestion:
         data = util.read_json(INFO.BRAIN_DATA)
-        if data:
-            result = GPT.gpt_tools.search_chunks(query, data, count=1)
-            my_info = util.read_file(info_file)
-            prompt = util.read_file(prompt_file)
-            prompt = prompt.replace('<<INFO>>', result[0]['content'])
-            prompt = prompt.replace('<<QS>>', query)
-            prompt = prompt.replace('<<MY-INFO>>', my_info)
-        else:
-            prompt = ''
+        result = GPT.gpt_tools.search_chunks(query, data, count=1)
+        my_info = util.read_file(info_file)
+        prompt = util.read_file(prompt_file)
+        prompt = prompt.replace('<<INFO>>', result[0]['content'])
+        prompt = prompt.replace('<<QS>>', query)
+        prompt = prompt.replace('<<MY-INFO>>', my_info)
     else:
         chunk = textwrap.wrap(query, 10000)[0]
         prompt = util.read_file(prompt_file).replace('<<DATA>>', chunk)

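Net effect of this triple revert: the if data: guard is removed again, so get_stream_prompt assumes the brain file already exists. For reference, a minimal sketch of the guarded flow the hunk deletes; the function name build_question_prompt is hypothetical, while the empty-prompt fallback comes straight from the removed lines:

    # Sketch of the guarded prompt construction this hunk removes.
    # Assumes util.read_json returns a falsy value when INFO.BRAIN_DATA
    # has not been built yet.
    import modules.utilities as util
    import modules.INFO as INFO
    import GPT

    def build_question_prompt(query, prompt_file, info_file):
        data = util.read_json(INFO.BRAIN_DATA)
        if not data:                     # brain not built yet
            return ''                    # the fix fell back to an empty prompt
        result = GPT.gpt_tools.search_chunks(query, data, count=1)
        prompt = util.read_file(prompt_file)
        prompt = prompt.replace('<<INFO>>', result[0]['content'])
        prompt = prompt.replace('<<QS>>', query)
        return prompt.replace('<<MY-INFO>>', util.read_file(info_file))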
@@ -156,12 +156,8 @@ with body:
         max_model_token = 4096
     else:
         max_model_token = 2048
-    tokens, isTokenZero = st_tool.predict_token(query, prompt_core)
-    token_panel = st.empty()
-    if isTokenZero:
-        token_panel.markdown('Prompt token: `Not Available`')
-    else:
-        token_panel.markdown(f'Prompt token: `{tokens}/{max_model_token}`')
+    st.markdown(f'Prompt token: `{st_tool.predict_token(query, prompt_core)}/{max_model_token}`')
     if send:
         st_tool.execute_brain(query,
                               param,

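The matching UI change: the fix rendered the count through a st.empty() placeholder so it could show "Not Available" when predict_token reported zero, while the restored one-liner assumes a count is always available. A sketch of the placeholder pattern; the tokens/isTokenZero values are illustrative stand-ins for the predict_token output:

    # Sketch of the placeholder-based display the fix used (values illustrative).
    import streamlit as st

    tokens, isTokenZero = 128, False     # stand-ins for st_tool.predict_token(...)
    max_model_token = 4096

    token_panel = st.empty()             # placeholder that can be rewritten later
    if isTokenZero:
        token_panel.markdown('Prompt token: `Not Available`')
    else:
        token_panel.markdown(f'Prompt token: `{tokens}/{max_model_token}`')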
@@ -11,19 +11,6 @@ def compare_time(t1, t2):
     return t1 == t2
 
 
-def update_time():
-    if os.path.exists(file_path):
-        # get modification time of the file
-        mod_time = os.path.getmtime(file_path)
-        # convert the modification time to readable format
-        read_mod_time = time.ctime(mod_time)
-        util.write_file(read_mod_time, temp_file)
-    else:
-        raise FileNotFoundError(f'File: {file_path} does not exist.')
-
-
 def is_input_updated():
     if os.path.exists(file_path):
         # get modification time of the file

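For reference, the deleted update_time() cached a human-readable mtime, apparently so is_input_updated() could compare it against the stored value on the next run. A minimal sketch under that assumption; file_path and temp_file are module globals in the original, shown here as parameters:

    # Sketch of the mtime bookkeeping update_time() performed.
    import os
    import time

    def update_time(file_path: str, temp_file: str) -> None:
        if not os.path.exists(file_path):
            raise FileNotFoundError(f'File: {file_path} does not exist.')
        mod_time = os.path.getmtime(file_path)    # seconds since the epoch
        read_mod_time = time.ctime(mod_time)      # e.g. 'Mon Apr 10 12:01:33 2023'
        with open(temp_file, 'w') as f:           # stands in for util.write_file
            f.write(read_mod_time)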
@@ -19,14 +19,13 @@ SESSION_TIME = st.session_state['SESSION_TIME']
 CURRENT_LOG_FILE = f'{INFO.LOG_PATH}/log_{SESSION_TIME}.log'
 
 
-def predict_token(query: str, prompt_core: GPT.model.prompt_core) -> (int, bool):
+def predict_token(query: str, prompt_core: GPT.model.prompt_core) -> int:
     """predict how many tokens to generate"""
     llm = OpenAI()
-    prompt = GPT.query.get_stream_prompt(query, prompt_file=prompt_core.question,
-                                         isQuestion=True,
-                                         info_file=prompt_core.my_info)
-    token = llm.get_num_tokens(prompt)
-    return token, token == 0
+    token = llm.get_num_tokens(GPT.query.get_stream_prompt(query, prompt_file=prompt_core.question,
+                                                           isQuestion=True,
+                                                           info_file=prompt_core.my_info))
+    return token
 
 
 def create_log():
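The signature change is the API-level core of the revert: with the fix, predict_token returned (int, bool) so the caller could tell "no prompt yet" apart from a real count; it now returns a bare int again. Assuming the OpenAI class here is LangChain's LLM wrapper, which is consistent with llm.get_num_tokens above, an unbuilt brain now surfaces only as a zero count:

    # Sketch, assuming LangChain's OpenAI wrapper; count_prompt_tokens is a
    # hypothetical helper, not part of the repo.
    from langchain.llms import OpenAI

    def count_prompt_tokens(prompt: str) -> int:
        llm = OpenAI()
        return llm.get_num_tokens(prompt)   # 0 for the empty prompt the fix produced

    # fixed version:    token, isTokenZero = predict_token(query, prompt_core)
    # reverted version: token = predict_token(query, prompt_core)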
@@ -256,8 +255,7 @@ def process_response_stream(query, target_model, prompt_file: str, params: GPT.m
             break
         # error handling
         if choice['finish_reason'] == 'length':
-            st.warning("⚠️ " + _('Result cut off. max_tokens') + f' ({params.max_tokens}) ' + _(
-                'too small. Consider increasing max_tokens.'))
+            st.warning("⚠️ " + _('Result cut off. max_tokens') + f' ({params.max_tokens}) ' + _('too small. Consider increasing max_tokens.'))
             break
     if 'gpt-3.5-turbo' in target_model:
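This st.warning hunk, and the identical one in execute_brain below, only re-joins a wrapped call onto a single line; behavior is unchanged. For context, a sketch of how finish_reason is typically checked in a streamed completion loop; the chunk shape is an assumption consistent with the pre-1.0 openai API and the choice['delta'] access below:

    # Sketch of finish_reason handling in a streamed completion loop.
    # Assumes chunk['choices'][0] carries 'finish_reason' and, for chat
    # models, a 'delta' dict (pre-1.0 openai streaming shape).
    def consume_stream(response, max_tokens: int):
        for chunk in response:
            choice = chunk['choices'][0]
            if choice.get('finish_reason') == 'stop':
                break
            if choice.get('finish_reason') == 'length':
                print(f'Result cut off. max_tokens ({max_tokens}) too small.')
                break
            yield choice.get('delta', {}).get('content', '')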
@@ -282,7 +280,7 @@ def rebuild_brain(chunk_size: int):
     for idx, chunk_num in GPT.query.build(chunk_size):
         progress_bar.progress((idx + 1) / chunk_num)
     msg.success(_('Brain Updated!'), icon="👍")
-    time.sleep(1)
+    time.sleep(2)
 
 
 def execute_brain(q, params: GPT.model.param,
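The only change in this hunk is the sleep going back from 1 s to 2 s after the success message. For context, a self-contained sketch of the progress-bar pattern; build_chunks is a hypothetical stand-in for GPT.query.build, which appears to yield (index, total_chunks):

    # Sketch of rebuild_brain's progress loop (build_chunks is hypothetical).
    import time
    import streamlit as st

    def build_chunks(chunk_size: int):
        total = 4                                  # illustrative chunk count
        for idx in range(total):
            yield idx, total

    progress_bar = st.progress(0.0)
    for idx, chunk_num in build_chunks(512):
        progress_bar.progress((idx + 1) / chunk_num)
    st.success('Brain Updated!', icon="👍")
    time.sleep(2)                                  # value this commit restores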
@@ -320,8 +318,7 @@ def execute_brain(q, params: GPT.model.param,
             break
         # error handling
         if choice['finish_reason'] == 'length':
-            st.warning("⚠️ " + _('Result cut off. max_tokens') + f' ({params.max_tokens}) ' + _(
-                'too small. Consider increasing max_tokens.'))
+            st.warning("⚠️ " + _('Result cut off. max_tokens') + f' ({params.max_tokens}) ' + _('too small. Consider increasing max_tokens.'))
             break
     if 'gpt-3.5-turbo' in model.question_model:
         delta = choice['delta']
