fix: tokens not displayed if brain is not built yet

pull/13/head
sean1832 1 year ago
parent c8c12b81e2
commit e1e94db2b3

@@ -6,6 +6,7 @@ import modules.utilities as util
 import modules.language as language
 import GPT
 import modules.INFO as INFO
+import streamlit_toolkit.tools as st_tools

 API_KEY = util.read_file(r'.user\API-KEYS.txt').strip()
@@ -65,12 +66,15 @@ def get_stream_prompt(query, prompt_file, isQuestion, info_file=None):
     openai.api_key = API_KEY
     if isQuestion:
         data = util.read_json(INFO.BRAIN_DATA)
-        result = GPT.gpt_tools.search_chunks(query, data, count=1)
-        my_info = util.read_file(info_file)
-        prompt = util.read_file(prompt_file)
-        prompt = prompt.replace('<<INFO>>', result[0]['content'])
-        prompt = prompt.replace('<<QS>>', query)
-        prompt = prompt.replace('<<MY-INFO>>', my_info)
+        if data:
+            result = GPT.gpt_tools.search_chunks(query, data, count=1)
+            my_info = util.read_file(info_file)
+            prompt = util.read_file(prompt_file)
+            prompt = prompt.replace('<<INFO>>', result[0]['content'])
+            prompt = prompt.replace('<<QS>>', query)
+            prompt = prompt.replace('<<MY-INFO>>', my_info)
+        else:
+            prompt = ''
     else:
         chunk = textwrap.wrap(query, 10000)[0]
         prompt = util.read_file(prompt_file).replace('<<DATA>>', chunk)

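Why the guard matters: before the brain index has been built, `util.read_json(INFO.BRAIN_DATA)` yields no chunks, so the old unguarded `result[0]['content']` raised an IndexError before any prompt could be assembled. A minimal sketch of the failure mode and the fix, with a hypothetical stand-in for `GPT.gpt_tools.search_chunks` (not the project's implementation):

```python
# Stand-in for GPT.gpt_tools.search_chunks: rank chunks by naive word
# overlap with the query and return the top `count`. On an empty
# knowledge base (brain not built) it returns [].
def search_chunks(query, data, count=1):
    score = lambda d: len(set(query.split()) & set(d['content'].split()))
    return sorted(data, key=score, reverse=True)[:count]

def build_prompt(query, data, template):
    if not data:  # the fix: bail out before indexing into result
        return ''
    result = search_chunks(query, data, count=1)
    return template.replace('<<INFO>>', result[0]['content']).replace('<<QS>>', query)

print(repr(build_prompt('hi', [], 'Q: <<QS>> | Info: <<INFO>>')))  # -> ''
```

Returning `''` instead of raising lets the caller (token prediction below) treat "no brain yet" as a zero-token prompt rather than a crash.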
@@ -156,8 +156,12 @@ with body:
         max_model_token = 4096
     else:
         max_model_token = 2048
-    st.markdown(f'Prompt token: `{st_tool.predict_token(query, prompt_core)}/{max_model_token}`')
+    tokens, isTokenZero = st_tool.predict_token(query, prompt_core)
+    token_panel = st.empty()
+    if isTokenZero:
+        token_panel.markdown('Prompt token: `Not Available`')
+    else:
+        token_panel.markdown(f'Prompt token: `{tokens}/{max_model_token}`')
     if send:
         st_tool.execute_brain(query,
                               param,

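The UI side now treats a zero count as "brain not built yet" instead of rendering `0/4096`. A small sketch of the placeholder pattern, assuming only the public Streamlit API (`st.empty` reserves a slot whose content can be overwritten in place, so the readout never stacks duplicate widgets):

```python
import streamlit as st

def render_token_panel(tokens: int, max_model_token: int) -> None:
    panel = st.empty()  # single mutable slot in the page
    if tokens == 0:     # zero tokens means the brain is not built yet
        panel.markdown('Prompt token: `Not Available`')
    else:
        panel.markdown(f'Prompt token: `{tokens}/{max_model_token}`')
```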
@@ -11,6 +11,19 @@ def compare_time(t1, t2):
     return t1 == t2

+def update_time():
+    if os.path.exists(file_path):
+        # get modification time of the file
+        mod_time = os.path.getmtime(file_path)
+        # convert the modification time to readable format
+        read_mod_time = time.ctime(mod_time)
+        util.write_file(read_mod_time, temp_file)
+    else:
+        raise FileNotFoundError(f'File: {file_path} does not exist.')
+
 def is_input_updated():
     if os.path.exists(file_path):
         # get modification time of the file

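For context, the new `update_time` helper persists the watched file's modification time so a later run can detect changes by comparing against the stored stamp. A hedged sketch of that technique with placeholder paths (not the project's real `file_path`/`temp_file`):

```python
import os
import time

def read_stamp(path: str) -> str:
    return open(path).read() if os.path.exists(path) else ''

def is_updated(watched: str, stamp_file: str) -> bool:
    """True when `watched` changed since the stamp was last written."""
    current = time.ctime(os.path.getmtime(watched))
    changed = current != read_stamp(stamp_file)
    if changed:
        with open(stamp_file, 'w') as f:
            f.write(current)  # remember the new modification time
    return changed
```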
@@ -19,13 +19,14 @@ SESSION_TIME = st.session_state['SESSION_TIME']
 CURRENT_LOG_FILE = f'{INFO.LOG_PATH}/log_{SESSION_TIME}.log'

-def predict_token(query: str, prompt_core: GPT.model.prompt_core) -> int:
+def predict_token(query: str, prompt_core: GPT.model.prompt_core) -> (int, bool):
     """predict how many tokens to generate"""
     llm = OpenAI()
-    token = llm.get_num_tokens(GPT.query.get_stream_prompt(query, prompt_file=prompt_core.question,
-                                                           isQuestion=True,
-                                                           info_file=prompt_core.my_info))
-    return token
+    prompt = GPT.query.get_stream_prompt(query, prompt_file=prompt_core.question,
+                                         isQuestion=True,
+                                         info_file=prompt_core.my_info)
+    token = llm.get_num_tokens(prompt)
+    return token, token == 0

 def create_log():
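`predict_token` now reports a second flag instead of a bare count, so callers don't have to guess what `0` means. A sketch of the idiom with a trivial whitespace tokenizer standing in for LangChain's `llm.get_num_tokens`:

```python
def predict_token(prompt: str) -> tuple:
    """Return (token_count, is_zero); zero signals 'brain not built'."""
    tokens = len(prompt.split())  # stand-in for llm.get_num_tokens(prompt)
    return tokens, tokens == 0

tokens, is_token_zero = predict_token('')
assert is_token_zero  # empty prompt -> the UI shows 'Not Available'
```

As an aside, the annotation `-> (int, bool)` is accepted at runtime but evaluates to a plain tuple; `-> tuple[int, bool]` (or `typing.Tuple[int, bool]`) is the conventional spelling.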
@@ -255,7 +256,8 @@ def process_response_stream(query, target_model, prompt_file: str, params: GPT.m
             break
         # error handling
         if choice['finish_reason'] == 'length':
-            st.warning("⚠️ " + _('Result cut off. max_tokens') + f' ({params.max_tokens}) ' + _('too small. Consider increasing max_tokens.'))
+            st.warning("⚠️ " + _('Result cut off. max_tokens') + f' ({params.max_tokens}) ' + _(
+                'too small. Consider increasing max_tokens.'))
             break
         if 'gpt-3.5-turbo' in target_model:
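The wrapped `st.warning` call is behavior-neutral; the interesting part is the `finish_reason` protocol the loop relies on: the API marks a streamed choice with `stop` on normal completion and `length` when `max_tokens` ran out. A self-contained sketch with a fake event list in place of the OpenAI streaming response:

```python
# Fake event stream standing in for the OpenAI streaming response.
events = [
    {'delta': {'content': 'Hello'}, 'finish_reason': None},
    {'delta': {'content': ' world'}, 'finish_reason': 'length'},
]

answer = ''
for choice in events:
    if choice['finish_reason'] == 'stop':    # normal completion
        break
    if choice['finish_reason'] == 'length':  # ran out of max_tokens
        print('warning: result cut off; consider raising max_tokens')
        break
    answer += choice['delta'].get('content', '')
print(answer)  # -> 'Hello'
```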
@@ -280,7 +282,7 @@ def rebuild_brain(chunk_size: int):
     for idx, chunk_num in GPT.query.build(chunk_size):
         progress_bar.progress((idx + 1) / chunk_num)
     msg.success(_('Brain Updated!'), icon="👍")
-    time.sleep(2)
+    time.sleep(1)

 def execute_brain(q, params: GPT.model.param,
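`rebuild_brain` drives a progress bar from the `(index, total)` pairs the build generator yields; the only change here is trimming the post-success pause from 2 s to 1 s. A runnable sketch with a dummy generator in place of `GPT.query.build`:

```python
import time
import streamlit as st

def build(chunk_size):
    """Dummy generator standing in for GPT.query.build."""
    total = 5  # pretend the text splits into 5 chunks of `chunk_size`
    for idx in range(total):
        time.sleep(0.1)  # pretend to embed one chunk
        yield idx, total

progress_bar = st.progress(0)
for idx, chunk_num in build(chunk_size=500):
    progress_bar.progress((idx + 1) / chunk_num)  # fraction in [0, 1]
st.success('Brain Updated!', icon="👍")
```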
@@ -318,7 +320,8 @@ def execute_brain(q, params: GPT.model.param,
             break
         # error handling
         if choice['finish_reason'] == 'length':
-            st.warning("⚠️ " + _('Result cut off. max_tokens') + f' ({params.max_tokens}) ' + _('too small. Consider increasing max_tokens.'))
+            st.warning("⚠️ " + _('Result cut off. max_tokens') + f' ({params.max_tokens}) ' + _(
+                'too small. Consider increasing max_tokens.'))
             break
         if 'gpt-3.5-turbo' in model.question_model:
             delta = choice['delta']
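The trailing context hints at the other branch of the stream handler: chat models (`gpt-3.5-turbo`) deliver streamed text under `choice['delta']`, while older completion models use `choice['text']`. A minimal sketch of that dispatch (the helper name is hypothetical):

```python
def extract_text(choice: dict, model: str) -> str:
    if 'gpt-3.5-turbo' in model:
        return choice['delta'].get('content', '')  # chat stream chunk
    return choice.get('text', '')                  # completion stream chunk

assert extract_text({'delta': {'content': 'hi'}}, 'gpt-3.5-turbo') == 'hi'
assert extract_text({'text': 'hi'}, 'text-davinci-003') == 'hi'
```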
