Merge pull request #10 from sean1832/patch-only-davinci-model-works

Patch #9.
Zeke Zhang 1 year ago committed by GitHub
commit bdd664b4f4

@@ -1,5 +1,4 @@
import os
import time
import streamlit as st
import streamlit_toggle as st_toggle
@@ -60,11 +59,16 @@ with st.sidebar:
help=_('What sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the '
'output more random, while lower values like 0.2 will make it more focused and '
'deterministic. \n\nIt is generally recommended to alter this or `top_p`, but not both.'))
- max_tokens = st.slider(_('Max Tokens'), 850, 4096, value=util.read_json_at(INFO.BRAIN_MEMO, 'max_tokens', 1000),
+ max_tokens = st.slider(_('Max Tokens'), 10, 4096, value=util.read_json_at(INFO.BRAIN_MEMO, 'max_tokens', 1000),
help=_("The maximum number of tokens to generate in the completion.\n\nThe token count of "
"your prompt plus `max_tokens` cannot exceed the model's context length. Most "
"models have a context length of 2048 tokens (except for the newest models, "
"which support 4096)."))
+ chunk_size = st.slider(_('Chunk size'), 1500, 4500,
+ value=util.read_json_at(INFO.BRAIN_MEMO, 'chunk_size', 4000),
+ help=_("The number of tokens to consider at each step. The larger this is, the more "
+ "context the model has to work with, but the slower and more expensive "
+ "generation will be."))
with st.expander(label=_('Advanced Options')):
top_p = st.slider(_('Top_P'), 0.0, 1.0, value=util.read_json_at(INFO.BRAIN_MEMO, 'top_p', 1.0),
@@ -84,15 +88,9 @@ with st.sidebar:
"new tokens based on their existing frequency in the text so far."
"\n\n[See more information about frequency and presence penalties.]"
"(https://platform.openai.com/docs/api-reference/parameter-details)"))
- chunk_size = st.slider(_('Chunk size'), 1500, 4500,
- value=util.read_json_at(INFO.BRAIN_MEMO, 'chunk_size', 4000),
- help=_("The number of tokens to consider at each step. The larger this is, the more "
- "context the model has to work with, but the slower and more expensive "
- "generation will be."))
enable_stream = st_toggle.st_toggle_switch(_('Stream (experimental)'),
default_value=util.read_json_at(INFO.BRAIN_MEMO, 'enable_stream',
- True))
+ False))
if not enable_stream:
chunk_count = st.slider(_('Answer count'), 1, 5, value=util.read_json_at(INFO.BRAIN_MEMO, 'chunk_count', 1),

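Every slider in the hunk above follows the same pattern: the widget's starting value is read back from a JSON memo file, so the last-used setting survives a page reload. Below is a minimal self-contained sketch of that pattern with a stand-in read_json_at and a hypothetical memo path (the real helper lives in modules.utilities):

    import json
    import os

    import streamlit as st

    BRAIN_MEMO = '.user/brain-memo.json'  # hypothetical path, for this sketch only

    def read_json_at(path, key, default=None):
        # Stand-in for util.read_json_at: return memo[key] when the memo
        # file and key exist, otherwise fall back to the given default.
        if os.path.exists(path):
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f).get(key, default)
        return default

    # Lowering the minimum from 850 to 10 lets very short completions be
    # requested; the persisted value seeds the slider's starting position.
    max_tokens = st.slider('Max Tokens', 10, 4096,
                           value=read_json_at(BRAIN_MEMO, 'max_tokens', 1000))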
@@ -1,6 +1,7 @@
import os
import time
import modules.utilities as util
+ import modules as mod
file_path = r'.user\input.txt'
temp_file = r'.user\input_last-run.temp'
@@ -10,7 +11,7 @@ def compare_time(t1, t2):
return t1 == t2
- def isUpdated():
+ def is_input_updated():
if os.path.exists(file_path):
# get modification time of the file
mod_time = os.path.getmtime(file_path)
@@ -35,3 +36,9 @@ def isUpdated():
return True
else:
raise FileNotFoundError(f'File: {file_path} does not exist.')
+ def is_param_updated(param_val, param_infile_key):
+ infile_val = util.read_json_at(mod.INFO.BRAIN_MEMO, param_infile_key)
+ if infile_val != param_val:
+ return True

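Note that the new is_param_updated only returns True explicitly; when the stored and current values match it falls through and returns None, which is falsy, so using it directly in an if condition still behaves as intended. A self-contained sketch of the same check (the memo path and the read_json_at stand-in are assumptions; the real helper is util.read_json_at):

    import json
    import os

    MEMO_PATH = '.user/brain-memo.json'  # hypothetical memo location

    def read_json_at(path, key, default=None):
        # Stand-in for util.read_json_at from modules.utilities.
        if os.path.exists(path):
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f).get(key, default)
        return default

    def is_param_updated(param_val, param_infile_key):
        # Same comparison as the added helper, with an explicit False
        # instead of the implicit None on the unchanged path.
        return read_json_at(MEMO_PATH, param_infile_key) != param_val

    if is_param_updated(4000, 'chunk_size'):
        print('chunk_size changed since last run; the brain needs a rebuild.')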
@@ -14,10 +14,6 @@ PROMPT_PATH = f'{INFO.USER_DIR}/prompt/{SESSION_LANG}/'
_ = language.set_language()
- # st.set_page_config(
- # page_title='Configs'
- # )
body = st.container()

@@ -62,15 +62,7 @@ def save(content, path, page='', json_value: dict = None):
if page == '💽Brain Memory':
for key, value in json_value.items():
util.update_json(INFO.BRAIN_MEMO, key, value)
- #
- #
- # util.update_json(INFO.BRAIN_MEMO, 'delimiter', json_value['delimiter'])
- # util.update_json(INFO.BRAIN_MEMO, 'append_mode', json_value['append_mode'])
- # util.update_json(INFO.BRAIN_MEMO, 'force_mode', json_value['force_mode'])
- # util.update_json(INFO.BRAIN_MEMO, 'advanced_mode', json_value['advanced_mode'])
- # util.update_json(INFO.BRAIN_MEMO, 'filter_info', json_value['filter_info'])
- # util.update_json(INFO.BRAIN_MEMO, 'filter_row_count', json_value['filter_row_count'])
- # util.update_json(INFO.BRAIN_MEMO, 'exclude_dir', json_value['exclude_dir'])
time.sleep(1)
# refresh page
st.experimental_rerun()
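The surviving loop generalizes what the deleted comment block did one key at a time: every entry in json_value is written back through util.update_json. A sketch of what such a read-modify-write helper typically looks like (this version is an assumption; the real one lives in modules.utilities):

    import json
    import os

    def update_json(path, key, value):
        # Load the existing memo (if any), set one key, write it back.
        data = {}
        if os.path.exists(path):
            with open(path, 'r', encoding='utf-8') as f:
                data = json.load(f)
        data[key] = value
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2)

    # Persist every widget value in one pass, as the new loop does:
    json_value = {'max_tokens': 1000, 'chunk_size': 4000, 'enable_stream': False}
    for key, value in json_value.items():
        update_json('.user/brain-memo.json', key, value)  # hypothetical path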
@@ -270,7 +262,7 @@ def execute_brain(q, params: GPT.model.param,
# log question
log(f'\n\n\n\n[{str(time.ctime())}] - QUESTION: {q}')
- if mod.check_update.isUpdated():
+ if mod.check_update.is_input_updated() or mod.check_update.is_param_updated(params.chunk_size, 'chunk_size'):
msg = st.warning(_('Updating Brain...'), icon="")
progress_bar = st.progress(0)
for idx, chunk_num in GPT.query.build(params.chunk_size):

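The widened condition means a chunk_size change now forces a rebuild even when input.txt itself is untouched. The rebuild loop unpacks (idx, chunk_num) pairs, which suggests GPT.query.build yields a running index alongside the total so the progress bar can be advanced as chunks are processed; a sketch of that shape with a stand-in generator (the yield contract is an assumption):

    import time

    import streamlit as st

    def build(chunk_size):
        # Stand-in for GPT.query.build: pretend the input splits into a
        # few chunks and yield (index, total) after each one is processed.
        total = 5
        for idx in range(1, total + 1):
            time.sleep(0.1)  # simulate per-chunk embedding work
            yield idx, total

    progress_bar = st.progress(0)
    for idx, chunk_num in build(4000):
        # st.progress accepts a float in [0.0, 1.0].
        progress_bar.progress(idx / chunk_num)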