@@ -1,5 +1,6 @@
import streamlit as st
from modules import utilities as util
from modules import model_data
import brain
import check_update
import time
@@ -19,6 +20,7 @@ model_options = ['text-davinci-003', 'text-curie-001', 'text-babbage-001', 'text
# Top-level Streamlit layout containers and session-scoped file paths.
header = st . container ( )
body = st . container ( )
# NOTE(review): the string literals below carry stray spaces from extraction
# mangling; the originals were presumably '.user/log' and '.user/prompt' —
# confirm against the repository before relying on these bytes.
LOG_PATH = ' .user/log '
PROMPT_PATH = ' .user/prompt '
# Assumes 'SESSION_TIME' was seeded into st.session_state before this module
# section runs — TODO confirm where it is initialized.
SESSION_TIME = st . session_state [ ' SESSION_TIME ' ]
# Per-session log file, named by the session start time.
CURRENT_LOG_FILE = f ' { LOG_PATH } /log_ { SESSION_TIME } .log '
@@ -58,13 +60,43 @@ def save_as():
)
# Run `query` through brain.run with the prompt template `prompt_file` and the
# sampling parameters bundled in `data`, then render the result in Streamlit
# and append it to the session log.
def process_response ( query , target_model , prompt_file : str , data : model_data . param ) :
# NOTE(review): the comment below describes an exclude-model check that is not
# implemented anywhere in the visible body — confirm whether it was dropped
# intentionally or is still pending.
# check if exclude model is not target model
file_name = util . get_file_name ( prompt_file )
print ( f ' Processing { file_name } ... ' )
# Spinner is shown while the model call is in flight.
with st . spinner ( f ' Thinking on { file_name } ... ' ) :
results = brain . run ( query , target_model , prompt_file ,
data . temp ,
data . max_tokens ,
data . top_p ,
data . frequency_penalty ,
data . present_penalty )
# displaying results
st . header ( f ' 📃 { file_name } ' )
st . success ( results )
# Brief pause — presumably a UI pacing workaround so successive result panels
# render in order; verify it is still needed.
time . sleep ( 1 )
# Log entry is delimited by the upper-cased prompt file name.
log ( results , delimiter = f ' { file_name . upper ( ) } ' )
# sidebar
# Sidebar: user-configurable output types, model choices per operation, and
# sampling parameters.
with st . sidebar :
st . title ( ' Settings ' )
output_types = st . multiselect ( ' Output Types ' , [ ' Answer ' , ' Summary ' ] , default = [ ' Answer ' ] )
answer_model = st . selectbox ( ' Answer Model ' , model_options )
# The summary model selector only appears when 'Summary' output is requested.
# NOTE(review): `summary_model` is therefore undefined when 'Summary' is not
# selected — any later unconditional use would raise NameError; verify that
# every downstream use is guarded the same way.
if util . contains ( output_types , ' Summary ' ) :
summary_model = st . selectbox ( ' Summary Model ' , model_options )
# Map prompt display names -> prompt file paths found under PROMPT_PATH.
prompt_files = util . scan_directory ( PROMPT_PATH )
prompt_file_names = [ util . get_file_name ( file ) for file in prompt_files ]
prompt_dictionary = dict ( zip ( prompt_file_names , prompt_files ) )
# remove 'my-info' from prompt dictionary
# NOTE(review): pop without a default raises KeyError if no 'my-info' prompt
# file exists — consider pop with a None default.
prompt_dictionary . pop ( ' my-info ' )
operations = st . multiselect ( ' Operations ' , list ( prompt_dictionary . keys ( ) ) , default = list ( prompt_dictionary . keys ( ) ) [ 0 ] )
other_models = [ ]
question_model = st . selectbox ( ' Question Model ' , model_options )
# One model selector per non-'question' operation; `other_models` stays in
# parallel with `operations_no_question` by index.
operations_no_question = [ op for op in operations if op != ' question ' ]
for operation in operations_no_question :
model = st . selectbox ( f ' { operation } Model ' , model_options )
other_models . append ( model )
temp = st . slider ( ' Temperature ' , 0.0 , 1.0 , value = 0.1 )
max_tokens = st . slider ( ' Max Tokens ' , 850 , 4500 , value = 1000 )
@@ -75,6 +107,14 @@ with st.sidebar:
chunk_size = st . slider ( ' Chunk Size ' , 1500 , 4500 , value = 4000 )
chunk_count = st . slider ( ' Answer Count ' , 1 , 5 , value = 1 )
# Bundle the sampling settings into a single model_data.param object that is
# passed through to the brain calls.
# NOTE(review): top_p / freq_panl / pres_panl are defined in lines elided by
# the hunk gap above and are not visible here — confirm they exist upstream.
param = model_data . param ( temp = temp ,
max_tokens = max_tokens ,
top_p = top_p ,
frequency_penalty = freq_panl ,
present_penalty = pres_panl ,
chunk_size = chunk_size ,
chunk_count = chunk_count )
# Clears the session log via the clear_log callback, then confirms to the user.
if st . button ( ' Clear Log ' , on_click = clear_log ) :
st . success ( ' Log Cleared ' )
with header :
@@ -95,23 +135,33 @@ def execute_brain(q):
# thinking on answer
with st . spinner ( ' Thinking on Answer ' ) :
- answer = brain . run_answer ( q , answer_model , temp , max_tokens , top_p , freq_panl , pres_panl ,
+ answer = brain . run_answer ( q , question_model , temp , max_tokens , top_p , freq_panl , pres_panl ,
chunk_count = chunk_count )
- if util . contains ( output_types , ' Answer ' ) :
+ if util . contains ( operations , ' question ' ) :
# displaying results
st . header ( ' 💬Answer ' )
st . success ( answer )
time . sleep ( 1 )
log ( answer , delimiter = ' ANSWER ' )
# thinking on summary
if util . contains ( output_types , ' Summary ' ) :
with st . spinner ( ' Thinking on Summary ' ) :
time . sleep ( 2 )
summary = brain . run_summary ( answer , summary_model , temp , max_tokens , top_p , freq_panl , pres_panl )
# displaying results
st . header ( ' 📃Summary ' )
st . success ( summary )
log ( summary , delimiter = ' SUMMARY ' )
# thinking on other outputs
if len ( operations_no_question ) > 0 :
for i in range ( len ( operations_no_question ) ) :
prompt_path = prompt_dictionary [ operations_no_question [ i ] ]
other_model = other_models [ i ]
process_response ( answer , other_model , prompt_path , param )
# # thinking on summary
# if util.contains(output_types, 'Summary'):
# with st.spinner('Thinking on Summary'):
# summary = brain.run_summary(answer, summary_model, temp, max_tokens, top_p, freq_panl, pres_panl)
# # displaying results
# st.header('📃Summary')
# st.success(summary)
# log(summary, delimiter='SUMMARY')
# main