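# Seanium Brain - Streamlit front end.
# Reads prompt templates from .user/prompt, queries the brain module with the models
# selected in the sidebar, and writes each session's output to a log under .user/log.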
import streamlit as st
from modules import utilities as util
from modules import model_data
import brain
import check_update
import time
import os

# activate session
if 'SESSION_TIME' not in st.session_state:
    st.session_state['SESSION_TIME'] = time.strftime("%Y%m%d-%H%M%S")

st.set_page_config(
    page_title='Seanium Brain'
)

util.remove_oldest_file('.user/log', 10)

model_options = ['text-davinci-003', 'text-curie-001', 'text-babbage-001', 'text-ada-001']
header = st.container()
body = st.container()
LOG_PATH = '.user/log'
PROMPT_PATH = '.user/prompt'
SESSION_TIME = st.session_state['SESSION_TIME']
CURRENT_LOG_FILE = f'{LOG_PATH}/log_{SESSION_TIME}.log'
BRAIN_MEMO = '.user/brain-memo.json'
MANIFEST = '.core/manifest.json'


def create_log():
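    # create the log file for this session if it does not exist and return its path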
    if not os.path.exists(CURRENT_LOG_FILE):
        util.write_file(f'Session {SESSION_TIME}\n\n', CURRENT_LOG_FILE)
    return CURRENT_LOG_FILE


def log(content, delimiter=''):
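    # append content to the current session log, optionally under a delimiter banner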
    log_file = create_log()
    if delimiter != '':
        delimiter = f'\n\n=============={delimiter}==============\n'
    util.write_file(f'\n{delimiter + content}', log_file, 'a')


def clear_log():
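    # delete every log file except the one for the current session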
    log_file_name = f'log_{SESSION_TIME}.log'
    for root, dirs, files in os.walk(LOG_PATH):
        for file in files:
            if file != log_file_name:
                os.remove(os.path.join(root, file))


def save_as():
    # download log file
    with open(CURRENT_LOG_FILE, 'rb') as f:
        content = f.read()
    st.download_button(
        label="📥download log",
        data=content,
        file_name=f'log_{SESSION_TIME}.txt',
        mime='text/plain'
    )


def process_response(query, target_model, prompt_file: str, data: model_data.param):
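    # run the query against the target model with the given prompt file, then display and log the result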
    # resolve a display name for this prompt file
    file_name = util.get_file_name(prompt_file)
    print(f'Processing {file_name}...')
    with st.spinner(f'Thinking on {file_name}...'):
        results = brain.run(query, target_model, prompt_file,
                            data.temp,
                            data.max_tokens,
                            data.top_p,
                            data.frequency_penalty,
                            data.present_penalty)
        # displaying results
        st.header(f'📃{file_name}')
        st.info(f'{results}')
        time.sleep(1)
        log(results, delimiter=f'{file_name.upper()}')


# sidebar
with st.sidebar:
    st.title('Settings')

    prompt_files = util.scan_directory(PROMPT_PATH)
    prompt_file_names = [util.get_file_name(file) for file in prompt_files]
    prompt_dictionary = dict(zip(prompt_file_names, prompt_files))
    # remove 'my-info' from prompt dictionary (ignore it if that prompt is absent)
    prompt_dictionary.pop('my-info', None)

    operation_options = list(prompt_dictionary.keys())

    operations = st.multiselect('Operations', operation_options,
                                default=util.read_json_at(BRAIN_MEMO, 'operations', operation_options[0]))

    last_question_model = util.read_json_at(BRAIN_MEMO, 'question_model', model_options[0])
    # get index of last question model
    question_model_index = util.get_index(model_options, last_question_model)
    question_model = st.selectbox('Question Model', model_options, index=question_model_index)

    operations_no_question = [op for op in operations if op != 'question']
    other_models = []
    replace_tokens = []
    for operation in operations_no_question:
        last_model = util.read_json_at(BRAIN_MEMO, f'{operation}_model', model_options[0])
        # get index of last model
        model_index = util.get_index(model_options, last_model)
        model = st.selectbox(f'{operation} Model', model_options, index=model_index)
        other_models.append(model)

    temp = st.slider('Temperature', 0.0, 1.0, value=util.read_json_at(BRAIN_MEMO, 'temp', 0.1))
    max_tokens = st.slider('Max Tokens', 850, 4500, value=util.read_json_at(BRAIN_MEMO, 'max_tokens', 1000))

    with st.expander(label='Advanced Options'):
        top_p = st.slider('Top_P', 0.0, 1.0, value=util.read_json_at(BRAIN_MEMO, 'top_p', 1.0))
        freq_panl = st.slider('Frequency penalty', 0.0, 1.0,
                              value=util.read_json_at(BRAIN_MEMO, 'frequency_penalty', 0.0))
        pres_panl = st.slider('Presence penalty', 0.0, 1.0,
                              value=util.read_json_at(BRAIN_MEMO, 'present_penalty', 0.0))

        chunk_size = st.slider('Chunk Size', 1500, 4500, value=util.read_json_at(BRAIN_MEMO, 'chunk_size', 4000))
        chunk_count = st.slider('Answer Count', 1, 5, value=util.read_json_at(BRAIN_MEMO, 'chunk_count', 1))

    param = model_data.param(temp=temp,
                             max_tokens=max_tokens,
                             top_p=top_p,
                             frequency_penalty=freq_panl,
                             present_penalty=pres_panl,
                             chunk_size=chunk_size,
                             chunk_count=chunk_count)

    if st.button('Clear Log', on_click=clear_log):
        st.success('Log Cleared')

    # info
    st.markdown('---')
    st.markdown(f"# {util.read_json_at(MANIFEST, 'name')}")
    st.markdown(f"version: {util.read_json_at(MANIFEST, 'version')}")
    st.markdown(f"author: {util.read_json_at(MANIFEST, 'author')}")
    st.markdown(f"[Report bugs]({util.read_json_at(MANIFEST, 'bugs')})")
    st.markdown(f"[Github Repo]({util.read_json_at(MANIFEST, 'homepage')})")

with header:
    st.title('🧠Seanium Brain')
    st.text('This is my personal AI-powered brain, fed with my own Obsidian notes. Ask anything.')


def execute_brain(q):
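    # rebuild the brain if the notes changed, answer the question, run the selected operations and persist settings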
    # log question
    log(f'\n\n\n\n[{str(time.ctime())}] - QUESTION: {q}')

    # if brain-info is updated, rebuild the brain
    if check_update.isUpdated():
        st.success('Building Brain...')
        brain.build(chunk_size)
        st.success('Brain rebuilt!')
        time.sleep(2)

    # thinking on answer
    with st.spinner('Thinking on Answer'):
        answer = brain.run_answer(q, question_model, temp, max_tokens, top_p, freq_panl, pres_panl,
                                  chunk_count=chunk_count)
        if util.contains(operations, 'question'):
            # displaying results
            st.header('💬Answer')
            st.info(f'{answer}')
            time.sleep(1)
            log(answer, delimiter='ANSWER')

    # thinking on other outputs
    if len(operations_no_question) > 0:
        for i in range(len(operations_no_question)):
            prompt_path = prompt_dictionary[operations_no_question[i]]
            other_model = other_models[i]
            process_response(answer, other_model, prompt_path, param)

    # convert param to dictionary
    param_dict = vars(param)

    # write param to json
    for key in param_dict:
        value = param_dict[key]
        util.update_json(BRAIN_MEMO, key, value)

    # write operation to json
    util.update_json(BRAIN_MEMO, 'operations', operations)

    # write question model to json
    util.update_json(BRAIN_MEMO, 'question_model', question_model)

    # write other models to json
    for i in range(len(operations_no_question)):
        util.update_json(BRAIN_MEMO, f'{operations_no_question[i]}_model', other_models[i])


# main
with body:
    question = st.text_area('Ask Brain: ')
    col1, col2, col3, col4 = st.columns(4)
    with col1:
        send = st.button('📩Send')
    with col2:
        if os.path.exists(CURRENT_LOG_FILE):
            save_as()

    # execute brain calculation
    if send and question != '':
        execute_brain(question)