feat: Customizable operation output type

Added the ability to build the list of output operations dynamically from the files in the prompt directory: each prompt file becomes a selectable operation with its own model. This makes the program's output more flexible and configurable.
pull/3/head
sean1832 1 year ago
parent c9ada48782
commit e093b1ffe3
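
For context on how the dynamic operation mapping in this commit works: each file in the prompt directory (`.user/prompt` here) becomes a selectable operation, with the file name as the operation label and the file body as a prompt template whose `<<DATA>>` placeholder is filled with the query at run time. The snippet below is a minimal standalone sketch of that idea, not the project's actual code; `fake_completion` is a stand-in for the real `gpt.gpt3` call, and the chunking width mirrors the `textwrap.wrap(query, 10000)` used in `brain.run`.

import glob
import os
import textwrap


def scan_prompt_dir(directory: str) -> dict:
    """Map operation names (file name without extension) to prompt file paths."""
    files = glob.glob(f'{directory}/*')
    return {os.path.splitext(os.path.basename(f))[0]: f for f in files}


def fake_completion(prompt: str) -> str:
    """Stand-in for the real gpt.gpt3(...) completion call."""
    return f'[model output for a {len(prompt)}-character prompt]'


def run_operation(query: str, prompt_file: str, chunk_width: int = 10000) -> str:
    """Fill the <<DATA>> placeholder with each chunk of the query and join the responses."""
    with open(prompt_file, encoding='utf-8') as f:
        template = f.read()
    responses = []
    for chunk in textwrap.wrap(query, chunk_width):
        prompt = template.replace('<<DATA>>', chunk)
        responses.append(fake_completion(prompt))
    return '\n\n'.join(responses)


if __name__ == '__main__':
    # every prompt file is an operation, except the personal-info file
    prompt_dictionary = scan_prompt_dir('.user/prompt')
    prompt_dictionary.pop('my-info', None)
    for name, path in prompt_dictionary.items():
        print(f'== {name} ==')
        print(run_operation('some answer text to post-process', path))
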

@@ -1,5 +1,6 @@
import streamlit as st
from modules import utilities as util
from modules import model_data
import brain
import check_update
import time
@@ -19,6 +20,7 @@ model_options = ['text-davinci-003', 'text-curie-001', 'text-babbage-001', 'text
header = st.container()
body = st.container()
LOG_PATH = '.user/log'
PROMPT_PATH = '.user/prompt'
SESSION_TIME = st.session_state['SESSION_TIME']
CURRENT_LOG_FILE = f'{LOG_PATH}/log_{SESSION_TIME}.log'
@@ -58,13 +60,43 @@ def save_as():
    )


def process_response(query, target_model, prompt_file: str, data: model_data.param):
    # run the selected prompt file against its model and display the result
    file_name = util.get_file_name(prompt_file)
    print(f'Processing {file_name}...')
    with st.spinner(f'Thinking on {file_name}...'):
        results = brain.run(query, target_model, prompt_file,
                            data.temp,
                            data.max_tokens,
                            data.top_p,
                            data.frequency_penalty,
                            data.present_penalty)
        # displaying results
        st.header(f'📃{file_name}')
        st.success(results)
        time.sleep(1)
        log(results, delimiter=f'{file_name.upper()}')
# sidebar
with st.sidebar:
    st.title('Settings')
    output_types = st.multiselect('Output Types', ['Answer', 'Summary'], default=['Answer'])
    answer_model = st.selectbox('Answer Model', model_options)
    if util.contains(output_types, 'Summary'):
        summary_model = st.selectbox('Summary Model', model_options)

    prompt_files = util.scan_directory(PROMPT_PATH)
    prompt_file_names = [util.get_file_name(file) for file in prompt_files]
    prompt_dictionary = dict(zip(prompt_file_names, prompt_files))
    # remove 'my-info' from prompt dictionary
    prompt_dictionary.pop('my-info')

    operations = st.multiselect('Operations', list(prompt_dictionary.keys()), default=list(prompt_dictionary.keys())[0])

    other_models = []
    question_model = st.selectbox('Question Model', model_options)
    operations_no_question = [op for op in operations if op != 'question']
    for operation in operations_no_question:
        model = st.selectbox(f'{operation} Model', model_options)
        other_models.append(model)

    temp = st.slider('Temperature', 0.0, 1.0, value=0.1)
    max_tokens = st.slider('Max Tokens', 850, 4500, value=1000)
@@ -75,6 +107,14 @@ with st.sidebar:
    chunk_size = st.slider('Chunk Size', 1500, 4500, value=4000)
    chunk_count = st.slider('Answer Count', 1, 5, value=1)

    param = model_data.param(temp=temp,
                             max_tokens=max_tokens,
                             top_p=top_p,
                             frequency_penalty=freq_panl,
                             present_penalty=pres_panl,
                             chunk_size=chunk_size,
                             chunk_count=chunk_count)

    if st.button('Clear Log', on_click=clear_log):
        st.success('Log Cleared')
with header:
@@ -95,23 +135,33 @@ def execute_brain(q):
    # thinking on answer
    with st.spinner('Thinking on Answer'):
        answer = brain.run_answer(q, answer_model, temp, max_tokens, top_p, freq_panl, pres_panl,
        answer = brain.run_answer(q, question_model, temp, max_tokens, top_p, freq_panl, pres_panl,
                                  chunk_count=chunk_count)
    if util.contains(output_types, 'Answer'):
    if util.contains(operations, 'question'):
        # displaying results
        st.header('💬Answer')
        st.success(answer)
        time.sleep(1)
        log(answer, delimiter='ANSWER')

    # thinking on summary
    if util.contains(output_types, 'Summary'):
        with st.spinner('Thinking on Summary'):
            time.sleep(2)
            summary = brain.run_summary(answer, summary_model, temp, max_tokens, top_p, freq_panl, pres_panl)
            # displaying results
            st.header('📃Summary')
            st.success(summary)
            log(summary, delimiter='SUMMARY')

    # thinking on other outputs
    if len(operations_no_question) > 0:
        for i in range(len(operations_no_question)):
            prompt_path = prompt_dictionary[operations_no_question[i]]
            other_model = other_models[i]
            process_response(answer, other_model, prompt_path, param)

    # # thinking on summary
    # if util.contains(output_types, 'Summary'):
    #     with st.spinner('Thinking on Summary'):
    #         summary = brain.run_summary(answer, summary_model, temp, max_tokens, top_p, freq_panl, pres_panl)
    #         # displaying results
    #         st.header('📃Summary')
    #         st.success(summary)
    #         log(summary, delimiter='SUMMARY')
# main

@@ -44,17 +44,15 @@ def run_answer(query, model, temp, max_tokens, top_p, freq_penl, pres_penl, chun
        answers.append(answer)
    all_answers = '\n\n'.join(answers)
    # print('\n\n============ANSWER============\n\n', all_answers)
    return all_answers


def run_summary(query, model, temp, max_tokens, top_p, freq_penl, pres_penl):
def run(query, model, prompt_file, temp, max_tokens, top_p, freq_penl, pres_penl):
    chunks = textwrap.wrap(query, 10000)
    summaries = []
    responses = []
    for chunk in chunks:
        prompt = util.read_file(f'{prompt_dir}/summarize.txt').replace('<<SUM>>', chunk)
        summary = gpt.gpt3(prompt, model, temp, max_tokens, top_p, freq_penl, pres_penl)
        summaries.append(summary)
    all_summary = '\n\n'.join(summaries)
    # print('\n\n============SUMMRY============\n\n', all_summary)
    return all_summary
        prompt = util.read_file(prompt_file).replace('<<DATA>>', chunk)
        response = gpt.gpt3(prompt, model, temp, max_tokens, top_p, freq_penl, pres_penl)
        responses.append(response)
    all_response = '\n\n'.join(responses)
    return all_response

@@ -1,6 +1,6 @@
Write notes about the following document. Summarize the text into clear, detailed dot points that are easy to read and make sense.
Include the most important data.
<<SUM>>
<<DATA>>
NOTES:

@@ -0,0 +1,9 @@
class param:
    def __init__(self, temp, max_tokens, top_p, frequency_penalty, present_penalty, chunk_count, chunk_size):
        self.temp = temp
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.frequency_penalty = frequency_penalty
        self.present_penalty = present_penalty
        self.chunk_count = chunk_count
        self.chunk_size = chunk_size

@@ -0,0 +1,6 @@
class prompt:
    def __init__(self, name, path, model):
        self.name = name
        self.path = path
        self.model = model

@@ -29,12 +29,22 @@ def extract_string(text, delimiter, force=False, join=True, split_mode=False):
def remove_oldest_file(directory, max_files):
    files = glob.glob(f'{directory}/*')
    files = scan_directory(directory)
    if len(files) >= max_files:
        oldest_file = min(files, key=os.path.getctime)
        os.remove(oldest_file)


def scan_directory(directory):
    files = glob.glob(f'{directory}/*')
    return files


# get file name without extension
def get_file_name(filepath):
    return os.path.splitext(os.path.basename(filepath))[0]


def create_path_not_exist(path):
    directory = os.path.dirname(path)
    if not os.path.exists(directory):
