refactored for clarity

pull/3/head
sean1832 1 year ago
parent c4c41e054a
commit 0e26c7edc5

@ -8,6 +8,7 @@ openai.api_key = util.read_file(r'.user\API-KEYS.txt').strip()
BRAIN_DATA = util.read_json_file(r'.user\brain-data.json')
prompt_dir = '.user/prompt'
def build(chunk_size=4000):
all_text = util.read_file(r'.user\input.txt')
@ -18,12 +19,13 @@ def build(chunk_size=4000):
for chunk in chunks:
embedding = gpt.embedding(chunk.encode(encoding='ASCII', errors='ignore').decode())
info = {'content':chunk, 'vector':embedding}
info = {'content': chunk, 'vector': embedding}
print(info, '\n\n\n')
result.append(info)
util.write_json_file(result, r'.user\brain-data.json')
def run_answer(query, model, temp, max_tokens, top_p, freq_penl, pres_penl, chunk_count):
results = gpt.search_chunks(query, BRAIN_DATA, chunk_count)
answers = []
@ -42,6 +44,7 @@ def run_answer(query, model, temp, max_tokens, top_p, freq_penl, pres_penl, chun
# print('\n\n============ANSWER============\n\n', all_answers)
return all_answers
def run_summary(query, model, temp, max_tokens, top_p, freq_penl, pres_penl):
chunks = textwrap.wrap(query, 10000)
summaries = []
@ -51,4 +54,4 @@ def run_summary(query, model, temp, max_tokens, top_p, freq_penl, pres_penl):
summaries.append(summary)
all_summary = '\n\n'.join(summaries)
# print('\n\n============SUMMARY============\n\n', all_summary)
return all_summary
return all_summary

Loading…
Cancel
Save