mirror of https://github.com/sean1832/GPT-Brain
BREAKING CHANGE: Remove Console Apps
parent
0c922b3158
commit
f40abacc3e
@ -1,73 +0,0 @@
import openai
import numpy as np
import textwrap
import utilities

openai.api_key = utilities.open_file(r'.user\API-KEYS.txt').strip()
BRAIN_DATA = utilities.read_json_file(r'.user\brain-data.json')


# Compare the similarity between two vectors.
# The higher the dot product, the more alike the two vectors are.
def similarity(v1, v2):
    return np.dot(v1, v2)


def search_chunks(text, data, count=1):
    vector = utilities.embedding(text)
    points = []

    for item in data:
        # compare the search term with brain-data
        point = similarity(vector, item['vector'])
        points.append({
            'content': item['content'],
            'point': point
        })
    # sort points in descending order
    ordered = sorted(points, key=lambda d: d['point'], reverse=True)

    return ordered[0:count]


def gpt3(prompt, model='text-davinci-003'):
    response = openai.Completion.create(
        model=model,
        prompt=prompt,
        temperature=0.1,
        max_tokens=1000,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    text = response['choices'][0]['text'].strip()
    return text


def main():
    while True:
        query = input('\n\nAsk brain: ')
        results = search_chunks(query, BRAIN_DATA)
        answers = []
        answers_count = 0
        for result in results:
            my_info = utilities.open_file(r'prompt\my-info.txt')

            prompt = utilities.open_file(r'prompt\question.txt')
            prompt = prompt.replace('<<INFO>>', result['content'])
            prompt = prompt.replace('<<QS>>', query)
            prompt = prompt.replace('<<MY-INFO>>', my_info)

            answer = gpt3(prompt, model='text-davinci-003')
            answers.append(answer)
            answers_count += 1

        all_answers = '\n\n'.join(answers)
        print('\n\n============ANSWER============\n\n', all_answers)

        chunks = textwrap.wrap(all_answers, 10000)
        end = []
        for chunk in chunks:
            prompt = utilities.open_file(r'prompt\summarize.txt').replace('<<SUM>>', chunk)
            summary = gpt3(prompt, model='text-curie-001')
            end.append(summary)
        print('\n\n============SUMMARY============\n\n', '\n\n'.join(end))


if __name__ == '__main__':
    main()
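Side note: the raw dot product in search_chunks works as a stand-in for cosine similarity because OpenAI's text-embedding-ada-002 vectors are normalized to unit length. A minimal sketch with made-up two-dimensional vectors in place of real embeddings:

import numpy as np

# Hypothetical unit-length vectors standing in for real embeddings.
v1 = np.array([0.6, 0.8])
v2 = np.array([0.8, 0.6])

# For unit vectors the dot product equals the cosine of the angle
# between them: 1.0 means identical direction, 0.0 means unrelated.
print(np.dot(v1, v2))  # 0.96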
@ -1,25 +0,0 @@
import openai
import textwrap
import utilities


openai.api_key = utilities.open_file(r'.user\API-KEYS.txt').strip()


def main():
    all_text = utilities.open_file(r'.user\input.txt')

    # split text into smaller chunks of 4000 characters each
    chunks = textwrap.wrap(all_text, 4000)

    result = []

    for chunk in chunks:
        # strip non-ASCII characters before requesting the embedding
        embedding = utilities.embedding(chunk.encode(encoding='ASCII', errors='ignore').decode())
        info = {'content': chunk, 'vector': embedding}
        print(info, '\n\n\n')
        result.append(info)

    utilities.write_json_file(result, r'.user\brain-data.json')


if __name__ == '__main__':
    main()
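For context, each record pairs the raw chunk with its embedding, so .user\brain-data.json ends up as a plain JSON list of such records. A minimal sketch of the shape, with made-up values and the vector truncated:

# One hypothetical entry in brain-data.json (real vectors have 1536 floats).
record = {
    'content': 'first 4000-character chunk of input.txt ...',
    'vector': [0.0123, -0.0456, 0.0789]
}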
@ -1,44 +0,0 @@
import os
import time
import utilities


file_path = r'.user\input.txt'
temp_file = r'.user\input_last-run.temp'
sig_file = r'.user\input_sig.temp'


def compare_time(t1, t2):
    return t1 == t2


def write_sig(status):
    utilities.write_file(status, sig_file)


def check():
    if os.path.exists(file_path):
        # get the modification time of the file
        mod_time = os.path.getmtime(file_path)

        # convert the modification time to a readable format
        read_mod_time = time.ctime(mod_time)

        if os.path.exists(temp_file):
            temp_info = utilities.open_file(temp_file)
            if compare_time(read_mod_time, temp_info):
                write_sig('not updated')
                print('File has not been updated.')
            else:
                print('File has been updated.')
                utilities.write_file(read_mod_time, temp_file)
                write_sig('updated')
        else:
            print('Temp file does not exist, writing temp file...')
            # write to temp file
            utilities.write_file(read_mod_time, temp_file)
            write_sig('not updated')
    else:
        raise FileNotFoundError(f'File: {file_path} does not exist.')


def main():
    check()


if __name__ == '__main__':
    main()
@ -1,27 +0,0 @@
@echo off
cd ..
echo Activating virtual environment...
call .\venv\Scripts\activate

rem check if input.txt has been updated
python console_app\check_update.py

setlocal enabledelayedexpansion
set "tempFile=.user\input_sig.temp"

for /f "usebackq delims=" %%a in ("%tempFile%") do (
    set "tempValue=%%a"
)

if "%tempValue%" == "not updated" (
    goto end
) else (
    call batch-programs\run-build-brain.bat
    cls
    echo Brain updated!
)


:end
echo Running brain...
python console_app\brain.py
@ -1,24 +0,0 @@
import json
import openai


def open_file(filepath):
    with open(filepath, 'r', encoding='utf-8') as file:
        return file.read()


def write_file(content, filepath):
    with open(filepath, 'w') as file:
        file.write(content)


def write_json_file(content, filepath):
    with open(filepath, 'w') as file:
        json.dump(content, file, indent=2)


def read_json_file(filepath):
    with open(filepath, 'r') as file:
        return json.load(file)


# return the embedding vector (a list of floats) for the given content
def embedding(content, engine='text-embedding-ada-002'):
    response = openai.Embedding.create(input=content, engine=engine)
    vector = response['data'][0]['embedding']
    return vector
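Assuming a valid API key has been loaded, the embedding helper can be exercised on its own; a minimal sketch:

import utilities

# text-embedding-ada-002 returns a 1536-dimensional vector.
vec = utilities.embedding('hello world')
print(len(vec))  # 1536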