mirror of https://github.com/sean1832/GPT-Brain
Merge pull request #13 from sean1832/Major_Dev
Major dev 1.0.019-keywords-added-as-filtering-conditions 1.0.0
commit
39e0ee9676
@ -1,3 +1,3 @@
|
||||
from GPT import query
|
||||
from GPT import toolkit
|
||||
from GPT import gpt_tools
|
||||
from GPT import model
|
@ -0,0 +1,94 @@
|
||||
import openai
|
||||
import numpy as np
|
||||
import requests
|
||||
import sseclient
|
||||
|
||||
|
||||
# Similarity score between two embedding vectors: the larger the dot
# product, the more alike the vectors (assumes comparable magnitudes,
# as is the case for OpenAI embedding vectors).
def similarity(v1, v2):
    """Return the dot product of *v1* and *v2* as a likeness score."""
    score = np.dot(v1, v2)
    return score
|
||||
|
||||
|
||||
# Request an embedding for *content* from the OpenAI API.
# NOTE(review): this returns a single vector (the first item of the
# response data), not a list of vectors as the old comment claimed.
def embedding(content, engine='text-embedding-ada-002'):
    """Embed *content* with *engine* and return its embedding vector."""
    api_response = openai.Embedding.create(input=content, engine=engine)
    return api_response['data'][0]['embedding']
|
||||
|
||||
|
||||
def search_chunks(query, data, count=1):
    """Embed *query* and return the *count* most similar entries of *data*.

    Each item of *data* is expected to provide 'vector' and 'content'
    keys; the result is a list of {'content', 'point'} dicts ordered
    best match first.
    """
    query_vector = embedding(query)

    # Score every stored chunk against the query embedding.
    scored = [
        {'content': item['content'],
         'point': similarity(query_vector, item['vector'])}
        for item in data
    ]

    # Highest similarity first, then keep only the top *count* results.
    scored.sort(key=lambda entry: entry['point'], reverse=True)
    return scored[:count]
|
||||
|
||||
|
||||
def gpt3(prompt, model, params):
    """Run a GPT-3 completion for *prompt* and return the stripped text.

    *params* supplies temp, max_tokens, top_p, frequency_penalty and
    present_penalty (forwarded as the API's presence_penalty).
    """
    api_response = openai.Completion.create(
        model=model,
        prompt=prompt,
        temperature=params.temp,
        max_tokens=params.max_tokens,
        top_p=params.top_p,
        frequency_penalty=params.frequency_penalty,
        presence_penalty=params.present_penalty
    )
    # The API wraps the completion in a choices list; take the first.
    return api_response['choices'][0]['text'].strip()
|
||||
|
||||
|
||||
def gpt35(prompt, params, system_role_content: str = 'You are a helpful assistant.'):
    """Send *prompt* to gpt-3.5-turbo and return the reply text.

    *params* supplies temp, max_tokens, top_p, frequency_penalty and
    present_penalty (forwarded as the API's presence_penalty); the
    system message is configurable via *system_role_content*.
    """
    chat_messages = [
        {"role": "system", "content": system_role_content},
        {"role": "user", "content": prompt}
    ]
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=params.max_tokens,
        temperature=params.temp,
        top_p=params.top_p,
        frequency_penalty=params.frequency_penalty,
        presence_penalty=params.present_penalty,
        messages=chat_messages)
    return response['choices'][0]['message']['content']
|
||||
|
||||
|
||||
def gpt3_stream(prompt, model, params):
    """Start a streaming GPT-3 completion and return the raw stream.

    Same parameters as gpt3(), but with stream=True; the caller
    iterates the returned response object to consume completion chunks.
    """
    return openai.Completion.create(
        model=model,
        stream=True,  # deliver chunks instead of waiting for full text
        prompt=prompt,
        temperature=params.temp,
        max_tokens=params.max_tokens,
        top_p=params.top_p,
        frequency_penalty=params.frequency_penalty,
        presence_penalty=params.present_penalty
    )
|
||||
|
||||
|
||||
def gpt35_stream(prompt, params, system_role_content: str = 'You are a helpful assistant.'):
    """Start a streaming gpt-3.5-turbo chat and return the raw stream.

    Builds the same two-message conversation as the non-streaming
    variant but passes stream=True; the caller iterates the returned
    object to consume incremental message deltas.
    """
    chat_messages = [
        {"role": "system", "content": system_role_content},
        {"role": "user", "content": prompt}
    ]
    return openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=params.max_tokens,
        temperature=params.temp,
        top_p=params.top_p,
        frequency_penalty=params.frequency_penalty,
        presence_penalty=params.present_penalty,
        stream=True,
        messages=chat_messages)
|
@ -1,71 +0,0 @@
|
||||
import openai
|
||||
import numpy as np
|
||||
import requests
|
||||
import sseclient
|
||||
|
||||
|
||||
# Compare similarity between two vectors: the higher the dot product,
# the more alike the vectors are.
def similarity(v1, v2):
    """Return the dot product of *v1* and *v2* as a similarity score."""
    return np.dot(v1, v2)
|
||||
|
||||
|
||||
# Fetch an embedding for *content* from the OpenAI API.
# NOTE(review): returns a single vector (first item of the response
# data), not a list of vectors as the old comment claimed.
def embedding(content, engine='text-embedding-ada-002'):
    """Return the embedding vector for *content* using *engine*."""
    response = openai.Embedding.create(input=content, engine=engine)
    vector = response['data'][0]['embedding']
    return vector
|
||||
|
||||
|
||||
def search_chunks(text, data, count=1):
    """Return the *count* entries of *data* most similar to *text*.

    Each item of *data* is expected to provide 'vector' and 'content'
    keys; results are {'content', 'point'} dicts, best match first.
    """
    vector = embedding(text)
    points = []

    for item in data:
        # Compare the search-term embedding with each stored brain-data
        # vector.
        point = similarity(vector, item['vector'])
        points.append({
            'content': item['content'],
            'point': point
        })
    # Sort points in descending order of similarity score.
    ordered = sorted(points, key=lambda d: d['point'], reverse=True)

    return ordered[0:count]
|
||||
|
||||
|
||||
def gpt3(prompt, model, params):
    """Run a GPT-3 completion for *prompt* and return the stripped text.

    *params* supplies temp, max_tokens, top_p, frequency_penalty and
    present_penalty (forwarded as the API's presence_penalty).
    """
    response = openai.Completion.create(
        model=model,
        prompt=prompt,
        temperature=params.temp,
        max_tokens=params.max_tokens,
        top_p=params.top_p,
        frequency_penalty=params.frequency_penalty,
        presence_penalty=params.present_penalty
    )
    # The API wraps the completion in a choices list; take the first.
    text = response['choices'][0]['text'].strip()
    return text
|
||||
|
||||
|
||||
def gpt3_stream(API_KEY, prompt, model, params):
    """Stream a GPT-3 completion over SSE and return the event client.

    Posts directly to the completions endpoint with 'stream': True and
    wraps the HTTP response in an SSEClient; the caller iterates its
    events to consume incremental completion text.
    """
    url = 'https://api.openai.com/v1/completions'
    headers = {
        'Accept': 'text/event-stream',
        'Authorization': 'Bearer ' + API_KEY
    }
    body = {
        'model': model,
        'prompt': prompt,
        'max_tokens': params.max_tokens,
        'temperature': params.temp,
        'top_p': params.top_p,
        'frequency_penalty': params.frequency_penalty,
        'presence_penalty': params.present_penalty,
        'stream': True,
    }

    # Keep the connection open (stream=True) so SSE events arrive as
    # the model generates them.
    req = requests.post(url, stream=True, headers=headers, json=body)
    client = sseclient.SSEClient(req)
    return client
    # Example of consuming events from the returned client:
    # print(json.loads(event.data)['choices'][0]['text'], end='', flush=True)
|
@ -1,8 +1,10 @@
|
||||
numpy==1.24.2
|
||||
openai==0.26.5
|
||||
openai==0.27.0
|
||||
requests==2.28.2
|
||||
sseclient==0.0.27
|
||||
sseclient_py==1.7.2
|
||||
streamlit==1.18.1
|
||||
streamlit==1.19.0
|
||||
streamlit_tags==1.2.8
|
||||
streamlit_toggle_switch==1.0.2
|
||||
langchain==0.0.100
|
||||
tiktoken==0.3.0
|
Loading…
Reference in New Issue