DocsGPT/application/worker.py

import requests
import nltk
import os
from parser.file.bulk import SimpleDirectoryReader
from parser.schema.base import Document
from parser.open_ai_func import call_openai_api
from parser.token_func import group_split
from celery import current_task
import string
import zipfile
import shutil
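
# Ensure the NLTK tokenizer and POS-tagger data are available before parsing.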
try:
    nltk.download('punkt', quiet=True)
    nltk.download('averaged_perceptron_tagger', quiet=True)
except FileExistsError:
    pass
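
# Note: despite its name, this helper cycles deterministically through the 52 ASCII letters.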
def generate_random_string(length):
    return ''.join([string.ascii_letters[i % 52] for i in range(length)])


def ingest_worker(self, directory, formats, name_job, filename, user):
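    """Fetch an uploaded file, split it into token-bounded chunks, embed the
    chunks via the OpenAI API, upload the resulting FAISS index, and clean up
    the remote and local copies of the source file."""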
    # directory = 'inputs' or 'temp'
    # formats = [".rst", ".md"]
    input_files = None
    recursive = True
    limit = None
    exclude = True
    # name_job = 'job1'
    # filename = 'install.rst'
    # user = 'local'
    sample = False
    token_check = True
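    # group_split merges/splits documents so chunks fall roughly between these token bounds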
    min_tokens = 150
    max_tokens = 1250
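
    # local working directory for this job's files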
    full_path = directory + '/' + user + '/' + name_job

    # resolve the API base URL once, falling back to the local dev server when API_URL is unset
    api_url = os.environ.get('API_URL') or 'http://localhost:5001'
    url = api_url + '/api/download'
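
    # request the uploaded source file from the API and keep its raw bytes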
    file_data = {'name': name_job, 'file': filename, 'user': user}
    response = requests.get(url, params=file_data)
    file = response.content

    if not os.path.exists(full_path):
        os.makedirs(full_path)
    with open(full_path + '/' + filename, 'wb') as f:
        f.write(file)

    # check if the file is a .zip archive and extract it in place
    if filename.endswith('.zip'):
        with zipfile.ZipFile(full_path + '/' + filename, 'r') as zip_ref:
            zip_ref.extractall(full_path)
        os.remove(full_path + '/' + filename)
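
    # report coarse progress back to Celery so the API can poll task status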
    self.update_state(state='PROGRESS', meta={'current': 1})

    raw_docs = SimpleDirectoryReader(input_dir=full_path, input_files=input_files, recursive=recursive,
                                     required_exts=formats, num_files_limit=limit,
                                     exclude_hidden=exclude).load_data()
    raw_docs = group_split(documents=raw_docs, min_tokens=min_tokens, max_tokens=max_tokens, token_check=token_check)
    docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]
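
    # embed the chunks and build the FAISS index in full_path (writes index.faiss / index.pkl)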
    call_openai_api(docs, full_path, self)
    self.update_state(state='PROGRESS', meta={'current': 100})

    if sample:
        for i in range(min(5, len(raw_docs))):
            print(raw_docs[i].text)

    # get files from outputs/inputs/index.faiss and outputs/inputs/index.pkl
    # and send them to the server (provide user and name in form)
    url = api_url + '/api/upload_index'
    file_data = {'name': name_job, 'user': user}
    files = {'file_faiss': open(full_path + '/index.faiss', 'rb'),
             'file_pkl': open(full_path + '/index.pkl', 'rb')}
    response = requests.post(url, files=files, data=file_data)
    # close the index file handles once the upload request has been sent
    for f in files.values():
        f.close()

    # delete the remote copy of the uploaded source file via the API
    url = api_url + '/api/delete_old?path=' + 'inputs/' + user + '/' + name_job
    response = requests.get(url)
    # delete local
    shutil.rmtree(full_path)

    return {'directory': directory, 'formats': formats, 'name_job': name_job, 'filename': filename, 'user': user, 'limited': False}