# DocsGPT/application/worker.py

import os
import random
import shutil
import string
import sys
import zipfile
from urllib.parse import urljoin

import nltk
import requests

from application.core.settings import settings
from application.parser.file.bulk import SimpleDirectoryReader
from application.parser.open_ai_func import call_openai_api
from application.parser.remote.remote_creator import RemoteCreator
from application.parser.schema.base import Document
from application.parser.token_func import group_split

# Download the NLTK resources used by the parsers; ignore FileExistsError,
# which can occur when the resources have already been downloaded.
try:
    nltk.download("punkt", quiet=True)
    nltk.download("averaged_perceptron_tagger", quiet=True)
except FileExistsError:
    pass


# Define a function to extract metadata from a given filename.
def metadata_from_filename(title):
    store = "/".join(title.split("/")[1:3])
    return {"title": title, "store": store}


# Define a function to generate a random string of a given length.
def generate_random_string(length):
    # Indexing string.ascii_letters with i % 52 would yield the same
    # deterministic sequence every call; draw each character at random instead.
    return "".join(random.choices(string.ascii_letters, k=length))


current_dir = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)


# Define the main function for ingesting and processing documents.
def ingest_worker(self, directory, formats, name_job, filename, user):
    """
    Ingest and process documents.

    Args:
        self: Reference to the instance of the task.
        directory (str): Specifies the directory for ingesting ('inputs' or 'temp').
        formats (list of str): List of file extensions to consider for ingestion (e.g., [".rst", ".md"]).
        name_job (str): Name of the job for this ingestion task.
        filename (str): Name of the file to be ingested.
        user (str): Identifier for the user initiating the ingestion.

    Returns:
        dict: Information about the completed ingestion task, including input parameters and a "limited" flag.
    """
    # directory = 'inputs' or 'temp'
    # formats = [".rst", ".md"]
    input_files = None
    recursive = True
    limit = None
    exclude = True
    # name_job = 'job1'
    # filename = 'install.rst'
    # user = 'local'
    sample = False
    token_check = True
    min_tokens = 150
    max_tokens = 1250
    full_path = directory + "/" + user + "/" + name_job
    print(full_path, file=sys.stderr)

    # Download the file to ingest from the API server (settings.API_URL).
    file_data = {"name": name_job, "file": filename, "user": user}
    response = requests.get(
        urljoin(settings.API_URL, "/api/download"), params=file_data
    )
    print(response, file=sys.stderr)
    file = response.content

    if not os.path.exists(full_path):
        os.makedirs(full_path)
    with open(full_path + "/" + filename, "wb") as f:
        f.write(file)

    # Check if the file is a .zip, extract it, and remove the archive.
    if filename.endswith(".zip"):
        with zipfile.ZipFile(full_path + "/" + filename, "r") as zip_ref:
            zip_ref.extractall(full_path)
        os.remove(full_path + "/" + filename)

    self.update_state(state="PROGRESS", meta={"current": 1})

    raw_docs = SimpleDirectoryReader(
        input_dir=full_path,
        input_files=input_files,
        recursive=recursive,
        required_exts=formats,
        num_files_limit=limit,
        exclude_hidden=exclude,
        file_metadata=metadata_from_filename,
    ).load_data()
    # Group and split the raw documents into token-bounded chunks.
    raw_docs = group_split(
        documents=raw_docs,
        min_tokens=min_tokens,
        max_tokens=max_tokens,
        token_check=token_check,
    )
    docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]

    call_openai_api(docs, full_path, self)
    self.update_state(state="PROGRESS", meta={"current": 100})
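
    # At this point call_openai_api has embedded the chunks; for the FAISS
    # vector store it is expected to have written index.faiss and index.pkl
    # under full_path (an assumption based on the upload step below, not
    # verified here).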

    if sample:
        for i in range(min(5, len(raw_docs))):
            print(raw_docs[i].text)

    # Get the index files (index.faiss and index.pkl) from full_path
    # and send them to the server (provide user and name in the form).
    file_data = {"name": name_job, "user": user}
    if settings.VECTOR_STORE == "faiss":
        # Use context managers so the index file handles are closed after
        # the upload request completes.
        with open(full_path + "/index.faiss", "rb") as f_faiss, open(
            full_path + "/index.pkl", "rb"
        ) as f_pkl:
            files = {"file_faiss": f_faiss, "file_pkl": f_pkl}
            response = requests.post(
                urljoin(settings.API_URL, "/api/upload_index"),
                files=files,
                data=file_data,
            )
        # Ask the server to delete any old index stored under this path.
        response = requests.get(
            urljoin(settings.API_URL, "/api/delete_old?path=" + full_path)
        )
    else:
        response = requests.post(
            urljoin(settings.API_URL, "/api/upload_index"), data=file_data
        )
2023-03-13 21:56:09 +00:00
# delete local
shutil.rmtree(full_path)
2023-04-29 14:56:32 +00:00
return {
2024-03-16 14:52:05 +00:00
"directory": directory,
"formats": formats,
"name_job": name_job,
"filename": filename,
"user": user,
"limited": False,
2023-04-29 14:56:32 +00:00
}


def remote_worker(self, source_data, name_job, user, directory="temp", loader="url"):
    token_check = True
    min_tokens = 150
    max_tokens = 1250
    full_path = directory + "/" + user + "/" + name_job
    if not os.path.exists(full_path):
        os.makedirs(full_path)
    self.update_state(state="PROGRESS", meta={"current": 1})

    # For the "url" loader type, source_data holds the URLs to fetch
    # (e.g., {"data": [url]} or just the URLs themselves).
    # Use RemoteCreator to load data from the remote source.
    remote_loader = RemoteCreator.create_loader(loader)
    raw_docs = remote_loader.load_data(source_data)

    docs = group_split(
        documents=raw_docs,
        min_tokens=min_tokens,
        max_tokens=max_tokens,
        token_check=token_check,
    )

    call_openai_api(docs, full_path, self)
    self.update_state(state="PROGRESS", meta={"current": 100})

    # Proceed with uploading and cleanup as in ingest_worker.
    file_data = {"name": name_job, "user": user}
    if settings.VECTOR_STORE == "faiss":
        with open(full_path + "/index.faiss", "rb") as f_faiss, open(
            full_path + "/index.pkl", "rb"
        ) as f_pkl:
            files = {"file_faiss": f_faiss, "file_pkl": f_pkl}
            requests.post(
                urljoin(settings.API_URL, "/api/upload_index"),
                files=files,
                data=file_data,
            )
        requests.get(urljoin(settings.API_URL, "/api/delete_old?path=" + full_path))
    else:
        requests.post(urljoin(settings.API_URL, "/api/upload_index"), data=file_data)

    shutil.rmtree(full_path)
    return {"urls": source_data, "name_job": name_job, "user": user, "limited": False}