import os
import shutil
import string
import sys
import zipfile
from urllib.parse import urljoin

import requests
import tiktoken

from application.core.settings import settings
from application.parser.file.bulk import SimpleDirectoryReader
from application.parser.open_ai_func import call_openai_api
from application.parser.remote.remote_creator import RemoteCreator
from application.parser.schema.base import Document
from application.parser.token_func import group_split


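# Worker tasks for document ingestion: fetch the source content, split it into
# token-bounded chunks, build the index, and upload it through the application API.
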
# Define a function to extract metadata from a given filename.
def metadata_from_filename(title):
    store = "/".join(title.split("/")[1:3])
    return {"title": title, "store": store}


# Define a function to generate a random string of a given length.
def generate_random_string(length):
    return "".join([string.ascii_letters[i % 52] for i in range(length)])


current_dir = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)


def extract_zip_recursive(zip_path, extract_to, current_depth=0, max_depth=5):
    """
    Recursively extract zip files with a limit on recursion depth.

    Args:
        zip_path (str): Path to the zip file to be extracted.
        extract_to (str): Destination path for extracted files.
        current_depth (int): Current depth of recursion.
        max_depth (int): Maximum allowed depth of recursion to prevent infinite loops.
    """
    if current_depth > max_depth:
        print(f"Reached maximum recursion depth of {max_depth}")
        return

    with zipfile.ZipFile(zip_path, "r") as zip_ref:
        zip_ref.extractall(extract_to)
    os.remove(zip_path)  # Remove the zip file after extracting

    # Check for nested zip files and extract them
    for root, dirs, files in os.walk(extract_to):
        for file in files:
            if file.endswith(".zip"):
                # If a nested zip file is found, extract it recursively
                file_path = os.path.join(root, file)
                extract_zip_recursive(file_path, root, current_depth + 1, max_depth)


# Define the main function for ingesting and processing documents.
def ingest_worker(self, directory, formats, name_job, filename, user):
    """
    Ingest and process documents.

    Args:
        self: Reference to the instance of the task.
        directory (str): Specifies the directory for ingesting ('inputs' or 'temp').
        formats (list of str): List of file extensions to consider for ingestion (e.g., [".rst", ".md"]).
        name_job (str): Name of the job for this ingestion task.
        filename (str): Name of the file to be ingested.
        user (str): Identifier for the user initiating the ingestion.

    Returns:
        dict: Information about the completed ingestion task, including input parameters and a "limited" flag.
    """
    # directory = 'inputs' or 'temp'
    # formats = [".rst", ".md"]
    input_files = None
    recursive = True
    limit = None
    exclude = True
    # name_job = 'job1'
    # filename = 'install.rst'
    # user = 'local'
    sample = False
    token_check = True
    min_tokens = 150
    max_tokens = 1250
    recursion_depth = 2
    full_path = os.path.join(directory, user, name_job)
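    # Fetch the uploaded file for this job from the application's API and store it
    # under full_path for parsing.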
    print(full_path, file=sys.stderr)
    file_data = {"name": name_job, "file": filename, "user": user}
    response = requests.get(
        urljoin(settings.API_URL, "/api/download"), params=file_data
    )
    # check if file is in the response
    print(response, file=sys.stderr)
    file = response.content

    if not os.path.exists(full_path):
        os.makedirs(full_path)
    with open(os.path.join(full_path, filename), "wb") as f:
        f.write(file)

    # check if file is .zip and extract it
    if filename.endswith(".zip"):
        extract_zip_recursive(
            os.path.join(full_path, filename), full_path, 0, recursion_depth
        )

    self.update_state(state="PROGRESS", meta={"current": 1})

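    # Read the stored files and split them into chunks within the
    # min_tokens/max_tokens bounds configured above.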
    raw_docs = SimpleDirectoryReader(
        input_dir=full_path,
        input_files=input_files,
        recursive=recursive,
        required_exts=formats,
        num_files_limit=limit,
        exclude_hidden=exclude,
        file_metadata=metadata_from_filename,
    ).load_data()
    raw_docs = group_split(
        documents=raw_docs,
        min_tokens=min_tokens,
        max_tokens=max_tokens,
        token_check=token_check,
    )

    docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]

    call_openai_api(docs, full_path, self)
    tokens = count_tokens_docs(docs)
    self.update_state(state="PROGRESS", meta={"current": 100})

    if sample:
        for i in range(min(5, len(raw_docs))):
            print(raw_docs[i].text)

    # get files from outputs/inputs/index.faiss and outputs/inputs/index.pkl
    # and send them to the server (provide user and name in form)
    file_data = {"name": name_job, "user": user, "tokens": tokens}
    if settings.VECTOR_STORE == "faiss":
        files = {
            "file_faiss": open(full_path + "/index.faiss", "rb"),
            "file_pkl": open(full_path + "/index.pkl", "rb"),
        }
        response = requests.post(
            urljoin(settings.API_URL, "/api/upload_index"), files=files, data=file_data
        )
        response = requests.get(
            urljoin(settings.API_URL, "/api/delete_old?path=" + full_path)
        )
    else:
        response = requests.post(
            urljoin(settings.API_URL, "/api/upload_index"), data=file_data
        )

    # delete local
    shutil.rmtree(full_path)

    return {
        "directory": directory,
        "formats": formats,
        "name_job": name_job,
        "filename": filename,
        "user": user,
        "limited": False,
    }


def remote_worker(self, source_data, name_job, user, loader, directory="temp"):
    token_check = True
    min_tokens = 150
    max_tokens = 1250
    full_path = directory + "/" + user + "/" + name_job

    if not os.path.exists(full_path):
        os.makedirs(full_path)
    self.update_state(state="PROGRESS", meta={"current": 1})

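    # Fetch the documents from the remote source using the requested loader.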
    remote_loader = RemoteCreator.create_loader(loader)
    raw_docs = remote_loader.load_data(source_data)

    docs = group_split(
        documents=raw_docs,
        min_tokens=min_tokens,
        max_tokens=max_tokens,
        token_check=token_check,
    )
    # docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]
    call_openai_api(docs, full_path, self)
    tokens = count_tokens_docs(docs)
    self.update_state(state="PROGRESS", meta={"current": 100})

    # Proceed with uploading and cleaning as in the original function
    file_data = {"name": name_job, "user": user, "tokens": tokens}
    if settings.VECTOR_STORE == "faiss":
        files = {
            "file_faiss": open(full_path + "/index.faiss", "rb"),
            "file_pkl": open(full_path + "/index.pkl", "rb"),
        }

        requests.post(
            urljoin(settings.API_URL, "/api/upload_index"), files=files, data=file_data
        )
        requests.get(urljoin(settings.API_URL, "/api/delete_old?path=" + full_path))
    else:
        requests.post(urljoin(settings.API_URL, "/api/upload_index"), data=file_data)

    shutil.rmtree(full_path)

    return {"urls": source_data, "name_job": name_job, "user": user, "limited": False}


def count_tokens_docs(docs):
    # Here we convert the docs list to a string and calculate the number of tokens the string represents.
    # docs_content = (" ".join(docs))
    docs_content = ""
    for doc in docs:
        docs_content += doc.page_content

    tokens, total_price = num_tokens_from_string(
        string=docs_content, encoding_name="cl100k_base"
    )
    # Only the token count is returned; the estimated cost is discarded here.
    return tokens


def num_tokens_from_string(string: str, encoding_name: str) -> tuple[int, float]:
    # Convert a string to tokens and estimate the user cost.
    encoding = tiktoken.get_encoding(encoding_name)
    num_tokens = len(encoding.encode(string))
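    # The cost estimate assumes a flat price per 1,000 tokens; callers in this
    # module currently use only the token count.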
    total_price = (num_tokens / 1000) * 0.0004
    return num_tokens, total_price
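

# Usage note (illustrative only): ingest_worker and remote_worker are written as
# bound background tasks (they receive `self` and report progress through
# `self.update_state`, e.g. as Celery tasks) and are registered outside this
# module. A hypothetical dispatch could look like:
#
#     ingest_task.delay("temp", [".rst", ".md"], "job1", "install.rst", "local")
#
# where `ingest_task` stands for whatever task wrapper the application defines.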