langchain/templates/rag-semi-structured/rag_semi_structured/chain.py


# Load
import uuid
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.schema.document import Document
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.storage import InMemoryStore
from langchain.vectorstores import Chroma
from unstructured.partition.pdf import partition_pdf
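
# Overview: partition the PDF into text and table elements with Unstructured,
# summarize the elements with an LLM, index the summaries in Chroma, and answer
# questions by retrieving the raw elements through a MultiVectorRetriever.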
# Path to docs
path = "docs"
raw_pdf_elements = partition_pdf(
    filename=path + "/LLaMA2.pdf",
    # Unstructured first finds embedded image blocks
    extract_images_in_pdf=False,
    # Use layout model (YOLOX) to get bounding boxes (for tables) and find titles
    # Titles are any sub-section of the document
    infer_table_structure=True,
    # Post-processing to aggregate text once we have the title
    chunking_strategy="by_title",
    # Chunking params to aggregate text blocks
    # Attempt to create a new chunk at 3800 chars
    # Attempt to keep chunks > 2000 chars
    max_characters=4000,
    new_after_n_chars=3800,
    combine_text_under_n_chars=2000,
    image_output_dir_path=path,
)
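
# With chunking_strategy="by_title", partition_pdf returns Table elements plus
# CompositeElement chunks of aggregated narrative text; the loop below sorts
# them by type.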
# Categorize by type
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
# Summarize
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
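
# The leading dict coerces to a runnable mapping, so each raw string passed to
# .batch() below is fed into the prompt's {element} variable, with up to
# 5 requests running concurrently.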
# Apply
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
# To save time / cost, only do text summaries if chunk sizes are large
# text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
# We can just assign text_summaries to the raw texts
text_summaries = texts
# Use multi vector retriever
# The vectorstore to use to index the child chunks
vectorstore = Chroma(
    collection_name="summaries", embedding_function=OpenAIEmbeddings()
)
# The storage layer for the parent documents
store = InMemoryStore()
id_key = "doc_id"
# The retriever (empty to start)
retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    docstore=store,
    id_key=id_key,
)
# Add texts
doc_ids = [str(uuid.uuid4()) for _ in texts]
summary_texts = [
    Document(page_content=s, metadata={id_key: doc_ids[i]})
    for i, s in enumerate(text_summaries)
]
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))
# Add tables
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
    Document(page_content=s, metadata={id_key: table_ids[i]})
    for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
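
# At query time the retriever embeds the question, searches the summary
# vectors in Chroma, and returns the corresponding raw text/table strings
# looked up by doc_id in the docstore.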
# RAG
# Prompt template
template = """Answer the question based only on the following context, which can include text and tables:
{context}
Question: {question}
""" # noqa: E501
prompt = ChatPromptTemplate.from_template(template)
# LLM
model = ChatOpenAI(temperature=0, model="gpt-4")
# RAG pipeline
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)
# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
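

# Example invocation (illustrative sketch: the question below is hypothetical,
# and this assumes OPENAI_API_KEY is set and docs/LLaMA2.pdf was ingested above).
if __name__ == "__main__":
    print(chain.invoke("How was LLaMA 2 fine-tuned?"))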