langchain/templates/rag-chroma/rag_chroma/chain.py

from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from langchain.vectorstores import Chroma
# Example for document loading (from url), splitting, and creating vectorstore
"""
# Load
from langchain.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
# Split
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
# Add to vectorDB
vectorstore = Chroma.from_documents(documents=all_splits,
collection_name="rag-chroma",
embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()
2023-10-29 22:50:09 +00:00
"""

# Embed a single document as a test
vectorstore = Chroma.from_texts(
    ["harrison worked at kensho"],
    collection_name="rag-chroma",
    embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()
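
# Quick sanity check (a sketch, not part of the template; assumes
# OPENAI_API_KEY is set so embeddings can be computed): the retriever
# should surface the toy document embedded above.
# docs = retriever.get_relevant_documents("where did harrison work?")
# assert "kensho" in docs[0].page_content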
# RAG prompt
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
# LLM
model = ChatOpenAI()
# RAG chain
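# RunnableParallel runs both branches on the same input: the retriever fetches
# relevant documents as {context} while RunnablePassthrough forwards the raw
# question as {question}, so the prompt receives both template variables.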
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)
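
# Example invocation (a sketch; assumes OPENAI_API_KEY is set):
# chain.invoke("where did harrison work?")  # -> e.g. "Harrison worked at Kensho."
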
# Add typing for input
class Question(BaseModel):
    __root__: str

chain = chain.with_types(input_type=Question)
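
# Note: with_types only overrides the input schema the chain advertises
# (e.g. to LangServe's API docs and playground); it does not add runtime
# validation of incoming requests.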