import os

from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import CohereRerank
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Pinecone
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

if os.environ.get("PINECONE_API_KEY", None) is None:
    raise Exception("Missing `PINECONE_API_KEY` environment variable.")

if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
    raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")

PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")

### Ingest code - you may need to run this the first time
# # Load
# from langchain_community.document_loaders import WebBaseLoader
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# data = loader.load()

# # Split
# from langchain.text_splitter import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
# all_splits = text_splitter.split_documents(data)

# # Add to vectorDB
# vectorstore = Pinecone.from_documents(
#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
# )
# retriever = vectorstore.as_retriever()

vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, OpenAIEmbeddings())

# Get k=10 docs
retriever = vectorstore.as_retriever(search_kwargs={"k": 10})

# Re-rank
compressor = CohereRerank()
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)

# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# RAG
model = ChatOpenAI()
chain = (
    RunnableParallel(
        {"context": compression_retriever, "question": RunnablePassthrough()}
    )
    | prompt
    | model
    | StrOutputParser()
)


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
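

# Usage sketch (illustrative): invoke the chain with a plain question string,
# which the `Question` root model accepts directly. Assumes the Pinecone index
# already contains documents, and that OPENAI_API_KEY and COHERE_API_KEY are
# set in addition to the Pinecone variables checked above. The question below
# is a hypothetical example based on the ingested blog post.
if __name__ == "__main__":
    answer = chain.invoke("What is task decomposition for LLM agents?")
    print(answer)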