import os

from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from langchain.vectorstores import Pinecone

if os.environ.get("PINECONE_API_KEY", None) is None:
    raise Exception("Missing `PINECONE_API_KEY` environment variable.")

if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
    raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")

PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")

### Ingest code - you may need to run this the first time
# Load
# from langchain.document_loaders import WebBaseLoader
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# data = loader.load()

# Split
# from langchain.text_splitter import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
# all_splits = text_splitter.split_documents(data)

# Add to vectorDB
# vectorstore = Pinecone.from_documents(
#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
# )
# retriever = vectorstore.as_retriever()

# Set up the index with a multi-query retriever: the LLM rewrites the user's
# question into several variants, retrieves documents for each, and returns the
# deduplicated union. temperature=0 keeps the generated queries deterministic.
vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, OpenAIEmbeddings())
query_model = ChatOpenAI(temperature=0)
retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(), llm=query_model
)

# RAG prompt
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# RAG chain: fetch context and pass the raw question through in parallel,
# fill the prompt, call the answering model, and parse the reply to a string.
model = ChatOpenAI()
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)
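
# --- Example usage: a minimal sketch, not part of the template itself. ---
# Assumes OPENAI_API_KEY is set, the Pinecone index above has already been
# populated (e.g. by the ingest code), and the question is illustrative only.
if __name__ == "__main__":
    answer = chain.invoke("What is task decomposition for LLM agents?")
    print(answer)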