from langchain_community.vectorstores import Neo4jVector
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Cypher executed after the vector similarity search: each matched child chunk
# is traversed to its parent document, parents are deduplicated (keeping the
# best child score), and the parent's full text is returned as the result.
retrieval_query = """
MATCH (node)-[:HAS_PARENT]->(parent)
WITH parent, max(score) AS score // deduplicate parents
RETURN parent.text AS text, score, {} AS metadata
"""


def format_docs(docs):
    """Join retrieved documents into a single context string for the prompt."""
    return "\n\n".join(doc.page_content for doc in docs)


# Connect to an existing vector index over Child nodes; the retrieval_query
# rewrites each child hit into its parent document.
vectorstore = Neo4jVector.from_existing_index(
    OpenAIEmbeddings(),
    index_name="retrieval",
    node_label="Child",
    embedding_node_property="embedding",
    retrieval_query=retrieval_query,
)
retriever = vectorstore.as_retriever()

template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

model = ChatOpenAI()

# RAG pipeline: retrieve parent documents, format them as context, fill the
# prompt, call the model, and parse the response to a plain string.
chain = (
    RunnableParallel(
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
    )
    | prompt
    | model
    | StrOutputParser()
)


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
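
# A minimal usage sketch, not part of the template itself: it assumes a running
# Neo4j instance reachable via the NEO4J_URI/NEO4J_USERNAME/NEO4J_PASSWORD
# environment variables, a populated "retrieval" vector index over Child nodes,
# and an OPENAI_API_KEY in the environment. The question string is hypothetical.
if __name__ == "__main__":
    # The chain accepts a plain string because Question wraps a single __root__ field.
    answer = chain.invoke("What does the parent document retriever return?")
    print(answer)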