diff --git a/docs/snippets/modules/chains/popular/summarize.mdx b/docs/snippets/modules/chains/popular/summarize.mdx
index 1a48b8e600..e77c327bd3 100644
--- a/docs/snippets/modules/chains/popular/summarize.mdx
+++ b/docs/snippets/modules/chains/popular/summarize.mdx
@@ -184,6 +184,7 @@ You can also use prompt with multi input. In this example, we will use a MapRedu
 ```python
 from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
 from langchain.chains.combine_documents.stuff import StuffDocumentsChain
+from langchain.chains import ReduceDocumentsChain
 
 map_template_string = """Give the following python code information, generate a description that explains what the code does and also mention the time complexity.
 Code:
diff --git a/libs/langchain/langchain/chains/combine_documents/refine.py b/libs/langchain/langchain/chains/combine_documents/refine.py
index 76087a9877..025e264165 100644
--- a/libs/langchain/langchain/chains/combine_documents/refine.py
+++ b/libs/langchain/langchain/chains/combine_documents/refine.py
@@ -53,7 +53,7 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
             prompt = PromptTemplate.from_template(
                 "Summarize this content: {context}"
             )
-            llm_chain = LLMChain(llm=llm, prompt=prompt)
+            initial_llm_chain = LLMChain(llm=llm, prompt=prompt)
             initial_response_name = "prev_response"
             # The prompt here should take as an input variable the
             # `document_variable_name` as well as `initial_response_name`
@@ -61,7 +61,7 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
                 "Here's your first summary: {prev_response}. "
                 "Now add to it based on the following context: {context}"
             )
-            llm_chain_refine = LLMChain(llm=llm, prompt=prompt_refine)
+            refine_llm_chain = LLMChain(llm=llm, prompt=prompt_refine)
             chain = RefineDocumentsChain(
                 initial_llm_chain=initial_llm_chain,
                 refine_llm_chain=refine_llm_chain,
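
Context for the first hunk: the newly imported `ReduceDocumentsChain` is the piece that wraps the combine step in the multi-input MapReduce example. Below is a minimal sketch of how the pieces fit together; the `OpenAI` model, the reduce prompt text, and the variable names `code` / `code_description` are assumptions based on the surrounding snippet, not part of this diff.

```python
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains import ReduceDocumentsChain, LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI  # assumption: any LLM works here

llm = OpenAI()

# Map step: describe each code snippet individually.
map_prompt = PromptTemplate(
    input_variables=["code"],
    template=(
        "Give the following python code information, generate a description "
        "that explains what the code does and also mention the time complexity.\n"
        "Code: {code}"
    ),
)
map_chain = LLMChain(llm=llm, prompt=map_prompt)

# Reduce step: stuff the per-document descriptions into a single prompt.
reduce_prompt = PromptTemplate(
    input_variables=["code_description"],
    template="Combine these code descriptions into one summary:\n{code_description}",
)
reduce_chain = LLMChain(llm=llm, prompt=reduce_prompt)
combine_documents_chain = StuffDocumentsChain(
    llm_chain=reduce_chain,
    document_variable_name="code_description",
)
reduce_documents_chain = ReduceDocumentsChain(
    combine_documents_chain=combine_documents_chain,
)

# Map over the input documents, then reduce the mapped results.
chain = MapReduceDocumentsChain(
    llm_chain=map_chain,
    reduce_documents_chain=reduce_documents_chain,
    document_variable_name="code",
)
```

Calling `chain.run(docs)` on a list of documents runs the map prompt per document and then reduces the outputs through the stuffed combine chain.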
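
For the second file, the rename makes the docstring's variable names match the keyword arguments passed to `RefineDocumentsChain` at the end of the example. Assembled into a self-contained sketch (the `OpenAI` LLM and the import paths are assumptions; the prompts and parameter names come from the docstring itself):

```python
from langchain.chains import RefineDocumentsChain, LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI  # assumption: any LLM works here

llm = OpenAI()
document_variable_name = "context"

# First pass: summarize the initial document.
prompt = PromptTemplate.from_template(
    "Summarize this content: {context}"
)
initial_llm_chain = LLMChain(llm=llm, prompt=prompt)
initial_response_name = "prev_response"

# Refine pass: fold each subsequent document into the running summary.
prompt_refine = PromptTemplate.from_template(
    "Here's your first summary: {prev_response}. "
    "Now add to it based on the following context: {context}"
)
refine_llm_chain = LLMChain(llm=llm, prompt=prompt_refine)

chain = RefineDocumentsChain(
    initial_llm_chain=initial_llm_chain,
    refine_llm_chain=refine_llm_chain,
    document_variable_name=document_variable_name,
    initial_response_name=initial_response_name,
)
```

With the rename, `initial_llm_chain` and `refine_llm_chain` are defined before they are referenced, so the docstring example runs as written.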