diff --git a/langchain/chains/qa_with_sources/__init__.py b/langchain/chains/qa_with_sources/__init__.py
index c3a3fa2b..88b60aec 100644
--- a/langchain/chains/qa_with_sources/__init__.py
+++ b/langchain/chains/qa_with_sources/__init__.py
@@ -34,6 +34,7 @@ def _load_stuff_chain(
         llm_chain=llm_chain,
         document_variable_name=document_variable_name,
         document_prompt=stuff_prompt.EXAMPLE_PROMPT,
+        verbose=verbose,
         **kwargs,
     )
 
@@ -58,6 +59,7 @@ def _load_map_reduce_chain(
         llm_chain=reduce_chain,
         document_variable_name=combine_document_variable_name,
         document_prompt=document_prompt,
+        verbose=verbose,
     )
     if collapse_prompt is None:
         collapse_chain = None
@@ -82,6 +84,7 @@ def _load_map_reduce_chain(
         combine_document_chain=combine_document_chain,
         document_variable_name=map_reduce_document_variable_name,
         collapse_document_chain=collapse_chain,
+        verbose=verbose,
         **kwargs,
     )
 
@@ -106,6 +109,7 @@ def _load_refine_chain(
         document_variable_name=document_variable_name,
         initial_response_name=initial_response_name,
         document_prompt=document_prompt,
+        verbose=verbose,
         **kwargs,
     )
 
@@ -119,6 +123,8 @@ def load_qa_with_sources_chain(
         llm: Language Model to use in the chain.
         chain_type: Type of document combining chain to use. Should be one of
             "stuff", "map_reduce", and "refine".
+        verbose: Whether chains should be run in verbose mode or not. Note that this
+            applies to all chains that make up the final chain.
 
     Returns:
         A chain to use for question answering with sources.
diff --git a/langchain/chains/question_answering/__init__.py b/langchain/chains/question_answering/__init__.py
index f01bd0c6..7685d7ab 100644
--- a/langchain/chains/question_answering/__init__.py
+++ b/langchain/chains/question_answering/__init__.py
@@ -32,7 +32,10 @@ def _load_stuff_chain(
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
     # TODO: document prompt
     return StuffDocumentsChain(
-        llm_chain=llm_chain, document_variable_name=document_variable_name, **kwargs
+        llm_chain=llm_chain,
+        document_variable_name=document_variable_name,
+        verbose=verbose,
+        **kwargs,
     )
 
 
@@ -53,7 +56,9 @@ def _load_map_reduce_chain(
     reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose)
     # TODO: document prompt
     combine_document_chain = StuffDocumentsChain(
-        llm_chain=reduce_chain, document_variable_name=combine_document_variable_name
+        llm_chain=reduce_chain,
+        document_variable_name=combine_document_variable_name,
+        verbose=verbose,
     )
     if collapse_prompt is None:
         collapse_chain = None
@@ -77,6 +82,7 @@ def _load_map_reduce_chain(
         combine_document_chain=combine_document_chain,
         document_variable_name=map_reduce_document_variable_name,
         collapse_document_chain=collapse_chain,
+        verbose=verbose,
         **kwargs,
     )
 
@@ -99,6 +105,7 @@ def _load_refine_chain(
         refine_llm_chain=refine_chain,
         document_variable_name=document_variable_name,
         initial_response_name=initial_response_name,
+        verbose=verbose,
         **kwargs,
     )
 
@@ -112,6 +119,8 @@ def load_qa_chain(
         llm: Language Model to use in the chain.
         chain_type: Type of document combining chain to use. Should be one of
             "stuff", "map_reduce", and "refine".
+        verbose: Whether chains should be run in verbose mode or not. Note that this
+            applies to all chains that make up the final chain.
 
     Returns:
         A chain to use for question answering.
diff --git a/langchain/chains/summarize/__init__.py b/langchain/chains/summarize/__init__.py
index 68621e27..8605ed2c 100644
--- a/langchain/chains/summarize/__init__.py
+++ b/langchain/chains/summarize/__init__.py
@@ -28,7 +28,10 @@ def _load_stuff_chain(
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
     # TODO: document prompt
     return StuffDocumentsChain(
-        llm_chain=llm_chain, document_variable_name=document_variable_name, **kwargs
+        llm_chain=llm_chain,
+        document_variable_name=document_variable_name,
+        verbose=verbose,
+        **kwargs,
     )
 
 
@@ -49,7 +52,9 @@ def _load_map_reduce_chain(
     reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose)
     # TODO: document prompt
     combine_document_chain = StuffDocumentsChain(
-        llm_chain=reduce_chain, document_variable_name=combine_document_variable_name
+        llm_chain=reduce_chain,
+        document_variable_name=combine_document_variable_name,
+        verbose=verbose,
     )
     if collapse_prompt is None:
         collapse_chain = None
@@ -73,6 +78,7 @@ def _load_map_reduce_chain(
         combine_document_chain=combine_document_chain,
         document_variable_name=map_reduce_document_variable_name,
         collapse_document_chain=collapse_chain,
+        verbose=verbose,
         **kwargs,
     )
 
@@ -96,6 +102,7 @@ def _load_refine_chain(
         refine_llm_chain=refine_chain,
         document_variable_name=document_variable_name,
         initial_response_name=initial_response_name,
+        verbose=verbose,
         **kwargs,
     )
 
@@ -109,6 +116,8 @@ def load_summarize_chain(
         llm: Language Model to use in the chain.
         chain_type: Type of document combining chain to use. Should be one of
             "stuff", "map_reduce", and "refine".
+        verbose: Whether chains should be run in verbose mode or not. Note that this
+            applies to all chains that make up the final chain.
 
     Returns:
         A chain to use for summarizing.
diff --git a/pyproject.toml b/pyproject.toml
index 9d9dc5c9..78cf4709 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain"
-version = "0.0.51"
+version = "0.0.52"
 description = "Building applications with LLMs through composability"
 authors = []
 license = "MIT"