diff --git a/libs/langchain/langchain/chains/question_answering/refine_prompts.py b/libs/langchain/langchain/chains/question_answering/refine_prompts.py
index bec5fa4f57..d375b948e4 100644
--- a/libs/langchain/langchain/chains/question_answering/refine_prompts.py
+++ b/libs/langchain/langchain/chains/question_answering/refine_prompts.py
@@ -33,7 +33,7 @@ refine_template = (
     "If the context isn't useful, return the original answer."
 )
 CHAT_REFINE_PROMPT = ChatPromptTemplate.from_messages(
-    [("human", "{question}"), ("ai", "{existing_answer}"), ("human", "refine_template")]
+    [("human", "{question}"), ("ai", "{existing_answer}"), ("human", refine_template)]
 )
 REFINE_PROMPT_SELECTOR = ConditionalPromptSelector(
     default_prompt=DEFAULT_REFINE_PROMPT,
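
The fix replaces the string literal "refine_template" with the refine_template variable, so the refine instructions (including their {context_str} placeholder) are interpolated into the final human message instead of the literal word "refine_template" being sent to the model. A minimal sketch of the corrected behavior, using an abbreviated stand-in for the module's full refine_template text (not part of the diff):

    # Sketch only: shows why passing the variable matters. With the old string
    # literal, the last human message would just contain the word "refine_template".
    from langchain.prompts import ChatPromptTemplate

    # Abbreviated stand-in for the refine_template defined in refine_prompts.py.
    refine_template = (
        "We have the opportunity to refine the existing answer with more context.\n"
        "------------\n"
        "{context_str}\n"
        "------------\n"
        "If the context isn't useful, return the original answer."
    )

    CHAT_REFINE_PROMPT = ChatPromptTemplate.from_messages(
        [("human", "{question}"), ("ai", "{existing_answer}"), ("human", refine_template)]
    )

    # All three placeholders now resolve when the prompt is formatted.
    messages = CHAT_REFINE_PROMPT.format_messages(
        question="What does the refine chain do?",
        existing_answer="It iteratively improves an answer.",
        context_str="The refine chain feeds each new document into a refine prompt.",
    )
    print([m.content for m in messages])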