diff --git a/docs/extras/integrations/llms/chatglm.ipynb b/docs/extras/integrations/llms/chatglm.ipynb
index b9db839b9e..0601925a5f 100644
--- a/docs/extras/integrations/llms/chatglm.ipynb
+++ b/docs/extras/integrations/llms/chatglm.ipynb
@@ -55,7 +55,11 @@
     "    history=[[\"我将从美国到中国来旅游,出行前希望了解中国的城市\", \"欢迎问我任何问题。\"]],\n",
     "    top_p=0.9,\n",
     "    model_kwargs={\"sample_model_args\": False},\n",
-    ")"
+    ")\n",
+    "\n",
+    "# Turn on with_history only when you want the LLM object to keep track of the conversation history\n",
+    "# and send the accumulated context to the backend model API, which makes it stateful. By default it is stateless.\n",
+    "# llm.with_history = True"
    ]
   },
   {
@@ -95,22 +99,6 @@
     "\n",
     "llm_chain.run(question)"
    ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "By Default, ChatGLM is statful to keep track of the conversation history and send the accumulated context to the model. To enable stateless mode, we could set ChatGLM.with_history as `False` explicitly."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "llm.with_history = False"
-   ]
   }
  ],
 "metadata": {
diff --git a/libs/langchain/langchain/llms/chatglm.py b/libs/langchain/langchain/llms/chatglm.py
index 072e1bc50d..232f2f9af7 100644
--- a/libs/langchain/langchain/llms/chatglm.py
+++ b/libs/langchain/langchain/llms/chatglm.py
@@ -37,7 +37,7 @@ class ChatGLM(LLM):
     """History of the conversation"""
     top_p: float = 0.7
     """Top P for nucleus sampling from 0 to 1"""
-    with_history: bool = True
+    with_history: bool = False
     """Whether to use history or not"""
 
     @property
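
Usage note for reviewers: a minimal sketch of the behavior this patch changes. It assumes a ChatGLM API server reachable at a local address (the endpoint_url value and the prompts are illustrative, not part of the patch; the parameter and attribute names endpoint_url, top_p, model_kwargs, and with_history come from the notebook and chatglm.py above).

# Sketch only: demonstrates the new stateless-by-default behavior of the
# ChatGLM wrapper after this patch.
from langchain.llms import ChatGLM

# Assumed local ChatGLM API server; replace with your own deployment.
endpoint_url = "http://127.0.0.1:8000"

llm = ChatGLM(
    endpoint_url=endpoint_url,
    top_p=0.9,
    model_kwargs={"sample_model_args": False},
)

# New default (with_history=False): each call is independent, so only the
# current prompt is sent to the backend.
print(llm("What is the capital of France?"))
print(llm("And what is its population?"))  # "its" has no referent server-side

# Opt in to stateful mode: the LLM object now accumulates the conversation
# history and sends it along with every subsequent request.
llm.with_history = True
print(llm("What is the capital of France?"))
print(llm("And what is its population?"))  # resolved against the history

Defaulting to stateless keeps repeated llm_chain.run() calls independent of one another, matching how the other LLM wrappers behave; callers who want multi-turn context opt in per instance.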