diff --git a/docs/docs/integrations/chat/nvidia_ai_endpoints.ipynb b/docs/docs/integrations/chat/nvidia_ai_endpoints.ipynb
index d346d035f1..8d8b3b8262 100644
--- a/docs/docs/integrations/chat/nvidia_ai_endpoints.ipynb
+++ b/docs/docs/integrations/chat/nvidia_ai_endpoints.ipynb
@@ -540,7 +540,7 @@
     "id": "137662a6"
    },
    "source": [
-    "## Example usage within a Conversation Chains"
+    "## Example usage with RunnableWithMessageHistory"
    ]
   },
   {
@@ -550,7 +550,7 @@
     "id": "79efa62d"
    },
    "source": [
-    "Like any other integration, ChatNVIDIA is fine to support chat utilities like conversation buffers by default. Below, we show the [LangChain ConversationBufferMemory](https://python.langchain.com/docs/modules/memory/types/buffer) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
+    "Like any other integration, ChatNVIDIA supports chat utilities such as `RunnableWithMessageHistory`, which is analogous to using `ConversationChain`. Below, we show the [LangChain RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
    ]
   },
   {
@@ -572,8 +572,19 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.chains import ConversationChain\n",
-    "from langchain.memory import ConversationBufferMemory\n",
+    "from langchain_core.chat_history import InMemoryChatMessageHistory\n",
+    "from langchain_core.runnables.history import RunnableWithMessageHistory\n",
+    "\n",
+    "# store is a dictionary that maps session IDs to their corresponding chat histories.\n",
+    "store = {}  # memory is maintained outside the chain\n",
+    "\n",
+    "\n",
+    "# A function that returns the chat history for a given session ID.\n",
+    "def get_session_history(session_id: str) -> InMemoryChatMessageHistory:\n",
+    "    if session_id not in store:\n",
+    "        store[session_id] = InMemoryChatMessageHistory()\n",
+    "    return store[session_id]\n",
+    "\n",
     "\n",
     "chat = ChatNVIDIA(\n",
     "    model=\"mistralai/mixtral-8x22b-instruct-v0.1\",\n",
@@ -582,24 +593,18 @@
     "    top_p=1.0,\n",
     ")\n",
     "\n",
-    "conversation = ConversationChain(llm=chat, memory=ConversationBufferMemory())"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "f644ff28",
-   "metadata": {
-    "colab": {
-     "base_uri": "https://localhost:8080/",
-     "height": 268
-    },
-    "id": "f644ff28",
-    "outputId": "bae354cc-2118-4e01-ce20-a717ac94d27d"
-   },
-   "outputs": [],
-   "source": [
-    "conversation.invoke(\"Hi there!\")[\"response\"]"
+    "# Define a RunnableConfig with a `configurable` key; session_id selects the conversation thread.\n",
+    "config = {\"configurable\": {\"session_id\": \"1\"}}\n",
+    "\n",
+    "conversation = RunnableWithMessageHistory(\n",
+    "    chat,\n",
+    "    get_session_history,\n",
+    ")\n",
+    "\n",
+    "conversation.invoke(\n",
+    "    \"Hi, I'm Srijan Dubey.\",  # input or query\n",
+    "    config=config,\n",
+    ")"
    ]
   },
   {
@@ -616,26 +621,30 @@
    },
    "outputs": [],
    "source": [
-    "conversation.invoke(\"I'm doing well! Just having a conversation with an AI.\")[\n",
-    "    \"response\"\n",
-    "]"
+    "conversation.invoke(\n",
+    "    \"I'm doing well! Just having a conversation with an AI.\",\n",
+    "    config=config,\n",
+    ")"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "LyD1xVKmVSs4",
+   "id": "uHIMZxVSVNBC",
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/",
-     "height": 350
+     "height": 284
     },
-    "id": "LyD1xVKmVSs4",
-    "outputId": "a1714513-a8fd-4d14-f974-233e39d5c4f5"
+    "id": "uHIMZxVSVNBC",
+    "outputId": "79acc89d-a820-4f2c-bac2-afe99da95580"
    },
    "outputs": [],
    "source": [
-    "conversation.invoke(\"Tell me about yourself.\")[\"response\"]"
+    "conversation.invoke(\n",
+    "    \"Tell me about yourself.\",\n",
+    "    config=config,\n",
+    ")"
    ]
   }
  ],
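
For reference, the cells this diff adds assemble into the following end-to-end script. This is a minimal sketch, not the notebook verbatim: it assumes the `langchain-core` and `langchain-nvidia-ai-endpoints` packages are installed and `NVIDIA_API_KEY` is set in the environment, and it omits the `ChatNVIDIA` sampling kwargs that fall outside the diff's context lines.

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_nvidia_ai_endpoints import ChatNVIDIA

# Histories live outside the chain, keyed by session ID.
store = {}


def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    # Create a fresh in-memory history the first time a session ID is seen.
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]


chat = ChatNVIDIA(
    model="mistralai/mixtral-8x22b-instruct-v0.1",
    top_p=1.0,  # other sampling kwargs shown in the notebook are elided here
)

# Wrap the chat model so every call reads from and writes to the session's history.
conversation = RunnableWithMessageHistory(chat, get_session_history)

# The same session_id routes all calls to one history, so later turns can
# reference earlier ones; a new session_id starts a fresh thread.
config = {"configurable": {"session_id": "1"}}
print(conversation.invoke("Hi, I'm Srijan Dubey.", config=config).content)
print(conversation.invoke("Tell me about yourself.", config=config).content)
```

Because the wrapped runnable is the chat model itself, a plain string input is accepted and each call returns an `AIMessage`, mirroring the three `conversation.invoke(...)` cells in the diff above.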