diff --git a/docs/docs/integrations/chat/nvidia_ai_endpoints.ipynb b/docs/docs/integrations/chat/nvidia_ai_endpoints.ipynb
index 561e3bd7ec..d346d035f1 100644
--- a/docs/docs/integrations/chat/nvidia_ai_endpoints.ipynb
+++ b/docs/docs/integrations/chat/nvidia_ai_endpoints.ipynb
@@ -134,7 +134,7 @@
     "from langchain_nvidia_ai_endpoints import ChatNVIDIA\n",
     "\n",
     "# connect to a chat NIM running at localhost:8000, specifying a specific model\n",
-    "llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta-llama3-8b-instruct\")"
+    "llm = ChatNVIDIA(base_url=\"http://localhost:8000/v1\", model=\"meta/llama3-8b-instruct\")"
    ]
   },
   {
@@ -658,7 +658,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.13"
+   "version": "3.10.2"
   }
  },
  "nbformat": 4,
diff --git a/docs/docs/integrations/providers/nvidia.mdx b/docs/docs/integrations/providers/nvidia.mdx
index 0e9fa2e9dd..70f1123c41 100644
--- a/docs/docs/integrations/providers/nvidia.mdx
+++ b/docs/docs/integrations/providers/nvidia.mdx
@@ -62,7 +62,7 @@ When ready to deploy, you can self-host models with NVIDIA NIM—which is includ
 from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank
 
 # connect to a chat NIM running at localhost:8000, specifying a specific model
-llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta-llama3-8b-instruct")
+llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")
 
 # connect to an embedding NIM running at localhost:8080
 embedder = NVIDIAEmbeddings(base_url="http://localhost:8080/v1")
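
As a quick sanity check of the corrected identifier, here is a minimal sketch, assuming a chat NIM serving `meta/llama3-8b-instruct` is reachable at localhost:8000 and an embedding NIM at localhost:8080 (the ports and running services are assumptions, not part of this diff):

```python
from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings

# NIM model ids use the "publisher/model" form (meta/llama3-8b-instruct),
# not the hyphenated meta-llama3-8b-instruct this diff removes
llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")
print(llm.invoke("Hello!").content)

# model can be omitted here, as in the provider doc above, when the
# embedding NIM hosts a single model
embedder = NVIDIAEmbeddings(base_url="http://localhost:8080/v1")
print(len(embedder.embed_query("test")))
```

If the old hyphenated id is passed instead, the NIM will not find a matching model, which is why both documentation snippets are updated together here.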