diff --git a/docs/extras/integrations/chat/ollama.ipynb b/docs/extras/integrations/chat/ollama.ipynb
index 41a90405b7..d5569397bf 100644
--- a/docs/extras/integrations/chat/ollama.ipynb
+++ b/docs/extras/integrations/chat/ollama.ipynb
@@ -132,13 +132,7 @@
     "ollama pull llama2:13b\n",
     "```\n",
     "\n",
-    "Or, the 13b-chat model:\n",
-    "\n",
-    "```\n",
-    "ollama pull llama2:13b-chat\n",
-    "```\n",
-    "\n",
-    "Let's also use local embeddings from `GPT4AllEmbeddings` and `Chroma`."
+    "Let's also use local embeddings from `OllamaEmbeddings` and `Chroma`."
    ]
   },
   {
@@ -147,7 +141,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "! pip install gpt4all chromadb"
+    "! pip install chromadb"
    ]
   },
   {
@@ -167,22 +161,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Found model file at /Users/rlm/.cache/gpt4all/ggml-all-MiniLM-L6-v2-f16.bin\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from langchain.vectorstores import Chroma\n",
-    "from langchain.embeddings import GPT4AllEmbeddings\n",
+    "from langchain.embeddings import OllamaEmbeddings\n",
     "\n",
-    "vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())"
+    "vectorstore = Chroma.from_documents(documents=all_splits, embedding=OllamaEmbeddings())"
    ]
   },
   {
@@ -238,7 +224,7 @@
     "from langchain.chat_models import ChatOllama\n",
     "from langchain.callbacks.manager import CallbackManager\n",
     "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
-    "chat_model = ChatOllama(model=\"llama2:13b-chat\",\n",
+    "chat_model = ChatOllama(model=\"llama2:13b\",\n",
     "                        verbose=True,\n",
     "                        callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))"
    ]
diff --git a/docs/extras/integrations/llms/ollama.ipynb b/docs/extras/integrations/llms/ollama.ipynb
index 55e77871c6..49f8ae8f53 100644
--- a/docs/extras/integrations/llms/ollama.ipynb
+++ b/docs/extras/integrations/llms/ollama.ipynb
@@ -137,7 +137,6 @@
     "\n",
     "```\n",
     "ollama pull llama2:13b\n",
-    "ollama run llama2:13b \n",
     "```\n",
     "\n",
     "Let's also use local embeddings from `OllamaEmbeddings` and `Chroma`."
@@ -149,7 +148,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "! pip install gpt4all chromadb"
+    "! pip install chromadb"
    ]
   },
   {
@@ -169,17 +168,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 61,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Found model file at /Users/rlm/.cache/gpt4all/ggml-all-MiniLM-L6-v2-f16.bin\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from langchain.vectorstores import Chroma\n",
     "from langchain.embeddings import OllamaEmbeddings\n",