diff --git a/docs/ecosystem/gpt4all.md b/docs/ecosystem/gpt4all.md
index 81f073e3..36422eb5 100644
--- a/docs/ecosystem/gpt4all.md
+++ b/docs/ecosystem/gpt4all.md
@@ -36,7 +36,7 @@ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
 model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8, callback_handler=callback_handler, verbose=True)
 
-# Generate text. Tokens are streamed throught the callback manager.
+# Generate text. Tokens are streamed through the callback manager.
 model("Once upon a time, ")
 ```
 
@@ -44,4 +44,4 @@ model("Once upon a time, ")
 
 You can find links to model file downloads in the [pyllamacpp](https://github.com/nomic-ai/pyllamacpp) repository.
 
-For a more detailed walkthrough of this, see [this notebook](../modules/models/llms/integrations/gpt4all.ipynb)
\ No newline at end of file
+For a more detailed walkthrough of this, see [this notebook](../modules/models/llms/integrations/gpt4all.ipynb)
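
For reference, the snippet this patch touches wires token streaming into the GPT4All wrapper. Below is a minimal, self-contained sketch of that usage, not something this patch itself changes: it assumes the LangChain API current at the time (`langchain.llms.GPT4All`, `CallbackManager`, `StreamingStdOutCallbackHandler`), and because the documented call references an undefined `callback_handler` variable, the sketch passes the `callback_manager` keyword instead, which is an assumption on our part.

```python
from langchain.llms import GPT4All
from langchain.callbacks.base import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Callbacks support token-wise streaming: each generated token is written
# to stdout as soon as it is produced.
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])

# Assumption: the wrapper accepts a `callback_manager` keyword; the docs'
# `callback_handler=callback_handler` refers to a variable that is never
# defined. Verbose is required to pass tokens through to the callbacks.
model = GPT4All(
    model="./models/gpt4all-model.bin",
    n_ctx=512,
    n_threads=8,
    callback_manager=callback_manager,
    verbose=True,
)

# Generate text. Tokens are streamed through the callback manager.
model("Once upon a time, ")
```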