@@ -27,7 +27,7 @@
 }
 ],
 "source": [
-"%pip install pyllamacpp > /dev/null"
+"%pip install pygpt4all > /dev/null"
 ]
 },
 {
@@ -64,7 +64,7 @@
 "source": [
 "### Specify Model\n",
 "\n",
-"To run locally, download a compatible ggml-formatted model. For more info, visit https://github.com/nomic-ai/pyllamacpp \n",
+"To run locally, download a compatible ggml-formatted model. For more info, visit https://github.com/nomic-ai/pygpt4all\n",
 "\n",
 "For full installation instructions go [here](https://gpt4all.io/index.html).\n",
 "\n",
@@ -79,7 +79,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"local_path = './models/gpt4all-lora-quantized-ggml.bin' # replace with your desired local file path"
+"local_path = './models/ggml-gpt4all-l13b-snoozy.bin' # replace with your desired local file path"
 ]
 },
 {
@@ -102,8 +102,8 @@
 "\n",
 "# Path(local_path).parent.mkdir(parents=True, exist_ok=True)\n",
 "\n",
-"# # Example model. Check https://github.com/nomic-ai/pyllamacpp for the latest models.\n",
-"# url = 'https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin'\n",
+"# # Example model. Check https://github.com/nomic-ai/pygpt4all for the latest models.\n",
+"# url = 'http://gpt4all.io/models/ggml-gpt4all-l13b-snoozy.bin'\n",
 "\n",
 "# # send a GET request to the URL to download the file. Stream since it's large\n",
 "# response = requests.get(url, stream=True)\n",
@@ -165,7 +165,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.6"
+"version": "3.11.2"
 }
 },
 "nbformat": 4,