# flake8: noqa
"""Test Llama.cpp wrapper."""
import os
from urllib.request import urlretrieve

from langchain_community.llms import GPT4All


def _download_model() -> str:
    """Download model."""
    model_url = "http://gpt4all.io/models/ggml-gpt4all-l13b-snoozy.bin"
    local_filename = model_url.split("/")[-1]
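    # Download the model file only if it is not already cached locally.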
    if not os.path.exists(local_filename):
        urlretrieve(model_url, local_filename)

    return local_filename


def test_gpt4all_inference() -> None:
    """Test valid gpt4all inference."""
    model_path = _download_model()
    llm = GPT4All(model=model_path)
    output = llm.invoke("Say foo:")
    assert isinstance(output, str)