From 74f8e603d942ca22ed07bf0ea23a57ed67b36b2c Mon Sep 17 00:00:00 2001
From: Ralph Schlosser
Date: Mon, 5 Jun 2023 20:45:29 +0100
Subject: [PATCH] Addresses GPT4All wrapper model_type attribute issues #5720. (#5743)

Fixes #5720.

A more in-depth discussion is in my comment here:
https://github.com/hwchase17/langchain/issues/5720#issuecomment-1577047018

In a nutshell, there has been a subtle change in the latest version of
GPT4All's Python bindings. The change I submitted yesterday is compatible
with that version; however, that version has not yet been released, so the
change breaks LangChain's wrapper under the currently released version of
GPT4All.

This pull request proposes a backwards-compatible solution.
---
 langchain/llms/gpt4all.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/langchain/llms/gpt4all.py b/langchain/llms/gpt4all.py
index e4083847..f52a0915 100644
--- a/langchain/llms/gpt4all.py
+++ b/langchain/llms/gpt4all.py
@@ -153,7 +153,12 @@ class GPT4All(LLM):
         if values["n_threads"] is not None:
             # set n_threads
             values["client"].model.set_thread_count(values["n_threads"])
-        values["backend"] = values["client"].model_type
+
+        try:
+            values["backend"] = values["client"].model_type
+        except AttributeError:
+            # The below is for compatibility with GPT4All Python bindings <= 0.2.3.
+            values["backend"] = values["client"].model.model_type
 
         return values
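
For reference, below is a minimal, self-contained sketch of the fallback
pattern the patch applies. It is illustrative only: the two client classes
are hypothetical stand-ins for the old (<= 0.2.3) and unreleased GPT4All
bindings, not real GPT4All classes.

# Illustrative sketch -- not part of the patch.
# The client classes below are hypothetical stand-ins for the two
# generations of GPT4All Python bindings.

class NewStyleClient:
    """Mimics the unreleased bindings: model_type lives on the client."""
    model_type = "gptj"

class OldStyleClient:
    """Mimics bindings <= 0.2.3: model_type lives on client.model."""
    class _Model:
        model_type = "gptj"
    model = _Model()

def resolve_backend(client):
    # Same strategy as the patch: prefer the new attribute location,
    # fall back to the old one if it is missing.
    try:
        return client.model_type
    except AttributeError:
        return client.model.model_type

assert resolve_backend(NewStyleClient()) == "gptj"
assert resolve_backend(OldStyleClient()) == "gptj"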