python: fix py3.8 compat (#2871)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Jared Van Bortel, 2024-08-14 13:30:14 -04:00, committed by GitHub
parent 3386ac6331
commit a232befa58
4 changed files with 26 additions and 17 deletions


@@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 
+## [2.8.2] - 2024-08-14
+
+### Fixed
+- Fixed incompatibility with Python 3.8 since v2.7.0 and Python <=3.11 since v2.8.1 ([#2871](https://github.com/nomic-ai/gpt4all/pull/2871))
+
 ## [2.8.1] - 2024-08-13
 
 ### Added

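The two version ranges in that changelog entry line up with two language features touched by the diffs below: str.removeprefix() exists only on Python 3.9+ (PEP 616), and reusing the outer quote character inside an f-string expression is accepted only on Python 3.12+ (PEP 701). A minimal sketch of both checks, runnable on any of the affected versions:

# str.removeprefix() was added in Python 3.9 (PEP 616); 3.8 has no such method.
print(hasattr(str, "removeprefix"))  # False on 3.8, True on 3.9+

# Reusing the outer quote character inside an f-string expression is only legal
# on Python 3.12+ (PEP 701); on 3.11 and earlier it is a compile-time SyntaxError.
try:
    compile('f"{"x".upper()}"', "<test>", "eval")
    print("accepted (Python 3.12+)")
except SyntaxError:
    print("rejected (Python <= 3.11)")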

@@ -45,7 +45,7 @@ def _load_cuda(rtver: str, blasver: str) -> None:
         cudalib = f"lib/libcudart.so.{rtver}"
         cublaslib = f"lib/libcublas.so.{blasver}"
     else:  # Windows
-        cudalib = fr"bin\cudart64_{rtver.replace(".", "")}.dll"
+        cudalib = fr"bin\cudart64_{rtver.replace('.', '')}.dll"
         cublaslib = fr"bin\cublas64_{blasver}.dll"
 
     # preload the CUDA libs so the backend can find them

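For illustration, a standalone sketch of the fixed Windows branch; the rtver value here is made up, and the commented-out old spelling fails to parse on Python <=3.11:

rtver = "11.8"  # hypothetical CUDA runtime version, for illustration only
# Old spelling (double quotes inside a double-quoted f-string) is a SyntaxError before 3.12:
#   cudalib = fr"bin\cudart64_{rtver.replace(".", "")}.dll"
cudalib = fr"bin\cudart64_{rtver.replace('.', '')}.dll"
print(cudalib)  # bin\cudart64_118.dll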

@@ -209,27 +209,27 @@ class GPT4All:
         self._current_prompt_template: str = "{0}"
 
         device_init = None
-        if sys.platform == 'darwin':
+        if sys.platform == "darwin":
             if device is None:
-                backend = 'auto'  # 'auto' is effectively 'metal' due to currently non-functional fallback
-            elif device == 'cpu':
-                backend = 'cpu'
+                backend = "auto"  # "auto" is effectively "metal" due to currently non-functional fallback
+            elif device == "cpu":
+                backend = "cpu"
             else:
-                if platform.machine() != 'arm64' or device != 'gpu':
-                    raise ValueError(f'Unknown device for this platform: {device}')
-                backend = 'metal'
+                if platform.machine() != "arm64" or device != "gpu":
+                    raise ValueError(f"Unknown device for this platform: {device}")
+                backend = "metal"
         else:
-            backend = 'kompute'
-            if device is None or device == 'cpu':
+            backend = "kompute"
+            if device is None or device == "cpu":
                 pass  # use kompute with no device
-            elif device in ('cuda', 'kompute'):
+            elif device in ("cuda", "kompute"):
                 backend = device
-                device_init = 'gpu'
-            elif device.startswith('cuda:'):
-                backend = 'cuda'
-                device_init = device.removeprefix('cuda:')
+                device_init = "gpu"
+            elif device.startswith("cuda:"):
+                backend = "cuda"
+                device_init = _remove_prefix(device, "cuda:")
             else:
-                device_init = device.removeprefix('kompute:')
+                device_init = _remove_prefix(device, "kompute:")
 
         # Retrieve model and download if allowed
         self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download, verbose=verbose)
@@ -706,3 +706,7 @@ def _fsync(fd: int | _HasFileno) -> None:
     else:
         return
     os.fsync(fd)
+
+
+def _remove_prefix(s: str, prefix: str) -> str:
+    return s[len(prefix):] if s.startswith(prefix) else s


@@ -68,7 +68,7 @@ def get_long_description():
 
 setup(
     name=package_name,
-    version="2.8.1",
+    version="2.8.2",
     description="Python bindings for GPT4All",
     long_description=get_long_description(),
     long_description_content_type="text/markdown",