python: init_gpu fixes (#2368)

* python: tweak GPU init failure message
* llama.cpp: update submodule for use-after-free fix

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
v280-release-notes
Jared Van Bortel 1 month ago committed by GitHub
parent e021fe130f
commit c779d8a32d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@@ -1 +1 @@
Subproject commit 40bac11e427f2307305b86c322cb366bb95fcb8a
Subproject commit fadf1135a54e80188d644df42ad6a53bf986e8b0

@@ -274,11 +274,12 @@ class LLModel:
all_gpus = self.list_gpus()
available_gpus = self.list_gpus(mem_required)
unavailable_gpus = set(all_gpus).difference(available_gpus)
unavailable_gpus = [g for g in all_gpus if g not in available_gpus]
error_msg = "Unable to initialize model on GPU: {!r}".format(device)
error_msg += "\nAvailable GPUs: {}".format(available_gpus)
error_msg += "\nUnavailable GPUs due to insufficient memory or features: {}".format(unavailable_gpus)
error_msg = (f"Unable to initialize model on GPU: {device!r}" +
f"\nAvailable GPUs: {available_gpus}")
if unavailable_gpus:
error_msg += f"\nUnavailable GPUs due to insufficient memory: {unavailable_gpus}"
raise ValueError(error_msg)
def load_model(self) -> bool:

Loading…
Cancel
Save