llmodel_c: improve quality of error messages (#1625)

Jared Van Bortel 2023-11-07 11:20:14 -05:00 committed by GitHub
parent 8fabf0be4a
commit d4ce9f4a7c
11 changed files with 61 additions and 70 deletions
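
At the core of the change, the llmodel_error struct is removed from the C API and replaced with a plain `const char **` out-parameter: on failure, the backend stores a message in a thread-local buffer and hands back a pointer to it. A minimal caller-side sketch of the new convention (the model path here is hypothetical; the declarations are those of llmodel_c.h as updated below):

    #include <cstdio>

    #include "llmodel_c.h"

    int main() {
        const char *error = nullptr;
        llmodel_model model = llmodel_model_create2("./model.gguf", "auto", &error);
        if (!model) {
            // `error` points into a thread-local buffer owned by the backend and
            // stays valid until the next llmodel C API call on this thread.
            std::fprintf(stderr, "create failed: %s\n", error);
            return 1;
        }
        llmodel_model_destroy(model);
        return 0;
    }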

View File

@@ -385,22 +385,35 @@ DLL_EXPORT const char *get_build_variant() {
 }
 
 DLL_EXPORT bool magic_match(const char * fname) {
     struct ggml_context * ctx_meta = NULL;
     struct gguf_init_params params = {
         /*.no_alloc = */ true,
         /*.ctx      = */ &ctx_meta,
     };
     gguf_context *ctx_gguf = gguf_init_from_file(fname, params);
-    if (!ctx_gguf)
+    if (!ctx_gguf) {
+        std::cerr << __func__ << ": gguf_init_from_file failed\n";
         return false;
+    }
 
-    bool isValid = gguf_get_version(ctx_gguf) <= 3;
+    bool valid = true;
+
+    int gguf_ver = gguf_get_version(ctx_gguf);
+    if (valid && gguf_ver > 3) {
+        std::cerr << __func__ << ": unsupported gguf version: " << gguf_ver << "\n";
+        valid = false;
+    }
+
     auto arch = get_arch_name(ctx_gguf);
-    isValid = isValid && (arch == "llama" || arch == "starcoder" || arch == "falcon" || arch == "mpt");
+    if (valid && !(arch == "llama" || arch == "starcoder" || arch == "falcon" || arch == "mpt")) {
+        if (!(arch == "gptj" || arch == "bert")) { // we support these via other modules
+            std::cerr << __func__ << ": unsupported model architecture: " << arch << "\n";
+        }
+        valid = false;
+    }
 
     gguf_free(ctx_gguf);
-    return isValid;
+    return valid;
 }
 
 DLL_EXPORT LLModel *construct() {
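
For illustration, depending on what is wrong with the file, magic_match now emits one of the following on stderr (message text taken verbatim from the code above; `magic_match` is the expansion of __func__, and the version number and architecture shown are hypothetical):

    magic_match: gguf_init_from_file failed
    magic_match: unsupported gguf version: 4
    magic_match: unsupported model architecture: gptneox

gptj and bert files are rejected without a message, since those architectures are served by other backend modules.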

View File

@@ -123,11 +123,18 @@ const std::vector<LLModel::Implementation> &LLModel::Implementation::implementationList() {
 }
 
 const LLModel::Implementation* LLModel::Implementation::implementation(const char *fname, const std::string& buildVariant) {
+    bool buildVariantMatched = false;
     for (const auto& i : implementationList()) {
         if (buildVariant != i.m_buildVariant) continue;
+        buildVariantMatched = true;
+
         if (!i.m_magicMatch(fname)) continue;
         return &i;
     }
+
+    if (!buildVariantMatched) {
+        std::cerr << "LLModel ERROR: Could not find any implementations for build variant: " << buildVariant << "\n";
+    }
+
     return nullptr;
 }

View File

@@ -11,45 +11,33 @@ struct LLModelWrapper {
     ~LLModelWrapper() { delete llModel; }
 };
 
+thread_local static std::string last_error_message;
+
 
 llmodel_model llmodel_model_create(const char *model_path) {
-    auto fres = llmodel_model_create2(model_path, "auto", nullptr);
+    const char *error;
+    auto fres = llmodel_model_create2(model_path, "auto", &error);
     if (!fres) {
-        fprintf(stderr, "Invalid model file\n");
+        fprintf(stderr, "Unable to instantiate model: %s\n", error);
     }
     return fres;
 }
 
-llmodel_model llmodel_model_create2(const char *model_path, const char *build_variant, llmodel_error *error) {
+llmodel_model llmodel_model_create2(const char *model_path, const char *build_variant, const char **error) {
     auto wrapper = new LLModelWrapper;
-    int error_code = 0;
-    std::string last_error_message;
+
     try {
         wrapper->llModel = LLModel::Implementation::construct(model_path, build_variant);
+        if (!wrapper->llModel) {
+            last_error_message = "Model format not supported (no matching implementation found)";
+        }
     } catch (const std::exception& e) {
-        error_code = EINVAL;
         last_error_message = e.what();
     }
+
     if (!wrapper->llModel) {
         delete std::exchange(wrapper, nullptr);
-        // Get errno and error message if none
-        if (error_code == 0) {
-            if (errno != 0) {
-                error_code = errno;
-                last_error_message = std::strerror(error_code);
-            } else {
-                error_code = ENOTSUP;
-                last_error_message = "Model format not supported (no matching implementation found)";
-            }
-        }
-        // Set error argument
         if (error) {
-            error->message = last_error_message.c_str();
-            error->code = error_code;
+            *error = last_error_message.c_str();
         }
     }
     return reinterpret_cast<llmodel_model*>(wrapper);
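
The file-scope `thread_local static` is what makes the pointer written through `*error` safe to use after llmodel_model_create2 returns; the old function-local std::string would have been destroyed before the caller could read the message. A self-contained sketch of the pattern (`fail_with` is a hypothetical helper, not part of the API):

    #include <string>

    thread_local static std::string last_error_message;

    // Each thread owns its own buffer, so concurrent callers cannot race,
    // and the returned pointer stays valid until the next error message
    // overwrites the buffer on the same thread.
    const char *fail_with(const std::string &msg) {
        last_error_message = msg;
        return last_error_message.c_str();
    }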

View File

@@ -23,17 +23,6 @@ extern "C" {
  */
 typedef void *llmodel_model;
 
-/**
- * Structure containing any errors that may eventually occur
- */
-struct llmodel_error {
-    const char *message; // Human readable error description; Thread-local; guaranteed to survive until next llmodel C API call
-    int code;            // errno; 0 if none
-};
-#ifndef __cplusplus
-typedef struct llmodel_error llmodel_error;
-#endif
-
 /**
  * llmodel_prompt_context structure for holding the prompt context.
  * NOTE: The implementation takes care of all the memory handling of the raw logits pointer and the
@@ -105,10 +94,10 @@ DEPRECATED llmodel_model llmodel_model_create(const char *model_path);
  * Recognises correct model type from file at model_path
  * @param model_path A string representing the path to the model file; will only be used to detect model type.
  * @param build_variant A string representing the implementation to use (auto, default, avxonly, ...),
- * @param error A pointer to a llmodel_error; will only be set on error.
+ * @param error A pointer to a string; will only be set on error.
  * @return A pointer to the llmodel_model instance; NULL on error.
  */
-llmodel_model llmodel_model_create2(const char *model_path, const char *build_variant, llmodel_error *error);
+llmodel_model llmodel_model_create2(const char *model_path, const char *build_variant, const char **error);
 
 /**
 * Destroy a llmodel instance.
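
Note the contract in the updated doc comment: `error` is written only on failure, so its value is meaningful only when the function returns NULL. A hedged sketch of a conforming caller:

    const char *error;  // left untouched on success
    llmodel_model model = llmodel_model_create2(model_path, "auto", &error);
    if (!model) {
        // only now may `error` be read
    }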

View File

@@ -139,7 +139,7 @@ $(info I CXX: $(CXXV))
 $(info )
 
 llmodel.o:
-	mkdir buildllm
+	[ -e buildllm ] || mkdir buildllm
 	cd buildllm && cmake ../../../gpt4all-backend/ $(CMAKEFLAGS) && make
 	cd buildllm && cp -rf CMakeFiles/llmodel.dir/llmodel_c.cpp.o ../llmodel_c.o
 	cd buildllm && cp -rf CMakeFiles/llmodel.dir/llmodel.cpp.o ../llmodel.o
@@ -150,7 +150,7 @@ clean:
 	rm -rf buildllm
 	rm -rf example/main
 
-binding.o:
+binding.o: binding.cpp binding.h
 	$(CXX) $(CXXFLAGS) binding.cpp -o binding.o -c $(LDFLAGS)
 
 libgpt4all.a: binding.o llmodel.o

View File

@@ -17,11 +17,10 @@
 
 void* load_model(const char *fname, int n_threads) {
     // load the model
-    llmodel_error new_error{};
+    const char *new_error;
     auto model = llmodel_model_create2(fname, "auto", &new_error);
-    if (model == nullptr ){
-        fprintf(stderr, "%s: error '%s'\n",
-            __func__, new_error.message);
+    if (model == nullptr) {
+        fprintf(stderr, "%s: error '%s'\n", __func__, new_error);
         return nullptr;
     }
 
     if (!llmodel_loadModel(model, fname)) {

View File

@@ -1,6 +1,7 @@
 package com.hexadevlabs.gpt4all;
 
 import jnr.ffi.Pointer;
+import jnr.ffi.byref.PointerByReference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -176,7 +177,7 @@ public class LLModel implements AutoCloseable {
         modelName = modelPath.getFileName().toString();
         String modelPathAbs = modelPath.toAbsolutePath().toString();
 
-        LLModelLibrary.LLModelError error = new LLModelLibrary.LLModelError(jnr.ffi.Runtime.getSystemRuntime());
+        PointerByReference error = new PointerByReference();
 
         // Check if model file exists
         if(!Files.exists(modelPath)){
@@ -192,7 +193,7 @@ public class LLModel implements AutoCloseable {
         model = library.llmodel_model_create2(modelPathAbs, "auto", error);
 
         if(model == null) {
-            throw new IllegalStateException("Could not load, gpt4all backend returned error: " + error.message);
+            throw new IllegalStateException("Could not load, gpt4all backend returned error: " + error.getValue().getString(0));
         }
 
         library.llmodel_loadModel(model, modelPathAbs);
@@ -631,4 +632,4 @@ public class LLModel implements AutoCloseable {
         library.llmodel_model_destroy(model);
     }
 
-}
\ No newline at end of file
+}

View File

@@ -1,6 +1,7 @@
 package com.hexadevlabs.gpt4all;
 
 import jnr.ffi.Pointer;
+import jnr.ffi.byref.PointerByReference;
 import jnr.ffi.Struct;
 import jnr.ffi.annotations.Delegate;
 import jnr.ffi.annotations.Encoding;
@@ -58,7 +59,7 @@ public interface LLModelLibrary {
         }
     }
 
-    Pointer llmodel_model_create2(String model_path, String build_variant, @Out LLModelError llmodel_error);
+    Pointer llmodel_model_create2(String model_path, String build_variant, PointerByReference error);
     void llmodel_model_destroy(Pointer model);
     boolean llmodel_loadModel(Pointer model, String model_path);
     boolean llmodel_isModelLoaded(Pointer model);

View File

@@ -42,10 +42,6 @@ def load_llmodel_library():
 
 llmodel = load_llmodel_library()
 
-class LLModelError(ctypes.Structure):
-    _fields_ = [("message", ctypes.c_char_p), ("code", ctypes.c_int32)]
-
-
 class LLModelPromptContext(ctypes.Structure):
     _fields_ = [
         ("logits", ctypes.POINTER(ctypes.c_float)),
@@ -77,7 +73,7 @@ class LLModelGPUDevice(ctypes.Structure):
 llmodel.llmodel_model_create.argtypes = [ctypes.c_char_p]
 llmodel.llmodel_model_create.restype = ctypes.c_void_p
 
-llmodel.llmodel_model_create2.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(LLModelError)]
+llmodel.llmodel_model_create2.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p)]
 llmodel.llmodel_model_create2.restype = ctypes.c_void_p
 
 llmodel.llmodel_model_destroy.argtypes = [ctypes.c_void_p]
@@ -150,6 +146,14 @@ def empty_response_callback(token_id: int, response: str) -> bool:
     return True
 
 
+def _create_model(model_path: bytes) -> ctypes.c_void_p:
+    err = ctypes.c_char_p()
+    model = llmodel.llmodel_model_create2(model_path, b"auto", ctypes.byref(err))
+    if model is None:
+        raise ValueError(f"Unable to instantiate model: {err.value.decode()}")
+    return model
+
+
 class LLModel:
     """
     Base class and universal wrapper for GPT4All language models
@@ -178,12 +182,8 @@ class LLModel:
 
     def memory_needed(self, model_path: str) -> int:
         model_path_enc = model_path.encode("utf-8")
-        self.model = llmodel.llmodel_model_create(model_path_enc)
-        if self.model is not None:
-            return llmodel.llmodel_required_mem(self.model, model_path_enc)
-        else:
-            raise ValueError("Unable to instantiate model")
+        self.model = _create_model(model_path_enc)
+        return llmodel.llmodel_required_mem(self.model, model_path_enc)
 
     def list_gpu(self, model_path: str) -> list:
         """
@@ -253,11 +253,7 @@ class LLModel:
             True if model loaded successfully, False otherwise
         """
         model_path_enc = model_path.encode("utf-8")
-        err = LLModelError()
-        self.model = llmodel.llmodel_model_create2(model_path_enc, b"auto", ctypes.byref(err))
-        if self.model is None:
-            raise ValueError(f"Unable to instantiate model: code={err.code}, {err.message.decode()}")
+        self.model = _create_model(model_path_enc)
 
         llmodel.llmodel_loadModel(self.model, model_path_enc)

View File

@@ -134,13 +134,10 @@ Napi::Value NodeModelWrapper::GetRequiredMemory(const Napi::CallbackInfo& info)
       device = config_object.Get("device").As<Napi::String>();
     }
     llmodel_set_implementation_search_path(library_path.c_str());
-    llmodel_error e = {
-        .message="looks good to me",
-        .code=0,
-    };
+    const char* e;
     inference_ = llmodel_model_create2(full_weight_path.c_str(), "auto", &e);
-    if(e.code != 0) {
-      Napi::Error::New(env, e.message).ThrowAsJavaScriptException();
+    if(!inference_) {
+      Napi::Error::New(env, e).ThrowAsJavaScriptException();
       return;
     }
     if(GetInference() == nullptr) {