llamamodel: re-enable error messages by default (#1537)

This commit was authored by:
cebtenzzre, 2023-10-19 13:46:33 -04:00, committed via GitHub
parent f505619c84
commit 0fe2e19691
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 9 additions and 10 deletions

View File

@ -36,17 +36,18 @@ namespace {
const char *modelType_ = "LLaMA"; const char *modelType_ = "LLaMA";
} }
// No-op llama.cpp log handler: swallows every message so the library stays
// silent. All three parameters are deliberately unused.
static void null_log_callback(enum ggml_log_level level, const char* text, void* userdata) {
    (void)level; (void)text; (void)userdata;  // suppress unused-parameter warnings
}
// Reports whether verbose llama.cpp logging was requested.
//
// Returns true iff the GPT4ALL_VERBOSE_LLAMACPP environment variable is set
// to a non-empty value. (The original scraped diff printed each of these
// lines twice; this is the reconstructed, compilable definition.)
static bool llama_verbose() {
    const char *flag = getenv("GPT4ALL_VERBOSE_LLAMACPP");
    if (flag == NULL)
        return false;
    return flag[0] != '\0';  // set-but-empty counts as "not verbose"
}
// Filtered llama.cpp log handler: errors (and worse) always reach stderr;
// everything else is shown only when llama_verbose() is enabled.
static void llama_log_callback(enum ggml_log_level level, const char *text, void *userdata) {
    (void)userdata;  // unused context pointer
    // Skip non-error chatter unless verbose mode was requested.
    if (level > GGML_LOG_LEVEL_ERROR && !llama_verbose())
        return;
    fputs(text, stderr);
}
struct gpt_params { struct gpt_params {
int32_t seed = -1; // RNG seed int32_t seed = -1; // RNG seed
int32_t n_keep = 0; // number of tokens to keep from initial prompt int32_t n_keep = 0; // number of tokens to keep from initial prompt
@ -403,9 +404,7 @@ DLL_EXPORT bool magic_match(const char * fname) {
} }
// Factory entry point for the plugin loader.
//
// Installs the filtering log callback (errors always shown; full llama.cpp
// output only when GPT4ALL_VERBOSE_LLAMACPP is set) before handing back a
// fresh model instance. Caller owns the returned object. (The scraped diff
// fused the pre- and post-commit lines here; this is the reconstructed
// post-commit definition.)
DLL_EXPORT LLModel *construct() {
    llama_log_set(llama_log_callback, nullptr);
    return new LLamaModel;
}
} }

View File

@ -61,7 +61,7 @@ copy_prebuilt_C_lib(SRC_CLIB_DIRECtORY,
setup( setup(
name=package_name, name=package_name,
version="2.0.0rc1", version="2.0.0rc2",
description="Python bindings for GPT4All", description="Python bindings for GPT4All",
author="Nomic and the Open Source Community", author="Nomic and the Open Source Community",
author_email="support@nomic.ai", author_email="support@nomic.ai",