New release notes for 2.4.9 and bump version.

pull/1079/head
Adam Treat 1 year ago
parent 71449bbc4b
commit 9375c71aa7

@@ -17,7 +17,7 @@ endif()
set(APP_VERSION_MAJOR 2)
set(APP_VERSION_MINOR 4)
-set(APP_VERSION_PATCH 9)
+set(APP_VERSION_PATCH 10)
set(APP_VERSION "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}")
# Include the binary directory for the generated header file

@@ -19,6 +19,7 @@
"name": "GPT4All Falcon",
"filename": "ggml-model-gpt4all-falcon-q4_0.bin",
"filesize": "4061641216",
"requires": "2.4.9",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",

@@ -304,6 +304,38 @@
* Felix Zaslavskiy
* Tim Miller
* Community (beta testers, bug reporters)
"
},
{
"version": "2.4.9",
"notes":
"
* New GPT4All Falcon model
* New Orca models
* Token generation speed is now reported in GUI
* Bugfix for localdocs references when regenerating
* General fixes for thread safety
* Many fixes to UI to add descriptions for error conditions
* Fixes for saving/reloading chats
* Complete refactor of the model download dialog with metadata about models available
* Resume downloads bugfix
* CORS fix
* Documentation fixes and typos
* Latest llama.cpp update
* Update of replit
* Force metal setting
* Fixes for model loading with metal on macOS
",
"contributors":
"
* Nils Sauer (Nomic AI)
* Adam Treat (Nomic AI)
* Aaron Miller (Nomic AI)
* Richard Guo (Nomic AI)
* Andriy Mulyar (Nomic AI)
* cosmic-snow
* AMOGUS
* Community (beta testers, bug reporters)
"
}
]

Loading…
Cancel
Save