From 607ac19dcbe3000e324ae70b91f9320450d55058 Mon Sep 17 00:00:00 2001
From: AT
Date: Thu, 11 Jul 2024 13:02:54 -0400
Subject: [PATCH] Add scaffolding for translations. (#2612)

* Fix up concat strings in favor of args, remove some translations that are
  not meant to be translated, and add Chinese.

Signed-off-by: Adam Treat
---
 gpt4all-chat/CMakeLists.txt             |    8 +-
 gpt4all-chat/chat.cpp                   |    8 +-
 gpt4all-chat/main.cpp                   |    7 +
 gpt4all-chat/main.qml                   |   30 +-
 gpt4all-chat/modellist.cpp              |   10 +-
 gpt4all-chat/qml/AddModelView.qml       |   23 +-
 gpt4all-chat/qml/ChatView.qml           |   52 +-
 gpt4all-chat/qml/ModelsView.qml         |   11 +-
 gpt4all-chat/qml/StartupDialog.qml      |    5 +-
 gpt4all-chat/translations/gpt4all_en.ts | 2722 ++++++++++++++++++++++
 gpt4all-chat/translations/gpt4all_zh.ts | 2799 +++++++++++++++++++++++
 11 files changed, 5597 insertions(+), 78 deletions(-)
 create mode 100644 gpt4all-chat/translations/gpt4all_en.ts
 create mode 100644 gpt4all-chat/translations/gpt4all_zh.ts

diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index b940aa7f..4fc4f9c7 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -44,9 +44,9 @@ configure_file(
 )
 
 if(LINUX)
-    find_package(Qt6 6.4 COMPONENTS Core Quick WaylandCompositor QuickDialogs2 Svg HttpServer Sql Pdf REQUIRED)
+    find_package(Qt6 6.4 COMPONENTS Core Quick WaylandCompositor QuickDialogs2 Svg HttpServer Sql Pdf LinguistTools REQUIRED)
 else()
-    find_package(Qt6 6.4 COMPONENTS Core Quick QuickDialogs2 Svg HttpServer Sql Pdf REQUIRED)
+    find_package(Qt6 6.4 COMPONENTS Core Quick QuickDialogs2 Svg HttpServer Sql Pdf LinguistTools REQUIRED)
 endif()
 
 # Get the Qt6Core target properties
@@ -226,6 +226,10 @@ qt_add_qml_module(chat
     icons/you.svg
 )
 
+qt_add_translations(chat
+    TS_FILES ${CMAKE_SOURCE_DIR}/translations/gpt4all_en.ts
+)
+
 set_target_properties(chat PROPERTIES
     WIN32_EXECUTABLE TRUE
 )
diff --git a/gpt4all-chat/chat.cpp b/gpt4all-chat/chat.cpp
index 138f2c7c..3181a8ce 100644
--- a/gpt4all-chat/chat.cpp
+++ b/gpt4all-chat/chat.cpp
@@ -239,8 +239,8 @@ void Chat::newPromptResponsePair(const QString &prompt)
 {
     resetResponseState();
     m_chatModel->updateCurrentResponse(m_chatModel->count() - 1, false);
-    m_chatModel->appendPrompt(tr("Prompt: "), prompt);
-    m_chatModel->appendResponse(tr("Response: "), prompt);
+    m_chatModel->appendPrompt("Prompt: ", prompt);
+    m_chatModel->appendResponse("Response: ", prompt);
     emit resetResponseRequested();
 }
 
@@ -248,8 +248,8 @@ void Chat::serverNewPromptResponsePair(const QString &prompt)
 {
     resetResponseState();
     m_chatModel->updateCurrentResponse(m_chatModel->count() - 1, false);
-    m_chatModel->appendPrompt(tr("Prompt: "), prompt);
-    m_chatModel->appendResponse(tr("Response: "), prompt);
+    m_chatModel->appendPrompt("Prompt: ", prompt);
+    m_chatModel->appendResponse("Response: ", prompt);
 }
 
 bool Chat::isRecalc() const
diff --git a/gpt4all-chat/main.cpp b/gpt4all-chat/main.cpp
index b7a8e9ec..20a1d3fe 100644
--- a/gpt4all-chat/main.cpp
+++ b/gpt4all-chat/main.cpp
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <QTranslator>
 #include
 #include
 
@@ -31,6 +32,12 @@ int main(int argc, char *argv[])
     Logger::globalInstance();
 
     QGuiApplication app(argc, argv);
+
+    QTranslator translator;
+    bool success = translator.load(":/i18n/gpt4all_en.qm");
+    Q_ASSERT(success);
+    app.installTranslator(&translator);
+
     QQmlApplicationEngine engine;
 
     QString llmodelSearchPaths = QCoreApplication::applicationDirPath();
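The two hunks above are the build-system half of the scaffolding: qt_add_translations() runs
lrelease over gpt4all_en.ts at build time and embeds the compiled gpt4all_en.qm in the binary
under the ":/i18n" resource prefix, which is why main.cpp can load it from that path. A minimal
sketch of the natural follow-on (not part of this patch) is locale-based selection once more
.qm files ship; the QLocale overload of QTranslator::load() falls back automatically from
gpt4all_<lang>_<territory>.qm to gpt4all_<lang>.qm:

    // Sketch only -- assumes additional languages have been compiled into :/i18n
    // (needs <QTranslator> and <QLocale>, in place of the hard-coded English load above).
    QTranslator translator;
    if (translator.load(QLocale(),                  // system locale, e.g. zh_CN
                        QStringLiteral("gpt4all"),  // file name base
                        QStringLiteral("_"),        // separator before the locale part
                        QStringLiteral(":/i18n")))  // resource directory holding the .qm files
        app.installTranslator(&translator);         // tr()/qsTr() now resolve through it
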
diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml
index da491775..16b5b65c 100644
--- a/gpt4all-chat/main.qml
+++ b/gpt4all-chat/main.qml
@@ -20,7 +20,7 @@ Window {
     minimumWidth: 1280
     minimumHeight: 720
     visible: true
-    title: qsTr("GPT4All v") + Qt.application.version
+    title: qsTr("GPT4All v%1").arg(Qt.application.version)
 
     Settings {
         property alias x: window.x
@@ -108,14 +108,14 @@ Window {
         shouldShowBusy: false
         closePolicy: Popup.NoAutoClose
         modal: true
-        text: qsTr("<h3>Encountered an error starting up:</h3><br><i>")
-            + qsTr("\"Incompatible hardware detected.\"")
-            + qsTr("</i><br><br>Unfortunately, your CPU does not meet the minimal requirements to run ")
-            + qsTr("this program. In particular, it does not support AVX intrinsics which this ")
-            + qsTr("program requires to successfully run a modern large language model. ")
-            + qsTr("The only solution at this time is to upgrade your hardware to a more modern CPU.")
-            + qsTr("<br><br>See here for more information: ")
-            + qsTr("<a href=\"https://en.wikipedia.org/wiki/Advanced_Vector_Extensions\">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a>")
+        text: qsTr("<h3>Encountered an error starting up:</h3><br><i>"
+            + "\"Incompatible hardware detected.\""
+            + "</i><br><br>Unfortunately, your CPU does not meet the minimal requirements to run "
+            + "this program. In particular, it does not support AVX intrinsics which this "
+            + "program requires to successfully run a modern large language model. "
+            + "The only solution at this time is to upgrade your hardware to a more modern CPU."
+            + "<br><br>See here for more information: "
+            + "<a href=\"https://en.wikipedia.org/wiki/Advanced_Vector_Extensions\">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a>");
     }
 
     PopupDialog {
@@ -124,12 +124,12 @@ Window {
         shouldTimeOut: false
         shouldShowBusy: false
         modal: true
-        text: qsTr("<h3>Encountered an error starting up:</h3><br><i>")
-            + qsTr("\"Inability to access settings file.\"")
-            + qsTr("</i><br><br>Unfortunately, something is preventing the program from accessing ")
-            + qsTr("the settings file. This could be caused by incorrect permissions in the local ")
-            + qsTr("app config directory where the settings file is located. ")
-            + qsTr("Check out our <a href=\"https://discord.gg/4M2QFmTt2k\">discord channel</a> for help.")
+        text: qsTr("<h3>Encountered an error starting up:</h3><br><i>"
+            + "\"Inability to access settings file.\""
+            + "</i><br><br>Unfortunately, something is preventing the program from accessing "
+            + "the settings file. This could be caused by incorrect permissions in the local "
+            + "app config directory where the settings file is located. "
+            + "Check out our <a href=\"https://discord.gg/4M2QFmTt2k\">discord channel</a> for help.")
     }
 
     StartupDialog {
diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp
index cfa4667d..c79335fa 100644
--- a/gpt4all-chat/modellist.cpp
+++ b/gpt4all-chat/modellist.cpp
@@ -1553,7 +1553,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::FilesizeRole, "minimal" },
             { ModelList::OnlineRole, true },
             { ModelList::DescriptionRole,
-                tr("<strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br>") + chatGPTDesc },
+                tr("<strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1").arg(chatGPTDesc) },
             { ModelList::RequiresVersionRole, "2.7.4" },
             { ModelList::OrderRole, "ca" },
             { ModelList::RamrequiredRole, 0 },
@@ -1581,7 +1581,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::FilesizeRole, "minimal" },
             { ModelList::OnlineRole, true },
             { ModelList::DescriptionRole,
-                tr("<strong>OpenAI's ChatGPT model GPT-4</strong><br>") + chatGPTDesc + chatGPT4Warn },
+                tr("<strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2").arg(chatGPTDesc).arg(chatGPT4Warn) },
             { ModelList::RequiresVersionRole, "2.7.4" },
             { ModelList::OrderRole, "cb" },
             { ModelList::RamrequiredRole, 0 },
@@ -1612,7 +1612,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::FilesizeRole, "minimal" },
             { ModelList::OnlineRole, true },
             { ModelList::DescriptionRole,
-                tr("<strong>Mistral Tiny model</strong><br>") + mistralDesc },
+                tr("<strong>Mistral Tiny model</strong><br> %1").arg(mistralDesc) },
             { ModelList::RequiresVersionRole, "2.7.4" },
             { ModelList::OrderRole, "cc" },
             { ModelList::RamrequiredRole, 0 },
@@ -1637,7 +1637,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::FilesizeRole, "minimal" },
             { ModelList::OnlineRole, true },
             { ModelList::DescriptionRole,
-                tr("<strong>Mistral Small model</strong><br>") + mistralDesc },
+                tr("<strong>Mistral Small model</strong><br> %1").arg(mistralDesc) },
             { ModelList::RequiresVersionRole, "2.7.4" },
             { ModelList::OrderRole, "cd" },
             { ModelList::RamrequiredRole, 0 },
@@ -1663,7 +1663,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::FilesizeRole, "minimal" },
             { ModelList::OnlineRole, true },
             { ModelList::DescriptionRole,
-                tr("<strong>Mistral Medium model</strong><br>") + mistralDesc },
+                tr("<strong>Mistral Medium model</strong><br> %1").arg(mistralDesc) },
             { ModelList::RequiresVersionRole, "2.7.4" },
             { ModelList::OrderRole, "ce" },
             { ModelList::RamrequiredRole, 0 },
diff --git a/gpt4all-chat/qml/AddModelView.qml b/gpt4all-chat/qml/AddModelView.qml
index 7cb76d50..ad69e00a 100644
--- a/gpt4all-chat/qml/AddModelView.qml
+++ b/gpt4all-chat/qml/AddModelView.qml
@@ -94,7 +94,7 @@ Rectangle {
         function onDiscoverInProgressChanged() {
             if (ModelList.discoverInProgress) {
                 discoverField.textBeingSearched = discoverField.text;
-                discoverField.text = qsTr("Searching \u00B7 ") + discoverField.textBeingSearched;
+                discoverField.text = qsTr("Searching \u00B7 %1").arg(discoverField.textBeingSearched);
             } else {
                 discoverField.text = discoverField.textBeingSearched;
                 discoverField.textBeingSearched = "";
@@ -190,7 +190,7 @@ Rectangle {
             rightPadding: 30
             color: theme.textColor
             text: {
-                return qsTr("Sort by: ") + comboSort.displayText
+                return qsTr("Sort by: %1").arg(comboSort.displayText)
             }
             font.pixelSize: theme.fontSizeLarger
             verticalAlignment: Text.AlignVCenter
@@ -215,7 +215,7 @@ Rectangle {
             rightPadding: 30
             color: theme.textColor
             text: {
-                return qsTr("Sort dir: ") + comboSortDirection.displayText
+                return qsTr("Sort dir: %1").arg(comboSortDirection.displayText)
             }
             font.pixelSize: theme.fontSizeLarger
             verticalAlignment: Text.AlignVCenter
@@ -251,7 +251,7 @@ Rectangle {
             rightPadding: 30
             color: theme.textColor
             text: {
-                return qsTr("Limit: ") + comboLimit.displayText
+                return qsTr("Limit: %1").arg(comboLimit.displayText)
             }
             font.pixelSize: theme.fontSizeLarger
             verticalAlignment: Text.AlignVCenter
@@ -284,7 +284,7 @@ Rectangle {
         Layout.fillHeight: true
         horizontalAlignment: Qt.AlignHCenter
         verticalAlignment: Qt.AlignVCenter
-        text: qsTr("Network error: could not retrieve http://gpt4all.io/models/models3.json")
+        text: qsTr("Network error: could not retrieve %1").arg("http://gpt4all.io/models/models3.json")
         font.pixelSize: theme.fontSizeLarge
         color: theme.mutedTextColor
     }
@@ -454,9 +454,7 @@ Rectangle {
                     Layout.leftMargin: 20
                     visible: downloadError !== ""
                     textFormat: Text.StyledText
-                    text: "<strong><font size=\"1\"><a href=\"#error\">"
-                        + qsTr("Error")
-                        + "</a></strong></font>"
+                    text: qsTr("<strong><font size=\"1\"><a href=\"#error\">Error</a></strong></font>")
                     color: theme.textColor
                     font.pixelSize: theme.fontSizeLarge
                     linkColor: theme.textErrorColor
@@ -475,10 +473,7 @@ Rectangle {
                     Layout.leftMargin: 20
                     Layout.maximumWidth: 300
                     textFormat: Text.StyledText
-                    text: qsTr("<strong><font size=\"2\">WARNING: Not recommended for your hardware.")
-                        + qsTr(" Model requires more memory (") + ramrequired
-                        + qsTr(" GB) than your system has available (")
-                        + LLM.systemTotalRAMInGBString() + ").</strong></font>"
+                    text: qsTr("<strong><font size=\"2\">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font>").arg(ramrequired).arg(LLM.systemTotalRAMInGBString())
                     color: theme.textErrorColor
                     font.pixelSize: theme.fontSizeLarge
                     wrapMode: Text.WordWrap
@@ -630,7 +625,7 @@ Rectangle {
                             color: theme.mutedDarkTextColor
                         }
                         Text {
-                            text: ramrequired >= 0 ? ramrequired + qsTr(" GB") : "?"
+                            text: ramrequired >= 0 ? qsTr("%1 GB").arg(ramrequired) : qsTr("?")
                             color: theme.textColor
                             font.pixelSize: theme.fontSizeSmall
                             font.bold: true
@@ -652,7 +647,7 @@ Rectangle {
                             color: theme.mutedDarkTextColor
                         }
                         Text {
-                            text: parameters !== "" ? parameters : "?"
+                            text: parameters !== "" ? parameters : qsTr("?")
                             color: theme.textColor
                             font.pixelSize: theme.fontSizeSmall
                             font.bold: true
diff --git a/gpt4all-chat/qml/ChatView.qml b/gpt4all-chat/qml/ChatView.qml
index 124bf32c..e0b75d26 100644
--- a/gpt4all-chat/qml/ChatView.qml
+++ b/gpt4all-chat/qml/ChatView.qml
@@ -55,18 +55,18 @@ Rectangle {
         id: modelLoadingErrorPopup
         anchors.centerIn: parent
         shouldTimeOut: false
-        text: qsTr("<h3>Encountered an error loading model:</h3><br>")
-            + "<i>\"" + currentChat.modelLoadingError + "\"</i>"
-            + qsTr("<br><br>Model loading failures can happen for a variety of reasons, but the most common "
-            + "causes include a bad file format, an incomplete or corrupted download, the wrong file "
-            + "type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:"
-            + "<br><ul>"
-            + "<li>Ensure the model file has a compatible format and type"
-            + "<li>Check the model file is complete in the download folder"
-            + "<li>You can find the download folder in the settings dialog"
-            + "<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum"
-            + "<li>Read more about what models are supported in our <a href=\"https://docs.gpt4all.io/\">documentation</a> for the gui"
-            + "<li>Check out our <a href=\"https://discord.gg/4M2QFmTt2k\">discord channel</a> for help")
+        text: qsTr("<h3>Encountered an error loading model:</h3><br>"
+            + "<i>\"%1\"</i>"
+            + "<br><br>Model loading failures can happen for a variety of reasons, but the most common "
+            + "causes include a bad file format, an incomplete or corrupted download, the wrong file "
+            + "type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:"
+            + "<br><ul>"
+            + "<li>Ensure the model file has a compatible format and type"
+            + "<li>Check the model file is complete in the download folder"
+            + "<li>You can find the download folder in the settings dialog"
+            + "<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum"
+            + "<li>Read more about what models are supported in our <a href=\"https://docs.gpt4all.io/\">documentation</a> for the gui"
+            + "<li>Check out our <a href=\"https://discord.gg/4M2QFmTt2k\">discord channel</a> for help").arg(currentChat.modelLoadingError);
     }
 
     PopupDialog {
@@ -107,7 +107,7 @@ Rectangle {
         for (var i = 0; i < chatModel.count; i++) {
             var item = chatModel.get(i)
             var string = item.name;
-            var isResponse = item.name === qsTr("Response: ")
+            var isResponse = item.name === "Response: "
             string += chatModel.get(i).value
             if (isResponse && item.stopped)
                 string += " "
@@ -121,7 +121,7 @@ Rectangle {
         var str = "{\"conversation\": [";
         for (var i = 0; i < chatModel.count; i++) {
             var item = chatModel.get(i)
-            var isResponse = item.name === qsTr("Response: ")
+            var isResponse = item.name === "Response: "
             str += "{\"content\": ";
             str += JSON.stringify(item.value)
             str += ", \"role\": \"" + (isResponse ? "assistant" : "user") + "\"";
@@ -374,9 +374,9 @@ Rectangle {
                 if (!currentModelInstalled())
                     return qsTr("Not found: %1").arg(currentModelName())
                 if (currentChat.modelLoadingPercentage === 0.0)
-                    return qsTr("Reload \u00B7 ") + currentModelName()
+                    return qsTr("Reload \u00B7 %1").arg(currentModelName())
                 if (currentChat.isCurrentlyLoading)
-                    return qsTr("Loading \u00B7 ") + currentModelName()
+                    return qsTr("Loading \u00B7 %1").arg(currentModelName())
                 return currentModelName()
             }
             font.pixelSize: theme.fontSizeLarger
@@ -705,7 +705,7 @@ Rectangle {
                 }
             }
 
-            text: qsTr("Load \u00B7 ") + defaultModel + qsTr(" (default) \u2192");
+            text: qsTr("Load \u00B7 %1 (default) \u2192").arg(defaultModel);
             onClicked: {
                 var i = comboBox.find(MySettings.userDefaultModel)
                 if (i !== -1) {
@@ -812,7 +812,7 @@ Rectangle {
                     fillMode: Image.PreserveAspectFit
                     mipmap: true
                     visible: false
-                    source: name !== qsTr("Response: ") ? "qrc:/gpt4all/icons/you.svg" : "qrc:/gpt4all/icons/gpt4all_transparent.svg"
+                    source: name !== "Response: " ? "qrc:/gpt4all/icons/you.svg" : "qrc:/gpt4all/icons/gpt4all_transparent.svg"
                 }
 
                 ColorOverlay {
@@ -845,7 +845,7 @@ Rectangle {
                     anchors.bottom: parent.bottom
 
                     TextArea {
-                        text: name === qsTr("Response: ") ? qsTr("GPT4All") : qsTr("You")
+                        text: name === "Response: " ? qsTr("GPT4All") : qsTr("You")
                         padding: 0
                         font.pixelSize: theme.fontSizeLarger
                         font.bold: true
@@ -855,7 +855,7 @@ Rectangle {
                         readOnly: true
                     }
                     Text {
-                        visible: name === qsTr("Response: ")
+                        visible: name === "Response: "
                         font.pixelSize: theme.fontSizeLarger
                         text: currentModelName()
                         color: theme.mutedTextColor
@@ -870,8 +870,8 @@ Rectangle {
                             return qsTr("recalculating context ...");
                         switch (currentChat.responseState) {
                         case Chat.ResponseStopped: return qsTr("response stopped ...");
-                        case Chat.LocalDocsRetrieval: return qsTr("retrieving localdocs: ") + currentChat.collectionList.join(", ") + " ...";
-                        case Chat.LocalDocsProcessing: return qsTr("searching localdocs: ") + currentChat.collectionList.join(", ") + " ...";
+                        case Chat.LocalDocsRetrieval: return qsTr("retrieving localdocs: %1 ...").arg(currentChat.collectionList.join(", "));
+                        case Chat.LocalDocsProcessing: return qsTr("searching localdocs: %1 ...").arg(currentChat.collectionList.join(", "));
                         case Chat.PromptProcessing: return qsTr("processing ...")
                         case Chat.ResponseGeneration: return qsTr("generating response ...");
                         case Chat.GeneratingQuestions: return qsTr("generating questions ...");
@@ -1005,7 +1005,7 @@ Rectangle {
 
                         Accessible.role: Accessible.Paragraph
                         Accessible.name: text
-                        Accessible.description: name === qsTr("Response: ") ? "The response by the model" : "The prompt by the user"
+                        Accessible.description: name === "Response: " ?
"The response by the model" : "The prompt by the user" } ThumbsDownDialog { @@ -1031,7 +1031,7 @@ Rectangle { Column { Layout.alignment: Qt.AlignRight Layout.rightMargin: 15 - visible: name === qsTr("Response: ") && + visible: name === "Response: " && (!currentResponse || !currentChat.responseInProgress) && MySettings.networkIsActive spacing: 10 @@ -1692,7 +1692,7 @@ Rectangle { var listElement = chatModel.get(index); currentChat.regenerateResponse() if (chatModel.count) { - if (listElement.name === qsTr("Response: ")) { + if (listElement.name === "Response: ") { chatModel.updateCurrentResponse(index, true); chatModel.updateStopped(index, false); chatModel.updateThumbsUpState(index, false); @@ -1766,7 +1766,7 @@ Rectangle { padding: 15 topPadding: 8 bottomPadding: 8 - text: qsTr("Reload \u00B7 ") + currentChat.modelInfo.name + text: qsTr("Reload \u00B7 %1").arg(currentChat.modelInfo.name) fontPixelSize: theme.fontSizeSmall Accessible.description: qsTr("Reloads the model") } diff --git a/gpt4all-chat/qml/ModelsView.qml b/gpt4all-chat/qml/ModelsView.qml index 0eb74a11..b92fd8e9 100644 --- a/gpt4all-chat/qml/ModelsView.qml +++ b/gpt4all-chat/qml/ModelsView.qml @@ -250,9 +250,7 @@ Rectangle { Layout.leftMargin: 20 visible: downloadError !== "" textFormat: Text.StyledText - text: "" - + qsTr("Error") - + "" + text: qsTr("Error") color: theme.textColor font.pixelSize: theme.fontSizeLarge linkColor: theme.textErrorColor @@ -271,10 +269,7 @@ Rectangle { Layout.leftMargin: 20 Layout.maximumWidth: 300 textFormat: Text.StyledText - text: qsTr("WARNING: Not recommended for your hardware.") - + qsTr(" Model requires more memory (") + ramrequired - + qsTr(" GB) than your system has available (") - + LLM.systemTotalRAMInGBString() + ")." + text: qsTr("WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).").arg(ramrequired).arg(LLM.systemTotalRAMInGBString()) color: theme.textErrorColor font.pixelSize: theme.fontSizeLarge wrapMode: Text.WordWrap @@ -426,7 +421,7 @@ Rectangle { color: theme.mutedDarkTextColor } Text { - text: ramrequired >= 0 ? ramrequired + qsTr(" GB") : "?" + text: ramrequired >= 0 ? qsTr("%1 GB").arg(ramrequired) : qsTr("?") color: theme.textColor font.pixelSize: theme.fontSizeSmall font.bold: true diff --git a/gpt4all-chat/qml/StartupDialog.qml b/gpt4all-chat/qml/StartupDialog.qml index e15fced0..2d9ffb33 100644 --- a/gpt4all-chat/qml/StartupDialog.qml +++ b/gpt4all-chat/qml/StartupDialog.qml @@ -64,10 +64,7 @@ MyDialog { id: welcome width: 1024 - 40 textFormat: TextEdit.MarkdownText - text: qsTr("### Release notes\n") - + Download.releaseInfo.notes - + qsTr("### Contributors\n") - + Download.releaseInfo.contributors + text: qsTr("### Release notes\n%1### Contributors\n%2").arg(Download.releaseInfo.notes).arg(Download.releaseInfo.contributors) focus: false readOnly: true Accessible.role: Accessible.Paragraph diff --git a/gpt4all-chat/translations/gpt4all_en.ts b/gpt4all-chat/translations/gpt4all_en.ts new file mode 100644 index 00000000..aeaf403d --- /dev/null +++ b/gpt4all-chat/translations/gpt4all_en.ts @@ -0,0 +1,2722 @@ + + + + + AddCollectionView + + + + ← Existing Collections + + + + + + Add Document Collection + + + + + + Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings. + + + + + + Please choose a directory + + + + + + Name + + + + + + Collection name... + + + + + + Name of the collection to add (Required) + + + + + + Folder + + + + + + Folder path... 
+ + + + + + Folder path to documents (Required) + + + + + + Browse + + + + + + Create Collection + + + + + AddModelView + + + + ← Existing Models + + + + + + Explore Models + + + + + + Discover and download models by keyword search... + + + + + + Text field for discovering and filtering downloadable models + + + + + + Initiate model discovery and filtering + + + + + + Triggers discovery and filtering of models + + + + + + Default + + + + + + Likes + + + + + + Downloads + + + + + + Recent + + + + + + Asc + + + + + + Desc + + + + + + None + + + + + + Searching · %1 + + + + + + Sort by: %1 + + + + + + Sort dir: %1 + + + + + + Limit: %1 + + + + + + Network error: could not retrieve %1 + + + + + + + + Busy indicator + + + + + + Displayed when the models request is ongoing + + + + + + Model file + + + + + + Model file to be downloaded + + + + + + Description + + + + + + File description + + + + + + Cancel + + + + + + Resume + + + + + + Download + + + + + + Stop/restart/start the download + + + + + + Remove + + + + + + Remove model from filesystem + + + + + + + + Install + + + + + + Install online model + + + + + + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> + + + + + + %1 GB + + + + + + + + ? + + + + + + Describes an error that occurred when downloading + + + + + + <strong><font size="1"><a href="#error">Error</a></strong></font> + + + + + + Error for incompatible hardware + + + + + + Download progressBar + + + + + + Shows the progress made in the download + + + + + + Download speed + + + + + + Download speed in bytes/kilobytes/megabytes per second + + + + + + Calculating... + + + + + + + + Whether the file hash is being calculated + + + + + + Displayed when the file hash is being calculated + + + + + + enter $API_KEY + + + + + + File size + + + + + + RAM required + + + + + + Parameters + + + + + + Quant + + + + + + Type + + + + + ApplicationSettings + + + + Application + + + + + + Network dialog + + + + + + opt-in to share feedback/conversations + + + + + + ERROR: Update system could not find the MaintenanceTool used<br> + to check for updates!<br><br> + Did you install this application using the online installer? If so,<br> + the MaintenanceTool executable should be located one directory<br> + above where this application resides on your filesystem.<br><br> + If you can't start it manually, then I'm afraid you'll have to<br> + reinstall. + + + + + + Error dialog + + + + + + Application Settings + + + + + + General + + + + + + Theme + + + + + + The application color scheme. + + + + + + Dark + + + + + + Light + + + + + + LegacyDark + + + + + + Font Size + + + + + + The size of text in the application. + + + + + + Device + + + + + + The compute device used for text generation. "Auto" uses Vulkan or Metal. + + + + + + Default Model + + + + + + The preferred model for new chats. Also used as the local server fallback. + + + + + + Suggestion Mode + + + + + + Generate suggested follow-up questions at the end of responses. + + + + + + When chatting with LocalDocs + + + + + + Whenever possible + + + + + + Never + + + + + + Download Path + + + + + + Where to store local models and the LocalDocs database. + + + + + + Browse + + + + + + Choose where to save model files + + + + + + Enable Datalake + + + + + + Send chats and feedback to the GPT4All Open-Source Datalake. + + + + + + Advanced + + + + + + CPU Threads + + + + + + The number of CPU threads used for inference and embedding. 
+ + + + + + Save Chat Context + + + + + + Save the chat model's state to disk for faster loading. WARNING: Uses ~2GB per chat. + + + + + + Enable Local Server + + + + + + Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage. + + + + + + API Server Port + + + + + + The port to use for the local server. Requires restart. + + + + + + Check For Updates + + + + + + Manually check for an update to GPT4All. + + + + + + Updates + + + + + Chat + + + + New Chat + + + + + Server Chat + + + + + ChatDrawer + + + + Drawer + + + + + + Main navigation drawer + + + + + + + New Chat + + + + + + Create a new chat + + + + + + Select the current chat or edit the chat when in edit mode + + + + + + Edit chat name + + + + + + Save chat name + + + + + + Delete chat + + + + + + Confirm chat deletion + + + + + + Cancel chat deletion + + + + + + List of chats + + + + + + List of chats in the drawer dialog + + + + + ChatListModel + + + TODAY + + + + + THIS WEEK + + + + + THIS MONTH + + + + + LAST SIX MONTHS + + + + + THIS YEAR + + + + + LAST YEAR + + + + + ChatView + + + + <h3>Warning</h3><p>%1</p> + + + + + + Switch model dialog + + + + + + Warn the user if they switch models, then context will be erased + + + + + + Conversation copied to clipboard. + + + + + + Code copied to clipboard. + + + + + + Chat panel + + + + + + Chat panel with options + + + + + + Reload the currently loaded model + + + + + + Eject the currently loaded model + + + + + + No model installed. + + + + + + Model loading error. + + + + + + Waiting for model... + + + + + + Switching context... + + + + + + Choose a model... + + + + + + Not found: %1 + + + + + + The top item is the current model + + + + + + + + LocalDocs + + + + + + Add documents + + + + + + add collections of documents to the chat + + + + + + Load the default model + + + + + + Loads the default model which can be changed in settings + + + + + + No Model Installed + + + + + + GPT4All requires that you install at least one +model to get started + + + + + + Install a Model + + + + + + Shows the add model view + + + + + + Conversation with the model + + + + + + prompt / response pairs from the conversation + + + + + + GPT4All + + + + + + You + + + + + + recalculating context ... + + + + + + response stopped ... + + + + + + processing ... + + + + + + generating response ... + + + + + + generating questions ... + + + + + + + + Copy + + + + + + Copy Message + + + + + + Disable markdown + + + + + + Enable markdown + + + + + + Thumbs up + + + + + + Gives a thumbs up to the response + + + + + + Thumbs down + + + + + + Opens thumbs down dialog + + + + + + %1 Sources + + + + + + Suggested follow-ups + + + + + + Erase and reset chat session + + + + + + Copy chat session to clipboard + + + + + + Redo last chat response + + + + + + Stop generating + + + + + + Stop the current response generation + + + + + + Reloads the model + + + + + + <h3>Encountered an error loading model:</h3><br><i>"%1"</i><br><br>Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. 
Here are some suggestions for resolving the problem:<br><ul><li>Ensure the model file has a compatible format and type<li>Check the model file is complete in the download folder<li>You can find the download folder in the settings dialog<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum<li>Read more about what models are supported in our <a href="https://docs.gpt4all.io/">documentation</a> for the gui<li>Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help + + + + + + + + Reload · %1 + + + + + + Loading · %1 + + + + + + Load · %1 (default) → + + + + + + retrieving localdocs: %1 ... + + + + + + searching localdocs: %1 ... + + + + + + Send a message... + + + + + + Load a model to continue... + + + + + + Send messages/prompts to the model + + + + + + Cut + + + + + + Paste + + + + + + Select All + + + + + + Send message + + + + + + Sends the message/prompt contained in textfield to the model + + + + + CollectionsDrawer + + + + Warning: searching collections while indexing can return incomplete results + + + + + + %n file(s) + + + + + + + + %n word(s) + + + + + + + + Updating + + + + + + + Add Docs + + + + + + Select a collection to make it available to the chat model. + + + + + HomeView + + + + Welcome to GPT4All + + + + + + The privacy-first LLM chat application + + + + + + Start chatting + + + + + + Start Chatting + + + + + + Chat with any LLM + + + + + + LocalDocs + + + + + + Chat with your local files + + + + + + Find Models + + + + + + Explore and download models + + + + + + Latest news + + + + + + Latest news from GPT4All + + + + + + Release Notes + + + + + + Documentation + + + + + + Discord + + + + + + X (Twitter) + + + + + + Github + + + + + + GPT4All.io + + + + + + Subscribe to Newsletter + + + + + LocalDocsSettings + + + + LocalDocs + + + + + + LocalDocs Settings + + + + + + Indexing + + + + + + Allowed File Extensions + + + + + + Comma-separated list. LocalDocs will only attempt to process files with these extensions. + + + + + + Embedding + + + + + + Use Nomic Embed API + + + + + + Embed documents using the fast Nomic API instead of a private local model. Requires restart. + + + + + + Nomic API Key + + + + + + API key to use for Nomic Embed. Get one from the Atlas <a href="https://atlas.nomic.ai/cli-login">API keys page</a>. Requires restart. + + + + + + Embeddings Device + + + + + + The compute device used for embeddings. "Auto" uses the CPU. Requires restart. + + + + + + Display + + + + + + Show Sources + + + + + + Display the sources used for each response. + + + + + + Advanced + + + + + + Warning: Advanced usage only. + + + + + + Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model's context window. More info <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">here</a>. + + + + + + Document snippet size (characters) + + + + + + Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation. + + + + + + Max document snippets per prompt + + + + + + Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation. + + + + + LocalDocsView + + + + LocalDocs + + + + + + Chat with your local files + + + + + + + Add Collection + + + + + + ERROR: The LocalDocs database is not valid. 
+ + + + + + No Collections Installed + + + + + + Install a collection of local documents to get started using this feature + + + + + + + Add Doc Collection + + + + + + Shows the add model view + + + + + + Indexing progressBar + + + + + + Shows the progress made in the indexing + + + + + + ERROR + + + + + + INDEXING + + + + + + EMBEDDING + + + + + + REQUIRES UPDATE + + + + + + READY + + + + + + INSTALLING + + + + + + Indexing in progress + + + + + + Embedding in progress + + + + + + This collection requires an update after version change + + + + + + Automatically reindexes upon changes to the folder + + + + + + Installation in progress + + + + + + % + + + + + + %n file(s) + + + + + + + + %n word(s) + + + + + + + + Remove + + + + + + Rebuild + + + + + + Reindex this folder from scratch. This is slow and usually not needed. + + + + + + Update + + + + + + Update the collection to the new version. This is a slow operation. + + + + + ModelList + + + <ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with OpenAI</li><li>You can apply for an API key <a href="https://platform.openai.com/account/api-keys">here.</a></li> + + + + + <strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1 + + + + + <strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2 + + + + + <strong>Mistral Tiny model</strong><br> %1 + + + + + <strong>Mistral Small model</strong><br> %1 + + + + + <strong>Mistral Medium model</strong><br> %1 + + + + + <br><br><i>* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info. + + + + + <ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with Mistral</li><li>You can apply for an API key <a href="https://console.mistral.ai/user/api-keys">here</a>.</li> + + + + + <strong>Created by %1.</strong><br><ul><li>Published on %2.<li>This model has %3 likes.<li>This model has %4 downloads.<li>More info can be found <a href="https://huggingface.co/%5">here.</a></ul> + + + + + ModelSettings + + + + Model + + + + + + Model Settings + + + + + + Clone + + + + + + Remove + + + + + + Name + + + + + + Model File + + + + + + System Prompt + + + + + + Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens. + + + + + + Prompt Template + + + + + + The template that wraps every prompt. + + + + + + Must contain the string "%1" to be replaced with the user's input. + + + + + + Chat Name Prompt + + + + + + Prompt used to automatically generate chat names. + + + + + + Suggested FollowUp Prompt + + + + + + Prompt used to generate suggested follow-up questions. + + + + + + Context Length + + + + + + Number of input and output tokens the model sees. + + + + + + Maximum combined prompt/response tokens before information is lost. +Using more context than the model was trained on will yield poor results. +NOTE: Does not take effect until you reload the model. + + + + + + Temperature + + + + + + Randomness of model output. Higher -> more variation. + + + + + + Temperature increases the chances of choosing less likely tokens. +NOTE: Higher temperature gives more creative but less predictable outputs. + + + + + + Top-P + + + + + + Nucleus Sampling factor. Lower -> more predicatable. + + + + + + Only the most likely tokens up to a total probability of top_p can be chosen. 
+NOTE: Prevents choosing highly unlikely tokens. + + + + + + Min-P + + + + + + Minimum token probability. Higher -> more predictable. + + + + + + Sets the minimum relative probability for a token to be considered. + + + + + + Top-K + + + + + + Size of selection pool for tokens. + + + + + + Only the top K most likely tokens will be chosen from. + + + + + + Max Length + + + + + + Maximum response length, in tokens. + + + + + + Prompt Batch Size + + + + + + The batch size used for prompt processing. + + + + + + Amount of prompt tokens to process at once. +NOTE: Higher values can speed up reading prompts but will use more RAM. + + + + + + Repeat Penalty + + + + + + Repetition penalty factor. Set to 1 to disable. + + + + + + Repeat Penalty Tokens + + + + + + Number of previous tokens used for penalty. + + + + + + GPU Layers + + + + + + Number of model layers to load into VRAM. + + + + + + How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model. +Lower values increase CPU load and RAM usage, and make inference slower. +NOTE: Does not take effect until you reload the model. + + + + + ModelsView + + + + No Models Installed + + + + + + Install a model to get started using GPT4All + + + + + + + + + Add Model + + + + + + Shows the add model view + + + + + + Installed Models + + + + + + Locally installed chat models + + + + + + Model file + + + + + + Model file to be downloaded + + + + + + Description + + + + + + File description + + + + + + Cancel + + + + + + Resume + + + + + + Stop/restart/start the download + + + + + + Remove + + + + + + Remove model from filesystem + + + + + + + + Install + + + + + + Install online model + + + + + + <strong><font size="1"><a href="#error">Error</a></strong></font> + + + + + + <strong><font size="2">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font> + + + + + + %1 GB + + + + + + ? + + + + + + Describes an error that occurred when downloading + + + + + + Error for incompatible hardware + + + + + + Download progressBar + + + + + + Shows the progress made in the download + + + + + + Download speed + + + + + + Download speed in bytes/kilobytes/megabytes per second + + + + + + Calculating... + + + + + + + + Whether the file hash is being calculated + + + + + + Busy indicator + + + + + + Displayed when the file hash is being calculated + + + + + + enter $API_KEY + + + + + + File size + + + + + + RAM required + + + + + + Parameters + + + + + + Quant + + + + + + Type + + + + + MyFancyLink + + + + Fancy link + + + + + + A stylized link + + + + + MySettingsStack + + + + Please choose a directory + + + + + MySettingsTab + + + + Restore Defaults + + + + + + Restores settings dialog to a default state + + + + + NetworkDialog + + + + Contribute data to the GPT4All Opensource Datalake. + + + + + + By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements. + +When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake. + +NOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake. 
You should have no expectation of chat privacy when this feature is enabled. You should; however, have an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone to download and will be used by Nomic AI to improve future GPT4All models. Nomic AI will retain all attribution information attached to your data and you will be credited as a contributor to any GPT4All model release that uses your data! + + + + + + Terms for opt-in + + + + + + Describes what will happen when you opt-in + + + + + + Please provide a name for attribution (optional) + + + + + + Attribution (optional) + + + + + + Provide attribution + + + + + + Enable + + + + + + Enable opt-in + + + + + + Cancel + + + + + + Cancel opt-in + + + + + NewVersionDialog + + + + New version is available + + + + + + Update + + + + + + Update to new version + + + + + PopupDialog + + + + Reveals a shortlived help balloon + + + + + + Busy indicator + + + + + + Displayed when the popup is showing busy + + + + + SettingsView + + + + + + Settings + + + + + + Contains various application settings + + + + + + Application + + + + + + Model + + + + + + LocalDocs + + + + + StartupDialog + + + + Welcome! + + + + + + ### Release notes +%1### Contributors +%2 + + + + + + Release notes + + + + + + Release notes for this version + + + + + + ### Opt-ins for anonymous usage analytics and datalake +By enabling these features, you will be able to participate in the democratic process of training a +large language model by contributing data for future model improvements. + +When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All +Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you +can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake. + +NOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake. +You should have no expectation of chat privacy when this feature is enabled. You should; however, have +an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone +to download and will be used by Nomic AI to improve future GPT4All models. Nomic AI will retain all +attribution information attached to your data and you will be credited as a contributor to any GPT4All +model release that uses your data! + + + + + + Terms for opt-in + + + + + + Describes what will happen when you opt-in + + + + + + + + Opt-in for anonymous usage statistics + + + + + + + + Yes + + + + + + Allow opt-in for anonymous usage statistics + + + + + + + + No + + + + + + Opt-out for anonymous usage statistics + + + + + + Allow opt-out for anonymous usage statistics + + + + + + + + Opt-in for network + + + + + + Allow opt-in for network + + + + + + Allow opt-in anonymous sharing of chats to the GPT4All Datalake + + + + + + Opt-out for network + + + + + + Allow opt-out anonymous sharing of chats to the GPT4All Datalake + + + + + SwitchModelDialog + + + + <b>Warning:</b> changing the model will erase the current conversation. Do you wish to continue? + + + + + + Continue + + + + + + Continue with model loading + + + + + + + + Cancel + + + + + ThumbsDownDialog + + + + Please edit the text below to provide a better response. (optional) + + + + + + Please provide a better response... 
+ + + + + + Submit + + + + + + Submits the user's response + + + + + + Cancel + + + + + + Closes the response dialog + + + + + main + + + + <h3>Encountered an error starting up:</h3><br><i>"Incompatible hardware detected."</i><br><br>Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. The only solution at this time is to upgrade your hardware to a more modern CPU.<br><br>See here for more information: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a> + + + + + + GPT4All v%1 + + + + + + <h3>Encountered an error starting up:</h3><br><i>"Inability to access settings file."</i><br><br>Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help. + + + + + + Connection to datalake failed. + + + + + + Saving chats. + + + + + + Network dialog + + + + + + opt-in to share feedback/conversations + + + + + + Home view + + + + + + Home view of application + + + + + + Home + + + + + + Chat view + + + + + + Chat view to interact with models + + + + + + Chats + + + + + + + + Models + + + + + + Models view for installed models + + + + + + + + LocalDocs + + + + + + LocalDocs view to configure and use local docs + + + + + + + + Settings + + + + + + Settings view for application configuration + + + + + + The datalake is enabled + + + + + + Using a network model + + + + + + Server mode is enabled + + + + + + Installed models + + + + + + View of installed models + + + + diff --git a/gpt4all-chat/translations/gpt4all_zh.ts b/gpt4all-chat/translations/gpt4all_zh.ts new file mode 100644 index 00000000..9182ce0d --- /dev/null +++ b/gpt4all-chat/translations/gpt4all_zh.ts @@ -0,0 +1,2799 @@ + + + + + AddCollectionView + + + + ← Existing Collections + + + + + + Add Document Collection + + + + + + Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings. + + + + + + Please choose a directory + + + + + + Name + + + + + + Collection name... + + + + + + Name of the collection to add (Required) + + + + + + Folder + + + + + + Folder path... + + + + + + Folder path to documents (Required) + + + + + + Browse + + + + + + Create Collection + + + + + AddModelView + + + + ← Existing Models + + + + + + Explore Models + + + + + + Discover and download models by keyword search... 
+ + + + + + Text field for discovering and filtering downloadable models + + + + + + Searching · + + + + + + Initiate model discovery and filtering + + + + + + Triggers discovery and filtering of models + + + + + + Default + + + + + + Likes + + + + + + Downloads + + + + + + Recent + + + + + + Sort by: + + + + + + Asc + + + + + + Desc + + + + + + Sort dir: + + + + + + None + + + + + + Limit: + + + + + + Network error: could not retrieve http://gpt4all.io/models/models3.json + + + + + + + + Busy indicator + + + + + + Displayed when the models request is ongoing + + + + + + Model file + + + + + + Model file to be downloaded + + + + + + Description + + + + + + File description + + + + + + Cancel + + + + + + Resume + + + + + + Download + + + + + + Stop/restart/start the download + + + + + + Remove + + + + + + Remove model from filesystem + + + + + + + + Install + + + + + + Install online model + + + + + + <a href="#error">Error</a> + + + + + + Describes an error that occurred when downloading + + + + + + <strong><font size="2">WARNING: Not recommended for your hardware. + + + + + + Model requires more memory ( + + + + + + GB) than your system has available ( + + + + + + Error for incompatible hardware + + + + + + Download progressBar + + + + + + Shows the progress made in the download + + + + + + Download speed + + + + + + Download speed in bytes/kilobytes/megabytes per second + + + + + + Calculating... + + + + + + + + Whether the file hash is being calculated + + + + + + Displayed when the file hash is being calculated + + + + + + enter $API_KEY + + + + + + File size + + + + + + RAM required + + + + + + GB + + + + + + Parameters + + + + + + Quant + + + + + + Type + + + + + ApplicationSettings + + + + Application + + + + + + Network dialog + + + + + + opt-in to share feedback/conversations + + + + + + ERROR: Update system could not find the MaintenanceTool used<br> + to check for updates!<br><br> + Did you install this application using the online installer? If so,<br> + the MaintenanceTool executable should be located one directory<br> + above where this application resides on your filesystem.<br><br> + If you can't start it manually, then I'm afraid you'll have to<br> + reinstall. + + + + + + Error dialog + + + + + + Application Settings + + + + + + General + + + + + + Theme + + + + + + The application color scheme. + + + + + + Dark + + + + + + Light + + + + + + LegacyDark + + + + + + Font Size + + + + + + The size of text in the application. + + + + + + Device + + + + + + The compute device used for text generation. "Auto" uses Vulkan or Metal. + + + + + + Default Model + + + + + + The preferred model for new chats. Also used as the local server fallback. + + + + + + Download Path + + + + + + Where to store local models and the LocalDocs database. + + + + + + Browse + + + + + + Choose where to save model files + + + + + + Enable Datalake + + + + + + Send chats and feedback to the GPT4All Open-Source Datalake. + + + + + + Advanced + + + + + + CPU Threads + + + + + + The number of CPU threads used for inference and embedding. + + + + + + Save Chat Context + + + + + + Save the chat model's state to disk for faster loading. WARNING: Uses ~2GB per chat. + + + + + + Enable Local Server + + + + + + Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage. + + + + + + API Server Port + + + + + + The port to use for the local server. Requires restart. + + + + + + Check For Updates + + + + + + Manually check for an update to GPT4All. 
+ + + + + + Updates + + + + + Chat + + + + New Chat + + + + + Server Chat + + + + + + Prompt: + + + + + + Response: + + + + + ChatDrawer + + + + Drawer + + + + + + Main navigation drawer + + + + + + + New Chat + + + + + + Create a new chat + + + + + + Select the current chat or edit the chat when in edit mode + + + + + + Edit chat name + + + + + + Save chat name + + + + + + Delete chat + + + + + + Confirm chat deletion + + + + + + Cancel chat deletion + + + + + + List of chats + + + + + + List of chats in the drawer dialog + + + + + ChatListModel + + + TODAY + + + + + THIS WEEK + + + + + THIS MONTH + + + + + LAST SIX MONTHS + + + + + THIS YEAR + + + + + LAST YEAR + + + + + ChatView + + + + <h3>Encountered an error loading model:</h3><br> + + + + + + <br><br>Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:<br><ul><li>Ensure the model file has a compatible format and type<li>Check the model file is complete in the download folder<li>You can find the download folder in the settings dialog<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum<li>Read more about what models are supported in our <a href="https://docs.gpt4all.io/">documentation</a> for the gui<li>Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help + + + + + + <h3>Warning</h3><p>%1</p> + + + + + + Switch model dialog + + + + + + Warn the user if they switch models, then context will be erased + + + + + + Conversation copied to clipboard. + + + + + + Code copied to clipboard. + + + + + + + + + + + + + + + + + + + + + + Response: + + + + + + Chat panel + + + + + + Chat panel with options + + + + + + Reload the currently loaded model + + + + + + Eject the currently loaded model + + + + + + No model installed. + + + + + + Model loading error. + + + + + + Waiting for model... + + + + + + Switching context... + + + + + + Choose a model... + + + + + + Not found: %1 + + + + + + + + Reload · + + + + + + Loading · + + + + + + The top item is the current model + + + + + + + + LocalDocs + + + + + + Add documents + + + + + + add collections of documents to the chat + + + + + + Load · + + + + + + (default) → + + + + + + Load the default model + + + + + + Loads the default model which can be changed in settings + + + + + + No Model Installed + + + + + + GPT4All requires that you install at least one +model to get started + + + + + + Install a Model + + + + + + Shows the add model view + + + + + + Conversation with the model + + + + + + prompt / response pairs from the conversation + + + + + + GPT4All + + + + + + You + + + + + + Busy indicator + + + + + + The model is thinking + + + + + + recalculating context ... + + + + + + response stopped ... + + + + + + retrieving localdocs: + + + + + + searching localdocs: + + + + + + processing ... + + + + + + generating response ... + + + + + + + + Copy + + + + + + Copy Message + + + + + + Disable markdown + + + + + + Enable markdown + + + + + + Thumbs up + + + + + + Gives a thumbs up to the response + + + + + + Thumbs down + + + + + + Opens thumbs down dialog + + + + + + %1 Sources + + + + + + Erase and reset chat session + + + + + + Copy chat session to clipboard + + + + + + Redo last chat response + + + + + + Stop the current response generation + + + + + + Reloads the model + + + + + + Send a message... 
+ + + + + + Load a model to continue... + + + + + + Send messages/prompts to the model + + + + + + Cut + + + + + + Paste + + + + + + Select All + + + + + + Send message + + + + + + Sends the message/prompt contained in textfield to the model + + + + + CollectionsDrawer + + + + Warning: searching collections while indexing can return incomplete results + + + + + + %n file(s) + + + + + + + + %n word(s) + + + + + + + + Updating + + + + + + + Add Docs + + + + + + Select a collection to make it available to the chat model. + + + + + HomeView + + + + Welcome to GPT4All + + + + + + The privacy-first LLM chat application + + + + + + Start chatting + + + + + + Start Chatting + + + + + + Chat with any LLM + + + + + + LocalDocs + + + + + + Chat with your local files + + + + + + Find Models + + + + + + Explore and download models + + + + + + Latest news + + + + + + Latest news from GPT4All + + + + + + Release Notes + + + + + + Documentation + + + + + + Discord + + + + + + X (Twitter) + + + + + + Github + + + + + + GPT4All.io + + + + + + Subscribe to Newsletter + + + + + LocalDocsSettings + + + + LocalDocs + + + + + + LocalDocs Settings + + + + + + Indexing + + + + + + Allowed File Extensions + + + + + + Comma-separated list. LocalDocs will only attempt to process files with these extensions. + + + + + + Embedding + + + + + + Use Nomic Embed API + + + + + + Embed documents using the fast Nomic API instead of a private local model. Requires restart. + + + + + + Nomic API Key + + + + + + API key to use for Nomic Embed. Get one from the Atlas <a href="https://atlas.nomic.ai/cli-login">API keys page</a>. Requires restart. + + + + + + Embeddings Device + + + + + + The compute device used for embeddings. "Auto" uses the CPU. Requires restart. + + + + + + Display + + + + + + Show Sources + + + + + + Display the sources used for each response. + + + + + + Advanced + + + + + + Warning: Advanced usage only. + + + + + + Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model's context window. More info <a href="https://docs.gpt4all.io/gpt4all_desktop/localdocs.html">here</a>. + + + + + + Document snippet size (characters) + + + + + + Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation. + + + + + + Max document snippets per prompt + + + + + + Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation. + + + + + LocalDocsView + + + + LocalDocs + + + + + + Chat with your local files + + + + + + + Add Collection + + + + + + ERROR: The LocalDocs database is not valid. 
+ + + + + + No Collections Installed + + + + + + Install a collection of local documents to get started using this feature + + + + + + + Add Doc Collection + + + + + + Shows the add model view + + + + + + Indexing progressBar + + + + + + Shows the progress made in the indexing + + + + + + ERROR + + + + + + INDEXING + + + + + + EMBEDDING + + + + + + REQUIRES UPDATE + + + + + + READY + + + + + + INSTALLING + + + + + + Indexing in progress + + + + + + Embedding in progress + + + + + + This collection requires an update after version change + + + + + + Automatically reindexes upon changes to the folder + + + + + + Installation in progress + + + + + + % + + + + + + %n file(s) + + + + + + + + %n word(s) + + + + + + + + Remove + + + + + + Rebuild + + + + + + Reindex this folder from scratch. This is slow and usually not needed. + + + + + + Update + + + + + + Update the collection to the new version. This is a slow operation. + + + + + ModelList + + + <ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with OpenAI</li><li>You can apply for an API key <a href="https://platform.openai.com/account/api-keys">here.</a></li> + + + + + <strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> + + + + + <br><br><i>* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info. + + + + + <strong>OpenAI's ChatGPT model GPT-4</strong><br> + + + + + <ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used to communicate with Mistral</li><li>You can apply for an API key <a href="https://console.mistral.ai/user/api-keys">here</a>.</li> + + + + + <strong>Mistral Tiny model</strong><br> + + + + + <strong>Mistral Small model</strong><br> + + + + + <strong>Mistral Medium model</strong><br> + + + + + <strong>Created by %1.</strong><br><ul><li>Published on %2.<li>This model has %3 likes.<li>This model has %4 downloads.<li>More info can be found <a href="https://huggingface.co/%5">here.</a></ul> + + + + + ModelSettings + + + + Model + + + + + + Model Settings + + + + + + Clone + + + + + + Remove + + + + + + Name + + + + + + Model File + + + + + + System Prompt + + + + + + Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens. + + + + + + Prompt Template + + + + + + The template that wraps every prompt. + + + + + + Must contain the string "%1" to be replaced with the user's input. + + + + + + Add +optional image + + + + + + Context Length + + + + + + Number of input and output tokens the model sees. + + + + + + Maximum combined prompt/response tokens before information is lost. +Using more context than the model was trained on will yield poor results. +NOTE: Does not take effect until you reload the model. + + + + + + Temperature + + + + + + Randomness of model output. Higher -> more variation. + + + + + + Temperature increases the chances of choosing less likely tokens. +NOTE: Higher temperature gives more creative but less predictable outputs. + + + + + + Top-P + + + + + + Nucleus Sampling factor. Lower -> more predicatable. + + + + + + Only the most likely tokens up to a total probability of top_p can be chosen. +NOTE: Prevents choosing highly unlikely tokens. + + + + + + Min-P + + + + + + Minimum token probability. Higher -> more predictable. 
ModelsView:
  - No Models Installed
  - Install a model to get started using GPT4All
  - + Add Model
  - Shows the add model view
  - Installed Models
  - Locally installed chat models
  - Model file
  - Model file to be downloaded
  - Description
  - File description
  - Cancel
  - Resume
  - Stop/restart/start the download
  - Remove
  - Remove model from filesystem
  - Install
  - Install online model
  - <a href="#error">Error</a>
  - Describes an error that occurred when downloading
  - <strong><font size="2">WARNING: Not recommended for your hardware.
  - Model requires more memory (
  - GB) than your system has available (
  - Error for incompatible hardware
  - Download progressBar
  - Shows the progress made in the download
  - Download speed
  - Download speed in bytes/kilobytes/megabytes per second
  - Calculating...
  - Whether the file hash is being calculated
  - Busy indicator
  - Displayed when the file hash is being calculated
  - enter $API_KEY
  - File size
  - RAM required
  - GB
  - Parameters
  - Quant
  - Type

MyFancyLink:
  - Fancy link
  - A stylized link

MySettingsStack:
  - Please choose a directory

MySettingsTab:
  - Restore Defaults
  - Restores settings dialog to a default state
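The ModelsView list above also shows why sentence fragments make poor catalog entries: strings like "Model requires more memory (" and "GB) than your system has available (" only form a sentence once code concatenates them, which locks every translation into English word order. The numbered-placeholder style visible in the ModelList strings ("Created by %1.", and so on) avoids this. A hypothetical before/after sketch in QML -- ramRequired and ramAvailable are invented names for the example:

    // Hypothetical sketch, not the actual ModelsView.qml code.

    // Hard to translate: word order is frozen by the concatenation, and the
    // translator sees three disconnected fragments.
    text: qsTr("Model requires more memory (") + ramRequired
          + qsTr(" GB) than your system has available (") + ramAvailable + " GB)."

    // Translator-friendly: one complete sentence with numbered placeholders
    // that each language can reorder freely.
    text: qsTr("Model requires more memory (%1 GB) than your system has available (%2 GB).")
              .arg(ramRequired).arg(ramAvailable)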
NetworkDialog:
  - Contribute data to the GPT4All Open Source Datalake.
  - By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements.

    When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.

    NOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake. You should have no expectation of chat privacy when this feature is enabled. You should, however, have an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone to download and will be used by Nomic AI to improve future GPT4All models. Nomic AI will retain all attribution information attached to your data and you will be credited as a contributor to any GPT4All model release that uses your data!
  - Terms for opt-in
  - Describes what will happen when you opt-in
  - Please provide a name for attribution (optional)
  - Attribution (optional)
  - Provide attribution
  - Enable
  - Enable opt-in
  - Cancel
  - Cancel opt-in

NewVersionDialog:
  - New version is available
  - Update
  - Update to new version

PopupDialog:
  - Reveals a short-lived help balloon
  - Busy indicator
  - Displayed when the popup is showing busy

SettingsView:
  - Settings
  - Contains various application settings
  - Application
  - Model
  - LocalDocs

StartupDialog:
  - Welcome!
  - ### Release notes
  - ### Contributors
  - Release notes
  - Release notes for this version
  - ### Opt-ins for anonymous usage analytics and datalake
    By enabling these features, you will be able to participate in the democratic process of training a
    large language model by contributing data for future model improvements.

    When a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All
    Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you
    can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.

    NOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake.
    You should have no expectation of chat privacy when this feature is enabled. You should, however, have
    an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone
    to download and will be used by Nomic AI to improve future GPT4All models. Nomic AI will retain all
    attribution information attached to your data and you will be credited as a contributor to any GPT4All
    model release that uses your data!
  - Terms for opt-in
  - Describes what will happen when you opt-in
  - Opt-in for anonymous usage statistics
  - Yes
  - Allow opt-in for anonymous usage statistics
  - No
  - Opt-out for anonymous usage statistics
  - Allow opt-out for anonymous usage statistics
  - Opt-in for network
  - Allow opt-in for network
  - Allow opt-in anonymous sharing of chats to the GPT4All Datalake
  - Opt-out for network
  - Allow opt-out anonymous sharing of chats to the GPT4All Datalake

SwitchModelDialog:
  - <b>Warning:</b> changing the model will erase the current conversation. Do you wish to continue?
  - Continue
  - Continue with model loading
  - Cancel

ThumbsDownDialog:
  - Please edit the text below to provide a better response. (optional)
  - Please provide a better response...
  - Submit
  - Submits the user's response
  - Cancel
  - Closes the response dialog
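Catalogs like these are normally maintained with the stock Qt Linguist tools: lupdate rescans the C++/QML sources for translatable strings and merges new or changed ones into each .ts file, translators edit the result in Qt Linguist, and lrelease compiles each .ts file into the binary .qm form that the application loads at runtime. Roughly, with illustrative paths:

    # Rescan sources and merge new/changed strings into the catalog.
    lupdate gpt4all-chat -ts gpt4all-chat/translations/gpt4all_en.ts

    # Compile the catalog into the binary .qm form the running app loads.
    lrelease gpt4all-chat/translations/gpt4all_en.ts -qm gpt4all_en.qm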
main:
  - GPT4All v
  - <h3>Encountered an error starting up:</h3><br>
  - <i>"Incompatible hardware detected."</i>
  - <br><br>Unfortunately, your CPU does not meet the minimal requirements to run
  - this program. In particular, it does not support AVX intrinsics which this
  - program requires to successfully run a modern large language model.
  - The only solution at this time is to upgrade your hardware to a more modern CPU.
  - <br><br>See here for more information: <a href="https://en.wikipedia.org/wiki/Advanced_Vector_Extensions">
  - https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a>
  - <i>"Inability to access settings file."</i>
  - <br><br>Unfortunately, something is preventing the program from accessing
  - the settings file. This could be caused by incorrect permissions in the local
  - app config directory where the settings file is located.
  - Check out our <a href="https://discord.gg/4M2QFmTt2k">discord channel</a> for help.
  - Connection to datalake failed.
  - Saving chats.
  - Network dialog
  - opt-in to share feedback/conversations
  - Home view
  - Home view of application
  - Home
  - Chat view
  - Chat view to interact with models
  - Chats
  - Models
  - Models view for installed models
  - LocalDocs
  - LocalDocs view to configure and use local docs
  - Settings
  - Settings view for application configuration
  - The datalake is enabled
  - Using a network model
  - Server mode is enabled
  - Installed models
  - View of installed models