mirror of https://github.com/nomic-ai/gpt4all
Modellist temp
parent
c1794597a7
commit
7f01b153b3
@ -1,97 +1,262 @@
|
||||
[
|
||||
{
|
||||
"md5sum": "81a09a0ddf89690372fc296ff7f625af",
|
||||
"filename": "ggml-gpt4all-j-v1.3-groovy.bin",
|
||||
"filesize": "3785248281",
|
||||
"isDefault": "true",
|
||||
"bestGPTJ": "true",
|
||||
"description": "GPT-J 6B finetuned by Nomic AI on the latest GPT4All dataset.<br>- Licensed for commercial use.<br>- Fast responses."
|
||||
},
|
||||
{
|
||||
"md5sum": "11d9f060ca24575a2c303bdc39952486",
|
||||
"filename": "GPT4All-13B-snoozy.ggmlv3.q4_0.bin",
|
||||
"filesize": "8136770688",
|
||||
"order": "a",
|
||||
"md5sum": "4acc146dd43eb02845c233c29289c7c5",
|
||||
"name": "Hermes",
|
||||
"filename": "nous-hermes-13b.ggmlv3.q4_0.bin",
|
||||
"filesize": "8136777088",
|
||||
"requires": "2.4.7",
|
||||
"isDefault": "true",
|
||||
"bestLlama": "true",
|
||||
"description": "LLaMA 13B finetuned by Nomic AI on the latest GPT4All dataset.<br>- Cannot be used commercially.<br>- Slower responses but higher quality.",
|
||||
"url": "https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin"
|
||||
"ramrequired": "16",
|
||||
"parameters": "13 billion",
|
||||
"quant": "q4_0",
|
||||
"type": "LLaMA",
|
||||
"description": "
|
||||
<strong>Best overall model</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Instruction based
|
||||
<li>Gives long responses
|
||||
<li>Curated with 300,000 uncensored instructions
|
||||
<li>Trained by Nous Research
|
||||
<li>Cannot be used commercially
|
||||
</ul>",
|
||||
"url": "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin"
|
||||
},
|
||||
{
|
||||
"order": "b",
|
||||
"md5sum": "756249d3d6abe23bde3b1ae272628640",
|
||||
"name": "MPT Chat",
|
||||
"filename": "ggml-mpt-7b-chat.bin",
|
||||
"filesize": "4854401050",
|
||||
"isDefault": "true",
|
||||
"bestMPT": "true",
|
||||
"requires": "2.4.1",
|
||||
"description": "MPT 7B chat model trained by Mosaic ML.<br>- Cannot be used commercially.<br>- Fast responses."
|
||||
"ramrequired": "8",
|
||||
"parameters": "7 billion",
|
||||
"quant": "q4_0",
|
||||
"type": "MPT",
|
||||
"description": "
|
||||
<strong>Best overall smaller model</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Fast responses
|
||||
<li>Chat based
|
||||
<li>Trained by Mosaic ML
|
||||
<li>Cannot be used commercially
|
||||
</ul>"
|
||||
},
|
||||
{
|
||||
"md5sum": "4acc146dd43eb02845c233c29289c7c5",
|
||||
"filename": "nous-hermes-13b.ggmlv3.q4_0.bin",
|
||||
"filesize": "8136777088",
|
||||
"order": "c",
|
||||
"md5sum": "81a09a0ddf89690372fc296ff7f625af",
|
||||
"name": "Groovy",
|
||||
"filename": "ggml-gpt4all-j-v1.3-groovy.bin",
|
||||
"filesize": "3785248281",
|
||||
"ramrequired": "8",
|
||||
"parameters": "7 billion",
|
||||
"quant": "q4_0",
|
||||
"type": "GPT-J",
|
||||
"description": "
|
||||
<strong>Best overall for commercial usage</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Fast responses
|
||||
<li>Creative responses</li>
|
||||
<li>Instruction based</li>
|
||||
<li>Trained by Nomic AI
|
||||
<li>Licensed for commercial use
|
||||
</ul>"
|
||||
},
|
||||
{
|
||||
"order": "d",
|
||||
"md5sum": "11d9f060ca24575a2c303bdc39952486",
|
||||
"name": "Snoozy",
|
||||
"filename": "GPT4All-13B-snoozy.ggmlv3.q4_0.bin",
|
||||
"filesize": "8136770688",
|
||||
"requires": "2.4.7",
|
||||
"description": "LLaMa 13B finetuned on over 300,000 curated and uncensored instructions.<br>- Cannot be used commercially.<br>- Best finetuned LLaMA model.<br>- This model was fine-tuned by Nous Research, with Teknium and Karan4D leading the fine tuning process and dataset curation, Redmond AI sponsoring the compute, and several other contributors. The result is an enhanced Llama 13b model that rivals GPT-3.5-turbo in performance across a variety of tasks. This model stands out for its long responses, low hallucination rate, and absence of OpenAI censorship mechanisms.",
|
||||
"url": "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin"
|
||||
"ramrequired": "16",
|
||||
"parameters": "13 billion",
|
||||
"quant": "q4_0",
|
||||
"type": "LLaMA",
|
||||
"description": "
|
||||
<strong>Very good overall model</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Instruction based
|
||||
<li>Based on the same dataset as Groovy
|
||||
<li>Slower than Groovy, with higher quality responses
|
||||
<li>Trained by Nomic AI
|
||||
<li>Cannot be used commercially
|
||||
</ul>",
|
||||
"url": "https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin"
|
||||
},
|
||||
{
|
||||
"order": "e",
|
||||
"md5sum": "29119f8fa11712704c6b22ac5ab792ea",
|
||||
"name": "Vicuna",
|
||||
"filename": "ggml-vicuna-7b-1.1-q4_2.bin",
|
||||
"filesize": "4212859520",
|
||||
"description": "LLaMA 7B finetuned by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.<br>- Cannot be used commercially."
|
||||
"ramrequired": "8",
|
||||
"parameters": "7 billion",
|
||||
"quant": "q4_2",
|
||||
"type": "LLaMA",
|
||||
"description": "
|
||||
<strong>Good small model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Instruction based
|
||||
<li>Cannot be used commercially
|
||||
</ul>"
|
||||
},
|
||||
{
|
||||
"order": "f",
|
||||
"md5sum": "95999b7b0699e2070af63bf5d34101a8",
|
||||
"name": "Vicuna (large)",
|
||||
"filename": "ggml-vicuna-13b-1.1-q4_2.bin",
|
||||
"filesize": "8136770688",
|
||||
"description": "LLaMA 13B trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.<br>- Cannot be used commercially."
|
||||
"ramrequired": "16",
|
||||
"parameters": "13 billion",
|
||||
"quant": "q4_2",
|
||||
"type": "LLaMA",
|
||||
"description": "
|
||||
<strong>Good larger model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Instruction based
|
||||
<li>Cannot be used commercially
|
||||
</ul>"
|
||||
},
|
||||
{
|
||||
"order": "g",
|
||||
"md5sum": "99e6d129745a3f1fb1121abed747b05a",
|
||||
"name": "Wizard",
|
||||
"filename": "ggml-wizardLM-7B.q4_2.bin",
|
||||
"filesize": "4212864640",
|
||||
"description": "LLaMA 7B finetuned by Microsoft and Peking University.<br>- Cannot be used commercially."
|
||||
"ramrequired": "8",
|
||||
"parameters": "7 billion",
|
||||
"quant": "q4_2",
|
||||
"type": "LLaMA",
|
||||
"description": "
|
||||
<strong>Good small model - trained by Microsoft and Peking University</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Instruction based
|
||||
<li>Cannot be used commercially
|
||||
</ul>"
|
||||
},
|
||||
{
|
||||
"order": "h",
|
||||
"md5sum": "6cb4ee297537c9133bddab9692879de0",
|
||||
"name": "Stable Vicuna",
|
||||
"filename": "ggml-stable-vicuna-13B.q4_2.bin",
|
||||
"filesize": "8136777088",
|
||||
"description": "LLaMa 13B finetuned with RLHF by Stability AI.<br>- Cannot be used commercially."
|
||||
"ramrequired": "16",
|
||||
"parameters": "13 billion",
|
||||
"quant": "q4_2",
|
||||
"type": "LLaMA",
|
||||
"description": "
|
||||
<strong>Trained with RLHF by Stability AI</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Instruction based
|
||||
<li>Cannot be used commercially
|
||||
</ul>"
|
||||
},
|
||||
{
|
||||
"order": "i",
|
||||
"md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
|
||||
"name": "MPT Instruct",
|
||||
"filename": "ggml-mpt-7b-instruct.bin",
|
||||
"filesize": "4854401028",
|
||||
"requires": "2.4.1",
|
||||
"ramrequired": "8",
|
||||
"parameters": "7 billion",
|
||||
"quant": "q4_0",
|
||||
"type": "MPT",
|
||||
"description": "
|
||||
<strong>Mosaic's instruction model</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Instruction based
|
||||
<li>Trained by Mosaic ML
|
||||
<li>Licensed for commercial use
|
||||
</ul>"
|
||||
},
|
||||
{
|
||||
"order": "j",
|
||||
"md5sum": "120c32a51d020066288df045ef5d52b9",
|
||||
"name": "MPT Base",
|
||||
"filename": "ggml-mpt-7b-base.bin",
|
||||
"filesize": "4854401028",
|
||||
"requires": "2.4.1",
|
||||
"description": "MPT 7B pre-trained by Mosaic ML. Trained for text completion with no assistant finetuning.<br>- Licensed for commercial use."
|
||||
"ramrequired": "8",
|
||||
"parameters": "7 billion",
|
||||
"quant": "q4_0",
|
||||
"type": "MPT",
|
||||
"description": "
|
||||
<strong>Trained for text completion with no assistant finetuning</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Completion based
|
||||
<li>Trained by Mosaic ML
|
||||
<li>Licensed for commercial use
|
||||
</ul>"
|
||||
},
|
||||
{
|
||||
"order": "k",
|
||||
"md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
|
||||
"name": "Nous Vicuna",
|
||||
"filename": "ggml-nous-gpt4-vicuna-13b.bin",
|
||||
"filesize": "8136777088",
|
||||
"description": "LLaMa 13B fine-tuned on ~180,000 instructions by Nous Research.<br>- Cannot be used commercially."
|
||||
},
|
||||
{
|
||||
"md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
|
||||
"filename": "ggml-mpt-7b-instruct.bin",
|
||||
"filesize": "4854401028",
|
||||
"requires": "2.4.1",
|
||||
"description": "MPT 7B instruction finetuned by Mosaic ML.<br>- Licensed for commercial use."
|
||||
"ramrequired": "16",
|
||||
"parameters": "13 billion",
|
||||
"quant": "q4_0",
|
||||
"type": "LLaMA",
|
||||
"description": "
|
||||
<strong>Trained on ~180,000 instructions</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Instruction based
|
||||
<li>Trained by Nous Research
|
||||
<li>Cannot be used commercially
|
||||
</ul>"
|
||||
},
|
||||
{
|
||||
"order": "l",
|
||||
"md5sum": "489d21fd48840dcb31e5f92f453f3a20",
|
||||
"name": "Wizard Uncensored",
|
||||
"filename": "wizardLM-13B-Uncensored.ggmlv3.q4_0.bin",
|
||||
"filesize": "8136777088",
|
||||
"requires": "2.4.7",
|
||||
"description": "LLaMa 13B finetuned on the uncensored assistant and instruction data.<br>- Cannot be used commercially.",
|
||||
"ramrequired": "16",
|
||||
"parameters": "13 billion",
|
||||
"quant": "q4_0",
|
||||
"type": "LLaMA",
|
||||
"description": "
|
||||
<strong>Trained on uncensored assistant data and instruction data</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Instruction based
|
||||
<li>Cannot be used commercially
|
||||
</ul>",
|
||||
"url": "https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-GGML/resolve/main/wizardLM-13B-Uncensored.ggmlv3.q4_0.bin"
|
||||
},
|
||||
{
|
||||
"order": "m",
|
||||
"md5sum": "615890cb571fcaa0f70b2f8d15ef809e",
|
||||
"disableGUI": "true",
|
||||
"name": "Replit",
|
||||
"filename": "ggml-replit-code-v1-3b.bin",
|
||||
"filesize": "5202046853",
|
||||
"requires": "2.4.7",
|
||||
"description": "Replit 3B code model trained on subset of the Stack.<br>- Licensed for commercial use.",
|
||||
"ramrequired": "4",
|
||||
"parameters": "3 billion",
|
||||
"quant": "f16",
|
||||
"type": "Replit",
|
||||
"description": "
|
||||
<strong>Trained on subset of the Stack</strong>
|
||||
<br>
|
||||
<ul>
|
||||
<li>Code completion based
|
||||
<li>Licensed for commercial use
|
||||
</ul>",
|
||||
"url": "https://huggingface.co/nomic-ai/ggml-replit-code-v1-3b/resolve/main/ggml-replit-code-v1-3b.bin"
|
||||
}
|
||||
]
|
||||
|
@ -0,0 +1,484 @@
|
||||
#include "modellist.h"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
bool InstalledModels::filterAcceptsRow(int sourceRow,
|
||||
const QModelIndex &sourceParent) const
|
||||
{
|
||||
QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);
|
||||
bool isInstalled = sourceModel()->data(index, ModelList::InstalledRole).toBool();
|
||||
return isInstalled;
|
||||
}
|
||||
|
||||
// Proxy over ModelList showing models available for download. Starts in the
// collapsed state, where only the first m_limit rows are exposed.
DownloadableModels::DownloadableModels(QObject *parent)
    : QSortFilterProxyModel(parent)
    , m_expanded(false) // collapsed by default
    , m_limit(5)        // rows visible while collapsed
{
    // Any structural change in the proxy can alter the visible row count,
    // so all of them are forwarded to the countChanged notification.
    connect(this, &DownloadableModels::rowsInserted, this, &DownloadableModels::countChanged);
    connect(this, &DownloadableModels::rowsRemoved, this, &DownloadableModels::countChanged);
    connect(this, &DownloadableModels::modelReset, this, &DownloadableModels::countChanged);
    connect(this, &DownloadableModels::layoutChanged, this, &DownloadableModels::countChanged);
}
|
||||
|
||||
bool DownloadableModels::filterAcceptsRow(int sourceRow,
|
||||
const QModelIndex &sourceParent) const
|
||||
{
|
||||
bool withinLimit = sourceRow < (m_expanded ? sourceModel()->rowCount() : m_limit);
|
||||
QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);
|
||||
bool isDownloadable = !sourceModel()->data(index, ModelList::DescriptionRole).toString().isEmpty();
|
||||
bool showInGUI = !sourceModel()->data(index, ModelList::DisableGUIRole).toBool();
|
||||
return withinLimit && isDownloadable && showInGUI;
|
||||
}
|
||||
|
||||
// Number of rows currently visible through the proxy (respects the
// m_limit/m_expanded collapse behavior of filterAcceptsRow).
int DownloadableModels::count() const
{
    return rowCount();
}
|
||||
|
||||
// Whether the proxy currently shows all source rows (true) or only the
// first m_limit rows (false).
bool DownloadableModels::isExpanded() const
{
    return m_expanded;
}
|
||||
|
||||
void DownloadableModels::setExpanded(bool expanded)
|
||||
{
|
||||
if (m_expanded != expanded) {
|
||||
m_expanded = expanded;
|
||||
invalidateFilter();
|
||||
emit expandedChanged(m_expanded);
|
||||
}
|
||||
}
|
||||
|
||||
// Concrete subclass used solely so Q_GLOBAL_STATIC can construct the
// otherwise plain ModelList.
class MyModelList: public ModelList { };
Q_GLOBAL_STATIC(MyModelList, modelListInstance)
// Thread-safe, lazily-constructed application-wide singleton.
ModelList *ModelList::globalInstance()
{
    return modelListInstance();
}
|
||||
|
||||
// Builds the list model, wires the installed/downloadable proxy models to it,
// restores the configured model directory, and starts watching both the
// executable directory and the model directory for file changes.
ModelList::ModelList()
    : QAbstractListModel(nullptr)
    , m_installedModels(new InstalledModels(this))
    , m_downloadableModels(new DownloadableModels(this))
{
    m_installedModels->setSourceModel(this);
    m_downloadableModels->setSourceModel(this);
    m_watcher = new QFileSystemWatcher(this);
    QSettings settings;
    settings.sync();
    // Fall back to the platform default location when no path was configured.
    m_localModelsPath = settings.value("modelPath", defaultLocalModelsPath()).toString();
    const QString exePath = QCoreApplication::applicationDirPath() + QDir::separator();
    // Models may live next to the executable or in the download directory.
    m_watcher->addPath(exePath);
    m_watcher->addPath(m_localModelsPath);
    connect(m_watcher, &QFileSystemWatcher::directoryChanged, this, &ModelList::updateModelsFromDirectory);
    // Initial scan so the model is populated before any directory change fires.
    updateModelsFromDirectory();
}
|
||||
|
||||
// Partial downloads live in the model directory, prefixed with "incomplete-".
QString ModelList::incompleteDownloadPath(const QString &modelFile)
{
    return localModelsPath() + QLatin1String("incomplete-") + modelFile;
}
|
||||
|
||||
// Returns a by-value snapshot of every installed model, taken under the
// list mutex so it is safe to consume after the lock is released.
const QList<ModelInfo> ModelList::exportModelList() const
{
    QMutexLocker locker(&m_mutex);
    QList<ModelInfo> result;
    for (const ModelInfo *candidate : m_models) {
        if (!candidate->installed)
            continue;
        result.append(*candidate);
    }
    return result;
}
|
||||
|
||||
// Builds the list of installed model names shown in the "default model"
// chooser. The user's configured default (matched by name or filename) is
// moved to the front; the "Application default" pseudo-entry goes first when
// no user default was found, otherwise last.
const QList<QString> ModelList::userDefaultModelList() const
{
    QMutexLocker locker(&m_mutex);

    QSettings settings;
    settings.sync();

    const QString userDefaultModelName = settings.value("userDefaultModel").toString();
    QList<QString> models;
    bool foundUserDefault = false;
    for (ModelInfo *info : m_models) {
        if (info->installed && (info->name == userDefaultModelName || info->filename == userDefaultModelName)) {
            foundUserDefault = true;
            // The user's choice is listed first.
            models.prepend(info->name.isEmpty() ? info->filename : info->name);
        } else if (info->installed) {
            models.append(info->name.isEmpty() ? info->filename : info->name);
        }
    }

    const QString defaultFileName = "Application default";
    if (foundUserDefault)
        models.append(defaultFileName);
    else
        models.prepend(defaultFileName);
    return models;
}
|
||||
|
||||
// Resolves which model should be loaded by default. Prefers the user's
// configured default (matched by name or filename); otherwise the first
// installed model flagged isDefault; failing both, the last installed model
// scanned (defaultModel keeps the most recent candidate when no break fires).
ModelInfo ModelList::defaultModelInfo() const
{
    QMutexLocker locker(&m_mutex);

    QSettings settings;
    settings.sync();

    // The user default model can be set by the user in the settings dialog. The "default" user
    // default model is "Application default" which signals we should use the default model that was
    // specified by the models.json file.
    const QString defaultModelName = settings.value("userDefaultModel").toString();
    const bool hasDefaultName = !defaultModelName.isEmpty() && defaultModelName != "Application default";
    ModelInfo *defaultModel = nullptr;
    for (ModelInfo *info : m_models) {
        if (!info->installed)
            continue;
        defaultModel = info;
        if (!hasDefaultName && defaultModel->isDefault) break;
        if (hasDefaultName && (defaultModel->name == defaultModelName || defaultModel->filename == defaultModelName)) break;
    }
    if (defaultModel)
        return *defaultModel;
    // No installed model at all.
    return ModelInfo();
}
|
||||
|
||||
// Whether a model with this filename is already registered (installed or not).
bool ModelList::contains(const QString &filename) const
{
    QMutexLocker locker(&m_mutex);
    return m_modelMap.contains(filename);
}
|
||||
|
||||
bool ModelList::lessThan(const ModelInfo* a, const ModelInfo* b)
|
||||
{
|
||||
// Rule 1: Non-empty 'order' before empty
|
||||
if (a->order.isEmpty() != b->order.isEmpty()) {
|
||||
return !a->order.isEmpty();
|
||||
}
|
||||
|
||||
// Rule 2: Both 'order' are non-empty, sort alphanumerically
|
||||
if (!a->order.isEmpty() && !b->order.isEmpty()) {
|
||||
return a->order < b->order;
|
||||
}
|
||||
|
||||
// Rule 3: Both 'order' are empty, sort by filename
|
||||
return a->filename < b->filename;
|
||||
}
|
||||
|
||||
// Registers a new (empty) ModelInfo under the given filename, re-sorts the
// list, and notifies views. The filename must not already be registered;
// duplicates are rejected with a warning in release builds.
void ModelList::addModel(const QString &filename)
{
    QMutexLocker locker(&m_mutex);
    Q_ASSERT(!m_modelMap.contains(filename));
    if (m_modelMap.contains(filename)) {
        qWarning() << "ERROR: model list already contains" << filename;
        return;
    }

    beginInsertRows(QModelIndex(), m_models.size(), m_models.size());
    ModelInfo *info = new ModelInfo;
    info->filename = filename;
    m_models.append(info);
    m_modelMap.insert(filename, info);
    endInsertRows();

    // Sorting can move any row, so signal a change over the whole range.
    // NOTE(review): dataChanged is emitted while m_mutex is still held; a
    // directly-connected slot that calls back into data()/rowCount() would
    // deadlock on the non-recursive mutex — confirm all consumers are queued.
    std::stable_sort(m_models.begin(), m_models.end(), ModelList::lessThan);
    emit dataChanged(index(0, 0), index(m_models.size() - 1, 0));

    emit userDefaultModelListChanged();
}
|
||||
|
||||
// Flat list model: the parent index is ignored.
int ModelList::rowCount(const QModelIndex &parent) const
{
    Q_UNUSED(parent)
    QMutexLocker locker(&m_mutex);
    return m_models.size();
}
|
||||
|
||||
// Maps a role to the corresponding ModelInfo field. Shared by both data()
// overloads; callers must hold m_mutex and pass a non-null info.
QVariant ModelList::dataInternal(const ModelInfo *info, int role) const
{
    switch (role) {
    case NameRole:
        return info->name;
    case FilenameRole:
        return info->filename;
    case DirpathRole:
        return info->dirpath;
    case FilesizeRole:
        return info->filesize;
    case Md5sumRole:
        return info->md5sum;
    case CalcHashRole:
        return info->calcHash;
    case InstalledRole:
        return info->installed;
    case DefaultRole:
        return info->isDefault;
    case ChatGPTRole:
        return info->isChatGPT;
    case DisableGUIRole:
        return info->disableGUI;
    case DescriptionRole:
        return info->description;
    case RequiresVersionRole:
        return info->requiresVersion;
    case DeprecatedVersionRole:
        return info->deprecatedVersion;
    case UrlRole:
        return info->url;
    case BytesReceivedRole:
        return info->bytesReceived;
    case BytesTotalRole:
        return info->bytesTotal;
    case TimestampRole:
        return info->timestamp;
    case SpeedRole:
        return info->speed;
    case DownloadingRole:
        return info->isDownloading;
    case IncompleteRole:
        return info->isIncomplete;
    case DownloadErrorRole:
        return info->downloadError;
    case OrderRole:
        return info->order;
    case RamrequiredRole:
        return info->ramrequired;
    case ParametersRole:
        return info->parameters;
    case QuantRole:
        return info->quant;
    case TypeRole:
        return info->type;
    }

    // Unknown role.
    return QVariant();
}
|
||||
|
||||
// Role lookup by filename. Returns an invalid QVariant when no model with
// this filename is registered.
QVariant ModelList::data(const QString &filename, int role) const
{
    QMutexLocker locker(&m_mutex);
    // QHash::value() returns a default-constructed (null) pointer for an
    // unknown key; guard it here so dataInternal never dereferences nullptr.
    ModelInfo *info = m_modelMap.value(filename);
    if (!info)
        return QVariant();
    return dataInternal(info, role);
}
|
||||
|
||||
// Standard QAbstractListModel role lookup by index.
QVariant ModelList::data(const QModelIndex &index, int role) const
{
    QMutexLocker locker(&m_mutex);
    // Reject invalid or out-of-range indices.
    if (!index.isValid() || index.row() < 0 || index.row() >= m_models.size())
        return QVariant();
    const ModelInfo *info = m_models.at(index.row());
    return dataInternal(info, role);
}
|
||||
|
||||
// Writes one role's value into the ModelInfo registered under filename, then
// re-checks the installed/incomplete flags against the filesystem, re-sorts
// the list, and notifies views. Unknown filenames are rejected with a warning.
void ModelList::updateData(const QString &filename, int role, const QVariant &value)
{
    QMutexLocker locker(&m_mutex);
    if (!m_modelMap.contains(filename)) {
        qWarning() << "ERROR: cannot update as model map does not contain" << filename;
        return;
    }

    ModelInfo *info = m_modelMap.value(filename);
    const int index = m_models.indexOf(info);
    if (index == -1) {
        // Map and list out of sync — should not happen.
        qWarning() << "ERROR: cannot update as model list does not contain" << filename;
        return;
    }

    // Write the value into the field corresponding to the role.
    switch (role) {
    case NameRole:
        info->name = value.toString(); break;
    case FilenameRole:
        info->filename = value.toString(); break;
    case DirpathRole:
        info->dirpath = value.toString(); break;
    case FilesizeRole:
        info->filesize = value.toString(); break;
    case Md5sumRole:
        info->md5sum = value.toByteArray(); break;
    case CalcHashRole:
        info->calcHash = value.toBool(); break;
    case InstalledRole:
        info->installed = value.toBool(); break;
    case DefaultRole:
        info->isDefault = value.toBool(); break;
    case ChatGPTRole:
        info->isChatGPT = value.toBool(); break;
    case DisableGUIRole:
        info->disableGUI = value.toBool(); break;
    case DescriptionRole:
        info->description = value.toString(); break;
    case RequiresVersionRole:
        info->requiresVersion = value.toString(); break;
    case DeprecatedVersionRole:
        info->deprecatedVersion = value.toString(); break;
    case UrlRole:
        info->url = value.toString(); break;
    case BytesReceivedRole:
        info->bytesReceived = value.toLongLong(); break;
    case BytesTotalRole:
        info->bytesTotal = value.toLongLong(); break;
    case TimestampRole:
        info->timestamp = value.toLongLong(); break;
    case SpeedRole:
        info->speed = value.toString(); break;
    case DownloadingRole:
        info->isDownloading = value.toBool(); break;
    case IncompleteRole:
        info->isIncomplete = value.toBool(); break;
    case DownloadErrorRole:
        info->downloadError = value.toString(); break;
    case OrderRole:
        info->order = value.toString(); break;
    case RamrequiredRole:
        info->ramrequired = value.toInt(); break;
    case ParametersRole:
        info->parameters = value.toString(); break;
    case QuantRole:
        info->quant = value.toString(); break;
    case TypeRole:
        info->type = value.toString(); break;
    }

    // Extra guarantee that these always remains in sync with filesystem
    QFileInfo fileInfo(info->dirpath + info->filename);
    if (info->installed != fileInfo.exists()) {
        info->installed = fileInfo.exists();
        emit dataChanged(createIndex(index, 0), createIndex(index, 0), {InstalledRole});
    }
    QFileInfo incompleteInfo(incompleteDownloadPath(info->filename));
    if (info->isIncomplete != incompleteInfo.exists()) {
        info->isIncomplete = incompleteInfo.exists();
        emit dataChanged(createIndex(index, 0), createIndex(index, 0), {IncompleteRole});
    }

    // Re-sort (an OrderRole/FilenameRole change can move rows) and signal a
    // change over the whole range since any row may have moved.
    // NOTE(review): dataChanged fires while m_mutex is held; a directly
    // connected slot re-entering data()/rowCount() would deadlock — confirm
    // consumers are queued or on another thread.
    std::stable_sort(m_models.begin(), m_models.end(), ModelList::lessThan);
    emit dataChanged(createIndex(0, 0), createIndex(m_models.size() - 1, 0));
    emit userDefaultModelListChanged();
}
|
||||
|
||||
// By-value copy of the ModelInfo registered under filename, or a
// default-constructed ModelInfo when the filename is unknown.
ModelInfo ModelList::modelInfo(const QString &filename) const
{
    QMutexLocker locker(&m_mutex);
    const auto it = m_modelMap.constFind(filename);
    if (it == m_modelMap.constEnd())
        return ModelInfo();
    return **it; // *it is a ModelInfo*; copy the pointee
}
|
||||
|
||||
// Computes the fallback model directory (the app-local data location),
// creating it if necessary and probing that it is writable. Always returns
// the canonical path, even when creation or the write test fails (the
// warnings are the only signal).
QString ModelList::defaultLocalModelsPath() const
{
    QString localPath = QStandardPaths::writableLocation(QStandardPaths::AppLocalDataLocation)
        + "/";
    QString testWritePath = localPath + QString("test_write.txt");
    // NOTE(review): canonicalFilePath() is resolved BEFORE the directory is
    // created; for a not-yet-existing dir it returns an empty string, making
    // canonicalLocalPath just "/" — confirm whether this ordering is intended.
    QString canonicalLocalPath = QFileInfo(localPath).canonicalFilePath() + "/";
    QDir localDir(localPath);
    if (!localDir.exists()) {
        if (!localDir.mkpath(localPath)) {
            qWarning() << "ERROR: Local download directory can't be created:" << canonicalLocalPath;
            return canonicalLocalPath;
        }
    }

    // A leftover probe file from a previous run already proves writability.
    if (QFileInfo::exists(testWritePath))
        return canonicalLocalPath;

    // Probe writability by creating a scratch file.
    QFile testWriteFile(testWritePath);
    if (testWriteFile.open(QIODeviceBase::ReadWrite)) {
        testWriteFile.close();
        return canonicalLocalPath;
    }

    qWarning() << "ERROR: Local download path appears not writeable:" << canonicalLocalPath;
    return canonicalLocalPath;
}
|
||||
|
||||
// Directory where downloaded models are stored (trailing slash included).
QString ModelList::localModelsPath() const
{
    return m_localModelsPath;
}
|
||||
|
||||
void ModelList::setLocalModelsPath(const QString &modelPath)
|
||||
{
|
||||
QString filePath = (modelPath.startsWith("file://") ?
|
||||
QUrl(modelPath).toLocalFile() : modelPath);
|
||||
QString canonical = QFileInfo(filePath).canonicalFilePath() + "/";
|
||||
if (m_localModelsPath != canonical) {
|
||||
m_localModelsPath = canonical;
|
||||
emit localModelsPathChanged();
|
||||
}
|
||||
}
|
||||
|
||||
// Finds which directory holds the file for the given model name, checking
// the application directory first and the download directory second.
// Returns an empty QString when the model is found in neither.
QString ModelList::modelDirPath(const QString &modelName, bool isChatGPT)
{
    // Candidate filenames (relative to a directory) for this model.
    QVector<QString> candidates;
    if (isChatGPT) {
        // ChatGPT "models" are stored as small .txt marker files.
        candidates << "/" + modelName + ".txt";
    } else {
        candidates << "/ggml-" + modelName + ".bin";
        candidates << "/" + modelName + ".bin";
    }

    for (const QString &candidate : candidates) {
        // The application directory takes precedence over the download dir.
        const QFileInfo appFile(QCoreApplication::applicationDirPath() + candidate);
        if (appFile.exists())
            return QCoreApplication::applicationDirPath();

        const QFileInfo downloadedFile(localModelsPath() + candidate);
        if (downloadedFile.exists())
            return localModelsPath();
    }
    return QString();
}
|
||||
|
||||
void ModelList::updateModelsFromDirectory()
|
||||
{
|
||||
const QString exePath = QCoreApplication::applicationDirPath() + QDir::separator();
|
||||
const QString localPath = localModelsPath();
|
||||
{
|
||||
QDir dir(exePath);
|
||||
QStringList allFiles = dir.entryList(QDir::Files);
|
||||
|
||||
// All files that end with .bin and have 'ggml' somewhere in the name
|
||||
QStringList fileNames;
|
||||
for(const QString& filename : allFiles) {
|
||||
if (filename.endsWith(".bin") && filename.contains("ggml")) {
|
||||
fileNames.append(filename);
|
||||
}
|
||||
}
|
||||
|
||||
for (const QString& f : fileNames) {
|
||||
QString filePath = exePath + f;
|
||||
QFileInfo info(filePath);
|
||||
if (!info.exists())
|
||||
continue;
|
||||
if (!contains(f))
|
||||
addModel(f);
|
||||
updateData(f, DirpathRole, exePath);
|
||||
updateData(f, FilesizeRole, toFileSize(info.size()));
|
||||
}
|
||||
}
|
||||
|
||||
if (localPath != exePath) {
|
||||
QDir dir(localPath);
|
||||
QStringList allFiles = dir.entryList(QDir::Files);
|
||||
QStringList fileNames;
|
||||
for(const QString& filename : allFiles) {
|
||||
if ((filename.endsWith(".bin") && filename.contains("ggml"))
|
||||
|| (filename.endsWith(".txt") && filename.startsWith("chatgpt-"))) {
|
||||
fileNames.append(filename);
|
||||
}
|
||||
}
|
||||
|
||||
for (const QString& f : fileNames) {
|
||||
QString filePath = localPath + f;
|
||||
QFileInfo info(filePath);
|
||||
if (!info.exists())
|
||||
continue;
|
||||
if (!contains(f))
|
||||
addModel(f);
|
||||
updateData(f, ChatGPTRole, f.startsWith("chatgpt-"));
|
||||
updateData(f, DirpathRole, localPath);
|
||||
updateData(f, FilesizeRole, toFileSize(info.size()));
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,246 @@
|
||||
#ifndef MODELLIST_H
|
||||
#define MODELLIST_H
|
||||
|
||||
#include <QAbstractListModel>
|
||||
#include <QtQml>
|
||||
|
||||
// Value type describing one entry of the model list: identity, on-disk
// location, download progress, and the catalog metadata loaded from
// models.json. Exposed to QML as a Q_GADGET (copied by value, no signals).
struct ModelInfo {
    Q_GADGET
    Q_PROPERTY(QString name MEMBER name)
    Q_PROPERTY(QString filename MEMBER filename)
    Q_PROPERTY(QString dirpath MEMBER dirpath)
    Q_PROPERTY(QString filesize MEMBER filesize)
    Q_PROPERTY(QByteArray md5sum MEMBER md5sum)
    Q_PROPERTY(bool calcHash MEMBER calcHash)
    Q_PROPERTY(bool installed MEMBER installed)
    Q_PROPERTY(bool isDefault MEMBER isDefault)
    Q_PROPERTY(bool disableGUI MEMBER disableGUI)
    Q_PROPERTY(bool isChatGPT MEMBER isChatGPT)
    Q_PROPERTY(QString description MEMBER description)
    Q_PROPERTY(QString requiresVersion MEMBER requiresVersion)
    Q_PROPERTY(QString deprecatedVersion MEMBER deprecatedVersion)
    Q_PROPERTY(QString url MEMBER url)
    Q_PROPERTY(qint64 bytesReceived MEMBER bytesReceived)
    Q_PROPERTY(qint64 bytesTotal MEMBER bytesTotal)
    Q_PROPERTY(qint64 timestamp MEMBER timestamp)
    Q_PROPERTY(QString speed MEMBER speed)
    Q_PROPERTY(bool isDownloading MEMBER isDownloading)
    Q_PROPERTY(bool isIncomplete MEMBER isIncomplete)
    Q_PROPERTY(QString downloadError MEMBER downloadError)
    Q_PROPERTY(QString order MEMBER order)
    Q_PROPERTY(int ramrequired MEMBER ramrequired)
    Q_PROPERTY(QString parameters MEMBER parameters)
    Q_PROPERTY(QString quant MEMBER quant)
    Q_PROPERTY(QString type MEMBER type)

public:
    QString name;               // display name; may be empty for local-only files
    QString filename;           // unique key used throughout ModelList
    QString dirpath;            // directory containing the file (trailing slash)
    QString filesize;           // human-readable size string
    QByteArray md5sum;          // expected hash from models.json
    bool calcHash = false;      // hash verification requested/in progress
    bool installed = false;     // file exists on disk
    bool isDefault = false;     // models.json default flag
    bool isChatGPT = false;     // remote ChatGPT entry (chatgpt-*.txt marker)
    bool disableGUI = false;    // hidden from the download GUI
    QString description;        // HTML description shown in the GUI
    QString requiresVersion;    // minimum app version
    QString deprecatedVersion;  // app version at which this model is deprecated
    QString url;                // explicit download URL (optional)
    qint64 bytesReceived = 0;   // download progress
    qint64 bytesTotal = 0;
    qint64 timestamp = 0;       // used for download speed calculation
    QString speed;              // human-readable download speed
    bool isDownloading = false;
    bool isIncomplete = false;  // an incomplete-* file exists for this model
    QString downloadError;
    QString order;              // models.json sort key; empty sorts last
    int ramrequired = 0;        // GB of RAM recommended
    QString parameters;         // e.g. "7 billion"
    QString quant;              // quantization, e.g. "q4_0"
    QString type;               // architecture, e.g. "LLaMA", "MPT"
    // Identity is the filename: it is the unique key in ModelList.
    bool operator==(const ModelInfo &other) const {
        return filename == other.filename;
    }
};
Q_DECLARE_METATYPE(ModelInfo)
|
||||
|
||||
// Proxy over ModelList exposing only models whose file exists on disk
// (InstalledRole == true).
class InstalledModels : public QSortFilterProxyModel
{
    Q_OBJECT
public:
    explicit InstalledModels(QObject *parent) : QSortFilterProxyModel(parent) {}

protected:
    bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;
};
|
||||
|
||||
// Proxy over ModelList exposing downloadable models (those with a
// description and not GUI-disabled). Supports a collapsed mode that caps
// the visible rows at m_limit until 'expanded' is set.
class DownloadableModels : public QSortFilterProxyModel
{
    Q_OBJECT
    Q_PROPERTY(int count READ count NOTIFY countChanged)
    Q_PROPERTY(bool expanded READ isExpanded WRITE setExpanded NOTIFY expandedChanged)
public:
    explicit DownloadableModels(QObject *parent);
    int count() const;

    bool isExpanded() const;
    void setExpanded(bool expanded);

Q_SIGNALS:
    void countChanged();

protected:
    bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;

Q_SIGNALS:
    void expandedChanged(bool expanded);

private:
    bool m_expanded; // show all rows when true, only m_limit rows when false
    int m_limit;     // row cap while collapsed
};
|
||||
|
||||
// Singleton list model of every model known to the application — installed,
// downloadable, and ChatGPT entries — exposed to QML via role names.
// Two proxy models (InstalledModels / DownloadableModels) provide filtered
// views over this single source of truth.
class ModelList : public QAbstractListModel
{
    Q_OBJECT
    Q_PROPERTY(int count READ count NOTIFY countChanged)
    Q_PROPERTY(QString localModelsPath READ localModelsPath WRITE setLocalModelsPath NOTIFY localModelsPathChanged)
    Q_PROPERTY(InstalledModels* installedModels READ installedModels NOTIFY installedModelsChanged)
    Q_PROPERTY(DownloadableModels* downloadableModels READ downloadableModels NOTIFY downloadableModelsChanged)
    Q_PROPERTY(QList<QString> userDefaultModelList READ userDefaultModelList NOTIFY userDefaultModelListChanged)

public:
    // Process-wide singleton accessor.
    static ModelList *globalInstance();

    // One role per ModelInfo field; keep this enum, roleNames(), and the
    // ModelInfo struct in sync when adding a field.
    enum Roles {
        NameRole = Qt::UserRole + 1,
        FilenameRole,
        DirpathRole,
        FilesizeRole,
        Md5sumRole,
        CalcHashRole,
        InstalledRole,
        DefaultRole,
        ChatGPTRole,
        DisableGUIRole,
        DescriptionRole,
        RequiresVersionRole,
        DeprecatedVersionRole,
        UrlRole,
        BytesReceivedRole,
        BytesTotalRole,
        TimestampRole,
        SpeedRole,
        DownloadingRole,
        IncompleteRole,
        DownloadErrorRole,
        OrderRole,
        RamrequiredRole,
        ParametersRole,
        QuantRole,
        TypeRole
    };

    // Maps roles to the property names QML delegates bind against.
    // These strings are part of the QML-facing interface — do not rename.
    QHash<int, QByteArray> roleNames() const override
    {
        QHash<int, QByteArray> roles;
        roles[NameRole] = "name";
        roles[FilenameRole] = "filename";
        roles[DirpathRole] = "dirpath";
        roles[FilesizeRole] = "filesize";
        roles[Md5sumRole] = "md5sum";
        roles[CalcHashRole] = "calcHash";
        roles[InstalledRole] = "installed";
        roles[DefaultRole] = "isDefault";
        roles[ChatGPTRole] = "isChatGPT";
        roles[DisableGUIRole] = "disableGUI";
        roles[DescriptionRole] = "description";
        roles[RequiresVersionRole] = "requiresVersion";
        roles[DeprecatedVersionRole] = "deprecatedVersion";
        roles[UrlRole] = "url";
        roles[BytesReceivedRole] = "bytesReceived";
        roles[BytesTotalRole] = "bytesTotal";
        roles[TimestampRole] = "timestamp";
        roles[SpeedRole] = "speed";
        roles[DownloadingRole] = "isDownloading";
        roles[IncompleteRole] = "isIncomplete";
        roles[DownloadErrorRole] = "downloadError";
        roles[OrderRole] = "order";
        roles[RamrequiredRole] = "ramrequired";
        roles[ParametersRole] = "parameters";
        roles[QuantRole] = "quant";
        roles[TypeRole] = "type";
        return roles;
    }

    int rowCount(const QModelIndex &parent = QModelIndex()) const override;
    QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override;
    // Convenience overload: look a model up by filename instead of index.
    QVariant data(const QString &filename, int role) const;
    // Mutates one field of the model identified by filename.
    void updateData(const QString &filename, int role, const QVariant &value);

    int count() const { return m_models.size(); }

    bool contains(const QString &filename) const;
    Q_INVOKABLE ModelInfo modelInfo(const QString &filename) const;
    // The model to select when none is explicitly chosen.
    ModelInfo defaultModelInfo() const;

    void addModel(const QString &filename);

    // Paths: defaultLocalModelsPath() is the built-in location;
    // localModelsPath() is the user-configurable one backing the Q_PROPERTY.
    Q_INVOKABLE QString defaultLocalModelsPath() const;
    Q_INVOKABLE QString localModelsPath() const;
    Q_INVOKABLE void setLocalModelsPath(const QString &modelPath);

    // Snapshot copies — safe to use off the GUI thread.
    const QList<ModelInfo> exportModelList() const;
    const QList<QString> userDefaultModelList() const;

    InstalledModels *installedModels() const { return m_installedModels; }
    DownloadableModels *downloadableModels() const { return m_downloadableModels; }

    // Human-readable size with binary-power thresholds (KB/MB/GB labels,
    // 1024-based divisors), 3 significant digits above the byte range.
    static inline QString toFileSize(quint64 sz) {
        if (sz < 1024) {
            return QString("%1 bytes").arg(sz);
        } else if (sz < 1024 * 1024) {
            return QString("%1 KB").arg(qreal(sz) / 1024, 0, 'g', 3);
        } else if (sz < 1024 * 1024 * 1024) {
            return QString("%1 MB").arg(qreal(sz) / (1024 * 1024), 0, 'g', 3);
        } else {
            return QString("%1 GB").arg(qreal(sz) / (1024 * 1024 * 1024), 0, 'g', 3);
        }
    }

    // Path of the partial file used while a download is in flight.
    QString incompleteDownloadPath(const QString &modelFile);

Q_SIGNALS:
    void countChanged();
    void localModelsPathChanged();
    void installedModelsChanged();
    void downloadableModelsChanged();
    void userDefaultModelListChanged();

private Q_SLOTS:
    // Rescans the watched directory and reconciles m_models with disk.
    void updateModelsFromDirectory();

private:
    QString modelDirPath(const QString &modelName, bool isChatGPT);
    int indexForModel(ModelInfo *model);
    // Lock-free variant of data(); caller must hold m_mutex.
    QVariant dataInternal(const ModelInfo *info, int role) const;
    // Sort predicate for the model list (ordering criteria in the .cpp).
    static bool lessThan(const ModelInfo* a, const ModelInfo* b);

private:
    // Recursive because public accessors call each other while locked.
    mutable QRecursiveMutex m_mutex;
    InstalledModels *m_installedModels;
    DownloadableModels *m_downloadableModels;
    QList<ModelInfo*> m_models;          // owning list, display order
    QHash<QString, ModelInfo*> m_modelMap; // filename -> entry in m_models
    QString m_localModelsPath;
    QFileSystemWatcher *m_watcher;       // triggers updateModelsFromDirectory()

private:
    // Singleton: construction restricted to the MyModelList holder.
    explicit ModelList();
    ~ModelList() {}
    friend class MyModelList;
};
|
||||
|
||||
#endif // MODELLIST_H
|
@ -1,49 +1,61 @@
|
||||
#ifndef SYSINFO_H
#define SYSINFO_H

// Qt headers (kept for the Qt-based helpers in this header's history;
// the std-based RAM helpers below do not need them).
#include <QtCore/QCoreApplication>
#include <QDebug>
#include <QFile>
#include <QTextStream>
#include <QRegularExpression>

// Standard library, used by the RAM helpers.
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>

// Platform-specific memory-query APIs. The previous version had an
// unmatched `#if defined(Q_OS_MAC)` and a duplicate Windows include
// block; this chain is balanced and covers each platform exactly once.
#if defined(__linux__)
#include <unistd.h>
#elif defined(__APPLE__)
#include <sys/types.h>
#include <sys/sysctl.h>
#elif defined(_WIN32)
#include <windows.h>
#endif
||||
|
||||
// Returns the total physical RAM of the machine in bytes, or 0 if it
// cannot be determined. (The previous text interleaved this function
// with the old Qt-based QString getSystemTotalRAM(), leaving two fused
// signatures and mismatched braces; this is the coherent std-only version.)
static long long getSystemTotalRAMInBytes()
{
    long long totalRAM = 0;

#if defined(__linux__)
    // Parse the "MemTotal:   NNNN kB" line from /proc/meminfo.
    std::ifstream file("/proc/meminfo");
    std::string line;
    while (std::getline(file, line)) {
        if (line.find("MemTotal") != std::string::npos) {
            std::string memTotalStr = line.substr(line.find(":") + 1);
            memTotalStr.erase(0, memTotalStr.find_first_not_of(" "));
            memTotalStr = memTotalStr.substr(0, memTotalStr.find(" "));
            totalRAM = std::stoll(memTotalStr) * 1024; // Convert from KB to bytes
            break;
        }
    }
    file.close();
#elif defined(__APPLE__)
    // HW_MEMSIZE yields the total memory size as a 64-bit value.
    int mib[2] = {CTL_HW, HW_MEMSIZE};
    size_t length = sizeof(totalRAM);
    sysctl(mib, 2, &totalRAM, &length, NULL, 0);
#elif defined(_WIN32)
    MEMORYSTATUSEX memoryStatus;
    memoryStatus.dwLength = sizeof(memoryStatus); // required before the call
    GlobalMemoryStatusEx(&memoryStatus);
    totalRAM = memoryStatus.ullTotalPhys;
#endif

    return totalRAM;
}
|
||||
|
||||
static double getSystemTotalRAMInGB()
|
||||
{
|
||||
return static_cast<double>(getSystemTotalRAMInBytes()) / (1024 * 1024 * 1024);
|
||||
}
|
||||
|
||||
// Total physical RAM formatted for display, e.g. "15.92 GB"
// (always two decimal places).
static std::string getSystemTotalRAMInGBString()
{
    std::ostringstream formatted;
    formatted << std::fixed << std::setprecision(2) << getSystemTotalRAMInGB();
    formatted << " GB";
    return formatted.str();
}
|
||||
|
||||
#endif // SYSINFO_H
|
||||
|
Loading…
Reference in New Issue