gpt4all/llm.cpp

#include "llm.h"
#include <QCoreApplication>
#include <QDir>
#include <QFile>
#include <QResource>
#include <fstream>
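
// App-wide LLM singleton, created lazily on first access by Q_GLOBAL_STATIC.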
class MyLLM: public LLM { };

Q_GLOBAL_STATIC(MyLLM, llmInstance)

LLM *LLM::globalInstance()
{
    return llmInstance();
}
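
// Worker object that owns the GPTJ model; it lives on its own QThread so
// that model loading and text generation never block the GUI thread.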
GPTJObject::GPTJObject()
    : QObject{nullptr}
    , m_gptj(new GPTJ)
{
    moveToThread(&m_llmThread);
    // Kick off model loading as soon as the worker thread starts
    connect(&m_llmThread, &QThread::started, this, &GPTJObject::loadModel);
    m_llmThread.setObjectName("llm thread");
    m_llmThread.start();
}

bool GPTJObject::loadModel()
{
    if (isModelLoaded())
        return true;

    // Expect the quantized GGML weights alongside the executable
    QString modelName("ggml-model-q4_0.bin");
    QString fileName = QCoreApplication::applicationDirPath() + QDir::separator() + modelName;
    QFile file(fileName);
    if (file.exists()) {
        auto fin = std::ifstream(fileName.toStdString(), std::ios::binary);
        m_gptj->loadModel(modelName.toStdString(), fin);
        emit isModelLoadedChanged();
    }
    // True only if the weights were actually found and loaded
    return isModelLoaded();
}

bool GPTJObject::isModelLoaded() const
{
    return m_gptj->isModelLoaded();
}

void GPTJObject::resetResponse()
{
    m_response = std::string();
}

QString GPTJObject::response() const
{
    return QString::fromStdString(m_response);
}
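
// Callback invoked for each chunk of generated text; returning false
// tells the model to stop generating.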
bool GPTJObject::handleResponse(const std::string &response)
{
#if 0
    printf("%s", response.c_str());
    fflush(stdout);
#endif
    m_response.append(response);
    emit responseChanged();
    return !m_stopGenerating;
}

bool GPTJObject::prompt(const QString &prompt)
{
    if (!isModelLoaded())
        return false;

    m_stopGenerating = false;
    // Stream each chunk of generated text back through handleResponse
    auto func = std::bind(&GPTJObject::handleResponse, this, std::placeholders::_1);
    emit responseStarted();
    // Static so the prompt context, and thus the conversation state, persists across calls
    static GPTJ::PromptContext ctx;
    m_gptj->prompt(prompt.toStdString(), func, ctx, 4096 /*number of tokens to predict*/);
    emit responseStopped();
    return true;
}
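
// GUI-facing object: relays calls to the GPTJObject worker across the
// thread boundary via queued signal/slot connections.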
LLM::LLM()
    : QObject{nullptr}
    , m_gptj(new GPTJObject)
    , m_responseInProgress(false)
{
    connect(m_gptj, &GPTJObject::isModelLoadedChanged, this, &LLM::isModelLoadedChanged, Qt::QueuedConnection);
    connect(m_gptj, &GPTJObject::responseChanged, this, &LLM::responseChanged, Qt::QueuedConnection);
    connect(m_gptj, &GPTJObject::responseStarted, this, &LLM::responseStarted, Qt::QueuedConnection);
    connect(m_gptj, &GPTJObject::responseStopped, this, &LLM::responseStopped, Qt::QueuedConnection);
    connect(this, &LLM::promptRequested, m_gptj, &GPTJObject::prompt, Qt::QueuedConnection);
    // Blocking so the response is guaranteed to be cleared before the caller proceeds
    connect(this, &LLM::resetResponseRequested, m_gptj, &GPTJObject::resetResponse, Qt::BlockingQueuedConnection);
}

bool LLM::isModelLoaded() const
{
    return m_gptj->isModelLoaded();
}

void LLM::prompt(const QString &prompt)
{
    emit promptRequested(prompt);
}

void LLM::resetResponse()
{
    emit resetResponseRequested(); // blocking queued connection
}

void LLM::stopGenerating()
{
    m_gptj->stopGenerating();
}

QString LLM::response() const
{
    return m_gptj->response();
}

void LLM::responseStarted()
{
    m_responseInProgress = true;
    emit responseInProgressChanged();
}

void LLM::responseStopped()
{
    m_responseInProgress = false;
    emit responseInProgressChanged();
}