mirror of https://github.com/nomic-ai/gpt4all (synced 2024-11-04 12:00:10 +00:00)
102 lines · 2.7 KiB · C++
#ifndef LLM_H
#define LLM_H

#include <QObject>
#include <QThread>

#include <atomic>   // std::atomic used below
#include <string>   // std::string used below

#include "gptj.h"
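
// Worker-side object: owns the LLModel instance, loads the model and runs
// prompt generation, streaming partial output through responseChanged().
// The dedicated QThread member (m_llmThread) suggests inference is meant to
// run off the thread that owns the LLM facade declared further below.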
class LLMObject : public QObject
{
    Q_OBJECT
    Q_PROPERTY(bool isModelLoaded READ isModelLoaded NOTIFY isModelLoadedChanged)
    Q_PROPERTY(QString response READ response NOTIFY responseChanged)
    Q_PROPERTY(QString modelName READ modelName NOTIFY modelNameChanged)

public:

    LLMObject();

    bool loadModel();
    bool isModelLoaded() const;
    void resetResponse();
    void resetContext();
    void stopGenerating() { m_stopGenerating = true; }

    QString response() const;
    QString modelName() const;

public Q_SLOTS:
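    // Starts generating a reply for the given prompt; n_predict, top_k, top_p,
    // temp and n_batch are the usual sampling/batching knobs passed through to
    // the underlying model. Partial output is reported via responseChanged().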
    bool prompt(const QString &prompt, const QString &prompt_template, int32_t n_predict, int32_t top_k, float top_p,
                float temp, int32_t n_batch);

Q_SIGNALS:
    void isModelLoadedChanged();
    void responseChanged();
    void responseStarted();
    void responseStopped();
    void modelNameChanged();

private:
    bool handleResponse(const std::string &response);

private:
    LLModel *m_llmodel;
    std::string m_response;
    quint32 m_responseTokens;
    quint32 m_responseLogits;
    QString m_modelName;
    QThread m_llmThread;
    std::atomic<bool> m_stopGenerating;
};
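
// Application-facing singleton (see globalInstance()); its Q_INVOKABLE calls
// appear to be relayed to the LLMObject worker through the *Requested signals
// below, while responseInProgress tracks whether generation is running.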
class LLM : public QObject
{
    Q_OBJECT
    Q_PROPERTY(bool isModelLoaded READ isModelLoaded NOTIFY isModelLoadedChanged)
    Q_PROPERTY(QString response READ response NOTIFY responseChanged)
    Q_PROPERTY(QString modelName READ modelName NOTIFY modelNameChanged)
    Q_PROPERTY(bool responseInProgress READ responseInProgress NOTIFY responseInProgressChanged)
public:

    static LLM *globalInstance();

    Q_INVOKABLE bool isModelLoaded() const;
    Q_INVOKABLE void prompt(const QString &prompt, const QString &prompt_template, int32_t n_predict, int32_t top_k, float top_p,
        float temp, int32_t n_batch);
    Q_INVOKABLE void resetContext();
    Q_INVOKABLE void resetResponse();
    Q_INVOKABLE void stopGenerating();

    QString response() const;
    bool responseInProgress() const { return m_responseInProgress; }

    QString modelName() const;

    Q_INVOKABLE bool checkForUpdates() const;

Q_SIGNALS:
    void isModelLoadedChanged();
    void responseChanged();
    void responseInProgressChanged();
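    // Request signals used to hand prompt/reset work off to the LLMObject
    // worker; the connections themselves are presumably made in the
    // implementation file.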
    void promptRequested(const QString &prompt, const QString &prompt_template, int32_t n_predict, int32_t top_k, float top_p,
        float temp, int32_t n_batch);
    void resetResponseRequested();
    void resetContextRequested();
    void modelNameChanged();

private Q_SLOTS:
    void responseStarted();
    void responseStopped();

private:
    LLMObject *m_llmodel;
    bool m_responseInProgress;

private:
    explicit LLM();
    ~LLM() {}
    friend class MyLLM;
};

#endif // LLM_H