// gpt4all/llm.h

#ifndef LLM_H
#define LLM_H
#include <QObject>
#include "chatlistmodel.h"
#include <cstdint> // int32_t, used by the threadCount property
class LLM : public QObject
{
    Q_OBJECT
    Q_PROPERTY(ChatListModel *chatListModel READ chatListModel NOTIFY chatListModelChanged)
    Q_PROPERTY(int32_t threadCount READ threadCount WRITE setThreadCount NOTIFY threadCountChanged)
public:
    // Singleton accessor for the application-wide LLM object
    static LLM *globalInstance();

    ChatListModel *chatListModel() const { return m_chatListModel; }

    int32_t threadCount() const;
    void setThreadCount(int32_t n_threads);

    Q_INVOKABLE bool checkForUpdates() const;
Q_SIGNALS:
    void chatListModelChanged();
    void threadCountChanged();
private Q_SLOTS:
    void aboutToQuit();
private:
    ChatListModel *m_chatListModel;
    int32_t m_threadCount;
private:
    explicit LLM();
    ~LLM() {}
    friend class MyLLM;
};
#endif // LLM_H
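
The private constructor together with the "friend class MyLLM" declaration is the usual Qt pattern for a lazily constructed singleton built on Q_GLOBAL_STATIC. A minimal sketch of what the matching accessor in llm.cpp could look like follows; the MyLLM helper subclass and the llmInstance name are assumptions for illustration, not necessarily the project's actual implementation.

// llm.cpp (sketch): defines the global LLM instance declared in llm.h.
#include "llm.h"

#include <QGlobalStatic>

// Trivial subclass whose implicit default constructor can call LLM's
// private constructor thanks to the friend declaration in the header.
class MyLLM : public LLM { };

// Lazily constructed, thread-safe global instance.
Q_GLOBAL_STATIC(MyLLM, llmInstance)

LLM *LLM::globalInstance()
{
    return llmInstance();
}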