Mirror of https://github.com/arc53/DocsGPT (synced 2024-11-19 21:25:39 +00:00)

Merge pull request #965 from siiddhantt/feature/set-tokens-message-history

feat: dropdown to adjust conversational history limits

Commit 2d12a3cd7a
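At a high level, the change threads a new token_limit value from a settings dropdown through the Redux store and the frontend API layer into the Flask endpoints, where it is clamped against per-model caps before chat history is trimmed. A rough sketch of the resulting request shape follows; the endpoint path for api_answer() and the question field are assumptions not shown in this diff, while token_limit, chunks, prompt_id, and conversation_id come from the frontend changes below:

import requests

API_HOST = "http://localhost:7091"  # assumed local host; the hosted default is https://docsapi.arc53.com

payload = {
    "question": "What is DocsGPT?",  # assumed field, not part of this diff
    "history": [],
    "conversation_id": None,
    "prompt_id": "default",
    "chunks": "2",
    "token_limit": 2000,  # new in this PR; the UI default is 2000
}

response = requests.post(f"{API_HOST}/api/answer", json=payload, timeout=60)
print(response.json())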
@@ -78,7 +78,7 @@ def get_data_from_api_key(api_key):
     if data is None:
         return bad_request(401, "Invalid API key")
     return data


 def get_vectorstore(data):
     if "active_docs" in data:
@@ -95,6 +95,7 @@ def get_vectorstore(data):
         vectorstore = os.path.join("application", vectorstore)
     return vectorstore


 def is_azure_configured():
     return (
         settings.OPENAI_API_BASE
@@ -221,7 +222,10 @@ def stream():
         chunks = int(data["chunks"])
     else:
         chunks = 2
+    if "token_limit" in data:
+        token_limit = data["token_limit"]
+    else:
+        token_limit = settings.DEFAULT_MAX_HISTORY

     # check if active_docs or api_key is set

@@ -255,6 +259,7 @@ def stream():
             chat_history=history,
             prompt=prompt,
             chunks=chunks,
+            token_limit=token_limit,
             gpt_model=gpt_model,
             user_api_key=user_api_key,
         )
@@ -291,6 +296,10 @@ def api_answer():
         chunks = int(data["chunks"])
     else:
         chunks = 2
+    if "token_limit" in data:
+        token_limit = data["token_limit"]
+    else:
+        token_limit = settings.DEFAULT_MAX_HISTORY

     # use try and except to check for exception
     try:
@@ -314,7 +323,7 @@ def api_answer():
         retriever_name = source["active_docs"]

     prompt = get_prompt(prompt_id)

     retriever = RetrieverCreator.create_retriever(
         retriever_name,
         question=question,
@@ -322,6 +331,7 @@ def api_answer():
         chat_history=history,
         prompt=prompt,
         chunks=chunks,
+        token_limit=token_limit,
         gpt_model=gpt_model,
         user_api_key=user_api_key,
     )
@@ -370,7 +380,6 @@ def api_search():
     else:
         source = {}
         user_api_key = None

-
     if (
         source["active_docs"].split("/")[0] == "default"
@@ -379,6 +388,10 @@ def api_search():
         retriever_name = "classic"
     else:
         retriever_name = source["active_docs"]
+    if "token_limit" in data:
+        token_limit = data["token_limit"]
+    else:
+        token_limit = settings.DEFAULT_MAX_HISTORY

     retriever = RetrieverCreator.create_retriever(
         retriever_name,
@@ -387,8 +400,9 @@ def api_search():
         chat_history=[],
         prompt="default",
         chunks=chunks,
+        token_limit=token_limit,
         gpt_model=gpt_model,
         user_api_key=user_api_key,
     )
     docs = retriever.search()
     return docs
@@ -15,7 +15,8 @@ class Settings(BaseSettings):
     CELERY_RESULT_BACKEND: str = "redis://localhost:6379/1"
     MONGO_URI: str = "mongodb://localhost:27017/docsgpt"
     MODEL_PATH: str = os.path.join(current_dir, "models/docsgpt-7b-f16.gguf")
-    TOKENS_MAX_HISTORY: int = 150
+    DEFAULT_MAX_HISTORY: int = 150
+    MODEL_TOKEN_LIMITS: dict = {"gpt-3.5-turbo": 4096, "claude-2": 1e5}
     UPLOAD_FOLDER: str = "inputs"
     VECTOR_STORE: str = "faiss"  # "faiss" or "elasticsearch" or "qdrant"
     RETRIEVERS_ENABLED: list = ["classic_rag", "duckduck_search"]  # also brave_search
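MODEL_TOKEN_LIMITS gives known models their own history ceiling, while the renamed DEFAULT_MAX_HISTORY doubles as both the fallback cap and the server-side default when a request omits token_limit. A minimal sketch of how the lookup resolves, using only the values above:

MODEL_TOKEN_LIMITS = {"gpt-3.5-turbo": 4096, "claude-2": 1e5}
DEFAULT_MAX_HISTORY = 150

def model_cap(gpt_model):
    # Known models get their configured ceiling; anything else
    # (e.g. the retrievers' default "docsgpt") falls back to 150.
    return MODEL_TOKEN_LIMITS.get(gpt_model, DEFAULT_MAX_HISTORY)

assert model_cap("gpt-3.5-turbo") == 4096
assert model_cap("docsgpt") == 150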
@@ -15,6 +15,7 @@ class BraveRetSearch(BaseRetriever):
         chat_history,
         prompt,
         chunks=2,
+        token_limit=150,
         gpt_model="docsgpt",
         user_api_key=None,
     ):
@@ -24,6 +25,16 @@ class BraveRetSearch(BaseRetriever):
         self.prompt = prompt
         self.chunks = chunks
         self.gpt_model = gpt_model
+        self.token_limit = (
+            token_limit
+            if token_limit
+            < settings.MODEL_TOKEN_LIMITS.get(
+                self.gpt_model, settings.DEFAULT_MAX_HISTORY
+            )
+            else settings.MODEL_TOKEN_LIMITS.get(
+                self.gpt_model, settings.DEFAULT_MAX_HISTORY
+            )
+        )
         self.user_api_key = user_api_key

     def _get_data(self):
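Both branches of the conditional above resolve the same per-model cap, so the whole expression reduces to a min(): the requested limit wins only while it stays below the cap. This is also what turns the frontend's 1e9 "Unlimited" sentinel into the model's actual ceiling server-side. A sketch of the equivalent clamp; the min() rewrite is ours, not a change made by the PR:

from application.core.settings import settings  # import path assumed

def clamp_token_limit(token_limit, gpt_model):
    cap = settings.MODEL_TOKEN_LIMITS.get(gpt_model, settings.DEFAULT_MAX_HISTORY)
    # Identical behavior to: token_limit if token_limit < cap else cap
    return min(token_limit, cap)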
@@ -70,10 +81,7 @@ class BraveRetSearch(BaseRetriever):
                 tokens_batch = count_tokens(i["prompt"]) + count_tokens(
                     i["response"]
                 )
-                if (
-                    tokens_current_history + tokens_batch
-                    < settings.TOKENS_MAX_HISTORY
-                ):
+                if tokens_current_history + tokens_batch < self.token_limit:
                     tokens_current_history += tokens_batch
                     messages_combine.append(
                         {"role": "user", "content": i["prompt"]}
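This trimming loop, repeated verbatim in ClassicRAG and DuckDuckSearch below, walks the stored history and keeps a prompt/response pair only while the running token total stays under self.token_limit; the switch from the global settings.TOKENS_MAX_HISTORY to the per-instance limit is the heart of the PR. A self-contained sketch of the loop, with count_tokens stubbed by a crude whitespace count and the response-side role assumed (only the user-side append is visible in this diff):

def count_tokens(text):
    # Stand-in for DocsGPT's real count_tokens helper.
    return len(text.split())

def trim_history(chat_history, token_limit):
    tokens_current_history = 0
    messages_combine = []
    for i in chat_history:
        if "prompt" in i and "response" in i:
            tokens_batch = count_tokens(i["prompt"]) + count_tokens(i["response"])
            if tokens_current_history + tokens_batch < token_limit:
                tokens_current_history += tokens_batch
                messages_combine.append({"role": "user", "content": i["prompt"]})
                messages_combine.append({"role": "system", "content": i["response"]})  # role assumed
    return messages_combine

# With token_limit=0 ("None" in the UI) no pair fits, so history is dropped entirely.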
@@ -16,6 +16,7 @@ class ClassicRAG(BaseRetriever):
         chat_history,
         prompt,
         chunks=2,
+        token_limit=150,
         gpt_model="docsgpt",
         user_api_key=None,
     ):
@@ -25,6 +26,16 @@ class ClassicRAG(BaseRetriever):
         self.prompt = prompt
         self.chunks = chunks
         self.gpt_model = gpt_model
+        self.token_limit = (
+            token_limit
+            if token_limit
+            < settings.MODEL_TOKEN_LIMITS.get(
+                self.gpt_model, settings.DEFAULT_MAX_HISTORY
+            )
+            else settings.MODEL_TOKEN_LIMITS.get(
+                self.gpt_model, settings.DEFAULT_MAX_HISTORY
+            )
+        )
         self.user_api_key = user_api_key

     def _get_vectorstore(self, source):
@@ -85,10 +96,7 @@ class ClassicRAG(BaseRetriever):
                 tokens_batch = count_tokens(i["prompt"]) + count_tokens(
                     i["response"]
                 )
-                if (
-                    tokens_current_history + tokens_batch
-                    < settings.TOKENS_MAX_HISTORY
-                ):
+                if tokens_current_history + tokens_batch < self.token_limit:
                     tokens_current_history += tokens_batch
                     messages_combine.append(
                         {"role": "user", "content": i["prompt"]}
@@ -15,6 +15,7 @@ class DuckDuckSearch(BaseRetriever):
         chat_history,
         prompt,
         chunks=2,
+        token_limit=150,
         gpt_model="docsgpt",
         user_api_key=None,
     ):
@@ -24,6 +25,16 @@ class DuckDuckSearch(BaseRetriever):
         self.prompt = prompt
         self.chunks = chunks
         self.gpt_model = gpt_model
+        self.token_limit = (
+            token_limit
+            if token_limit
+            < settings.MODEL_TOKEN_LIMITS.get(
+                self.gpt_model, settings.DEFAULT_MAX_HISTORY
+            )
+            else settings.MODEL_TOKEN_LIMITS.get(
+                self.gpt_model, settings.DEFAULT_MAX_HISTORY
+            )
+        )
         self.user_api_key = user_api_key

     def _parse_lang_string(self, input_string):
@@ -87,10 +98,7 @@ class DuckDuckSearch(BaseRetriever):
                 tokens_batch = count_tokens(i["prompt"]) + count_tokens(
                     i["response"]
                 )
-                if (
-                    tokens_current_history + tokens_batch
-                    < settings.TOKENS_MAX_HISTORY
-                ):
+                if tokens_current_history + tokens_batch < self.token_limit:
                     tokens_current_history += tokens_batch
                     messages_combine.append(
                         {"role": "user", "content": i["prompt"]}
@@ -20,12 +20,18 @@ function Dropdown({
   options:
     | string[]
     | { name: string; id: string; type: string }[]
-    | { label: string; value: string }[];
-  selectedValue: string | { label: string; value: string } | null;
+    | { label: string; value: string }[]
+    | { value: number; description: string }[];
+  selectedValue:
+    | string
+    | { label: string; value: string }
+    | { value: number; description: string }
+    | null;
   onSelect:
     | ((value: string) => void)
     | ((value: { name: string; id: string; type: string }) => void)
-    | ((value: { label: string; value: string }) => void);
+    | ((value: { label: string; value: string }) => void)
+    | ((value: { value: number; description: string }) => void);
   size?: string;
   rounded?: 'xl' | '3xl';
   border?: 'border' | 'border-2';
@@ -64,8 +70,14 @@ function Dropdown({
           !selectedValue && 'text-silver dark:text-gray-400'
         }`}
       >
-        {selectedValue
+        {selectedValue && 'label' in selectedValue
           ? selectedValue.label
+          : selectedValue && 'description' in selectedValue
+            ? `${
+                selectedValue.value < 1e9
+                  ? selectedValue.value + ` (${selectedValue.description})`
+                  : selectedValue.description
+              }`
           : placeholder
             ? placeholder
             : 'From URL'}
@@ -99,7 +111,13 @@ function Dropdown({
                       ? option
                       : option.name
                         ? option.name
-                        : option.label}
+                        : option.label
+                          ? option.label
+                          : `${
+                              option.value < 1e9
+                                ? option.value + ` (${option.description})`
+                                : option.description
+                            }`}
                 </span>
                 {showEdit && onEdit && (
                   <img
@@ -1,5 +1,6 @@
 import { Answer, FEEDBACK } from './conversationModels';
 import { Doc } from '../preferences/preferenceApi';
+import { selectTokenLimit } from '../preferences/preferenceSlice';

 const apiHost = import.meta.env.VITE_API_HOST || 'https://docsapi.arc53.com';

@@ -38,6 +39,7 @@ export function fetchAnswerApi(
   conversationId: string | null,
   promptId: string | null,
   chunks: string,
+  token_limit: number,
 ): Promise<
   | {
       result: any;
@@ -73,6 +75,7 @@ export function fetchAnswerApi(
       conversation_id: conversationId,
       prompt_id: promptId,
       chunks: chunks,
+      token_limit: token_limit,
     }),
     signal,
   })
@@ -103,6 +106,7 @@ export function fetchAnswerSteaming(
   conversationId: string | null,
   promptId: string | null,
   chunks: string,
+  token_limit: number,
   onEvent: (event: MessageEvent) => void,
 ): Promise<Answer> {
   const docPath = getDocPath(selectedDocs);
@@ -119,6 +123,7 @@ export function fetchAnswerSteaming(
     conversation_id: conversationId,
     prompt_id: promptId,
     chunks: chunks,
+    token_limit: token_limit,
   };
   fetch(apiHost + '/stream', {
     method: 'POST',
@@ -181,6 +186,7 @@ export function searchEndpoint(
   conversation_id: string | null,
   history: Array<any> = [],
   chunks: string,
+  token_limit: number,
 ) {
   const docPath = getDocPath(selectedDocs);

@@ -190,6 +196,7 @@ export function searchEndpoint(
     conversation_id,
     history,
     chunks: chunks,
+    token_limit: token_limit,
   };
   return fetch(`${apiHost}/api/search`, {
     method: 'POST',
@@ -28,6 +28,7 @@ export const fetchAnswer = createAsyncThunk<Answer, { question: string }>(
       state.conversation.conversationId,
       state.preference.prompt.id,
       state.preference.chunks,
+      state.preference.token_limit,

       (event) => {
         const data = JSON.parse(event.data);
@@ -51,6 +52,7 @@ export const fetchAnswer = createAsyncThunk<Answer, { question: string }>(
         state.conversation.conversationId,
         state.conversation.queries,
         state.preference.chunks,
+        state.preference.token_limit,
       ).then((sources) => {
         //dispatch streaming sources
         dispatch(
@@ -86,6 +88,7 @@ export const fetchAnswer = createAsyncThunk<Answer, { question: string }>(
       state.conversation.conversationId,
       state.preference.prompt.id,
       state.preference.chunks,
+      state.preference.token_limit,
     );
     if (answer) {
       let sourcesPrepped = [];
@@ -11,8 +11,9 @@ import { ActiveState } from '../models/misc';
 interface Preference {
   apiKey: string;
   prompt: { name: string; id: string; type: string };
-  selectedDocs: Doc | null;
   chunks: string;
+  token_limit: number;
+  selectedDocs: Doc | null;
   sourceDocs: Doc[] | null;
   conversations: { name: string; id: string }[] | null;
   modalState: ActiveState;
@@ -22,6 +23,7 @@ const initialState: Preference = {
   apiKey: 'xxx',
   prompt: { name: 'default', id: 'default', type: 'public' },
   chunks: '2',
+  token_limit: 2000,
   selectedDocs: {
     name: 'default',
     language: 'default',
@@ -60,6 +62,9 @@ export const prefSlice = createSlice({
     setChunks: (state, action) => {
       state.chunks = action.payload;
     },
+    setTokenLimit: (state, action) => {
+      state.token_limit = action.payload;
+    },
     setModalStateDeleteConv: (state, action: PayloadAction<ActiveState>) => {
       state.modalState = action.payload;
     },
@@ -73,6 +78,7 @@ export const {
   setConversations,
   setPrompt,
   setChunks,
+  setTokenLimit,
   setModalStateDeleteConv,
 } = prefSlice.actions;
 export default prefSlice.reducer;
@@ -115,6 +121,18 @@ prefListenerMiddleware.startListening({
   },
 });

+prefListenerMiddleware.startListening({
+  matcher: isAnyOf(setTokenLimit),
+  effect: (action, listenerApi) => {
+    localStorage.setItem(
+      'DocsGPTTokenLimit',
+      JSON.stringify(
+        (listenerApi.getState() as RootState).preference.token_limit,
+      ),
+    );
+  },
+});
+
 export const selectApiKey = (state: RootState) => state.preference.apiKey;
 export const selectApiKeyStatus = (state: RootState) =>
   !!state.preference.apiKey;
@@ -132,3 +150,5 @@ export const selectConversationId = (state: RootState) =>
   state.conversation.conversationId;
 export const selectPrompt = (state: RootState) => state.preference.prompt;
 export const selectChunks = (state: RootState) => state.preference.chunks;
+export const selectTokenLimit = (state: RootState) =>
+  state.preference.token_limit;
@@ -8,6 +8,8 @@ import {
   setPrompt,
   setChunks,
   selectChunks,
+  setTokenLimit,
+  selectTokenLimit,
   setModalStateDeleteConv,
 } from '../preferences/preferenceSlice';

@@ -17,10 +19,19 @@ const General: React.FC = () => {
   const themes = ['Light', 'Dark'];
   const languages = ['English'];
   const chunks = ['0', '2', '4', '6', '8', '10'];
+  const token_limits = new Map([
+    [0, 'None'],
+    [100, 'Low'],
+    [1000, 'Medium'],
+    [2000, 'Default'],
+    [4000, 'High'],
+    [1e9, 'Unlimited'],
+  ]);
   const [prompts, setPrompts] = React.useState<
     { name: string; id: string; type: string }[]
   >([]);
   const selectedChunks = useSelector(selectChunks);
+  const selectedTokenLimit = useSelector(selectTokenLimit);
   const [isDarkTheme, toggleTheme] = useDarkTheme();
   const [selectedTheme, setSelectedTheme] = React.useState(
     isDarkTheme ? 'Dark' : 'Light',
@@ -87,6 +98,31 @@ const General: React.FC = () => {
           border="border"
         />
       </div>
+      <div className="mb-5">
+        <p className="mb-2 font-bold text-jet dark:text-bright-gray">
+          Conversational history
+        </p>
+        <Dropdown
+          options={Array.from(token_limits, ([value, desc]) => ({
+            value: value,
+            description: desc,
+          }))}
+          selectedValue={{
+            value: selectedTokenLimit,
+            description: token_limits.get(selectedTokenLimit) as string,
+          }}
+          onSelect={({
+            value,
+            description,
+          }: {
+            value: number;
+            description: string;
+          }) => dispatch(setTokenLimit(value))}
+          size="w-56"
+          rounded="3xl"
+          border="border"
+        />
+      </div>
       <div className="mb-5">
         <Prompts
           prompts={prompts}
@@ -7,19 +7,21 @@ import {

 const key = localStorage.getItem('DocsGPTApiKey');
 const prompt = localStorage.getItem('DocsGPTPrompt');
-const doc = localStorage.getItem('DocsGPTRecentDocs');
 const chunks = localStorage.getItem('DocsGPTChunks');
+const token_limit = localStorage.getItem('DocsGPTTokenLimit');
+const doc = localStorage.getItem('DocsGPTRecentDocs');

 const store = configureStore({
   preloadedState: {
     preference: {
       apiKey: key ?? '',
-      chunks: JSON.parse(chunks ?? '2').toString(),
-      selectedDocs: doc !== null ? JSON.parse(doc) : null,
       prompt:
         prompt !== null
           ? JSON.parse(prompt)
           : { name: 'default', id: 'default', type: 'private' },
+      chunks: JSON.parse(chunks ?? '2').toString(),
+      token_limit: token_limit ? parseInt(token_limit) : 2000,
+      selectedDocs: doc !== null ? JSON.parse(doc) : null,
       conversations: null,
       sourceDocs: [
         {