From 74845aed64fa87f7819dae410c280c91022dca95 Mon Sep 17 00:00:00 2001
From: Alex
Date: Thu, 18 May 2023 14:27:13 +0100
Subject: [PATCH 1/3] history init

---
 application/app.py                           | 25 +++++++++++--------
 frontend/src/conversation/conversationApi.ts |  3 ++-
 .../src/conversation/conversationSlice.ts    |  1 +
 3 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/application/app.py b/application/app.py
index d68c5b93..fa56c1d4 100644
--- a/application/app.py
+++ b/application/app.py
@@ -23,6 +23,7 @@ from langchain.prompts.chat import (
     ChatPromptTemplate,
     SystemMessagePromptTemplate,
     HumanMessagePromptTemplate,
+    AIMessagePromptTemplate,
 )
 from pymongo import MongoClient
 from werkzeug.utils import secure_filename
@@ -107,6 +108,8 @@ def run_async_chain(chain, question, chat_history):
     result["answer"] = answer
     return result
 
+
+
 
 @celery.task(bind=True)
 def ingest(self, directory, formats, name_job, filename, user):
@@ -164,16 +167,6 @@ def api_answer():
         docsearch = FAISS.load_local(vectorstore, CohereEmbeddings(cohere_api_key=embeddings_key))
 
     # create a prompt template
-    if history:
-        history = json.loads(history)
-        template_temp = template_hist.replace("{historyquestion}", history[0]).replace("{historyanswer}",
-                                                                                       history[1])
-        c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template_temp,
-                                  template_format="jinja2")
-    else:
-        c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template,
-                                  template_format="jinja2")
-
     q_prompt = PromptTemplate(input_variables=["context", "question"], template=template_quest,
                               template_format="jinja2")
     if settings.LLM_NAME == "openai_chat":
@@ -182,6 +175,18 @@ def api_answer():
             SystemMessagePromptTemplate.from_template(chat_combine_template),
             HumanMessagePromptTemplate.from_template("{question}")
         ]
+        if history:
+            tokens_current_history = 0
+            tokens_max_history = 1000
+            #count tokens in history
+            for i in history:
+                if "prompt" in i and "response" in i:
+                    tokens_batch = llm.get_num_tokens(i["prompt"]) + llm.get_num_tokens(i["response"])
+                    if tokens_current_history + tokens_batch < tokens_max_history:
+                        tokens_current_history += tokens_batch
+                        messages_combine.append(HumanMessagePromptTemplate.from_template(i["prompt"]))
+                        messages_combine.append(SystemMessagePromptTemplate.from_template(i["response"]))
+
         p_chat_combine = ChatPromptTemplate.from_messages(messages_combine)
     elif settings.LLM_NAME == "openai":
         llm = OpenAI(openai_api_key=api_key, temperature=0)
diff --git a/frontend/src/conversation/conversationApi.ts b/frontend/src/conversation/conversationApi.ts
index c7320342..4d5bdfb7 100644
--- a/frontend/src/conversation/conversationApi.ts
+++ b/frontend/src/conversation/conversationApi.ts
@@ -7,6 +7,7 @@ export function fetchAnswerApi(
   question: string,
   apiKey: string,
   selectedDocs: Doc,
+  history: Array<any> = [],
 ): Promise<Answer> {
   let namePath = selectedDocs.name;
   if (selectedDocs.language === namePath) {
@@ -37,7 +38,7 @@ export function fetchAnswerApi(
       question: question,
       api_key: apiKey,
      embeddings_key: apiKey,
-      history: localStorage.getItem('chatHistory'),
+      history: history,
       active_docs: docPath,
     }),
   })
diff --git a/frontend/src/conversation/conversationSlice.ts b/frontend/src/conversation/conversationSlice.ts
index c728b9e0..a822c9bd 100644
--- a/frontend/src/conversation/conversationSlice.ts
+++ b/frontend/src/conversation/conversationSlice.ts
@@ -19,6 +19,7 @@ export const fetchAnswer = createAsyncThunk<
     question,
     state.preference.apiKey,
     state.preference.selectedDocs!,
+    state.conversation.queries,
   );
   return answer;
 });

From bc9f1c17ed80ff22fcd34189b71d9c708d84c07f Mon Sep 17 00:00:00 2001
From: Alex
Date: Thu, 18 May 2023 18:42:23 +0100
Subject: [PATCH 2/3] History

Co-Authored-By: riccardofresi <89981746+riccardofresi@users.noreply.github.com>
---
 application/app.py  | 6 +++---
 docker-compose.yaml | 8 --------
 2 files changed, 3 insertions(+), 11 deletions(-)

diff --git a/application/app.py b/application/app.py
index fa56c1d4..013b8708 100644
--- a/application/app.py
+++ b/application/app.py
@@ -177,7 +177,7 @@ def api_answer():
         ]
         if history:
             tokens_current_history = 0
-            tokens_max_history = 1000
+            tokens_max_history = 500
             #count tokens in history
             for i in history:
                 if "prompt" in i and "response" in i:
@@ -185,7 +185,7 @@ def api_answer():
                     if tokens_current_history + tokens_batch < tokens_max_history:
                         tokens_current_history += tokens_batch
                         messages_combine.append(HumanMessagePromptTemplate.from_template(i["prompt"]))
-                        messages_combine.append(SystemMessagePromptTemplate.from_template(i["response"]))
+                        messages_combine.append(AIMessagePromptTemplate.from_template(i["response"]))
 
         p_chat_combine = ChatPromptTemplate.from_messages(messages_combine)
     elif settings.LLM_NAME == "openai":
@@ -213,7 +213,7 @@ def api_answer():
         result = run_async_chain(chain, question, chat_history)
     else:
         qa_chain = load_qa_chain(llm=llm, chain_type="map_reduce",
-                                 combine_prompt=c_prompt, question_prompt=q_prompt)
+                                 combine_prompt=chat_combine_template, question_prompt=q_prompt)
         chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=3)
         result = chain({"query": question})
 
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 1052b614..4a97fbf6 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -1,14 +1,6 @@
 version: "3.9"
 
 services:
-  frontend:
-    build: ./frontend
-    environment:
-      - VITE_API_HOST=http://localhost:5001
-    ports:
-      - "5173:5173"
-    depends_on:
-      - backend
 
   backend:
     build: ./application

From ba9c50524915680620b2667dc74c6c17b640c6c6 Mon Sep 17 00:00:00 2001
From: Alex
Date: Thu, 18 May 2023 18:45:15 +0100
Subject: [PATCH 3/3] accidentally deleted frontend container

---
 docker-compose.yaml | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/docker-compose.yaml b/docker-compose.yaml
index 4a97fbf6..703ed4e7 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -1,6 +1,14 @@
 version: "3.9"
 
 services:
+  frontend:
+    build: ./frontend
+    environment:
+      - VITE_API_HOST=http://localhost:5001
+    ports:
+      - "5173:5173"
+    depends_on:
+      - backend
 
   backend:
     build: ./application
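
Reading aid (not part of the patches): the core change of the series is the token-budgeted history trimming that patches 1 and 2 add to api_answer(). The sketch below restates that loop as a self-contained Python function so it can be read and run outside the Flask route. The build_history_messages name, the (role, text) tuples, and the whitespace-based token counter are illustrative assumptions; the patched code instead appends HumanMessagePromptTemplate/AIMessagePromptTemplate objects to messages_combine and counts tokens with llm.get_num_tokens() on a LangChain chat model.

# Standalone sketch of the history trimming introduced in patches 1-2.
from typing import Callable, Dict, List, Tuple


def build_history_messages(
    history: List[Dict[str, str]],
    count_tokens: Callable[[str], int],
    tokens_max_history: int = 500,
) -> List[Tuple[str, str]]:
    """Return (role, text) pairs for as many past turns as fit the token budget."""
    messages: List[Tuple[str, str]] = []
    tokens_current_history = 0
    for turn in history:
        if "prompt" in turn and "response" in turn:
            # Cost of including this turn: prompt tokens plus response tokens.
            tokens_batch = count_tokens(turn["prompt"]) + count_tokens(turn["response"])
            if tokens_current_history + tokens_batch < tokens_max_history:
                tokens_current_history += tokens_batch
                messages.append(("human", turn["prompt"]))   # HumanMessagePromptTemplate in the patch
                messages.append(("ai", turn["response"]))    # AIMessagePromptTemplate after patch 2
    return messages


if __name__ == "__main__":
    # Whitespace word count as a rough stand-in for llm.get_num_tokens().
    def count(text: str) -> int:
        return len(text.split())

    history = [
        {"prompt": "What does DocsGPT do?", "response": "It answers questions about your docs."},
        {"prompt": "How is history sent?", "response": "The frontend posts past queries with the request."},
    ]
    print(build_history_messages(history, count))

Capping the replayed history at tokens_max_history (tightened from 1000 to 500 in patch 2) keeps the combined chat prompt within the model's context budget while still carrying the most recent turns sent by the frontend.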