diff --git a/.gitignore b/.gitignore
index 0003c21c..5af75524 100644
--- a/.gitignore
+++ b/.gitignore
@@ -108,7 +108,7 @@ venv/
ENV/
env.bak/
venv.bak/
-
+.flaskenv
# Spyder project settings
.spyderproject
.spyproject
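
Ignoring `.flaskenv` is sensible because the Flask CLI auto-loads it (via python-dotenv) and it typically carries machine-local settings. A minimal sketch of inspecting what would be loaded, assuming python-dotenv is installed; the values in the comment are hypothetical, not from this PR:

```python
# Inspect what the Flask CLI would pick up from .flaskenv.
from dotenv import dotenv_values

config = dotenv_values(".flaskenv")
# e.g. {'FLASK_APP': 'app.py', 'FLASK_DEBUG': '1'}  (hypothetical values)
print(config)
```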
diff --git a/application/app.py b/application/app.py
index b6360aee..a509ed63 100644
--- a/application/app.py
+++ b/application/app.py
@@ -9,7 +9,7 @@ from langchain import OpenAI, VectorDBQA, HuggingFaceHub, Cohere
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceHubEmbeddings, CohereEmbeddings, HuggingFaceInstructEmbeddings
from langchain.prompts import PromptTemplate
-
+from error import bad_request
# os.environ["LANGCHAIN_HANDLER"] = "langchain"
if os.getenv("LLM_NAME") is not None:
@@ -74,6 +74,7 @@ def api_answer():
data = request.get_json()
question = data["question"]
history = data["history"]
+ print('-'*5)
if not api_key_set:
api_key = data["api_key"]
else:
@@ -83,62 +84,68 @@ def api_answer():
else:
embeddings_key = os.getenv("EMBEDDINGS_KEY")
+    # wrap the flow in try/except so any failure is returned as a JSON error
+ try:
- # check if the vectorstore is set
- if "active_docs" in data:
- vectorstore = "vectors/" + data["active_docs"]
- if data['active_docs'] == "default":
+ # check if the vectorstore is set
+ if "active_docs" in data:
+ vectorstore = "vectors/" + data["active_docs"]
+ if data['active_docs'] == "default":
+ vectorstore = ""
+ else:
vectorstore = ""
- else:
- vectorstore = ""
- # loading the index and the store and the prompt template
- # Note if you have used other embeddings than OpenAI, you need to change the embeddings
- if embeddings_choice == "openai_text-embedding-ada-002":
- docsearch = FAISS.load_local(vectorstore, OpenAIEmbeddings(openai_api_key=embeddings_key))
- elif embeddings_choice == "huggingface_sentence-transformers/all-mpnet-base-v2":
- docsearch = FAISS.load_local(vectorstore, HuggingFaceHubEmbeddings())
- elif embeddings_choice == "huggingface_hkunlp/instructor-large":
- docsearch = FAISS.load_local(vectorstore, HuggingFaceInstructEmbeddings())
- elif embeddings_choice == "cohere_medium":
- docsearch = FAISS.load_local(vectorstore, CohereEmbeddings(cohere_api_key=embeddings_key))
+ # loading the index and the store and the prompt template
+ # Note if you have used other embeddings than OpenAI, you need to change the embeddings
+ if embeddings_choice == "openai_text-embedding-ada-002":
+ docsearch = FAISS.load_local(vectorstore, OpenAIEmbeddings(openai_api_key=embeddings_key))
+ elif embeddings_choice == "huggingface_sentence-transformers/all-mpnet-base-v2":
+ docsearch = FAISS.load_local(vectorstore, HuggingFaceHubEmbeddings())
+ elif embeddings_choice == "huggingface_hkunlp/instructor-large":
+ docsearch = FAISS.load_local(vectorstore, HuggingFaceInstructEmbeddings())
+ elif embeddings_choice == "cohere_medium":
+ docsearch = FAISS.load_local(vectorstore, CohereEmbeddings(cohere_api_key=embeddings_key))
- # create a prompt template
- if history:
- history = json.loads(history)
- template_temp = template_hist.replace("{historyquestion}", history[0]).replace("{historyanswer}", history[1])
- c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template_temp, template_format="jinja2")
- else:
- c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template, template_format="jinja2")
+ # create a prompt template
+ if history:
+ history = json.loads(history)
+ template_temp = template_hist.replace("{historyquestion}", history[0]).replace("{historyanswer}", history[1])
+ c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template_temp, template_format="jinja2")
+ else:
+ c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template, template_format="jinja2")
- if llm_choice == "openai":
- llm = OpenAI(openai_api_key=api_key, temperature=0)
- elif llm_choice == "manifest":
- llm = ManifestWrapper(client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 2048})
- elif llm_choice == "huggingface":
- llm = HuggingFaceHub(repo_id="bigscience/bloom", huggingfacehub_api_token=api_key)
- elif llm_choice == "cohere":
- llm = Cohere(model="command-xlarge-nightly", cohere_api_key=api_key)
+ if llm_choice == "openai":
+ llm = OpenAI(openai_api_key=api_key, temperature=0)
+ elif llm_choice == "manifest":
+ llm = ManifestWrapper(client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 2048})
+ elif llm_choice == "huggingface":
+ llm = HuggingFaceHub(repo_id="bigscience/bloom", huggingfacehub_api_token=api_key)
+ elif llm_choice == "cohere":
+ llm = Cohere(model="command-xlarge-nightly", cohere_api_key=api_key)
- qa_chain = load_qa_chain(llm=llm, chain_type="map_reduce",
- combine_prompt=c_prompt)
+ qa_chain = load_qa_chain(llm=llm, chain_type="map_reduce",
+ combine_prompt=c_prompt)
- chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=4)
+ chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=4)
- # fetch the answer
- result = chain({"query": question})
- print(result)
+
+ # fetch the answer
+ result = chain({"query": question})
+ print(result)
- # some formatting for the frontend
- result['answer'] = result['result']
-    result['answer'] = result['answer'].replace("\\n", "<br>")
- result['answer'] = result['answer'].replace("SOURCES:", "")
- # mock result
- # result = {
- # "answer": "The answer is 42",
- # "sources": ["https://en.wikipedia.org/wiki/42_(number)", "https://en.wikipedia.org/wiki/42_(number)"]
- # }
- return result
+ # some formatting for the frontend
+ result['answer'] = result['result']
+        result['answer'] = result['answer'].replace("\\n", "<br>")
+ result['answer'] = result['answer'].replace("SOURCES:", "")
+ # mock result
+ # result = {
+ # "answer": "The answer is 42",
+ # "sources": ["https://en.wikipedia.org/wiki/42_(number)", "https://en.wikipedia.org/wiki/42_(number)"]
+ # }
+ return result
+ except Exception as e:
+ print(str(e))
+        return bad_request(500, str(e))
@app.route("/api/docs_check", methods=["POST"])
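
The hunk above boils down to one pattern: wrap the whole answer flow in try/except and turn any exception into a JSON error via the new `bad_request` helper. A minimal, self-contained sketch of that pattern; only `bad_request` comes from this PR, the `flaky_qa` stub is illustrative scaffolding:

```python
# Sketch of the error-handling pattern introduced in api_answer().
from flask import Flask, request
from error import bad_request  # added in application/error.py below

app = Flask(__name__)

def flaky_qa(question):
    # stand-in for FAISS loading + the QA chain; simulates a failure
    raise FileNotFoundError("vectorstore could not be loaded")

@app.route("/api/answer", methods=["POST"])
def api_answer():
    data = request.get_json()
    try:
        return {"answer": flaky_qa(data["question"])}
    except Exception as e:
        print(str(e))
        return bad_request(500, str(e))
```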
diff --git a/application/error.py b/application/error.py
new file mode 100644
index 00000000..cab5ea39
--- /dev/null
+++ b/application/error.py
@@ -0,0 +1,13 @@
+from flask import jsonify
+from werkzeug.http import HTTP_STATUS_CODES
+
+def response_error(code_status, message=None):
+    payload = {'error': HTTP_STATUS_CODES.get(code_status, "something went wrong")}
+    if message:
+        payload['message'] = message
+    response = jsonify(payload)
+    response.status_code = code_status
+    return response
+
+def bad_request(status_code=400, message=''):
+    return response_error(code_status=status_code, message=message)
\ No newline at end of file
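
For a quick sanity check of the helper, `jsonify` only needs an application context. This snippet (illustrative, not part of the PR; run from `application/` so `error` is importable) prints the status and payload a failed request would see:

```python
# Quick check of the new error helper; the message text is just an example.
from flask import Flask
from error import bad_request

app = Flask(__name__)

with app.app_context():
    resp = bad_request(500, "could not load vectorstore")
    print(resp.status_code)  # 500
    print(resp.get_json())   # {'error': 'Internal Server Error', 'message': 'could not load vectorstore'}
```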
diff --git a/application/static/src/chat.js b/application/static/src/chat.js
index 553d7f3f..3c997ad5 100644
--- a/application/static/src/chat.js
+++ b/application/static/src/chat.js
@@ -1,55 +1,73 @@
-var el = document.getElementById('message-form');
-if (el) {
- el.addEventListener("submit", function (event) {
- console.log("submitting")
- event.preventDefault()
- var message = document.getElementById("message-input").value;
-        msg_html = '...'
-        msg_html += message
-        msg_html += '...'
+        msg_html = '...'
+        msg_html += message
+        msg_html += '...'
+        msg_html += data.answer
+        msg_html += '...'
-        msg_html += data.answer
-        msg_html += '...'
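
The chat.js hunk (truncated above, with the HTML bubble strings lost to extraction and shown as `'...'`) rebuilds the same message flow on the client. For completeness, here is how a client can consume the new error payload, sketched in Python; the URL, port, and key values are assumptions for illustration:

```python
# Exercise the error path end-to-end, assuming the app runs on the
# default Flask dev address; the api_key values are placeholders.
import requests

resp = requests.post(
    "http://127.0.0.1:5000/api/answer",
    json={
        "question": "What is this project?",
        "history": "",
        "api_key": "sk-placeholder",
        "embeddings_key": "sk-placeholder",
        "active_docs": "default",
    },
)
payload = resp.json()
if resp.ok:
    print(payload["answer"])
else:
    # shape produced by bad_request(): {'error': ..., 'message': ...}
    print(f"{resp.status_code}: {payload.get('message')}")
```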