Merge branch 'arc53:main' into patch-1

pull/454/head
Aindree Chatterjee 1 year ago committed by GitHub
commit edc19e99a9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -38,7 +38,7 @@ When deploying your DocsGPT to a live environment, we're eager to provide person
You can find our [Roadmap](https://github.com/orgs/arc53/projects/2) here. Please don't hesitate to contribute or create issues, it helps us make DocsGPT better!
## Our Open-Source models optimized for DocsGPT:
| Name | Base Model | Requirements (or similar) | | Name | Base Model | Requirements (or similar) |
|-------------------|------------|----------------------------------------------------------| |-------------------|------------|----------------------------------------------------------|
@ -47,7 +47,7 @@ You can find our [Roadmap](https://github.com/orgs/arc53/projects/2) here. Pleas
| [Docsgpt-40b-falcon](https://huggingface.co/Arc53/docsgpt-40b-falcon) | falcon-40b | 8xA10G gpu's | | [Docsgpt-40b-falcon](https://huggingface.co/Arc53/docsgpt-40b-falcon) | falcon-40b | 8xA10G gpu's |
If you don't have enough resources to run it you can use [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) to quantize.
## Features ## Features

@ -1,68 +1,41 @@
import platform import platform
import dotenv import dotenv
from application.celery import celery from application.celery import celery
from flask import Flask, request, redirect from flask import Flask, request, redirect
from application.core.settings import settings from application.core.settings import settings
from application.api.user.routes import user from application.api.user.routes import user
from application.api.answer.routes import answer from application.api.answer.routes import answer
from application.api.internal.routes import internal from application.api.internal.routes import internal
# Redirect PosixPath to WindowsPath on Windows
if platform.system() == "Windows": if platform.system() == "Windows":
import pathlib import pathlib
temp = pathlib.PosixPath
pathlib.PosixPath = pathlib.WindowsPath pathlib.PosixPath = pathlib.WindowsPath
# loading the .env file
dotenv.load_dotenv() dotenv.load_dotenv()
app = Flask(__name__) app = Flask(__name__)
app.register_blueprint(user) app.register_blueprint(user)
app.register_blueprint(answer) app.register_blueprint(answer)
app.register_blueprint(internal) app.register_blueprint(internal)
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER = "inputs" app.config.update(
app.config["CELERY_BROKER_URL"] = settings.CELERY_BROKER_URL UPLOAD_FOLDER="inputs",
app.config["CELERY_RESULT_BACKEND"] = settings.CELERY_RESULT_BACKEND CELERY_BROKER_URL=settings.CELERY_BROKER_URL,
app.config["MONGO_URI"] = settings.MONGO_URI CELERY_RESULT_BACKEND=settings.CELERY_RESULT_BACKEND,
MONGO_URI=settings.MONGO_URI
)
celery.config_from_object("application.celeryconfig") celery.config_from_object("application.celeryconfig")
@app.route("/") @app.route("/")
def home(): def home():
""" return redirect('http://localhost:5173') if request.remote_addr in ('0.0.0.0', '127.0.0.1', 'localhost', '172.18.0.1') else 'Welcome to DocsGPT Backend!'
The frontend source code lives in the /frontend directory of the repository.
"""
if request.remote_addr in ('0.0.0.0', '127.0.0.1', 'localhost', '172.18.0.1'):
# If users locally try to access DocsGPT running in Docker,
# they will be redirected to the Frontend application.
return redirect('http://localhost:5173')
else:
# Handle other cases or render the default page
return 'Welcome to DocsGPT Backend!'
# handling CORS
@app.after_request @app.after_request
def after_request(response): def after_request(response):
response.headers.add("Access-Control-Allow-Origin", "*") response.headers.add("Access-Control-Allow-Origin", "*")
response.headers.add("Access-Control-Allow-Headers", "Content-Type,Authorization") response.headers.add("Access-Control-Allow-Headers", "Content-Type,Authorization")
response.headers.add("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS") response.headers.add("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS")
# response.headers.add("Access-Control-Allow-Credentials", "true")
return response return response
if __name__ == "__main__": if __name__ == "__main__":
app.run(debug=True, port=7091) app.run(debug=True, port=7091)

@ -10,7 +10,7 @@ It will install all the dependencies and give you an option to download the loca
Otherwise, refer to this Guide: Otherwise, refer to this Guide:
1. Open and download this repository with `git clone https://github.com/arc53/DocsGPT.git`. 1. Open and download this repository with `git clone https://github.com/arc53/DocsGPT.git`.
2. Create a `.env` file in your root directory and set your `API_KEY` with your openai api key. 2. Create a `.env` file in your root directory and set your `API_KEY` with your [OpenAI api key](https://platform.openai.com/account/api-keys).
3. Run `docker-compose build && docker-compose up`. 3. Run `docker-compose build && docker-compose up`.
4. Navigate to `http://localhost:5173/`. 4. Navigate to `http://localhost:5173/`.

@ -133,7 +133,7 @@ There are two types of responses:
``` ```
### /api/delete_old ### /api/delete_old
Deletes old vectorstores:
```js ```js
// Task status (GET http://127.0.0.1:5000/api/docs_check) // Task status (GET http://127.0.0.1:5000/api/docs_check)
fetch("http://localhost:5001/api/task_status?task_id=b2d2a0f4-387c-44fd-a443-e4fe2e7454d1", { fetch("http://localhost:5001/api/task_status?task_id=b2d2a0f4-387c-44fd-a443-e4fe2e7454d1", {

@ -1,4 +1,4 @@
## To customize a main prompt navigate to `/application/prompt/combine_prompt.txt`
You can try editing it to see how the model responds.

@ -35,7 +35,7 @@ It will tell you how much it will cost
Once you run it will use new context that is relevant to your documentation Once you run it will use new context that is relevant to your documentation
Make sure you select default in the dropdown in the UI Make sure you select default in the dropdown in the UI
## Customisation ## Customization
You can learn more about options while running ingest.py by running: You can learn more about options while running ingest.py by running:
`python ingest.py --help` `python ingest.py --help`

Loading…
Cancel
Save