diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 0000000..0c9b183 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,23 @@ +repo: + - '*' + +github: + - .github/**/* + +application: + - application/**/* + +docs: + - docs/**/* + +extensions: + - extensions/**/* + +frontend: + - frontend/**/* + +scripts: + - scripts/**/* + +tests: + - tests/**/* diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 0000000..f85abb1 --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,15 @@ +# https://github.com/actions/labeler +name: Pull Request Labeler +on: + - pull_request_target +jobs: + triage: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v4 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + sync-labels: true diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index ef6fe19..512b6c2 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -2,7 +2,7 @@ ## Our Pledge -We as members, contributors, and leaders pledge to make participation in our +We as members, contributors, and leaders, pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, @@ -10,20 +10,20 @@ nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. +diverse, inclusive, and a healthy community. 
## Our Standards -Examples of behavior that contributes to a positive environment for our +Examples of behavior that contributes to a positive environment for our community include: -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences +* Demonstrating empathy and kindness towards other people +* Being respectful and open to differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience +* Taking accountability and offering apologies to those who have been impacted by our errors, + while also gaining insights from the situation * Focusing on what is best not just for us as individuals, but for the - overall community + community as a whole Examples of unacceptable behavior include: @@ -31,7 +31,7 @@ Examples of unacceptable behavior include: advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment -* Publishing others' private information, such as a physical or email +* Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting @@ -74,7 +74,7 @@ the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. +unprofessional or unwelcome in the community space. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the @@ -107,7 +107,7 @@ Violating these terms may lead to a permanent ban. 
**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. +individual, or aggression towards or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 78a3d9b..b759fd4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ -# Welcome to DocsGPT Contributing guideline +# Welcome to DocsGPT Contributing Guidelines -Thank you for choosing this project to contribute to, we are all very grateful! +Thank you for choosing this project to contribute to. We are all very grateful! ### [πŸŽ‰ Join the Hacktoberfest with DocsGPT and Earn a Free T-shirt! πŸŽ‰](https://github.com/arc53/DocsGPT/blob/main/HACKTOBERFEST.md) @@ -17,30 +17,36 @@ Thank you for choosing this project to contribute to, we are all very grateful! ## 🐞 Issues and Pull requests -We value contributions to our issues in the form of discussion or suggestion, we recommend that you check out existing issues and our [Roadmap](https://github.com/orgs/arc53/projects/2) +We value contributions to our issues in the form of discussion or suggestions. We recommend that you check out existing issues and our [roadmap](https://github.com/orgs/arc53/projects/2). -If you want to contribute by writing code there are a few things that you should know before doing it: -We have frontend (React, Vite) and Backend (python) +If you want to contribute by writing code, there are a few things that you should know before doing it: + +We have a frontend in React (Vite) and backend in Python. + +### If you are looking to contribute to frontend (βš›οΈReact, Vite): + +- The current frontend is being migrated from `/application` to `/frontend` with a new design, so please contribute to the new one. 
+- Check out this [milestone](https://github.com/arc53/DocsGPT/milestone/1) and its issues. +- The Figma design can be found [here](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1). -### If you are looking to contribute to Frontend (βš›οΈReact, Vite): -The current frontend is being migrated from /application to /frontend with a new design, so please contribute to the new one. Check out this [Milestone](https://github.com/arc53/DocsGPT/milestone/1) and its issues also [Figma](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1) Please try to follow the guidelines. -### If you are looking to contribute to Backend (🐍Python): -* Check out our issues, and contribute to /application or /scripts (ignore old ingest_rst.py ingest_rst_sphinx.py files, they will be deprecated soon) -* All new code should be covered with unit tests ([pytest](https://github.com/pytest-dev/pytest)). Please find tests under [/tests](https://github.com/arc53/DocsGPT/tree/main/tests) folder. -* Before submitting your PR make sure that after you ingested some test data it is queryable. +### If you are looking to contribute to Backend (🐍 Python): +- Check out our issues and contribute to `/application` or `/scripts` (ignore old `ingest_rst.py` `ingest_rst_sphinx.py` files; they will be deprecated soon). +- All new code should be covered with unit tests ([pytest](https://github.com/pytest-dev/pytest)). Please find tests under [`/tests`](https://github.com/arc53/DocsGPT/tree/main/tests) folder. +- Before submitting your PR, ensure it is queryable after ingesting some test data. ### Testing -To run unit tests, from the root of the repository execute: + +To run unit tests from the root of the repository, execute: ``` python -m pytest ``` ### Workflow: -Create a fork, make changes on your forked repository, and submit changes in the form of a pull request. 
+Create a fork, make changes on your forked repository, and submit changes as a pull request. ## Questions/collaboration -Please join our [Discord](https://discord.gg/n5BX8dh8rU) don't hesitate, we are very friendly and welcoming to new contributors. +Please join our [Discord](https://discord.gg/n5BX8dh8rU). Don't hesitate; we are very friendly and welcoming to new contributors. # Thank you so much for considering contributing to DocsGPT!πŸ™ diff --git a/HACKTOBERFEST.md b/HACKTOBERFEST.md index 7a0e016..1a39e56 100644 --- a/HACKTOBERFEST.md +++ b/HACKTOBERFEST.md @@ -2,13 +2,13 @@ Welcome, contributors! We're excited to announce that DocsGPT is participating in Hacktoberfest. Get involved by submitting a **meaningful** pull request, and earn a free shirt in return! -All contributors with accepted PR's will receive a cool holopin! 🀩 (Watchout for a reply in your PR to collect it) +All contributors with accepted PRs will receive a cool Holopin! 🀩 (Watch out for a reply in your PR to collect it). πŸ“œ Here's How to Contribute: πŸ› οΈ Code: This is the golden ticket! Make meaningful contributions through PRs. πŸ“š Wiki: Improve our documentation, Create a guide or change existing documentation. - πŸ–₯️ Design: Improve the UI/UX, or design a new feature. + πŸ–₯️ Design: Improve the UI/UX or design a new feature. πŸ“ Guidelines for Pull Requests: @@ -16,20 +16,20 @@ Familiarize yourself with the current contributions and our [Roadmap](https://gi Deciding to contribute with code? Here are some insights based on the area of your interest: -Frontend (βš›οΈReact, Vite): - Most of the code is located in /frontend folder. You can also check out our React extension in /extensions/react-widget. - For design references, here's the [Figma](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1). - Ensure you adhere to the established guidelines. +- Frontend (βš›οΈReact, Vite): + - Most of the code is located in `/frontend` folder. 
You can also check out our React extension in /extensions/react-widget. + - For design references, here's the [Figma](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1). + - Ensure you adhere to the established guidelines. -Backend (🐍Python): - Focus on /application or /scripts. However, avoid the files ingest_rst.py and ingest_rst_sphinx.py as they are soon to be deprecated. - Newly added code should come with relevant unit tests (pytest). - Refer to the /tests folder for test suites. +- Backend (🐍Python): + - Focus on `/application` or `/scripts`. However, avoid the files ingest_rst.py and ingest_rst_sphinx.py, as they will soon be deprecated. + - Newly added code should come with relevant unit tests (pytest). + - Refer to the `/tests` folder for test suites. -Check out [Contributing Guidelines](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md) +Check out our [Contributing Guidelines](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md) -Once you have Created your PR and it was merged, please fill in this [form](https://airtable.com/appfkqFVjB0RpYCJh/shrXXM98xgRsbjO7s) +Once you have created your PR and our maintainers have merged it, please fill in this [form](https://airtable.com/appfkqFVjB0RpYCJh/shrXXM98xgRsbjO7s). -Don't be shy! Hop into our [Discord](https://discord.gg/n5BX8dh8rU) Server. We're a friendly bunch and eager to assist newcomers. +Feel free to join our Discord server. We're here to help newcomers, so don't hesitate to jump in! [Join us here](https://discord.gg/n5BX8dh8rU). -Big thanks for considering contributing to DocsGPT during Hacktoberfest! πŸ™ Your effort can earn you a swanky new t-shirt. 🎁 Let's code together! πŸš€ +Thank you very much for considering contributing to DocsGPT during Hacktoberfest! πŸ™ Your contributions could earn you a stylish new t-shirt as a token of our appreciation. 🎁 Join us, and let's code together! 
πŸš€ diff --git a/README.md b/README.md index 5c115b4..b02df1a 100644 --- a/README.md +++ b/README.md @@ -18,14 +18,12 @@ Say goodbye to time-consuming manual searches, and let DocsGPT ![example2](https://img.shields.io/github/forks/arc53/docsgpt?style=social) ![example3](https://img.shields.io/github/license/arc53/docsgpt) ![example3](https://img.shields.io/discord/1070046503302877216) - - - + -### Production Support/ Help for companies: +### Production Support / Help for companies: -When deploying your DocsGPT to a live environment, we're eager to provide personalized assistance. +We're eager to provide personalized assistance when deploying your DocsGPT to a live environment. - [Schedule Demo πŸ‘‹](https://cal.com/arc53/docsgpt-demo-b2b?date=2023-10-04&month=2023-10) - [Send Email βœ‰οΈ](mailto:contact@arc53.com?subject=DocsGPT%20support%2Fsolutions) @@ -36,9 +34,9 @@ When deploying your DocsGPT to a live environment, we're eager to provide person ## Roadmap -You can find our [Roadmap](https://github.com/orgs/arc53/projects/2) here. Please don't hesitate to contribute or create issues, it helps us make DocsGPT better! +You can find our roadmap [here](https://github.com/orgs/arc53/projects/2). Please don't hesitate to contribute or create issues, it helps us improve DocsGPT! -## Our Open-Source models optimised for DocsGPT: +## Our Open-Source models optimized for DocsGPT: | Name | Base Model | Requirements (or similar) | |-------------------|------------|----------------------------------------------------------| @@ -47,7 +45,7 @@ You can find our [Roadmap](https://github.com/orgs/arc53/projects/2) here. Pleas | [Docsgpt-40b-falcon](https://huggingface.co/Arc53/docsgpt-40b-falcon) | falcon-40b | 8xA10G gpu's | -If you don't have enough resources to run it you can use bitsnbytes to quantize +If you don't have enough resources to run it, you can use bitsnbytes to quantize. 
## Features @@ -56,7 +54,7 @@ If you don't have enough resources to run it you can use bitsnbytes to quantize ## Useful links - Audit [Live preview](https://docsgpt.arc53.com/) + Discord [Join Our Discord](https://discord.gg/n5BX8dh8rU) @@ -74,28 +72,28 @@ If you don't have enough resources to run it you can use bitsnbytes to quantize ## Project structure -- Application - Flask app (main application) +- Application - Flask app (main application). -- Extensions - Chrome extension +- Extensions - Chrome extension. -- Scripts - Script that creates similarity search index and store for other libraries. +- Scripts - Script that creates similarity search index and stores for other libraries. -- Frontend - Frontend uses Vite and React +- Frontend - Frontend uses Vite and React. ## QuickStart Note: Make sure you have Docker installed -On Mac OS or Linux just write: +On Mac OS or Linux, write: `./setup.sh` -It will install all the dependencies and give you an option to download local model or use OpenAI +It will install all the dependencies and allow you to download the local model or use OpenAI. -Otherwise refer to this Guide: +Otherwise, refer to this Guide: 1. Download and open this repository with `git clone https://github.com/arc53/DocsGPT.git` -2. Create a .env file in your root directory and set the env variable OPENAI_API_KEY with your OpenAI API key and VITE_API_STREAMING to true or false, depending on if you want streaming answers or not +2. Create a `.env` file in your root directory and set the env variable `OPENAI_API_KEY` with your OpenAI API key and `VITE_API_STREAMING` to true or false, depending on if you want streaming answers or not. It should look like this inside: ``` @@ -103,15 +101,15 @@ Otherwise refer to this Guide: VITE_API_STREAMING=true ``` See optional environment variables in the `/.env-template` and `/application/.env_sample` files. -3. Run `./run-with-docker-compose.sh` -4. Navigate to http://localhost:5173/ +3. 
Run `./run-with-docker-compose.sh`. +4. Navigate to http://localhost:5173/. -To stop just run Ctrl + C +To stop, just run `Ctrl + C`. ## Development environments ### Spin up mongo and redis -For development only 2 containers are used from docker-compose.yaml (by deleting all services except for Redis and Mongo). +For development, only two containers are used from `docker-compose.yaml` (by deleting all services except for Redis and Mongo). See file [docker-compose-dev.yaml](./docker-compose-dev.yaml). Run @@ -124,33 +122,32 @@ docker compose -f docker-compose-dev.yaml up -d Make sure you have Python 3.10 or 3.11 installed. -1. Export required environment variables or prep .env file in application folder -Prepare .env file -Copy `.env_sample` and create `.env` with your OpenAI API token for the API_KEY and EMBEDDINGS_KEY fields +1. Export required environment variables or prepare a `.env` file in the `/application` folder: + - Copy `.env_sample` and create `.env` with your OpenAI API token for the `API_KEY` and `EMBEDDINGS_KEY` fields. -(check out application/core/settings.py if you want to see more config options) -3. (optional) Create a Python virtual environment +(check out [`application/core/settings.py`](application/core/settings.py) if you want to see more config options.) + +2. (optional) Create a Python virtual environment: ```commandline python -m venv venv . venv/bin/activate ``` -4. Change to `application/` subdir and install dependencies for the backend +3. Change to the `application/` subdir and install dependencies for the backend: ```commandline pip install -r application/requirements.txt ``` -5. Run the app `flask run --host=0.0.0.0 --port=7091` -6. Start worker with `celery -A application.app.celery worker -l INFO` +4. Run the app using `flask run --host=0.0.0.0 --port=7091`. +5. Start worker with `celery -A application.app.celery worker -l INFO`. ### Start frontend + Make sure you have Node version 16 or higher. -1. 
Navigate to `/frontend` folder -2. Install dependencies -`npm install` -3. Run the app -`npm run dev` +1. Navigate to the `/frontend` folder. +2. Install dependencies by running `npm install`. +3. Run the app using `npm run dev`. -## All Thanks To Our Contributors +## Many Thanks To Our Contributors @@ -158,4 +155,3 @@ Make sure you have Node version 16 or higher. Built with [πŸ¦œοΈπŸ”— LangChain](https://github.com/hwchase17/langchain) - diff --git a/application/app.py b/application/app.py index 41b821b..ae61997 100644 --- a/application/app.py +++ b/application/app.py @@ -1,68 +1,44 @@ import platform - - import dotenv from application.celery import celery from flask import Flask, request, redirect - - from application.core.settings import settings from application.api.user.routes import user from application.api.answer.routes import answer from application.api.internal.routes import internal - - -# Redirect PosixPath to WindowsPath on Windows - if platform.system() == "Windows": import pathlib - - temp = pathlib.PosixPath pathlib.PosixPath = pathlib.WindowsPath -# loading the .env file dotenv.load_dotenv() - - app = Flask(__name__) app.register_blueprint(user) app.register_blueprint(answer) app.register_blueprint(internal) -app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER = "inputs" -app.config["CELERY_BROKER_URL"] = settings.CELERY_BROKER_URL -app.config["CELERY_RESULT_BACKEND"] = settings.CELERY_RESULT_BACKEND -app.config["MONGO_URI"] = settings.MONGO_URI +app.config.update( + UPLOAD_FOLDER="inputs", + CELERY_BROKER_URL=settings.CELERY_BROKER_URL, + CELERY_RESULT_BACKEND=settings.CELERY_RESULT_BACKEND, + MONGO_URI=settings.MONGO_URI +) celery.config_from_object("application.celeryconfig") - - @app.route("/") def home(): - """ - The frontend source code lives in the /frontend directory of the repository. 
- """ if request.remote_addr in ('0.0.0.0', '127.0.0.1', 'localhost', '172.18.0.1'): - # If users locally try to access DocsGPT running in Docker, - # they will be redirected to the Frontend application. return redirect('http://localhost:5173') else: - # Handle other cases or render the default page return 'Welcome to DocsGPT Backend!' - - - -# handling CORS @app.after_request def after_request(response): response.headers.add("Access-Control-Allow-Origin", "*") response.headers.add("Access-Control-Allow-Headers", "Content-Type,Authorization") response.headers.add("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS") - # response.headers.add("Access-Control-Allow-Credentials", "true") return response - if __name__ == "__main__": app.run(debug=True, port=7091) + diff --git a/application/core/settings.py b/application/core/settings.py index a05fd00..116735a 100644 --- a/application/core/settings.py +++ b/application/core/settings.py @@ -32,6 +32,12 @@ class Settings(BaseSettings): ELASTIC_URL: str = None # url for elasticsearch ELASTIC_INDEX: str = "docsgpt" # index name for elasticsearch + # SageMaker config + SAGEMAKER_ENDPOINT: str = None # SageMaker endpoint name + SAGEMAKER_REGION: str = None # SageMaker region name + SAGEMAKER_ACCESS_KEY: str = None # SageMaker access key + SAGEMAKER_SECRET_KEY: str = None # SageMaker secret key + path = Path(__file__).parent.parent.absolute() settings = Settings(_env_file=path.joinpath(".env"), _env_file_encoding="utf-8") diff --git a/application/llm/sagemaker.py b/application/llm/sagemaker.py index 9ef5d0a..84ae09a 100644 --- a/application/llm/sagemaker.py +++ b/application/llm/sagemaker.py @@ -1,27 +1,139 @@ from application.llm.base import BaseLLM from application.core.settings import settings -import requests import json +import io + + + +class LineIterator: + """ + A helper class for parsing the byte stream input. 
+ + The output of the model will be in the following format: + ``` + b'{"outputs": [" a"]}\n' + b'{"outputs": [" challenging"]}\n' + b'{"outputs": [" problem"]}\n' + ... + ``` + + While usually each PayloadPart event from the event stream will contain a byte array + with a full json, this is not guaranteed and some of the json objects may be split across + PayloadPart events. For example: + ``` + {'PayloadPart': {'Bytes': b'{"outputs": '}} + {'PayloadPart': {'Bytes': b'[" problem"]}\n'}} + ``` + + This class accounts for this by concatenating bytes written via the 'write' function + and then exposing a method which will return lines (ending with a '\n' character) within + the buffer via the 'scan_lines' function. It maintains the position of the last read + position to ensure that previous bytes are not exposed again. + """ + + def __init__(self, stream): + self.byte_iterator = iter(stream) + self.buffer = io.BytesIO() + self.read_pos = 0 + + def __iter__(self): + return self + + def __next__(self): + while True: + self.buffer.seek(self.read_pos) + line = self.buffer.readline() + if line and line[-1] == ord('\n'): + self.read_pos += len(line) + return line[:-1] + try: + chunk = next(self.byte_iterator) + except StopIteration: + if self.read_pos < self.buffer.getbuffer().nbytes: + continue + raise + if 'PayloadPart' not in chunk: + print('Unknown event type:' + str(chunk)) + continue + self.buffer.seek(0, io.SEEK_END) + self.buffer.write(chunk['PayloadPart']['Bytes']) class SagemakerAPILLM(BaseLLM): def __init__(self, *args, **kwargs): - self.url = settings.SAGEMAKER_API_URL + import boto3 + runtime = boto3.client( + 'runtime.sagemaker', + aws_access_key_id=settings.SAGEMAKER_ACCESS_KEY, + aws_secret_access_key=settings.SAGEMAKER_SECRET_KEY, + region_name=settings.SAGEMAKER_REGION + ) + + + self.endpoint = settings.SAGEMAKER_ENDPOINT + self.runtime = runtime + def gen(self, model, engine, messages, stream=False, **kwargs): context = messages[0]['content'] user_question = messages[-1]['content'] prompt = f"### Instruction \n 
{user_question} \n ### Context \n {context} \n ### Answer \n" + - response = requests.post( - url=self.url, - headers={ - "Content-Type": "application/json; charset=utf-8", - }, - data=json.dumps({"input": prompt}) - ) + # Construct payload for endpoint + payload = { + "inputs": prompt, + "stream": False, + "parameters": { + "do_sample": True, + "temperature": 0.1, + "max_new_tokens": 30, + "repetition_penalty": 1.03, + "stop": ["</s>", "###"] + } + } + body_bytes = json.dumps(payload).encode('utf-8') - return response.json()['answer'] + # Invoke the endpoint + response = self.runtime.invoke_endpoint(EndpointName=self.endpoint, + ContentType='application/json', + Body=body_bytes) + result = json.loads(response['Body'].read().decode()) + # NOTE(review): removed stderr debug print of the raw generated text + # Strip the echoed prompt from the generated text before returning + return result[0]['generated_text'][len(prompt):] def gen_stream(self, model, engine, messages, stream=True, **kwargs): + context = messages[0]['content'] + user_question = messages[-1]['content'] + prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n" + + + # Construct payload for endpoint + payload = { + "inputs": prompt, + "stream": True, + "parameters": { + "do_sample": True, + "temperature": 0.1, + "max_new_tokens": 512, + "repetition_penalty": 1.03, + "stop": ["</s>", "###"] + } + } + body_bytes = json.dumps(payload).encode('utf-8') + + # Invoke the endpoint + response = self.runtime.invoke_endpoint_with_response_stream(EndpointName=self.endpoint, + ContentType='application/json', + Body=body_bytes) + #result = json.loads(response['Body'].read().decode()) + event_stream = response['Body'] + start_json = b'{' + for line in LineIterator(event_stream): + if line != b'' and start_json in line: + #print(line) + data = json.loads(line[line.find(start_json):].decode('utf-8')) + if data['token']['text'] not in ["</s>", "###"]: + 
print(data['token']['text'],end='') + yield data['token']['text'] \ No newline at end of file diff --git a/application/requirements.txt b/application/requirements.txt index 8e5823a..b4c712f 100644 --- a/application/requirements.txt +++ b/application/requirements.txt @@ -41,7 +41,7 @@ Jinja2==3.1.2 jmespath==1.0.1 joblib==1.2.0 kombu==5.2.4 -langchain==0.0.263 +langchain==0.0.308 loguru==0.6.0 lxml==4.9.2 MarkupSafe==2.1.2 @@ -59,7 +59,7 @@ numpy==1.24.2 openai==0.27.8 packaging==23.0 pathos==0.3.0 -Pillow==9.4.0 +Pillow==10.0.1 pox==0.3.2 ppft==1.7.6.6 prompt-toolkit==3.0.38 diff --git a/application/vectorstore/faiss.py b/application/vectorstore/faiss.py index 5c5cee7..217b045 100644 --- a/application/vectorstore/faiss.py +++ b/application/vectorstore/faiss.py @@ -1,5 +1,5 @@ from application.vectorstore.base import BaseVectorStore -from langchain import FAISS +from langchain.vectorstores import FAISS from application.core.settings import settings class FaissStore(BaseVectorStore): diff --git a/docker-compose.yaml b/docker-compose.yaml index 7535a4b..84cc568 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -19,7 +19,6 @@ services: - CELERY_BROKER_URL=redis://redis:6379/0 - CELERY_RESULT_BACKEND=redis://redis:6379/1 - MONGO_URI=mongodb://mongo:27017/docsgpt - - SELF_HOSTED_MODEL=$SELF_HOSTED_MODEL ports: - "7091:7091" volumes: diff --git a/docs/pages/Deploying/Hosting-the-app.md b/docs/pages/Deploying/Hosting-the-app.md index fe9f16f..7505f60 100644 --- a/docs/pages/Deploying/Hosting-the-app.md +++ b/docs/pages/Deploying/Hosting-the-app.md @@ -4,7 +4,7 @@ Here's a step-by-step guide on how to setup an Amazon Lightsail instance to host ## Configuring your instance -(If you know how to create a Lightsail instance, you can skip to the recommended configuration part by clicking here) +(If you know how to create a Lightsail instance, you can skip to the recommended configuration part by clicking here). ### 1. 
Create an account or login to https://lightsail.aws.amazon.com @@ -36,7 +36,7 @@ Your instance will be ready for use a few minutes after being created. To access #### Clone the repository -A terminal window will pop up, and the first step will be to clone the DocsGPT git repository. +A terminal window will pop up, and the first step will be to clone the DocsGPT git repository: `git clone https://github.com/arc53/DocsGPT.git` @@ -64,11 +64,11 @@ Enter the following command to access the folder in which DocsGPT docker-compose #### Prepare the environment -Inside the DocsGPT folder create a .env file and copy the contents of .env_sample into it. +Inside the DocsGPT folder create a `.env` file and copy the contents of `.env_sample` into it. `nano .env` -Make sure your .env file looks like this: +Make sure your `.env` file looks like this: ``` OPENAI_API_KEY=(Your OpenAI API key) @@ -103,10 +103,10 @@ Before you are able to access your live instance, you must first enable the port Open your Lightsail instance and head to "Networking". -Then click on "Add rule" under "IPv4 Firewall", enter 5173 as your port, and hit "Create". -Repeat the process for port 7091. +Then click on "Add rule" under "IPv4 Firewall", enter `5173` as your port, and hit "Create". +Repeat the process for port `7091`. #### Access your instance -Your instance will now be available under your Public IP Address and port 5173. Enjoy! +Your instance will now be available under your Public IP Address and port `5173`. Enjoy! diff --git a/docs/pages/Deploying/Quickstart.md b/docs/pages/Deploying/Quickstart.md index 6c0b974..2cc03c5 100644 --- a/docs/pages/Deploying/Quickstart.md +++ b/docs/pages/Deploying/Quickstart.md @@ -9,23 +9,23 @@ It will install all the dependencies and give you an option to download the loca Otherwise, refer to this Guide: -1. Open and download this repository with `git clone https://github.com/arc53/DocsGPT.git` -2. 
Create a .env file in your root directory and set your `API_KEY` with your openai api key -3. Run `docker-compose build && docker-compose up` -4. Navigate to `http://localhost:5173/` +1. Open and download this repository with `git clone https://github.com/arc53/DocsGPT.git`. +2. Create a `.env` file in your root directory and set your `API_KEY` with your [OpenAI api key](https://platform.openai.com/account/api-keys). +3. Run `docker-compose build && docker-compose up`. +4. Navigate to `http://localhost:5173/`. -To stop just run Ctrl + C +To stop just run `Ctrl + C`. ### Chrome Extension To install the Chrome extension: -1. In the DocsGPT GitHub repository, click on the "Code" button and select Download ZIP -2. Unzip the downloaded file to a location you can easily access -3. Open the Google Chrome browser and click on the three dots menu (upper right corner) -4. Select "More Tools" and then "Extensions" -5. Turn on the "Developer mode" switch in the top right corner of the Extensions page -6. Click on the "Load unpacked" button -7. Select the "Chrome" folder where the DocsGPT files have been unzipped (docsgpt-main > extensions > chrome) -8. The extension should now be added to Google Chrome and can be managed on the Extensions page +1. In the DocsGPT GitHub repository, click on the "Code" button and select "Download ZIP". +2. Unzip the downloaded file to a location you can easily access. +3. Open the Google Chrome browser and click on the three dots menu (upper right corner). +4. Select "More Tools" and then "Extensions". +5. Turn on the "Developer mode" switch in the top right corner of the Extensions page. +6. Click on the "Load unpacked" button. +7. Select the "Chrome" folder where the DocsGPT files have been unzipped (docsgpt-main > extensions > chrome). +8. The extension should now be added to Google Chrome and can be managed on the Extensions page. 9. 
To disable or remove the extension, simply turn off the toggle switch on the extension card or click the "Remove" button. diff --git a/docs/pages/Developing/API-docs.md b/docs/pages/Developing/API-docs.md index 4109eb1..2583874 100644 --- a/docs/pages/Developing/API-docs.md +++ b/docs/pages/Developing/API-docs.md @@ -1,8 +1,8 @@ -App currently has two main api endpoints: +Currently, the application provides the following main API endpoints: ### /api/answer -Its a POST request that sends a JSON in body with 4 values. Here is a JavaScript fetch example -It will receive an answer for a user provided question +It's a POST request that sends a JSON in body with 4 values. It will receive an answer for a user provided question. +Here is a JavaScript fetch example: ```js // answer (POST http://127.0.0.1:5000/api/answer) @@ -29,8 +29,8 @@ In response you will get a json document like this one: ``` ### /api/docs_check -It will make sure documentation is loaded on a server (just run it every time user is switching between libraries (documentations) -Its a POST request that sends a JSON in body with 1 value. Here is a JavaScript fetch example +It will make sure documentation is loaded on a server (just run it every time user is switching between libraries (documentations)). +It's a POST request that sends a JSON in body with 1 value. Here is a JavaScript fetch example: ```js // answer (POST http://127.0.0.1:5000/api/docs_check) @@ -54,10 +54,10 @@ In response you will get a json document like this one: ### /api/combine -Provides json that tells UI which vectors are available and where they are located with a simple get request +Provides json that tells UI which vectors are available and where they are located with a simple get request. 
-Respsonse will include: -date, description, docLink, fullName, language, location (local or docshub), model, name, version +Response will include: +`date`, `description`, `docLink`, `fullName`, `language`, `location` (local or docshub), `model`, `name`, `version`. Example of json in Docshub and local: image @@ -69,15 +69,14 @@ HTML example: ```html
- - - - - - -
+ + + + + + ``` Response: @@ -90,7 +89,7 @@ Response: ``` ### /api/task_status -Gets task status (task_id) from /api/upload +Gets task status (`task_id`) from `/api/upload`: ```js // Task status (Get http://127.0.0.1:5000/api/task_status) fetch("http://localhost:5001/api/task_status?task_id=b2d2a0f4-387c-44fd-a443-e4fe2e7454d1", { @@ -105,7 +104,7 @@ fetch("http://localhost:5001/api/task_status?task_id=b2d2a0f4-387c-44fd-a443-e4f Responses: There are two types of responses: -1. while task it still running, where "current" will show progress from 0 - 100 +1. while task it still running, where "current" will show progress from 0 to 100 ```json { "result": { @@ -134,7 +133,7 @@ There are two types of responses: ``` ### /api/delete_old -deletes old vecotstores +Deletes old vectorstores: ```js // Task status (GET http://127.0.0.1:5000/api/docs_check) fetch("http://localhost:5001/api/task_status?task_id=b2d2a0f4-387c-44fd-a443-e4fe2e7454d1", { @@ -146,7 +145,8 @@ fetch("http://localhost:5001/api/task_status?task_id=b2d2a0f4-387c-44fd-a443-e4f .then((res) => res.text()) .then(console.log.bind(console)) ``` -response: + +Response: ```json { "status": "ok" } diff --git a/docs/pages/Extensions/Chatwoot-extension.md b/docs/pages/Extensions/Chatwoot-extension.md index 09420f4..4dd5782 100644 --- a/docs/pages/Extensions/Chatwoot-extension.md +++ b/docs/pages/Extensions/Chatwoot-extension.md @@ -1,9 +1,8 @@ ### To start chatwoot extension: -1. Prepare and start the DocsGPT itself (load your documentation too) -Follow our [wiki](https://github.com/arc53/DocsGPT/wiki) to start it and to [ingest](https://github.com/arc53/DocsGPT/wiki/How-to-train-on-other-documentation) data -2. Go to chatwoot, Navigate to your profile (bottom left), click on profile settings, scroll to the bottom and copy Access Token -2. Navigate to `/extensions/chatwoot`. Copy .env_sample and create .env file -3. Fill in the values +1. Prepare and start the DocsGPT itself (load your documentation too). 
Follow our [wiki](https://github.com/arc53/DocsGPT/wiki) to start it and to [ingest](https://github.com/arc53/DocsGPT/wiki/How-to-train-on-other-documentation) data. +2. Go to chatwoot, **Navigate** to your profile (bottom left), click on profile settings, scroll to the bottom and copy **Access Token**. +3. Navigate to `/extensions/chatwoot`. Copy `.env_sample` and create `.env` file. +4. Fill in the values. ``` docsgpt_url= @@ -12,18 +11,19 @@ docsgpt_key= chatwoot_token= ``` -4. start with `flask run` command +5. Start with `flask run` command. -If you want for bot to stop responding to questions for a specific user or session just add label `human-requested` in your conversation +If you want for bot to stop responding to questions for a specific user or session just add label `human-requested` in your conversation. ### Optional (extra validation) -In app.py uncomment lines 12-13 and 71-75 +In `app.py` uncomment lines 12-13 and 71-75 -in your .env file add: +in your `.env` file add: -`account_id=(optional) 1 ` - -`assignee_id=(optional) 1` +``` +account_id=(optional) 1 +assignee_id=(optional) 1 +``` -Those are chatwoot values and will allow you to check if you are responding to correct widget and responding to questions assigned to specific user \ No newline at end of file +Those are chatwoot values and will allow you to check if you are responding to correct widget and responding to questions assigned to specific user. \ No newline at end of file diff --git a/docs/pages/Extensions/react-widget.md b/docs/pages/Extensions/react-widget.md index 393c736..be4d6bd 100644 --- a/docs/pages/Extensions/react-widget.md +++ b/docs/pages/Extensions/react-widget.md @@ -1,7 +1,7 @@ ### How to set up react docsGPT widget on your website: ### Installation -Got to your project and install a new dependency: `npm install docsgpt` +Got to your project and install a new dependency: `npm install docsgpt`. 
### Usage Go to your project and in the file where you want to use the widget import it: @@ -14,9 +14,9 @@ import "docsgpt/dist/style.css"; Then you can use it like this: `` DocsGPTWidget takes 3 props: -- `apiHost` - url of your DocsGPT API -- `selectDocs` - documentation that you want to use for your widget (eg. `default` or `local/docs1.zip`) -- `apiKey` - usually its empty +- `apiHost` β€” url of your DocsGPT API. +- `selectDocs` β€” documentation that you want to use for your widget (eg. `default` or `local/docs1.zip`). +- `apiKey` β€” usually it's empty. ### How to use DocsGPTWidget with [Nextra](https://nextra.site/) (Next.js + MDX) Install your widget as described above and then go to your `pages/` folder and create a new file `_app.js` with the following content: diff --git a/docs/pages/Guides/Customising-prompts.md b/docs/pages/Guides/Customising-prompts.md index b74956d..1d3a7d4 100644 --- a/docs/pages/Guides/Customising-prompts.md +++ b/docs/pages/Guides/Customising-prompts.md @@ -1,4 +1,4 @@ -## To customise a main prompt navigate to `/application/prompt/combine_prompt.txt` +## To customize a main prompt navigate to `/application/prompt/combine_prompt.txt` -You can try editing it to see how the model responds. +You can try editing it to see how the model responds. diff --git a/docs/pages/Guides/How-to-train-on-other-documentation.md b/docs/pages/Guides/How-to-train-on-other-documentation.md index 0aa24d1..c9549ae 100644 --- a/docs/pages/Guides/How-to-train-on-other-documentation.md +++ b/docs/pages/Guides/How-to-train-on-other-documentation.md @@ -3,14 +3,13 @@ This AI can use any documentation, but first it needs to be prepared for similar ![video-example-of-how-to-do-it](https://d3dg1063dc54p9.cloudfront.net/videos/how-to-vectorise.gif) -Start by going to -`/scripts/` folder +Start by going to `/scripts/` folder. If you open this file you will see that it uses RST files from the folder to create a `index.faiss` and `index.pkl`.
-It currently uses OPEN_AI to create vector store, so make sure your documentation is not too big. Pandas cost me around 3-4$ +It currently uses OPEN_AI to create vector store, so make sure your documentation is not too big. Pandas cost me around 3-4$. -You can usually find documentation on github in docs/ folder for most open-source projects. +You can usually find documentation on github in `docs/` folder for most open-source projects. ### 1. Find documentation in .rst/.md and create a folder with it in your scripts directory Name it `inputs/` @@ -36,7 +35,7 @@ It will tell you how much it will cost Once you run it will use new context that is relevant to your documentation Make sure you select default in the dropdown in the UI -## Customisation +## Customization You can learn more about options while running ingest.py by running: `python ingest.py --help` diff --git a/docs/pages/Guides/How-to-use-different-LLM.md b/docs/pages/Guides/How-to-use-different-LLM.md index 65cce50..aa5815f 100644 --- a/docs/pages/Guides/How-to-use-different-LLM.md +++ b/docs/pages/Guides/How-to-use-different-LLM.md @@ -1,10 +1,10 @@ Fortunately there are many providers for LLM's and some of them can even be ran locally There are two models used in the app: -1. Embeddings -2. Text generation +1. Embeddings. +2. Text generation. -By default we use OpenAI's models but if you want to change it or even run it locally, its very simple! +By default, we use OpenAI's models but if you want to change it or even run it locally, it's very simple! ### Go to .env file or set environment variables: @@ -18,7 +18,7 @@ By default we use OpenAI's models but if you want to change it or even run it lo `VITE_API_STREAMING=` -You dont need to provide keys if you are happy with users providing theirs, so make sure you set LLM_NAME and EMBEDDINGS_NAME +You don't need to provide keys if you are happy with users providing theirs, so make sure you set `LLM_NAME` and `EMBEDDINGS_NAME`. 
Options: LLM_NAME (openai, manifest, cohere, Arc53/docsgpt-14b, Arc53/docsgpt-7b-falcon) @@ -27,6 +27,6 @@ EMBEDDINGS_NAME (openai_text-embedding-ada-002, huggingface_sentence-transformer That's it! ### Hosting everything locally and privately (for using our optimised open-source models) -If you are working with important data and dont want anything to leave your premises. +If you are working with important data and don't want anything to leave your premises. -Make sure you set SELF_HOSTED_MODEL as true in you .env variable and for your LLM_NAME you can use anything that's on Hugging Face +Make sure you set `SELF_HOSTED_MODEL` as true in you `.env` variable and for your `LLM_NAME` you can use anything that's on Hugging Face. diff --git a/extensions/chrome/package-lock.json b/extensions/chrome/package-lock.json index 510b4a7..4f8145d 100644 --- a/extensions/chrome/package-lock.json +++ b/extensions/chrome/package-lock.json @@ -1,9 +1,12 @@ { "name": "doc-ext", + "version": "0.0.1", "lockfileVersion": 2, "requires": true, "packages": { "": { + "version": "0.0.1", + "license": "MIT", "devDependencies": { "tailwindcss": "^3.2.4" } @@ -407,10 +410,16 @@ } }, "node_modules/nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==", + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -470,9 +479,9 @@ } }, "node_modules/postcss": { - "version": "8.4.21", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz", - "integrity": 
"sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "dev": true, "funding": [ { @@ -482,10 +491,14 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" }, @@ -1094,9 +1107,9 @@ "dev": true }, "nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==", + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", "dev": true }, "normalize-path": { @@ -1136,12 +1149,12 @@ "dev": true }, "postcss": { - "version": "8.4.21", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz", - "integrity": "sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "dev": true, "requires": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" } diff --git a/extensions/react-widget/package-lock.json b/extensions/react-widget/package-lock.json index 18861aa..21239b4 100644 --- a/extensions/react-widget/package-lock.json +++ b/extensions/react-widget/package-lock.json @@ -1,12 +1,12 @@ { "name": "docsgpt", - "version": 
"0.2.3", + "version": "0.2.4", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "docsgpt", - "version": "0.2.3", + "version": "0.2.4", "license": "Apache-2.0", "dependencies": { "postcss-cli": "^10.1.0", @@ -19,7 +19,7 @@ "@types/react-dom": "^18.0.9", "@vitejs/plugin-react-swc": "^3.0.0", "autoprefixer": "^10.4.13", - "postcss": "^8.4.20", + "postcss": "^8.4.31", "typescript": "^4.9.3", "vite": "^4.0.0", "vite-plugin-dts": "^1.7.1" @@ -1784,9 +1784,9 @@ } }, "node_modules/postcss": { - "version": "8.4.29", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.29.tgz", - "integrity": "sha512-cbI+jaqIeu/VGqXEarWkRCCffhjgXc0qjBtXpqJhTBohMUjUQnbBr0xqX3vEKudc4iviTewcJo5ajcec5+wdJw==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "funding": [ { "type": "opencollective", diff --git a/extensions/react-widget/package.json b/extensions/react-widget/package.json index e527c27..6343fa1 100644 --- a/extensions/react-widget/package.json +++ b/extensions/react-widget/package.json @@ -38,7 +38,7 @@ "@types/react-dom": "^18.0.9", "@vitejs/plugin-react-swc": "^3.0.0", "autoprefixer": "^10.4.13", - "postcss": "^8.4.20", + "postcss": "^8.4.31", "typescript": "^4.9.3", "vite": "^4.0.0", "vite-plugin-dts": "^1.7.1" diff --git a/extensions/web-widget/package-lock.json b/extensions/web-widget/package-lock.json index af23ce5..addedaf 100644 --- a/extensions/web-widget/package-lock.json +++ b/extensions/web-widget/package-lock.json @@ -620,9 +620,9 @@ } }, "node_modules/postcss": { - "version": "8.4.23", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.23.tgz", - "integrity": "sha512-bQ3qMcpF6A/YjR55xtoTr0jGOlnPOKAIMdOWiv0EIT6HVPEaJiJB4NLljSbiHoC2RX7DN5Uvjtpbg1NPdwv1oA==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": 
"sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "dev": true, "funding": [ { diff --git a/frontend/public/lock.svg b/frontend/public/lock.svg new file mode 100644 index 0000000..b85fd5f --- /dev/null +++ b/frontend/public/lock.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/frontend/public/message-programming.svg b/frontend/public/message-programming.svg new file mode 100644 index 0000000..f017810 --- /dev/null +++ b/frontend/public/message-programming.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/frontend/public/message-text.svg b/frontend/public/message-text.svg new file mode 100644 index 0000000..9228566 --- /dev/null +++ b/frontend/public/message-text.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/frontend/src/Hero.tsx b/frontend/src/Hero.tsx index 1552887..8ed80fc 100644 --- a/frontend/src/Hero.tsx +++ b/frontend/src/Hero.tsx @@ -1,7 +1,7 @@ export default function Hero({ className = '' }: { className?: string }) { return ( -
-
+
+

DocsGPT

πŸ¦–

@@ -17,6 +17,53 @@ export default function Hero({ className = '' }: { className?: string }) { Start by entering your query in the input field below and we will do the rest!

+
+
+
+ lock +

Chat with Your Data

+

DocsGPT will use your data to answer questions. Whether it's + documentation, source code, or Microsoft files, DocsGPT allows you + to have interactive conversations and find answers based on the + provided data.

+
+
+ +
+
+ lock +

Secure Data Storage

+

+ The security of your data is our top priority. DocsGPT ensures the + utmost protection for your sensitive information. With secure data + storage and privacy measures in place, you can trust that your + data is kept safe and confidential. +

+
+
+
+
+ lock +

Open Source Code

+

+ DocsGPT is built on open source principles, promoting transparency + and collaboration. The source code is freely available, enabling + developers to contribute, enhance, and customize the app to meet + their specific needs. +

+
+
+
); } diff --git a/frontend/src/Navigation.tsx b/frontend/src/Navigation.tsx index cb5d021..4778976 100644 --- a/frontend/src/Navigation.tsx +++ b/frontend/src/Navigation.tsx @@ -8,6 +8,8 @@ import Hamburger from './assets/hamburger.svg'; import Key from './assets/key.svg'; import Info from './assets/info.svg'; import Link from './assets/link.svg'; +import Discord from './assets/discord.svg'; +import Github from './assets/github.svg'; import UploadIcon from './assets/upload.svg'; import { ActiveState } from './models/misc'; import APIKeyModal from './preferences/APIKeyModal'; @@ -225,11 +227,11 @@ export default function Navigation() {
setIsDocsListOpen(!isDocsListOpen)} > {selectedDocs && ( -

+

{selectedDocs.name} {selectedDocs.version}

)} @@ -323,26 +325,26 @@ export default function Navigation() { link

Documentation

- - - link -

Discord

-
- - - link -

Github

-
+
diff --git a/frontend/src/assets/discord.svg b/frontend/src/assets/discord.svg new file mode 100644 index 0000000..1cff96f --- /dev/null +++ b/frontend/src/assets/discord.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/src/assets/github.svg b/frontend/src/assets/github.svg new file mode 100644 index 0000000..76bbe2c --- /dev/null +++ b/frontend/src/assets/github.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/src/conversation/Conversation.tsx b/frontend/src/conversation/Conversation.tsx index c791081..ab43576 100644 --- a/frontend/src/conversation/Conversation.tsx +++ b/frontend/src/conversation/Conversation.tsx @@ -113,7 +113,7 @@ export default function Conversation() { }; return ( -
+
{queries.length > 0 && !hasScrolledToLast ? (