Compare commits
No commits in common. 'main' and '0.9.0' have entirely different histories.
@@ -1,15 +0,0 @@
-# To get started with Dependabot version updates, you'll need to specify which
-# package ecosystems to update and where the package manifests are located.
-# Please see the documentation for all configuration options:
-# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
-
-version: 2
-updates:
-  - package-ecosystem: "pip" # See documentation for possible values
-    directory: "/application" # Location of package manifests
-    schedule:
-      interval: "weekly"
-  - package-ecosystem: "npm" # See documentation for possible values
-    directory: "/frontend" # Location of package manifests
-    schedule:
-      interval: "weekly"
@@ -1,36 +0,0 @@
-# **🎉 Join the Hacktoberfest with DocsGPT and win a Free T-shirt and other prizes! 🎉**
-
-Welcome, contributors! We're excited to announce that DocsGPT is participating in Hacktoberfest. Get involved by submitting meaningful pull requests.
-
-All contributors with accepted PRs will receive a cool Holopin! 🤩 (Watch out for a reply in your PR to collect it).
-
-### 🏆 Top 50 contributors will receive a special T-shirt
-
-### 🏆 LLM Document analysis by LexEU competition:
-A separate competition is available for those who submit the best new retrieval method that analyzes a document using EU laws.
-You will find more information about it on the 1st of October.
-
-## 📜 Here's How to Contribute:
-```text
-🛠️ Code: This is the golden ticket! Make meaningful contributions through PRs.
-
-🧩 API extension: Build an app utilising the DocsGPT API. We prefer submissions that showcase original ideas and turn the API into an AI agent.
-
-Non-Code Contributions:
-
-📚 Wiki: Improve our documentation, create a guide, or change existing documentation.
-
-🖥️ Design: Improve the UI/UX or design a new feature.
-
-📝 Blogging or Content Creation: Write articles or create videos to showcase DocsGPT or highlight your contributions!
-```
-
-### 📝 Guidelines for Pull Requests:
-- Familiarize yourself with the current contributions and our [Roadmap](https://github.com/orgs/arc53/projects/2).
-- Before contributing, we highly advise that you check existing [issues](https://github.com/arc53/DocsGPT/issues) or [create](https://github.com/arc53/DocsGPT/issues/new/choose) an issue and wait to get assigned.
-- Once you are finished with your contribution, please fill in this [form](https://airtable.com/appikMaJwdHhC1SDP/pagoblCJ9W29wf6Hf/form).
-- Refer to the [Documentation](https://docs.docsgpt.cloud/).
-- Feel free to join our [Discord](https://discord.gg/n5BX8dh8rU) server; we're here to help newcomers, so don't hesitate to jump in!
-
-Thank you very much for considering contributing to DocsGPT during Hacktoberfest! 🙏 Your contributions (not just simple typo fixes) could earn you a stylish new t-shirt and other prizes as a token of our appreciation. 🎁 Join us, and let's code together! 🚀
@@ -1,88 +1,31 @@
-# Builder Stage
-FROM ubuntu:24.04 as builder
-
-ENV DEBIAN_FRONTEND=noninteractive
-
-RUN apt-get update && \
-    apt-get install -y software-properties-common && \
-    add-apt-repository ppa:deadsnakes/ppa && \
-    # Install necessary packages and Python
-    apt-get update && \
-    apt-get install -y --no-install-recommends gcc wget unzip libc6-dev python3.11 python3.11-distutils python3.11-venv && \
-    rm -rf /var/lib/apt/lists/*
-
-# Verify Python installation and setup symlink
-RUN if [ -f /usr/bin/python3.11 ]; then \
-        ln -s /usr/bin/python3.11 /usr/bin/python; \
-    else \
-        echo "Python 3.11 not found"; exit 1; \
-    fi
-
-# Download and unzip the model
-RUN wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip && \
-    unzip mpnet-base-v2.zip -d model && \
-    rm mpnet-base-v2.zip
-
-# Install Rust
-RUN wget -q -O - https://sh.rustup.rs | sh -s -- -y
-
-# Clean up to reduce container size
-RUN apt-get remove --purge -y wget unzip && apt-get autoremove -y && rm -rf /var/lib/apt/lists/*
-
-# Copy requirements.txt
-COPY requirements.txt .
-
-# Setup Python virtual environment
-RUN python3.11 -m venv /venv
-
-# Activate virtual environment and install Python packages
-ENV PATH="/venv/bin:$PATH"
-
-# Install Python packages
-RUN pip install --no-cache-dir --upgrade pip && \
-    pip install --no-cache-dir tiktoken && \
-    pip install --no-cache-dir -r requirements.txt
-
-# Final Stage
-FROM ubuntu:24.04 as final
-
-RUN apt-get update && \
-    apt-get install -y software-properties-common && \
-    add-apt-repository ppa:deadsnakes/ppa && \
-    # Install Python
-    apt-get update && apt-get install -y --no-install-recommends python3.11 && \
-    ln -s /usr/bin/python3.11 /usr/bin/python && \
-    rm -rf /var/lib/apt/lists/*
-
-# Set working directory
-WORKDIR /app
-
-# Create a non-root user: `appuser` (Feel free to choose a name)
-RUN groupadd -r appuser && \
-    useradd -r -g appuser -d /app -s /sbin/nologin -c "Docker image user" appuser
-
-# Copy the virtual environment and model from the builder stage
-COPY --from=builder /venv /venv
-COPY --from=builder /model /app/model
-
-# Copy your application code
-COPY . /app/application
-
-# Change the ownership of the /app directory to the appuser
-RUN mkdir -p /app/application/inputs/local
-RUN chown -R appuser:appuser /app
-
-# Set environment variables
-ENV FLASK_APP=app.py \
-    FLASK_DEBUG=true \
-    PATH="/venv/bin:$PATH"
-
-# Expose the port the app runs on
-EXPOSE 7091
-
-# Switch to non-root user
-USER appuser
-
-# Start Gunicorn
-CMD ["gunicorn", "-w", "2", "--timeout", "120", "--bind", "0.0.0.0:7091", "application.wsgi:app"]
+FROM python:3.11-slim-bullseye as builder
+
+# Tiktoken requires Rust toolchain, so build it in a separate stage
+RUN apt-get update && apt-get install -y gcc curl
+RUN apt-get install -y wget unzip
+RUN wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip
+RUN unzip mpnet-base-v2.zip -d model
+RUN rm mpnet-base-v2.zip
+RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && apt-get install --reinstall libc6-dev -y
+ENV PATH="/root/.cargo/bin:${PATH}"
+RUN pip install --upgrade pip && pip install tiktoken==0.5.2
+
+FROM python:3.11-slim-bullseye
+
+# Copy pre-built packages and binaries from builder stage
+COPY --from=builder /usr/local/ /usr/local/
+
+WORKDIR /app
+
+COPY requirements.txt .
+RUN pip install -r requirements.txt
+
+COPY --from=builder /model /app/model
+
+COPY . /app/application
+ENV FLASK_APP=app.py
+ENV FLASK_DEBUG=true
+
+EXPOSE 7091
+
+CMD ["gunicorn", "-w", "2", "--timeout", "120", "--bind", "0.0.0.0:7091", "application.wsgi:app"]
@@ -1,15 +1,9 @@
 from celery import Celery
 from application.core.settings import settings
-from celery.signals import setup_logging
 
 def make_celery(app_name=__name__):
     celery = Celery(app_name, broker=settings.CELERY_BROKER_URL, backend=settings.CELERY_RESULT_BACKEND)
     celery.conf.update(settings)
     return celery
 
-@setup_logging.connect
-def config_loggers(*args, **kwargs):
-    from application.core.logging_config import setup_logging
-    setup_logging()
-
 celery = make_celery()
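Editor's note: a minimal sketch of how a task would be registered against this app factory. The module path `application.celery` and the task body are assumptions for illustration, not taken from the diff above.

```python
# Sketch only: module path and task are assumed, not part of the repo diff.
from application.celery import celery

@celery.task
def add(x, y):
    # Executed by a worker started with, e.g.:  celery -A application.celery worker
    return x + y

result = add.delay(2, 3)       # enqueued via settings.CELERY_BROKER_URL
print(result.get(timeout=10))  # 5, fetched from settings.CELERY_RESULT_BACKEND
```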
@@ -1,22 +0,0 @@
-from logging.config import dictConfig
-
-def setup_logging():
-    dictConfig({
-        'version': 1,
-        'formatters': {
-            'default': {
-                'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
-            }
-        },
-        "handlers": {
-            "console": {
-                "class": "logging.StreamHandler",
-                "stream": "ext://sys.stdout",
-                "formatter": "default",
-            }
-        },
-        'root': {
-            'level': 'INFO',
-            'handlers': ['console'],
-        },
-    })
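Since `logging_config` only exists on the removed side, here is how it was wired up; the import path matches the celery signal handler above, and the log line is illustrative.

```python
import logging
from application.core.logging_config import setup_logging  # same import as the celery signal above

setup_logging()
logging.getLogger(__name__).info("worker ready")
# emits roughly: [2024-10-01 12:00:00,000] INFO in <module>: worker ready
```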
@@ -1,32 +1,22 @@
 from application.parser.remote.base import BaseRemote
-from langchain_community.document_loaders import WebBaseLoader
-
-headers = {
-    "User-Agent": "Mozilla/5.0",
-    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
-    ";q=0.8",
-    "Accept-Language": "en-US,en;q=0.5",
-    "Referer": "https://www.google.com/",
-    "DNT": "1",
-    "Connection": "keep-alive",
-    "Upgrade-Insecure-Requests": "1",
-}
 
 class WebLoader(BaseRemote):
     def __init__(self):
+        from langchain.document_loaders import WebBaseLoader
         self.loader = WebBaseLoader
 
     def load_data(self, inputs):
         urls = inputs
 
         if isinstance(urls, str):
-            urls = [urls]
+            urls = [urls]  # Convert string to list if a single URL is passed
 
         documents = []
         for url in urls:
             try:
-                loader = self.loader([url], header_template=headers)
+                loader = self.loader([url])  # Process URLs one by one
                 documents.extend(loader.load())
             except Exception as e:
                 print(f"Error processing URL {url}: {e}")
-                continue
+                continue  # Continue with the next URL if an error occurs
         return documents
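Both variants expose the same `load_data` contract; a short usage sketch follows (the module path and URL are illustrative assumptions).

```python
# Usage sketch; module path and URL are assumptions, not from the diff.
from application.parser.remote.web_loader import WebLoader

loader = WebLoader()
docs = loader.load_data("https://example.com/docs")  # a bare string is wrapped into a list
for doc in docs:
    print(doc.metadata.get("source"), len(doc.page_content))
```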
@@ -1,85 +1,35 @@
-anthropic==0.34.2
+anthropic==0.12.0
-boto3==1.34.153
+boto3==1.34.6
-beautifulsoup4==4.12.3
 celery==5.3.6
-dataclasses-json==0.6.7
+dataclasses_json==0.6.3
 docx2txt==0.8
-duckduckgo-search==6.2.6
+duckduckgo-search==5.3.0
-ebooklib==0.18
+EbookLib==0.18
-elastic-transport==8.15.0
-elasticsearch==8.15.1
+elasticsearch==8.12.0
 escodegen==1.0.11
 esprima==4.0.1
-esutils==1.0.1
-Flask==3.0.3
+Flask==3.0.1
-faiss-cpu==1.8.0.post1
+faiss-cpu==1.7.4
-gunicorn==23.0.0
+gunicorn==21.2.0
-html2text==2024.2.26
+html2text==2020.1.16
 javalang==0.13.0
-jinja2==3.1.4
-jiter==0.5.0
-jmespath==1.0.1
-joblib==1.4.2
-jsonpatch==1.33
-jsonpointer==3.0.0
-jsonschema==4.23.0
-jsonschema-spec==0.2.4
-jsonschema-specifications==2023.7.1
-kombu==5.4.2
-langchain==0.3.0
-langchain-community==0.3.0
-langchain-core==0.3.2
-langchain-openai==0.2.0
-langchain-text-splitters==0.3.0
+langchain==0.1.4
+langchain-openai==0.0.5
-langsmith==0.1.125
-lazy-object-proxy==1.10.0
-lxml==5.3.0
-markupsafe==2.1.5
-marshmallow==3.22.0
-mpmath==1.3.0
-multidict==6.1.0
-mypy-extensions==1.0.0
-networkx==3.3
+nltk==3.8.1
-numpy==1.26.4
-openai==1.46.1
-openapi-schema-validator==0.6.2
-openapi-spec-validator==0.6.0
-openapi3-parser==1.1.18
+openapi3_parser==1.1.16
-orjson==3.10.7
-packaging==24.1
-pandas==2.2.3
+pandas==2.2.0
-pathable==0.4.3
-pillow==10.4.0
-portalocker==2.10.1
-prance==23.6.21.0
-primp==0.6.2
-prompt-toolkit==3.0.47
-protobuf==5.28.2
-py==1.11.0
-pydantic==2.9.2
-pydantic-core==2.23.4
-pydantic-settings==2.4.0
+pydantic_settings==2.1.0
-pymongo==4.8.0
+pymongo==4.6.3
-pypdf2==3.0.1
+PyPDF2==3.0.1
-python-dateutil==2.9.0.post0
 python-dotenv==1.0.1
-qdrant-client==1.11.0
+qdrant-client==1.8.2
 redis==5.0.1
-referencing==0.30.2
-regex==2024.9.11
-requests==2.32.3
+Requests==2.31.0
 retry==0.9.2
-sentence-transformers==3.0.1
+sentence-transformers
-tiktoken==0.7.0
+tiktoken==0.5.2
-tokenizers==0.19.1
-torch==2.4.1
+torch==2.1.2
-tqdm==4.66.5
+tqdm==4.66.1
-transformers==4.44.2
+transformers==4.36.2
+unstructured==0.12.2
-typing-extensions==4.12.2
-typing-inspect==0.9.0
-tzdata==2024.2
-urllib3==2.2.3
-vine==5.1.0
-wcwidth==0.2.13
-werkzeug==3.0.4
+Werkzeug==3.0.1
-yarl==1.11.1
@@ -1,22 +1,6 @@
-import tiktoken
-
-_encoding = None
-
-def get_encoding():
-    global _encoding
-    if _encoding is None:
-        _encoding = tiktoken.get_encoding("cl100k_base")
-    return _encoding
-
-def num_tokens_from_string(string: str) -> int:
-    encoding = get_encoding()
-    num_tokens = len(encoding.encode(string))
-    return num_tokens
-
-def count_tokens_docs(docs):
-    docs_content = ""
-    for doc in docs:
-        docs_content += doc.page_content
-
-    tokens = num_tokens_from_string(docs_content)
-    return tokens
+from transformers import GPT2TokenizerFast
+
+def count_tokens(string):
+    tokenizer = GPT2TokenizerFast.from_pretrained('gpt2')
+    return len(tokenizer(string)['input_ids'])
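Both sides count tokens over a plain string, but the removed version caches a `cl100k_base` encoding in a module-level global, while the older version rebuilds the GPT-2 tokenizer on every call. A quick sketch of the tiktoken path (the sample text is arbitrary):

```python
import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")  # what get_encoding() caches above
text = "DocsGPT counts tokens before sending context to the model."
print(len(encoding.encode(text)))  # token count; exact number depends on the encoding
```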
@@ -1,37 +0,0 @@
-from typing import List, Optional
-from uuid import uuid4
-
-from application.core.settings import settings
-from application.vectorstore.base import BaseVectorStore
-
-
-class MilvusStore(BaseVectorStore):
-    def __init__(self, path: str = "", embeddings_key: str = "embeddings"):
-        super().__init__()
-        from langchain_milvus import Milvus
-
-        connection_args = {
-            "uri": settings.MILVUS_URI,
-            "token": settings.MILVUS_TOKEN,
-        }
-        self._docsearch = Milvus(
-            embedding_function=self._get_embeddings(settings.EMBEDDINGS_NAME, embeddings_key),
-            collection_name=settings.MILVUS_COLLECTION_NAME,
-            connection_args=connection_args,
-        )
-        self._path = path
-
-    def search(self, question, k=2, *args, **kwargs):
-        return self._docsearch.similarity_search(query=question, k=k, filter={"path": self._path}, *args, **kwargs)
-
-    def add_texts(self, texts: List[str], metadatas: Optional[List[dict]], *args, **kwargs):
-        ids = [str(uuid4()) for _ in range(len(texts))]
-
-        return self._docsearch.add_texts(texts=texts, metadatas=metadatas, ids=ids, *args, **kwargs)
-
-    def save_local(self, *args, **kwargs):
-        pass
-
-    def delete_index(self, *args, **kwargs):
-        pass
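A sketch of driving the removed store, assuming `MILVUS_URI`, `MILVUS_TOKEN`, and `MILVUS_COLLECTION_NAME` are configured in settings; the module path and sample data are assumptions.

```python
from application.vectorstore.milvus import MilvusStore  # module path assumed

store = MilvusStore(path="local/default/index")
store.add_texts(
    texts=["DocsGPT can back retrieval with Milvus."],
    metadatas=[{"path": "local/default/index"}],  # search() filters on this field
)
print(store.search("which vector stores are supported?", k=2))
```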
@@ -1,10 +0,0 @@
-{
-  "API-docs": {
-    "title": "🗂️️ API-docs",
-    "href": "/API/API-docs"
-  },
-  "api-key-guide": {
-    "title": "🔐 API Keys guide",
-    "href": "/API/api-key-guide"
-  }
-}
@@ -1,100 +0,0 @@
-# Self-hosting DocsGPT on Kubernetes
-
-This guide will walk you through deploying DocsGPT on Kubernetes.
-
-## Prerequisites
-
-Ensure you have the following installed before proceeding:
-
-- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
-- Access to a Kubernetes cluster
-
-## Folder Structure
-
-The `k8s` folder contains the necessary deployment and service configuration files:
-
-- `deployments/`
-- `services/`
-- `docsgpt-secrets.yaml`
-
-## Deployment Instructions
-
-1. **Clone the Repository**
-
-   ```sh
-   git clone https://github.com/arc53/DocsGPT.git
-   cd DocsGPT/k8s
-   ```
-
-2. **Configure Secrets (optional)**
-
-   Ensure that you have all the necessary secrets in `docsgpt-secrets.yaml`. Update it with your own secrets before applying if you wish. By default, Qdrant is used as the vector store and the public DocsGPT LLM is used for inference.
-
-3. **Apply Kubernetes Deployments**
-
-   Deploy your DocsGPT resources using the following commands:
-
-   ```sh
-   kubectl apply -f deployments/
-   ```
-
-4. **Apply Kubernetes Services**
-
-   Set up your services using the following commands:
-
-   ```sh
-   kubectl apply -f services/
-   ```
-
-5. **Apply Secrets**
-
-   Apply the secret configurations:
-
-   ```sh
-   kubectl apply -f docsgpt-secrets.yaml
-   ```
-
-6. **Substitute API URL**
-
-   After deploying the services, you need to update the environment variable `VITE_API_HOST` in your deployment file `deployments/docsgpt-deploy.yaml` with the actual endpoint URL created by your `docsgpt-api-service`.
-
-   ```sh
-   kubectl get services/docsgpt-api-service -o jsonpath='{.status.loadBalancer.ingress[0].ip}' | xargs -I {} sed -i "s|<your-api-endpoint>|{}|g" deployments/docsgpt-deploy.yaml
-   ```
-
-7. **Rerun Deployment**
-
-   After making the changes, reapply the deployment configuration to update the environment variables:
-
-   ```sh
-   kubectl apply -f deployments/
-   ```
-
-## Verifying the Deployment
-
-To verify if everything is set up correctly, you can run the following:
-
-```sh
-kubectl get pods
-kubectl get services
-```
-
-Ensure that the pods are running and the services are available.
-
-## Accessing DocsGPT
-
-To access DocsGPT, you need to find the external IP address of the frontend service. You can do this by running:
-
-```sh
-kubectl get services/docsgpt-frontend-service | awk 'NR>1 {print "http://" $4}'
-```
-
-## Troubleshooting
-
-If you encounter any issues, you can check the logs of the pods for more details:
-
-```sh
-kubectl logs <pod-name>
-```
-
-Replace `<pod-name>` with the actual name of your DocsGPT pod.
@@ -0,0 +1,6 @@
+{
+  "API-docs": {
+    "title": "🗂️️ API-docs",
+    "href": "/Developing/API-docs"
+  }
+}
@@ -1,34 +0,0 @@
-import {Steps} from 'nextra/components'
-import { Callout } from 'nextra/components'
-
-## Chrome Extension Setup Guide
-
-To enhance your DocsGPT experience, you can install the DocsGPT Chrome extension. Here's how:
-
-<Steps>
-### Step 1
-In the DocsGPT GitHub repository, click on the **Code** button and select **Download ZIP**.
-### Step 2
-Unzip the downloaded file to a location you can easily access.
-### Step 3
-Open the Google Chrome browser and click on the three dots menu (upper right corner).
-### Step 4
-Select **More Tools** and then **Extensions**.
-### Step 5
-Turn on the **Developer mode** switch in the top right corner of the **Extensions** page.
-### Step 6
-Click on the **Load unpacked** button.
-### Step 7
-Select the **Chrome** folder where the DocsGPT files have been unzipped (docsgpt-main > extensions > chrome).
-### Step 8
-The extension should now be added to Google Chrome and can be managed on the Extensions page.
-### Step 9
-To disable or remove the extension, simply turn off the toggle switch on the extension card or click the **Remove** button.
-</Steps>
@@ -1,25 +1,10 @@
-import Image from 'next/image'
-
 # Customizing the Main Prompt
 
 Customizing the main prompt for DocsGPT gives you the ability to tailor the AI's responses to your specific requirements. By modifying the prompt text, you can achieve more accurate and relevant answers. Here's how you can do it:
 
-1. Navigate to `SideBar -> Settings`.
-
-2. In Settings, select the `Active Prompt`; you will now be able to see the various prompt styles.
-
-3. Click on the `edit icon` on the prompt of your choice to see its current text; you can now customize the prompt as you wish.
-
-### Video Demo
-<Image src="/prompts.gif" alt="prompts" width={800} height={500} />
+1. Navigate to `/application/prompts/combine_prompt.txt`.
+
+2. Open the `combine_prompt.txt` file and modify the prompt text to suit your needs. You can experiment with different phrasings and structures to observe how the model responds. The main prompt serves as guidance to the AI model on how to generate responses.
+
+## Example Prompt Modification
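The hunk is truncated before the example itself. Purely as an illustration (not the verbatim contents of `combine_prompt.txt`), a prompt of this shape could look like:

```python
# Illustrative only; not the verbatim combine_prompt.txt.
COMBINE_PROMPT = """\
You are DocsGPT, a friendly and helpful AI assistant. Use the following pieces
of context to answer the question at the end. If the answer is not contained
in the context, say that you don't know; do not make one up.
----------------
{summaries}
"""
print(COMBINE_PROMPT.format(summaries="<retrieved document chunks>"))
```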
@@ -0,0 +1,63 @@
+## How to train on other documentation
+
+This AI can utilize any documentation, but it requires preparation for similarity search. Follow these steps to get your documentation ready:
+
+**Step 1: Prepare Your Documentation**
+
+![video-example-of-how-to-do-it](https://d3dg1063dc54p9.cloudfront.net/videos/how-to-vectorise.gif)
+
+Start by going to the `/scripts/` folder. If you open the ingestion script there, you will see that it uses the .rst files from the folder to create an `index.faiss` and an `index.pkl`.
+
+It currently uses OpenAI to create the vector store, so make sure your documentation is not too large. Ingesting the Pandas documentation cost me around $3-$4.
+
+You can typically find documentation on GitHub in the `docs/` folder for most open-source projects.
+
+### Step 1: Find documentation in .rst/.md format and create a folder with it in your scripts directory
+
+- Name it `inputs/`.
+- Put all your .rst/.md files in there.
+- The search is recursive, so you don't need to flatten them.
+
+If there are no .rst/.md files, convert whatever you find to a .txt file and feed it (don't forget to change the extension in the script).
+
+### Step 2: Configure Your OpenAI API Key
+
+1. Create a `.env` file in the `scripts/` folder.
+2. Add your OpenAI API key inside: `OPENAI_API_KEY=<your-api-key>`.
+
+### Step 3: Run the Ingestion Script
+
+`python ingest.py ingest`
+
+It will provide you with the estimated cost.
+
+### Step 4: Move `index.faiss` and `index.pkl` generated in `scripts/output` to the `application/` folder
+
+A quick way to sanity-check the generated index before moving it is sketched below.
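If, as the file names suggest, `index.faiss`/`index.pkl` were built with LangChain's FAISS wrapper, they can be loaded for a quick check; a sketch under that assumption (package names per the LangChain FAISS integration, not this repo):

```python
# Sanity-check sketch; assumes the index was built with LangChain's FAISS
# wrapper and OpenAI embeddings, as ingest.py's output suggests.
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS

store = FAISS.load_local(
    "scripts/output",                      # folder holding index.faiss / index.pkl
    OpenAIEmbeddings(),                    # must match the embeddings used at ingest time
    allow_dangerous_deserialization=True,  # index.pkl is a pickle
)
print(store.similarity_search("How do I ingest documentation?", k=2))
```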
+### Step 5: Run the Web App
+
+Once you run it, it will use the new context relevant to your documentation. Make sure you select 'default' from the dropdown in the UI.
+
+## Customization
+
+You can learn more about the options of `ingest.py` by running:
+
+`python ingest.py --help`
+
+| Options                          |                                                                                                                                  |
+|:--------------------------------:|:--------------------------------------------------------------------------------------------------------------------------------:|
+| **ingest**                       | Runs the 'ingest' function, converting documentation to FAISS plus index format                                                  |
+| --dir TEXT                       | List of paths to directories for index creation. E.g. --dir inputs --dir inputs2 [default: inputs]                               |
+| --file TEXT                      | File paths to use (optional; overrides directory). E.g. --file inputs/1.md --file inputs/2.md                                    |
+| --recursive / --no-recursive     | Whether to recursively search in subdirectories [default: recursive]                                                             |
+| --limit INTEGER                  | Maximum number of files to read                                                                                                  |
+| --formats TEXT                   | List of required extensions (with the dot). Currently supported: .rst, .md, .pdf, .docx, .csv, .epub, .html [default: .rst, .md] |
+| --exclude / --no-exclude         | Whether to exclude hidden files (dotfiles) [default: exclude]                                                                    |
+| -y, --yes                        | Whether to skip price confirmation                                                                                               |
+| --sample / --no-sample           | Whether to output a sample of the first 5 split documents [default: no-sample]                                                   |
+| --token-check / --no-token-check | Whether to group small documents and split large ones. Improves semantics. [default: token-check]                                |
+| --min_tokens INTEGER             | Minimum number of tokens below which documents are not grouped [default: 150]                                                    |
+| --max_tokens INTEGER             | Maximum number of tokens above which documents are split [default: 2000]                                                         |
+|                                  |                                                                                                                                  |
+| **convert**                      | Creates documentation in .md format from source code                                                                             |
+| --dir TEXT                       | Path to a directory with source code. E.g. --dir inputs [default: inputs]                                                        |
+| --formats TEXT                   | Source code language from which to create documentation. Supports py, js and java. E.g. --formats py [default: py]               |
@@ -1,44 +0,0 @@
-import { Callout } from 'nextra/components'
-import Image from 'next/image'
-import { Steps } from 'nextra/components'
-
-## How to train on other documentation
-
-Training on other documentation sources can greatly enhance the versatility and depth of DocsGPT's knowledge. By incorporating diverse materials, you can broaden the AI's understanding and improve its ability to generate insightful responses across a range of topics. Here's a step-by-step guide on how to effectively train DocsGPT on additional documentation sources:
-
-**Get your document ready**:
-
-Make sure the document you want to train on is ready on the device you are using. You can also use links to the documentation to train on.
-
-<Callout type="warning" emoji="⚠️">
-Note: The document should be in one of the supported file formats (.pdf, .txt, .rst, .docx, .md, .zip) and limited to 25 MB. You can also train using a link to the documentation.
-</Callout>
-
-### Video Demo
-
-<Image src="/docs.gif" alt="prompts" width={800} height={500} />
-
-<Steps>
-### Step 1
-Navigate to the sidebar, where you will find the `Source Docs` option; here you will find three built-in options: Default, Web Search, and None.
-
-### Step 2
-Click on the `Upload icon` just beside the source docs options, then browse and upload the document you want to train on, or select the `remote` option if you want to insert a link to the documentation.
-
-### Step 3
-You will now see the name of the uploaded file under Uploaded Files. Click on `Train`; training on the document may take some time. You will be able to see the `Training progress`, and once training is complete, click the `finish` button; your document is now uploaded.
-
-### Step 4
-Go to `New chat`, select the document you uploaded under `Source Docs` in the sidebar, and go ahead with your chat; you can now ask questions about the document you uploaded and get answers grounded in it.
-</Steps>
@@ -0,0 +1,48 @@
+# Setting Up Local Language Models for Your App
+
+Your app relies on two essential models: Embeddings and Text Generation. While OpenAI's default models work seamlessly, you have the flexibility to switch providers or even run the models locally.
+
+## Step 1: Configure Environment Variables
+
+Navigate to the `.env` file or set the following environment variables:
+
+```env
+LLM_NAME=<your Text Generation model>
+API_KEY=<API key for Text Generation>
+EMBEDDINGS_NAME=<LLM for Embeddings>
+EMBEDDINGS_KEY=<API key for Embeddings>
+VITE_API_STREAMING=<true or false>
+```
+
+You can omit the keys if users provide their own. Ensure you set `LLM_NAME` and `EMBEDDINGS_NAME`.
+
+## Step 2: Choose Your Models
+
+**Options for `LLM_NAME`:**
+- openai ([More details](https://platform.openai.com/docs/models))
+- anthropic ([More details](https://docs.anthropic.com/claude/reference/selecting-a-model))
+- manifest ([More details](https://python.langchain.com/docs/integrations/llms/manifest))
+- cohere ([More details](https://docs.cohere.com/docs/llmu))
+- llama.cpp ([More details](https://python.langchain.com/docs/integrations/llms/llamacpp))
+- huggingface (Arc53/DocsGPT-7B by default)
+- sagemaker ([More details](https://aws.amazon.com/sagemaker/))
+
+Note: for huggingface you can choose any model inside `application/llm/huggingface.py`, or pass `llm_name` on init.
+
+**Options for `EMBEDDINGS_NAME`:**
+- openai_text-embedding-ada-002
+- huggingface_sentence-transformers/all-mpnet-base-v2
+- huggingface_hkunlp/instructor-large
+- cohere_medium
+
+If you want to be completely local, set `EMBEDDINGS_NAME` to `huggingface_sentence-transformers/all-mpnet-base-v2`.
+
+For llama.cpp, download the required model and place it in the `models/` folder (see the sketch just below).
+
+Alternatively, for local Llama setup, run `setup.sh` and choose option 1. The script handles the DocsGPT model addition.
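As a concrete sketch of the llama.cpp route, using the LangChain wrapper linked above (the GGUF filename is a placeholder, not a file shipped with the repo):

```python
from langchain_community.llms import LlamaCpp  # per the llama.cpp integration linked above

llm = LlamaCpp(
    model_path="models/docsgpt-7b.gguf",  # placeholder; any GGUF model dropped into models/
    n_ctx=2048,                           # context window; tune per model
    temperature=0.1,
)
print(llm.invoke("What is DocsGPT?"))
```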
+## Step 3: Local Hosting for Privacy
+
+If you are working with sensitive data, host everything locally: set `LLM_NAME` to llama.cpp or huggingface and use any model available on Hugging Face (for llama.cpp you need to convert it into GGUF format first).
+
+That's it! Your app is now configured for local and private hosting, ensuring optimal security for critical data.
@@ -1,49 +0,0 @@
-import { Callout } from 'nextra/components'
-import Image from 'next/image'
-import { Steps } from 'nextra/components'
-
-# Setting Up Local Language Models for Your App
-
-Setting up local language models for your app can significantly enhance its capabilities, enabling it to understand and generate text in multiple languages without relying on external APIs. By integrating local language models, you can improve privacy, reduce latency, and ensure continuous functionality even in offline environments. Here's a comprehensive guide on how to set up local language models for your application:
-
-## Steps:
-### For the cloud version (LLM change):
-<Steps>
-### Step 1
-Visit the chat screen and you will see the default LLM selected.
-### Step 2
-Click on it and you will get a drop-down of the various LLMs available to choose from.
-### Step 3
-Choose the LLM of your choice.
-</Steps>
-
-### Video Demo
-<Image src="/llms.gif" alt="prompts" width={800} height={500} />
-
-### For open-source LLM change:
-<Steps>
-### Step 1
-For open source, edit the `.env` file and set `LLM_NAME` to your desired LLM's name.
-### Step 2
-All the supported LLM providers live in `application/llm`; you can check which env variables each one needs.
-The latest list of supported LLMs is at https://github.com/arc53/DocsGPT/blob/main/application/llm/llm_creator.py
-### Step 3
-Visit `application/llm` and open the file for your selected LLM; there you will find the specific requirements needed to use it, e.g. the API key for that LLM.
-</Steps>
-
-### For OpenAI-Compatible Endpoints:
-DocsGPT supports the use of OpenAI-compatible endpoints through base URL substitution. This feature allows you to use alternative AI models or services that implement the OpenAI API interface.
-
-Set `OPENAI_BASE_URL` in your environment: either add it to the `.env` file with the desired base URL, or add the environment variable to the backend container in `docker-compose.yml`.
-
-> Make sure you have the right API_KEY and correct LLM_NAME.
[binary image removed: 839 KiB]
[binary image removed: 23 MiB]
[binary image removed: 500 KiB]
[binary image removed: 974 KiB]
@@ -1,43 +0,0 @@
-#!/bin/bash
-## chmod +x publish.sh - run once to make this script executable
-set -e
-
-# Back up the manifests so they can be restored between publishes
-cp package.json package_copy.json
-cp package-lock.json package-lock_copy.json
-
-publish_package() {
-  PACKAGE_NAME=$1
-  BUILD_COMMAND=$2
-
-  # Update package name in package.json
-  jq --arg name "$PACKAGE_NAME" '.name=$name' package.json > temp.json && mv temp.json package.json
-
-  # Remove 'targets' key if the package name is 'docsgpt-react'
-  if [ "$PACKAGE_NAME" = "docsgpt-react" ]; then
-    jq 'del(.targets)' package.json > temp.json && mv temp.json package.json
-  fi
-
-  if [ -d "dist" ]; then
-    echo "Deleting existing dist directory..."
-    rm -rf dist
-  fi
-
-  npm version patch
-  npm run "$BUILD_COMMAND"
-
-  # Publish to npm
-  npm publish
-
-  # Clean up: restore the original manifests (keep the copies for the next call)
-  cp package_copy.json package.json
-  cp package-lock_copy.json package-lock.json
-  echo "Published ${PACKAGE_NAME}"
-}
-
-# Publish docsgpt package
-publish_package "docsgpt" "build"
-
-# Publish docsgpt-react package
-publish_package "docsgpt-react" "build:react"
-
-rm -rf package_copy.json
-rm -rf package-lock_copy.json
-echo "---Process completed---"
@@ -1,4 +0,0 @@
-<svg width="14" height="14" viewBox="0 0 16 16" fill="current" xmlns="http://www.w3.org/2000/svg">
-<path d="M6.37776 10.1001V12.9C6.37776 13.457 6.599 13.9911 6.99282 14.3849C7.38664 14.7788 7.92077 15 8.47772 15L11.2777 8.70011V1.00025H3.38181C3.04419 0.996436 2.71656 1.11477 2.45929 1.33344C2.20203 1.55212 2.03246 1.8564 1.98184 2.19023L1.01585 8.49012C0.985398 8.69076 0.998931 8.89563 1.05551 9.09053C1.1121 9.28543 1.21038 9.46569 1.34355 9.61884C1.47671 9.77198 1.64159 9.89434 1.82674 9.97744C2.01189 10.0605 2.2129 10.1024 2.41583 10.1001H6.37776ZM11.2777 1.00025H13.1466C13.5428 0.993247 13.9277 1.13195 14.2284 1.39002C14.5291 1.64809 14.7245 2.00758 14.7776 2.40023V7.30014C14.7245 7.69279 14.5291 8.05227 14.2284 8.31035C13.9277 8.56842 13.5428 8.70712 13.1466 8.70011H11.2777" fill="none"/>
-<path d="M11.2777 8.70011L8.47772 15C7.92077 15 7.38664 14.7788 6.99282 14.3849C6.599 13.9911 6.37776 13.457 6.37776 12.9V10.1001H2.41583C2.2129 10.1024 2.01189 10.0605 1.82674 9.97744C1.64159 9.89434 1.47671 9.77198 1.34355 9.61884C1.21038 9.46569 1.1121 9.28543 1.05551 9.09053C0.998931 8.89563 0.985398 8.69076 1.01585 8.49012L1.98184 2.19023C2.03246 1.8564 2.20203 1.55212 2.45929 1.33344C2.71656 1.11477 3.04419 0.996436 3.38181 1.00025H11.2777M11.2777 8.70011V1.00025M11.2777 8.70011H13.1466C13.5428 8.70712 13.9277 8.56842 14.2284 8.31035C14.5291 8.05227 14.7245 7.69279 14.7776 7.30014V2.40023C14.7245 2.00758 14.5291 1.64809 14.2284 1.39002C13.9277 1.13195 13.5428 0.993247 13.1466 1.00025H11.2777" stroke="current" stroke-width="1.4" stroke-linecap="round" stroke-linejoin="round"/>
-</svg>
@@ -1,4 +0,0 @@
-<svg width="14" height="14" viewBox="0 0 16 16" fill="current" xmlns="http://www.w3.org/2000/svg">
-<path d="M9.39995 5.89997V3.09999C9.39995 2.54304 9.1787 2.0089 8.78487 1.61507C8.39105 1.22125 7.85691 1 7.29996 1L4.49998 7.29996V14.9999H12.3959C12.7336 15.0037 13.0612 14.8854 13.3185 14.6667C13.5757 14.448 13.7453 14.1437 13.7959 13.8099L14.7619 7.50996C14.7924 7.30931 14.7788 7.10444 14.7222 6.90954C14.6657 6.71464 14.5674 6.53437 14.4342 6.38123C14.301 6.22808 14.1362 6.10572 13.951 6.02262C13.7659 5.93952 13.5649 5.89767 13.3619 5.89997H9.39995ZM4.49998 14.9999H2.39999C2.02869 14.9999 1.6726 14.8524 1.41005 14.5899C1.1475 14.3273 1 13.9712 1 13.5999V8.69995C1 8.32865 1.1475 7.97256 1.41005 7.71001C1.6726 7.44746 2.02869 7.29996 2.39999 7.29996H4.49998" fill="none"/>
-<path d="M4.49998 7.29996L7.29996 1C7.85691 1 8.39105 1.22125 8.78487 1.61507C9.1787 2.0089 9.39995 2.54304 9.39995 3.09999V5.89997H13.3619C13.5649 5.89767 13.7659 5.93952 13.951 6.02262C14.1362 6.10572 14.301 6.22808 14.4342 6.38123C14.5674 6.53437 14.6657 6.71464 14.7223 6.90954C14.7788 7.10444 14.7924 7.30931 14.7619 7.50996L13.7959 13.8099C13.7453 14.1437 13.5757 14.448 13.3185 14.6667C13.0612 14.8854 12.7336 15.0037 12.3959 14.9999H4.49998M4.49998 7.29996V14.9999M4.49998 7.29996H2.39999C2.02869 7.29996 1.6726 7.44746 1.41005 7.71001C1.1475 7.97256 1 8.32865 1 8.69995V13.5999C1 13.9712 1.1475 14.3273 1.41005 14.5899C1.6726 14.8524 2.02869 14.9999 2.39999 14.9999H4.49998" stroke="current" stroke-width="1.39999" stroke-linecap="round" stroke-linejoin="round"/>
-</svg>
@@ -0,0 +1,7 @@
+<svg width="36" height="36" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M4.37891 9.44824H7.75821" stroke="white" stroke-width="1.68965" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M11.1377 9.44824H12.8273" stroke="white" stroke-width="1.68965" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M4.37891 6.06934H6.06856" stroke="white" stroke-width="1.68965" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M9.44824 6.06934H12.8276" stroke="white" stroke-width="1.68965" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M16.2069 11.1379C16.2069 11.5861 16.0289 12.0158 15.712 12.3327C15.3951 12.6496 14.9654 12.8276 14.5172 12.8276H4.37931L1 16.2069V2.68965C1 2.24153 1.17802 1.81176 1.49489 1.49489C1.81176 1.17802 2.24153 1 2.68965 1H14.5172C14.9654 1 15.3951 1.17802 15.712 1.49489C16.0289 1.81176 16.2069 2.24153 16.2069 2.68965V11.1379Z" stroke="white" stroke-width="1.68965" stroke-linecap="round" stroke-linejoin="round"/>
+</svg>
@@ -1,12 +1,6 @@
-import React from 'react';
-import { createRoot } from 'react-dom/client';
-import { DocsGPTWidget } from './components/DocsGPTWidget';
-
-if (typeof window !== 'undefined') {
-  const renderWidget = (elementId: string, props = {}) => {
-    const root = createRoot(document.getElementById(elementId) as HTMLElement);
-    root.render(<DocsGPTWidget {...props} />);
-  };
-  (window as any).renderDocsGPTWidget = renderWidget;
-}
-export { DocsGPTWidget };
+import { createRoot } from 'react-dom/client';
+import App from './App.tsx';
+import React from 'react';
+
+const root = createRoot(document.getElementById('app') as HTMLElement);
+root.render(<App />);
@@ -1,106 +1,92 @@
-import { FEEDBACK } from "@/types";
-
 interface HistoryItem {
   prompt: string;
   response?: string;
 }
 
 interface FetchAnswerStreamingProps {
   question?: string;
   apiKey?: string;
   selectedDocs?: string;
   history?: HistoryItem[];
   conversationId?: string | null;
   apiHost?: string;
   onEvent?: (event: MessageEvent) => void;
 }
 
-interface FeedbackPayload {
-  question: string;
-  answer: string;
-  apikey: string;
-  feedback: FEEDBACK;
-}
-
 export function fetchAnswerStreaming({
   question = '',
   apiKey = '',
+  selectedDocs = '',
   history = [],
   conversationId = null,
   apiHost = '',
   onEvent = () => { console.log("Event triggered, but no handler provided."); }
 }: FetchAnswerStreamingProps): Promise<void> {
+  let docPath = 'default';
+  if (selectedDocs) {
+    docPath = selectedDocs;
+  }
+
   return new Promise<void>((resolve, reject) => {
     const body = {
       question: question,
+      api_key: apiKey,
+      embeddings_key: apiKey,
+      active_docs: docPath,
       history: JSON.stringify(history),
       conversation_id: conversationId,
-      model: 'default',
-      api_key: apiKey
+      model: 'default'
     };
 
     fetch(apiHost + '/stream', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(body),
    })
      .then((response) => {
        if (!response.body) throw Error('No response body');
 
        const reader = response.body.getReader();
        const decoder = new TextDecoder('utf-8');
        let counterrr = 0;
 
        const processStream = ({
          done,
          value,
        }: ReadableStreamReadResult<Uint8Array>) => {
          if (done) {
            resolve();
            return;
          }
 
          counterrr += 1;
 
          const chunk = decoder.decode(value);
          const lines = chunk.split('\n');
 
          for (let line of lines) {
            if (line.trim() == '') {
              continue;
            }
            if (line.startsWith('data:')) {
              line = line.substring(5);
            }
 
            const messageEvent = new MessageEvent('message', {
              data: line,
            });
 
            onEvent(messageEvent); // handle each message
          }
 
          reader.read().then(processStream).catch(reject);
        };
 
        reader.read().then(processStream).catch(reject);
      })
      .catch((error) => {
        console.error('Connection failed:', error);
        reject(error);
      });
   });
 }
-
-export const sendFeedback = (payload: FeedbackPayload, apiHost: string): Promise<Response> => {
-  return fetch(`${apiHost}/api/feedback`, {
-    method: 'POST',
-    headers: {
-      'Content-Type': 'application/json'
-    },
-    body: JSON.stringify({
-      question: payload.question,
-      answer: payload.answer,
-      feedback: payload.feedback,
-      api_key: payload.apikey
-    }),
-  });
-};
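Both variants POST to `/stream` and parse `data:`-prefixed lines from the response body. An equivalent minimal consumer in Python, handy for testing the endpoint outside the widget (host and key are placeholders; the extra `embeddings_key`/`active_docs` fields are only needed by the older variant):

```python
import json
import requests

body = {
    "question": "What is DocsGPT?",
    "history": json.dumps([]),
    "conversation_id": None,
    "model": "default",
    "api_key": "<your-api-key>",
}

with requests.post("http://localhost:7091/stream", json=body, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith("data:"):
            print(line[5:])  # each payload, as onEvent receives it above
```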
@@ -1,17 +1,13 @@
 <!DOCTYPE html>
 <html lang="en">
   <head>
     <meta charset="UTF-8" />
-    <meta name="viewport" content="width=device-width, initial-scale=1.0,viewport-fit=cover" />
-    <meta name="apple-mobile-web-app-capable" content="yes">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>DocsGPT 🦖</title>
     <link rel="shortcut icon" type="image/x-icon" href="/favicon.ico" />
   </head>
   <body>
     <div id="root" class="h-screen"></div>
     <script type="module" src="/src/main.tsx"></script>
   </body>
 </html>
@@ -1,69 +0,0 @@
-const baseURL = import.meta.env.VITE_API_HOST || 'https://docsapi.arc53.com';
-
-const defaultHeaders = {
-  'Content-Type': 'application/json',
-};
-
-const apiClient = {
-  get: (url: string, headers = {}, signal?: AbortSignal): Promise<any> =>
-    fetch(`${baseURL}${url}`, {
-      method: 'GET',
-      headers: {
-        ...defaultHeaders,
-        ...headers,
-      },
-      signal,
-    }).then((response) => {
-      return response;
-    }),
-
-  post: (
-    url: string,
-    data: any,
-    headers = {},
-    signal?: AbortSignal,
-  ): Promise<any> =>
-    fetch(`${baseURL}${url}`, {
-      method: 'POST',
-      headers: {
-        ...defaultHeaders,
-        ...headers,
-      },
-      body: JSON.stringify(data),
-      signal,
-    }).then((response) => {
-      return response;
-    }),
-
-  put: (
-    url: string,
-    data: any,
-    headers = {},
-    signal?: AbortSignal,
-  ): Promise<any> =>
-    fetch(`${baseURL}${url}`, {
-      method: 'PUT',
-      headers: {
-        ...defaultHeaders,
-        ...headers,
-      },
-      body: JSON.stringify(data),
-      signal,
-    }).then((response) => {
-      return response;
-    }),
-
-  delete: (url: string, headers = {}, signal?: AbortSignal): Promise<any> =>
-    fetch(`${baseURL}${url}`, {
-      method: 'DELETE',
-      headers: {
-        ...defaultHeaders,
-        ...headers,
-      },
-      signal,
-    }).then((response) => {
-      return response;
-    }),
-};
-
-export default apiClient;
@@ -1,38 +0,0 @@
-const endpoints = {
-  USER: {
-    DOCS: '/api/combine',
-    DOCS_CHECK: '/api/docs_check',
-    API_KEYS: '/api/get_api_keys',
-    CREATE_API_KEY: '/api/create_api_key',
-    DELETE_API_KEY: '/api/delete_api_key',
-    PROMPTS: '/api/get_prompts',
-    CREATE_PROMPT: '/api/create_prompt',
-    DELETE_PROMPT: '/api/delete_prompt',
-    UPDATE_PROMPT: '/api/update_prompt',
-    SINGLE_PROMPT: (id: string) => `/api/get_single_prompt?id=${id}`,
-    DELETE_PATH: (docPath: string) => `/api/delete_old?source_id=${docPath}`,
-    TASK_STATUS: (task_id: string) => `/api/task_status?task_id=${task_id}`,
-    MESSAGE_ANALYTICS: '/api/get_message_analytics',
-    TOKEN_ANALYTICS: '/api/get_token_analytics',
-    FEEDBACK_ANALYTICS: '/api/get_feedback_analytics',
-    LOGS: `/api/get_user_logs`,
-    MANAGE_SYNC: '/api/manage_sync',
-  },
-  CONVERSATION: {
-    ANSWER: '/api/answer',
-    ANSWER_STREAMING: '/stream',
-    SEARCH: '/api/search',
-    FEEDBACK: '/api/feedback',
-    CONVERSATION: (id: string) => `/api/get_single_conversation?id=${id}`,
-    CONVERSATIONS: '/api/get_conversations',
-    SHARE_CONVERSATION: (isPromptable: boolean) =>
-      `/api/share?isPromptable=${isPromptable}`,
-    SHARED_CONVERSATION: (identifier: string) =>
-      `/api/shared_conversation/${identifier}`,
-    DELETE: (id: string) => `/api/delete_conversation?id=${id}`,
-    DELETE_ALL: '/api/delete_all_conversations',
-    UPDATE: '/api/update_conversation_name',
-  },
-};
-
-export default endpoints;
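For orientation, the same catalogue exercised from Python; the host is the removed apiClient's default, and the feedback value is illustrative (the real enum lives in `@/types`):

```python
import requests

BASE = "https://docsapi.arc53.com"  # default from the removed apiClient; use your own host

# GET /api/get_conversations (CONVERSATION.CONVERSATIONS)
print(requests.get(f"{BASE}/api/get_conversations").json())

# POST /api/feedback (CONVERSATION.FEEDBACK), field names as in sendFeedback above
requests.post(f"{BASE}/api/feedback", json={
    "question": "What is DocsGPT?",
    "answer": "...",
    "feedback": "LIKE",            # illustrative value
    "api_key": "<your-api-key>",
})
```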
@@ -1,32 +0,0 @@
-import apiClient from '../client';
-import endpoints from '../endpoints';
-
-const conversationService = {
-  answer: (data: any, signal: AbortSignal): Promise<any> =>
-    apiClient.post(endpoints.CONVERSATION.ANSWER, data, {}, signal),
-  answerStream: (data: any, signal: AbortSignal): Promise<any> =>
-    apiClient.post(endpoints.CONVERSATION.ANSWER_STREAMING, data, {}, signal),
-  search: (data: any): Promise<any> =>
-    apiClient.post(endpoints.CONVERSATION.SEARCH, data),
-  feedback: (data: any): Promise<any> =>
-    apiClient.post(endpoints.CONVERSATION.FEEDBACK, data),
-  getConversation: (id: string): Promise<any> =>
-    apiClient.get(endpoints.CONVERSATION.CONVERSATION(id)),
-  getConversations: (): Promise<any> =>
-    apiClient.get(endpoints.CONVERSATION.CONVERSATIONS),
-  shareConversation: (isPromptable: boolean, data: any): Promise<any> =>
-    apiClient.post(
-      endpoints.CONVERSATION.SHARE_CONVERSATION(isPromptable),
-      data,
-    ),
-  getSharedConversation: (identifier: string): Promise<any> =>
-    apiClient.get(endpoints.CONVERSATION.SHARED_CONVERSATION(identifier)),
-  delete: (id: string, data: any): Promise<any> =>
-    apiClient.post(endpoints.CONVERSATION.DELETE(id), data),
-  deleteAll: (): Promise<any> =>
-    apiClient.get(endpoints.CONVERSATION.DELETE_ALL),
-  update: (data: any): Promise<any> =>
-    apiClient.post(endpoints.CONVERSATION.UPDATE, data),
-};
-
-export default conversationService;
@@ -1,38 +0,0 @@
-import apiClient from '../client';
-import endpoints from '../endpoints';
-
-const userService = {
-  getDocs: (): Promise<any> => apiClient.get(endpoints.USER.DOCS),
-  checkDocs: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.DOCS_CHECK, data),
-  getAPIKeys: (): Promise<any> => apiClient.get(endpoints.USER.API_KEYS),
-  createAPIKey: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.CREATE_API_KEY, data),
-  deleteAPIKey: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.DELETE_API_KEY, data),
-  getPrompts: (): Promise<any> => apiClient.get(endpoints.USER.PROMPTS),
-  createPrompt: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.CREATE_PROMPT, data),
-  deletePrompt: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.DELETE_PROMPT, data),
-  updatePrompt: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.UPDATE_PROMPT, data),
-  getSinglePrompt: (id: string): Promise<any> =>
-    apiClient.get(endpoints.USER.SINGLE_PROMPT(id)),
-  deletePath: (docPath: string): Promise<any> =>
-    apiClient.get(endpoints.USER.DELETE_PATH(docPath)),
-  getTaskStatus: (task_id: string): Promise<any> =>
-    apiClient.get(endpoints.USER.TASK_STATUS(task_id)),
-  getMessageAnalytics: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.MESSAGE_ANALYTICS, data),
-  getTokenAnalytics: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.TOKEN_ANALYTICS, data),
-  getFeedbackAnalytics: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.FEEDBACK_ANALYTICS, data),
-  getLogs: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.LOGS, data),
-  manageSync: (data: any): Promise<any> =>
-    apiClient.post(endpoints.USER.MANAGE_SYNC, data),
-};
-
-export default userService;
@@ -1,3 +0,0 @@
-<svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
-<path d="M14.175 0.843262H16.9354L10.9054 7.75269L18 17.1564H12.4457L8.09229 11.4543L3.11657 17.1564H0.353571L6.80271 9.76355L0 0.844547H5.69571L9.62486 6.05555L14.175 0.843262ZM13.2043 15.5004H14.7343L4.86 2.41312H3.21943L13.2043 15.5004Z" fill="#747474"/>
-</svg>
@@ -1,3 +0,0 @@
-<svg width="7" height="12" viewBox="0 0 7 12" fill="none" xmlns="http://www.w3.org/2000/svg">
-<path d="M6.29154 4.88202L1.70154 0.29202C1.60896 0.199438 1.49905 0.125998 1.37808 0.0758932C1.25712 0.0257882 1.12747 -2.37536e-07 0.99654 -2.44235e-07C0.86561 -2.50934e-07 0.735961 0.0257882 0.614997 0.0758931C0.494033 0.125998 0.384122 0.199438 0.29154 0.29202C0.198958 0.384602 0.125519 0.494513 0.0754137 0.615477C0.0253086 0.736441 -0.00048069 0.86609 -0.000480695 0.99702C-0.000480701 1.12795 0.0253086 1.2576 0.0754136 1.37856C0.125519 1.49953 0.198958 1.60944 0.29154 1.70202L4.17154 5.59202L0.29154 9.47202C0.198958 9.5646 0.125518 9.67451 0.0754133 9.79548C0.0253082 9.91644 -0.000481091 10.0461 -0.000481097 10.177C-0.000481102 10.3079 0.0253082 10.4376 0.0754132 10.5586C0.125518 10.6795 0.198958 10.7894 0.29154 10.882C0.384121 10.9746 0.494032 11.048 0.614996 11.0981C0.73596 11.1483 0.865609 11.174 0.99654 11.174C1.12747 11.174 1.25712 11.1483 1.37808 11.0981C1.49905 11.048 1.60896 10.9746 1.70154 10.882L6.29154 6.29202C6.38424 6.19951 6.45779 6.08962 6.50797 5.96864C6.55815 5.84767 6.58398 5.71799 6.58398 5.58702C6.58398 5.45605 6.55815 5.32637 6.50797 5.2054C6.45779 5.08442 6.38424 4.97453 6.29154 4.88202Z" fill="#666666"/>
-</svg>
@@ -1,3 +0,0 @@
-<svg width="14" height="15" viewBox="0 0 14 15" fill="none" xmlns="http://www.w3.org/2000/svg">
-<path d="M12.9294 5.375V12.4583C12.9294 12.8341 12.7801 13.1944 12.5145 13.4601C12.2488 13.7257 11.8885 13.875 11.5127 13.875H3.01274C2.63701 13.875 2.27668 13.7257 2.011 13.4601C1.74532 13.1944 1.59607 12.8341 1.59607 12.4583V2.54167C1.59607 2.16594 1.74532 1.80561 2.011 1.53993C2.27668 1.27426 2.63701 1.125 3.01274 1.125H8.6794M12.9294 5.375V5.25317C12.9293 4.87747 12.78 4.5172 12.5143 4.25158L9.80282 1.54008C9.53721 1.27439 9.17693 1.12508 8.80124 1.125H8.6794M12.9294 5.375H10.0961C9.72035 5.375 9.36001 5.22574 9.09434 4.96007C8.82866 4.69439 8.6794 4.33406 8.6794 3.95833V1.125" stroke="#949494" stroke-width="1.41667" stroke-linecap="round" stroke-linejoin="round"/>
-</svg>