Add spinner to indicate loading files

- Added a spinner using the Halo package to indicate when files are being loaded.
- The spinner appears while the load_files function is running and disappears once the function has completed.
- This should improve the user experience by providing visible feedback that the program is actively working (see the usage sketch below).
pull/1/head
Saryev Rustam 1 year ago
parent 71a1befd7e
commit 569b7d9536
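
For reference, here is a minimal sketch of the two Halo patterns this change relies on: the decorator form (as applied to `create_vector_store`) and the explicit `start()`/`succeed()` form (as used in `load_files`). The demo function names and bodies are placeholders for illustration, not the project's actual code.

```python
import os
import time

from halo import Halo


# Decorator form: the spinner runs for the duration of the decorated call.
@Halo(text='Creating vector store', spinner='dots')
def create_vector_store_demo():
    time.sleep(1)  # stand-in for the real work (splitting, embedding, indexing)


# Explicit form: start the spinner, do the work, then replace it with a
# success message via succeed().
def load_files_demo(root_dir):
    spinner = Halo(text='Loading files', spinner='dots')
    spinner.start()
    paths = []
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        paths.extend(os.path.join(dirpath, name) for name in filenames)
    spinner.succeed(f"Loaded {len(paths)} documents")
    return paths


if __name__ == "__main__":
    create_vector_store_demo()
    load_files_demo(".")
```

The decorator fits where an entire function is one unit of work; the explicit spinner fits where a final message, such as the loaded-document count, should replace the spinner on completion.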

.gitignore vendored (1 line changed)

@@ -2,3 +2,4 @@
/.idea/
/.vscode/
/.venv/
/talk_codebase/__pycache__/

README.md

@@ -2,7 +2,7 @@
[![Node.js Package](https://github.com/rsaryev/talk-codebase/actions/workflows/python-publish.yml/badge.svg)](https://github.com/rsaryev/talk-codebase/actions/workflows/python-publish.yml)
<p align="center">
<img src="https://github.com/rsaryev/talk-codebase/assets/70219513/87a031ec-51e2-4123-abe6-91bb4d248b4d" width="800" alt="chat">
<img src="https://github.com/rsaryev/talk-codebase/assets/70219513/6d92e905-fb1b-4235-857b-e6e19041ad79" width="800" alt="chat">
</p>
## Description

poetry.lock generated (49 lines changed)

@@ -480,6 +480,27 @@ files = [
docs = ["Sphinx", "docutils (<0.18)"]
test = ["objgraph", "psutil"]
[[package]]
name = "halo"
version = "0.0.31"
description = "Beautiful terminal spinners in Python"
optional = false
python-versions = ">=3.4"
files = [
{file = "halo-0.0.31-py2-none-any.whl", hash = "sha256:5350488fb7d2aa7c31a1344120cee67a872901ce8858f60da7946cef96c208ab"},
{file = "halo-0.0.31.tar.gz", hash = "sha256:7b67a3521ee91d53b7152d4ee3452811e1d2a6321975137762eb3d70063cc9d6"},
]
[package.dependencies]
colorama = ">=0.3.9"
log-symbols = ">=0.0.14"
six = ">=1.12.0"
spinners = ">=0.0.24"
termcolor = ">=1.1.0"
[package.extras]
ipython = ["IPython (==5.7.0)", "ipywidgets (==7.1.0)"]
[[package]]
name = "idna"
version = "3.4"
@@ -527,6 +548,20 @@ openai = ["openai (>=0,<1)", "tiktoken (>=0.3.2,<0.4.0)"]
qdrant = ["qdrant-client (>=1.1.2,<2.0.0)"]
text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
[[package]]
name = "log-symbols"
version = "0.0.14"
description = "Colored symbols for various log levels for Python"
optional = false
python-versions = "*"
files = [
{file = "log_symbols-0.0.14-py3-none-any.whl", hash = "sha256:4952106ff8b605ab7d5081dd2c7e6ca7374584eff7086f499c06edd1ce56dcca"},
{file = "log_symbols-0.0.14.tar.gz", hash = "sha256:cf0bbc6fe1a8e53f0d174a716bc625c4f87043cc21eb55dd8a740cfe22680556"},
]
[package.dependencies]
colorama = ">=0.3.9"
[[package]]
name = "marshmallow"
version = "3.19.0"
@@ -830,6 +865,7 @@ files = [
typing-extensions = ">=4.2.0"
[package.extras]
dotenv = ["python-dotenv (>=0.10.4)"]
email = ["email-validator (>=1.0.3)"]
[[package]]
@@ -1010,6 +1046,17 @@ files = [
{file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
]
[[package]]
name = "spinners"
version = "0.0.24"
description = "Spinners for terminals"
optional = false
python-versions = "*"
files = [
{file = "spinners-0.0.24-py3-none-any.whl", hash = "sha256:2fa30d0b72c9650ad12bbe031c9943b8d441e41b4f5602b0ec977a19f3290e98"},
{file = "spinners-0.0.24.tar.gz", hash = "sha256:1eb6aeb4781d72ab42ed8a01dcf20f3002bf50740d7154d12fb8c9769bf9e27f"},
]
[[package]]
name = "sqlalchemy"
version = "2.0.15"
@@ -1313,4 +1360,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
content-hash = "0fe65ae533b6399793a74211f7a49fc1e30d2ef315c5c0c72b8cb3b4e6b07201"
content-hash = "e59a3a2acab37f4fe8dcbdff28cb946c3ff62e1ab0546aca3225c678c02d8060"

pyproject.toml

@@ -1,6 +1,6 @@
[tool.poetry]
name = "talk-codebase"
version = "0.1.12"
version = "0.1.13"
description = "talk-codebase is a powerful tool for querying and analyzing codebases."
authors = ["Saryev Rustam <rustam1997@gmail.com>"]
readme = "README.md"
@@ -14,6 +14,7 @@ fire = "^0.5.0"
openai = "^0.27.7"
tiktoken = "^0.4.0"
faiss-cpu = "^1.7.4"
halo = "^0.0.31"
[build-system]

requirements.txt

@@ -50,3 +50,5 @@ urllib3==2.0.2
webencodings==0.5.1
yarl==1.9.2
zipp==3.15.0
halo~=0.0.31

@@ -6,10 +6,12 @@ from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from halo import Halo
from talk_codebase.utils import StreamStdOut, load_files
@Halo(text='Creating vector store', spinner='dots')
def create_vector_store(root_dir, openai_api_key):
docs = load_files(root_dir)
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)

talk_codebase/utils.py

@@ -3,6 +3,7 @@ import sys
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.document_loaders import TextLoader
from halo import Halo
from talk_codebase.consts import EXCLUDE_DIRS, EXCLUDE_FILES, ALLOW_FILES
@@ -21,6 +22,7 @@ class StreamStdOut(StreamingStdOutCallbackHandler):
def load_files(root_dir):
spinners = Halo(text='Loading files', spinner='dots')
docs = []
for dirpath, dirnames, filenames in os.walk(root_dir):
if any(exclude_dir in dirpath for exclude_dir in EXCLUDE_DIRS):
@@ -35,5 +37,5 @@ def load_files(root_dir):
docs.extend(loader.load_and_split())
except Exception as e:
print(f"Error loading file {file}: {e}")
print(f"🤖 Loaded {len(docs)} documents")
spinners.succeed(f"Loaded {len(docs)} documents")
return docs
