mirror of https://github.com/hwchase17/langchain (synced 2024-11-18 09:25:54 +00:00)
36 lines · 1007 B · Python
import os
from pathlib import Path

from langchain.vectorstores import Chroma
from langchain_experimental.open_clip import OpenCLIPEmbeddings

# Load images
img_dump_path = Path(__file__).parent / "docs/"
rel_img_dump_path = img_dump_path.relative_to(Path.cwd())
image_uris = sorted(
    [
        os.path.join(rel_img_dump_path, image_name)
        for image_name in os.listdir(rel_img_dump_path)
        if image_name.endswith(".jpg")
    ]
)

# Index
vectorstore = Path(__file__).parent / "chroma_db_multi_modal"
re_vectorstore_path = vectorstore.relative_to(Path.cwd())

# Load embedding function
print("Loading embedding function")
embedding = OpenCLIPEmbeddings(model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k")

# Create chroma
vectorstore_mmembd = Chroma(
    collection_name="multi-modal-rag",
    persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"),
    embedding_function=embedding,
)

# Add images
print("Embedding images")
vectorstore_mmembd.add_images(uris=image_uris)
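
# A minimal retrieval sketch: OpenCLIP embeds text and images into a shared
# space, so the store persisted above can be queried with plain text via
# Chroma's similarity_search. The query string and k below are illustrative
# assumptions, not values taken from this template.
docs = vectorstore_mmembd.similarity_search("a photo of a beach at sunset", k=3)
print(f"Retrieved {len(docs)} matching image documents")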