langchain/libs/community/langchain_community/document_loaders/image_captions.py

from io import BytesIO
from typing import Any, List, Tuple, Union

import requests
from langchain_core.documents import Document

from langchain_community.document_loaders.base import BaseLoader


class ImageCaptionLoader(BaseLoader):
"""Load image captions.
By default, the loader utilizes the pre-trained
Salesforce BLIP image captioning model.
https://huggingface.co/Salesforce/blip-image-captioning-base
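
    Example:
        A minimal usage sketch; ``path/to/image.jpg`` is a hypothetical
        local file used only for illustration.

        .. code-block:: python

            from langchain_community.document_loaders import ImageCaptionLoader

            # A single path, a URL, raw bytes, or a list of any of these
            # can be passed as ``images``.
            loader = ImageCaptionLoader(images="path/to/image.jpg")
            docs = loader.load()
            print(docs[0].page_content)  # the generated caption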
"""

    def __init__(
        self,
        images: Union[str, bytes, List[Union[str, bytes]]],
        blip_processor: str = "Salesforce/blip-image-captioning-base",
        blip_model: str = "Salesforce/blip-image-captioning-base",
    ):
        """Initialize with a list of image data (bytes) or file paths.

        Args:
            images: Either a single image or a list of images. Accepts
                image data (bytes) or file paths to images.
            blip_processor: The name of the pre-trained BLIP processor.
            blip_model: The name of the pre-trained BLIP model.
        """
        # Normalize a single image into a one-element list so that
        # load() can always iterate.
        if isinstance(images, (str, bytes)):
            self.images = [images]
        else:
            self.images = images

        self.blip_processor = blip_processor
        self.blip_model = blip_model

    def load(self) -> List[Document]:
        """Load from a list of image data or file paths."""
        try:
            from transformers import BlipForConditionalGeneration, BlipProcessor
        except ImportError:
            raise ImportError(
                "`transformers` package not found, please install with "
                "`pip install transformers`."
            )

        processor = BlipProcessor.from_pretrained(self.blip_processor)
        model = BlipForConditionalGeneration.from_pretrained(self.blip_model)

        results = []
        for image in self.images:
            caption, metadata = self._get_captions_and_metadata(
                model=model, processor=processor, image=image
            )
            doc = Document(page_content=caption, metadata=metadata)
            results.append(doc)

        return results

    def _get_captions_and_metadata(
        self, model: Any, processor: Any, image: Union[str, bytes]
    ) -> Tuple[str, dict]:
        """Helper function for getting the captions and metadata of an image."""
        try:
            from PIL import Image
        except ImportError:
            raise ImportError(
                "`PIL` package not found, please install with `pip install pillow`."
            )

        image_source = image  # Save the original source for later reference

        try:
            if isinstance(image, bytes):
                image = Image.open(BytesIO(image)).convert("RGB")
            elif image.startswith("http://") or image.startswith("https://"):
                image = Image.open(requests.get(image, stream=True).raw).convert(
                    "RGB"
                )
            else:
                image = Image.open(image).convert("RGB")
        except Exception:
            if isinstance(image_source, bytes):
                msg = "Could not get image data from bytes"
            else:
                msg = f"Could not get image data for {image_source}"
            raise ValueError(msg)

        # "an image of" is a conditional text prompt that steers BLIP's
        # caption generation.
        inputs = processor(image, "an image of", return_tensors="pt")
        output = model.generate(**inputs)
        caption: str = processor.decode(output[0])

        if isinstance(image_source, bytes):
            metadata: dict = {"image_source": "Image bytes provided"}
        else:
            metadata = {"image_path": image_source}

        return caption, metadata
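

# A minimal end-to-end sketch, assuming the optional `transformers` and
# `pillow` dependencies are installed; "photo.jpg" is a hypothetical
# local file. Guarded by `__main__` so importing the module stays
# side-effect free.
if __name__ == "__main__":
    loader = ImageCaptionLoader(images=["photo.jpg"])
    for doc in loader.load():
        print(doc.metadata, doc.page_content)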