diff --git a/docs/modules/indexes/document_loaders/examples/pdf.ipynb b/docs/modules/indexes/document_loaders/examples/pdf.ipynb index 2175d4a1..cb90debd 100644 --- a/docs/modules/indexes/document_loaders/examples/pdf.ipynb +++ b/docs/modules/indexes/document_loaders/examples/pdf.ipynb @@ -104,10 +104,11 @@ "Efficient Data AnnotationC u s t o m i z e d M o d e l T r a i n i n gModel Cust omizationDI A Model HubDI A Pipeline SharingCommunity PlatformLa y out Detection ModelsDocument Images \n", "T h e C o r e L a y o u t P a r s e r L i b r a r yOCR ModuleSt or age & VisualizationLa y out Data Structur e\n", "Fig. 1: The overall architecture of LayoutParser . For an input document image,\n", - "the core LayoutParser library provides a set of o\u000B", + "the core LayoutParser library provides a set of o\u000b", + "\n", "-the-shelf tools for layout\n", "detection, OCR, visualization, and storage, backed by a carefully designed layout\n", - "data structure. LayoutParser also supports high level customization via e\u000Ecient\n", + "data structure. LayoutParser also supports high level customization via e\u000ecient\n", "layout annotation and model training functions. These improve model accuracy\n", "on the target samples. The community platform enables the easy sharing of DIA\n", "models and whole digitization pipelines to promote reusability and reproducibility.\n", @@ -117,6 +118,7 @@ "DL-based support for developing and deploying models for general computer\n", "vision and natural language processing problems. LayoutParser , on the other\n", "hand, specializes speci\f", + "\n", "cally in DIA tasks. LayoutParser is also equipped with a\n", "community platform inspired by established model hubs such as Torch Hub [23]\n", "andTensorFlow Hub [1]. It enables the sharing of pretrained models as well as\n", @@ -125,13 +127,16 @@ "development of DL models. Some examples include PRImA [ 3](magazine layouts),\n", "PubLayNet [ 38](academic paper layouts), Table Bank [ 18](tables in academic\n", "papers), Newspaper Navigator Dataset [ 16,17](newspaper \f", + "\n", "gure layouts) and\n", "HJDataset [31](historical Japanese document layouts). A spectrum of models\n", "trained on these datasets are currently available in the LayoutParser model zoo\n", - "to support di\u000B", + "to support di\u000b", + "\n", "erent use cases.\n", "3 The Core LayoutParser Library\n", - "At the core of LayoutParser is an o\u000B", + "At the core of LayoutParser is an o\u000b", + "\n", "-the-shelf toolkit that streamlines DL-\n", "based document image analysis. Five components support a simple interface\n", "with comprehensive functionalities: 1) The layout detection models enable using\n", @@ -226,7 +231,9 @@ "outputs": [ { "data": { - "text/plain": "Document(page_content='LayoutParser: A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\nZejiang Shen1 (�), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain\\nLee4, Jacob Carlson3, and Weining Li5\\n1 Allen Institute for AI\\nshannons@allenai.org\\n2 Brown University\\nruochen zhang@brown.edu\\n3 Harvard University\\n{melissadell,jacob carlson}@fas.harvard.edu\\n4 University of Washington\\nbcgl@cs.washington.edu\\n5 University of Waterloo\\nw422li@uwaterloo.ca\\nAbstract. Recent advances in document image analysis (DIA) have been\\nprimarily driven by the application of neural networks. Ideally, research\\noutcomes could be easily deployed in production and extended for further\\ninvestigation. 
However, various factors like loosely organized codebases\\nand sophisticated model configurations complicate the easy reuse of im-\\nportant innovations by a wide audience. Though there have been on-going\\nefforts to improve reusability and simplify deep learning (DL) model\\ndevelopment in disciplines like natural language processing and computer\\nvision, none of them are optimized for challenges in the domain of DIA.\\nThis represents a major gap in the existing toolkit, as DIA is central to\\nacademic research across a wide range of disciplines in the social sciences\\nand humanities. This paper introduces LayoutParser, an open-source\\nlibrary for streamlining the usage of DL in DIA research and applica-\\ntions. The core LayoutParser library comes with a set of simple and\\nintuitive interfaces for applying and customizing DL models for layout de-\\ntection, character recognition, and many other document processing tasks.\\nTo promote extensibility, LayoutParser also incorporates a community\\nplatform for sharing both pre-trained models and full document digiti-\\nzation pipelines. We demonstrate that LayoutParser is helpful for both\\nlightweight and large-scale digitization pipelines in real-word use cases.\\nThe library is publicly available at https://layout-parser.github.io.\\nKeywords: Document Image Analysis · Deep Learning · Layout Analysis\\n· Character Recognition · Open Source library · Toolkit.\\n1\\nIntroduction\\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of\\ndocument image analysis (DIA) tasks including document image classification [11,\\narXiv:2103.15348v2 [cs.CV] 21 Jun 2021\\n', lookup_str='', metadata={'file_path': 'example_data/layout-parser-paper.pdf', 'page_number': 1, 'total_pages': 16, 'format': 'PDF 1.5', 'title': '', 'author': '', 'subject': '', 'keywords': '', 'creator': 'LaTeX with hyperref', 'producer': 'pdfTeX-1.40.21', 'creationDate': 'D:20210622012710Z', 'modDate': 'D:20210622012710Z', 'trapped': '', 'encryption': None}, lookup_index=0)" + "text/plain": [ + "Document(page_content='LayoutParser: A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\nZejiang Shen1 (�), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain\\nLee4, Jacob Carlson3, and Weining Li5\\n1 Allen Institute for AI\\nshannons@allenai.org\\n2 Brown University\\nruochen zhang@brown.edu\\n3 Harvard University\\n{melissadell,jacob carlson}@fas.harvard.edu\\n4 University of Washington\\nbcgl@cs.washington.edu\\n5 University of Waterloo\\nw422li@uwaterloo.ca\\nAbstract. Recent advances in document image analysis (DIA) have been\\nprimarily driven by the application of neural networks. Ideally, research\\noutcomes could be easily deployed in production and extended for further\\ninvestigation. However, various factors like loosely organized codebases\\nand sophisticated model configurations complicate the easy reuse of im-\\nportant innovations by a wide audience. Though there have been on-going\\nefforts to improve reusability and simplify deep learning (DL) model\\ndevelopment in disciplines like natural language processing and computer\\nvision, none of them are optimized for challenges in the domain of DIA.\\nThis represents a major gap in the existing toolkit, as DIA is central to\\nacademic research across a wide range of disciplines in the social sciences\\nand humanities. This paper introduces LayoutParser, an open-source\\nlibrary for streamlining the usage of DL in DIA research and applica-\\ntions. 
The core LayoutParser library comes with a set of simple and\\nintuitive interfaces for applying and customizing DL models for layout de-\\ntection, character recognition, and many other document processing tasks.\\nTo promote extensibility, LayoutParser also incorporates a community\\nplatform for sharing both pre-trained models and full document digiti-\\nzation pipelines. We demonstrate that LayoutParser is helpful for both\\nlightweight and large-scale digitization pipelines in real-word use cases.\\nThe library is publicly available at https://layout-parser.github.io.\\nKeywords: Document Image Analysis · Deep Learning · Layout Analysis\\n· Character Recognition · Open Source library · Toolkit.\\n1\\nIntroduction\\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of\\ndocument image analysis (DIA) tasks including document image classification [11,\\narXiv:2103.15348v2 [cs.CV] 21 Jun 2021\\n', lookup_str='', metadata={'file_path': 'example_data/layout-parser-paper.pdf', 'page_number': 1, 'total_pages': 16, 'format': 'PDF 1.5', 'title': '', 'author': '', 'subject': '', 'keywords': '', 'creator': 'LaTeX with hyperref', 'producer': 'pdfTeX-1.40.21', 'creationDate': 'D:20210622012710Z', 'modDate': 'D:20210622012710Z', 'trapped': '', 'encryption': None}, lookup_index=0)" + ] }, "execution_count": 5, "metadata": {}, @@ -239,53 +246,51 @@ }, { "cell_type": "markdown", + "id": "278c881f", + "metadata": {}, "source": [ "### Fetching remote PDFs using Unstructured\n", "\n", "This covers how to load online pdfs into a document format that we can use downstream. This can be used for various online pdf sites such as https://open.umn.edu/opentextbooks/textbooks/ and https://arxiv.org/archive/\n", "\n", "Note: all other pdf loaders can also be used to fetch remote PDFs, but `OnlinePDFLoader` is a legacy function, and works specifically with `UnstructuredPDFLoader`.\n" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 6, + "id": "0c2686fc", + "metadata": {}, "outputs": [], "source": [ "from langchain.document_loaders import OnlinePDFLoader" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 7, + "id": "101e0b82", + "metadata": {}, "outputs": [], "source": [ "loader = OnlinePDFLoader(\"https://arxiv.org/pdf/2302.03803.pdf\")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 8, + "id": "be3ccbfa", + "metadata": {}, "outputs": [], "source": [ "data = loader.load()" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 9, + "id": "e1298dd6", + "metadata": {}, "outputs": [ { "name": "stdout", @@ -297,17 +302,13 @@ ], "source": [ "print(data)" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", - "source": [], - "metadata": { - "collapsed": false - } + "id": "05187b33", + "metadata": {}, + "source": [] }, { "cell_type": "markdown", @@ -349,55 +350,101 @@ }, { "cell_type": "markdown", + "id": "c90a5fe8", + "metadata": {}, "source": [ - "## Using PyMuPDF\n", - "\n", - "This is the fastest of the PDF parsing options, and contains detailed metadata about the PDF and its pages, as well as returns one document per page." 
- ], - "metadata": { - "collapsed": false - } + "## Using PDFMiner to generate HTML text" + ] + }, + { + "cell_type": "markdown", + "id": "eb785e1c", + "metadata": {}, + "source": [ + "This can be helpful for chunking texts semantically into sections as the output html content can be parsed via `BeautifulSoup` to get more structured and rich information about font size, page numbers, pdf headers/footers, etc." + ] }, { "cell_type": "code", "execution_count": 1, + "id": "601000d7", + "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import PyMuPDFLoader" - ], - "metadata": { - "collapsed": false - } - }, - { - "cell_type": "code", - "execution_count": 2, - "outputs": [], - "source": [ - "loader = PyMuPDFLoader(\"example_data/layout-parser-paper.pdf\")" - ], - "metadata": { - "collapsed": false - } + "from langchain.document_loaders import PDFMinerPDFasHTMLLoader" + ] }, { "cell_type": "code", "execution_count": 3, + "id": "a5525fb0", + "metadata": {}, "outputs": [], "source": [ - "data = loader.load()" - ], - "metadata": { - "collapsed": false - } + "loader = PDFMinerPDFasHTMLLoader(\"example_data/layout-parser-paper.pdf\")" + ] }, { "cell_type": "code", "execution_count": 4, + "id": "dac7ff68", + "metadata": {}, + "outputs": [], + "source": [ + "data = loader.load()" + ] + }, + { + "cell_type": "markdown", + "id": "cc2c2f4f", + "metadata": {}, + "source": [ + "## Using PyMuPDF\n", + "\n", + "This is the fastest of the PDF parsing options, and contains detailed metadata about the PDF and its pages, as well as returns one document per page." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "55f0c4d8", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.document_loaders import PyMuPDFLoader" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "718cbfbc", + "metadata": {}, + "outputs": [], + "source": [ + "loader = PyMuPDFLoader(\"example_data/layout-parser-paper.pdf\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "f2f93a15", + "metadata": {}, + "outputs": [], + "source": [ + "data = loader.load()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a24dfaa6", + "metadata": {}, "outputs": [ { "data": { - "text/plain": "Document(page_content='LayoutParser: A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\nZejiang Shen1 (�), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain\\nLee4, Jacob Carlson3, and Weining Li5\\n1 Allen Institute for AI\\nshannons@allenai.org\\n2 Brown University\\nruochen zhang@brown.edu\\n3 Harvard University\\n{melissadell,jacob carlson}@fas.harvard.edu\\n4 University of Washington\\nbcgl@cs.washington.edu\\n5 University of Waterloo\\nw422li@uwaterloo.ca\\nAbstract. Recent advances in document image analysis (DIA) have been\\nprimarily driven by the application of neural networks. Ideally, research\\noutcomes could be easily deployed in production and extended for further\\ninvestigation. However, various factors like loosely organized codebases\\nand sophisticated model configurations complicate the easy reuse of im-\\nportant innovations by a wide audience. 
Though there have been on-going\\nefforts to improve reusability and simplify deep learning (DL) model\\ndevelopment in disciplines like natural language processing and computer\\nvision, none of them are optimized for challenges in the domain of DIA.\\nThis represents a major gap in the existing toolkit, as DIA is central to\\nacademic research across a wide range of disciplines in the social sciences\\nand humanities. This paper introduces LayoutParser, an open-source\\nlibrary for streamlining the usage of DL in DIA research and applica-\\ntions. The core LayoutParser library comes with a set of simple and\\nintuitive interfaces for applying and customizing DL models for layout de-\\ntection, character recognition, and many other document processing tasks.\\nTo promote extensibility, LayoutParser also incorporates a community\\nplatform for sharing both pre-trained models and full document digiti-\\nzation pipelines. We demonstrate that LayoutParser is helpful for both\\nlightweight and large-scale digitization pipelines in real-word use cases.\\nThe library is publicly available at https://layout-parser.github.io.\\nKeywords: Document Image Analysis · Deep Learning · Layout Analysis\\n· Character Recognition · Open Source library · Toolkit.\\n1\\nIntroduction\\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of\\ndocument image analysis (DIA) tasks including document image classification [11,\\narXiv:2103.15348v2 [cs.CV] 21 Jun 2021\\n', lookup_str='', metadata={'file_path': 'example_data/layout-parser-paper.pdf', 'page_number': 1, 'total_pages': 16, 'format': 'PDF 1.5', 'title': '', 'author': '', 'subject': '', 'keywords': '', 'creator': 'LaTeX with hyperref', 'producer': 'pdfTeX-1.40.21', 'creationDate': 'D:20210622012710Z', 'modDate': 'D:20210622012710Z', 'trapped': '', 'encryption': None}, lookup_index=0)" + "text/plain": [ + "Document(page_content='LayoutParser: A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\nZejiang Shen1 (�), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain\\nLee4, Jacob Carlson3, and Weining Li5\\n1 Allen Institute for AI\\nshannons@allenai.org\\n2 Brown University\\nruochen zhang@brown.edu\\n3 Harvard University\\n{melissadell,jacob carlson}@fas.harvard.edu\\n4 University of Washington\\nbcgl@cs.washington.edu\\n5 University of Waterloo\\nw422li@uwaterloo.ca\\nAbstract. Recent advances in document image analysis (DIA) have been\\nprimarily driven by the application of neural networks. Ideally, research\\noutcomes could be easily deployed in production and extended for further\\ninvestigation. However, various factors like loosely organized codebases\\nand sophisticated model configurations complicate the easy reuse of im-\\nportant innovations by a wide audience. Though there have been on-going\\nefforts to improve reusability and simplify deep learning (DL) model\\ndevelopment in disciplines like natural language processing and computer\\nvision, none of them are optimized for challenges in the domain of DIA.\\nThis represents a major gap in the existing toolkit, as DIA is central to\\nacademic research across a wide range of disciplines in the social sciences\\nand humanities. This paper introduces LayoutParser, an open-source\\nlibrary for streamlining the usage of DL in DIA research and applica-\\ntions. 
The core LayoutParser library comes with a set of simple and\\nintuitive interfaces for applying and customizing DL models for layout de-\\ntection, character recognition, and many other document processing tasks.\\nTo promote extensibility, LayoutParser also incorporates a community\\nplatform for sharing both pre-trained models and full document digiti-\\nzation pipelines. We demonstrate that LayoutParser is helpful for both\\nlightweight and large-scale digitization pipelines in real-word use cases.\\nThe library is publicly available at https://layout-parser.github.io.\\nKeywords: Document Image Analysis · Deep Learning · Layout Analysis\\n· Character Recognition · Open Source library · Toolkit.\\n1\\nIntroduction\\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of\\ndocument image analysis (DIA) tasks including document image classification [11,\\narXiv:2103.15348v2 [cs.CV] 21 Jun 2021\\n', lookup_str='', metadata={'file_path': 'example_data/layout-parser-paper.pdf', 'page_number': 1, 'total_pages': 16, 'format': 'PDF 1.5', 'title': '', 'author': '', 'subject': '', 'keywords': '', 'creator': 'LaTeX with hyperref', 'producer': 'pdfTeX-1.40.21', 'creationDate': 'D:20210622012710Z', 'modDate': 'D:20210622012710Z', 'trapped': '', 'encryption': None}, lookup_index=0)" + ] }, "execution_count": 4, "metadata": {}, @@ -406,28 +453,23 @@ ], "source": [ "data[0]" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "id": "83cb52a0", + "metadata": {}, "source": [ "Additionally, you can pass along any of the options from the [PyMuPDF documentation](https://pymupdf.readthedocs.io/en/latest/app1.html#plain-text/) as keyword arguments in the `load` call, and they will be passed along to the `get_text()` call."
- ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "id": "1bf73c97", + "metadata": {}, "outputs": [], - "source": [], - "metadata": { - "collapsed": false - } + "source": [] } ], "metadata": { @@ -446,7 +488,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.10.8" } }, "nbformat": 4, diff --git a/langchain/document_loaders/__init__.py b/langchain/document_loaders/__init__.py index c73afdfe..a4c5ce4f 100644 --- a/langchain/document_loaders/__init__.py +++ b/langchain/document_loaders/__init__.py @@ -43,6 +43,7 @@ from langchain.document_loaders.obsidian import ObsidianLoader from langchain.document_loaders.pdf import ( OnlinePDFLoader, PDFMinerLoader, + PDFMinerPDFasHTMLLoader, PyMuPDFLoader, PyPDFLoader, UnstructuredPDFLoader, @@ -116,6 +117,7 @@ __all__ = [ "AirbyteJSONLoader", "OnlinePDFLoader", "PDFMinerLoader", + "PDFMinerPDFasHTMLLoader", "PyMuPDFLoader", "TelegramChatLoader", "SRTLoader", diff --git a/langchain/document_loaders/pdf.py b/langchain/document_loaders/pdf.py index 0ff912b8..16a17115 100644 --- a/langchain/document_loaders/pdf.py +++ b/langchain/document_loaders/pdf.py @@ -2,6 +2,7 @@ import os import tempfile from abc import ABC +from io import StringIO from typing import Any, List, Optional from urllib.parse import urlparse @@ -129,6 +130,40 @@ class PDFMinerLoader(BasePDFLoader): return [Document(page_content=text, metadata=metadata)] +class PDFMinerPDFasHTMLLoader(BasePDFLoader): + """Loader that uses PDFMiner to load PDF files as HTML content.""" + + def __init__(self, file_path: str): + """Initialize with file path.""" + try: + from pdfminer.high_level import extract_text_to_fp # noqa:F401 + except ImportError: + raise ValueError( + "pdfminer package not found, please install it with " + "`pip install pdfminer.six`" + ) + + super().__init__(file_path) + + def load(self) -> List[Document]: + """Load file.""" + from pdfminer.high_level import extract_text_to_fp + from pdfminer.layout import LAParams + from pdfminer.utils import open_filename + + output_string = StringIO() + with open_filename(self.file_path, "rb") as fp: + extract_text_to_fp( + fp, # type: ignore[arg-type] + output_string, + codec="", + laparams=LAParams(), + output_type="html", + ) + metadata = {"source": self.file_path} + return [Document(page_content=output_string.getvalue(), metadata=metadata)] + + class PyMuPDFLoader(BasePDFLoader): """Loader that uses PyMuPDF to load PDF files.""" diff --git a/tests/integration_tests/document_loaders/test_pdf.py b/tests/integration_tests/document_loaders/test_pdf.py index dc046b47..f7a768a2 100644 --- a/tests/integration_tests/document_loaders/test_pdf.py +++ b/tests/integration_tests/document_loaders/test_pdf.py @@ -2,6 +2,7 @@ from pathlib import Path from langchain.document_loaders import ( PDFMinerLoader, + PDFMinerPDFasHTMLLoader, PyMuPDFLoader, UnstructuredPDFLoader, ) @@ -31,6 +32,21 @@ def test_pdfminer_loader() -> None: assert len(docs) == 1 +def test_pdfminer_pdf_as_html_loader() -> None: + """Test PDFMinerPDFasHTMLLoader.""" + file_path = Path(__file__).parent.parent / "examples/hello.pdf" + loader = PDFMinerPDFasHTMLLoader(str(file_path)) + docs = loader.load() + + assert len(docs) == 1 + + file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf" + loader = PDFMinerPDFasHTMLLoader(str(file_path)) + + docs = loader.load() + assert len(docs) == 1 + + def test_pymupdf_loader() -> None: """Test PyMuPDF loader.""" 
file_path = Path(__file__).parent.parent / "examples/hello.pdf"