langchain/libs/community/langchain_community/document_loaders/news.py
Bagatur ed58eeb9c5
community[major], core[patch], langchain[patch], experimental[patch]: Create langchain-community (#14463)
Moved the following modules to new package langchain-community in a backwards compatible fashion:

```
mv langchain/langchain/adapters community/langchain_community
mv langchain/langchain/callbacks community/langchain_community/callbacks
mv langchain/langchain/chat_loaders community/langchain_community
mv langchain/langchain/chat_models community/langchain_community
mv langchain/langchain/document_loaders community/langchain_community
mv langchain/langchain/docstore community/langchain_community
mv langchain/langchain/document_transformers community/langchain_community
mv langchain/langchain/embeddings community/langchain_community
mv langchain/langchain/graphs community/langchain_community
mv langchain/langchain/llms community/langchain_community
mv langchain/langchain/memory/chat_message_histories community/langchain_community
mv langchain/langchain/retrievers community/langchain_community
mv langchain/langchain/storage community/langchain_community
mv langchain/langchain/tools community/langchain_community
mv langchain/langchain/utilities community/langchain_community
mv langchain/langchain/vectorstores community/langchain_community
mv langchain/langchain/agents/agent_toolkits community/langchain_community
mv langchain/langchain/cache.py community/langchain_community
```

Moved the following to core:
```
mv langchain/langchain/utils/json_schema.py core/langchain_core/utils
mv langchain/langchain/utils/html.py core/langchain_core/utils
mv langchain/langchain/utils/strings.py core/langchain_core/utils
cat langchain/langchain/utils/env.py >> core/langchain_core/utils/env.py
rm langchain/langchain/utils/env.py
```

See .scripts/community_split/script_integrations.sh for all changes
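
To illustrate what "backwards compatible" means here, a minimal sketch using the `NewsURLLoader` defined in this file (both paths resolve to the same class, since the old `langchain` locations keep re-exports):

```
# Old import path: still works via the compatibility re-exports.
from langchain.document_loaders import NewsURLLoader

# New canonical path after the split into langchain-community.
from langchain_community.document_loaders import NewsURLLoader
```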
2023-12-11 13:53:30 -08:00

"""Loader that uses unstructured to load HTML files."""
import logging
from typing import Any, Iterator, List
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class NewsURLLoader(BaseLoader):
"""Load news articles from URLs using `Unstructured`.
Args:
urls: URLs to load. Each is loaded into its own document.
text_mode: If True, extract text from URL and use that for page content.
Otherwise, extract raw HTML.
nlp: If True, perform NLP on the extracted contents, like providing a summary
and extracting keywords.
continue_on_failure: If True, continue loading documents even if
loading fails for a particular URL.
show_progress_bar: If True, use tqdm to show a loading progress bar. Requires
tqdm to be installed, ``pip install tqdm``.
**newspaper_kwargs: Any additional named arguments to pass to
newspaper.Article().
Example:
.. code-block:: python
from langchain_community.document_loaders import NewsURLLoader
loader = NewsURLLoader(
urls=["<url-1>", "<url-2>"],
)
docs = loader.load()
Newspaper reference:
https://newspaper.readthedocs.io/en/latest/
"""
    def __init__(
        self,
        urls: List[str],
        text_mode: bool = True,
        nlp: bool = False,
        continue_on_failure: bool = True,
        show_progress_bar: bool = False,
        **newspaper_kwargs: Any,
    ) -> None:
        """Initialize with the URLs to load and newspaper options."""
        try:
            import newspaper  # noqa:F401

            self.__version = newspaper.__version__
        except ImportError:
            raise ImportError(
                "newspaper package not found, please install it with "
                "`pip install newspaper3k`"
            )
        self.urls = urls
        self.text_mode = text_mode
        self.nlp = nlp
        self.continue_on_failure = continue_on_failure
        self.newspaper_kwargs = newspaper_kwargs
        self.show_progress_bar = show_progress_bar
    def load(self) -> List[Document]:
        # Renamed from `iter` to avoid shadowing the builtin.
        iterator = self.lazy_load()
        if self.show_progress_bar:
            try:
                from tqdm import tqdm
            except ImportError as e:
                raise ImportError(
                    "Package tqdm must be installed if show_progress_bar=True. "
                    "Please install with 'pip install tqdm' or set "
                    "show_progress_bar=False."
                ) from e
            iterator = tqdm(iterator)
        return list(iterator)
    def lazy_load(self) -> Iterator[Document]:
        try:
            from newspaper import Article
        except ImportError as e:
            raise ImportError(
                "Cannot import newspaper, please install with `pip install newspaper3k`"
            ) from e

        for url in self.urls:
            try:
                article = Article(url, **self.newspaper_kwargs)
                article.download()
                article.parse()

                if self.nlp:
                    article.nlp()
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                    continue
                else:
                    raise e

            metadata = {
                "title": getattr(article, "title", ""),
                "link": getattr(article, "url", getattr(article, "canonical_link", "")),
                "authors": getattr(article, "authors", []),
                "language": getattr(article, "meta_lang", ""),
                "description": getattr(article, "meta_description", ""),
                "publish_date": getattr(article, "publish_date", ""),
            }

            if self.text_mode:
                content = article.text
            else:
                content = article.html

            if self.nlp:
                metadata["keywords"] = getattr(article, "keywords", [])
                metadata["summary"] = getattr(article, "summary", "")

            yield Document(page_content=content, metadata=metadata)
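
A minimal usage sketch of the loader above (not part of the file): it assumes newspaper3k is installed, plus nltk's punkt tokenizer data if `nlp=True`; the URL is a placeholder.

```
from langchain_community.document_loaders import NewsURLLoader

# Placeholder URL; any reachable news article will do.
loader = NewsURLLoader(
    urls=["https://example.com/some-news-article"],
    text_mode=True,  # page_content is extracted text rather than raw HTML
    nlp=True,        # also populate metadata["summary"] and metadata["keywords"]
)

# lazy_load yields one Document per URL without buffering all of them.
for doc in loader.lazy_load():
    print(doc.metadata["title"], doc.metadata["publish_date"])
```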