2023-12-11 21:53:30 +00:00
|
|
|
import asyncio
|
|
|
|
import logging
|
2024-06-05 15:20:34 +00:00
|
|
|
from typing import AsyncIterator, Iterator, List, Optional
|
2023-12-11 21:53:30 +00:00
|
|
|
|
|
|
|
from langchain_core.documents import Document
|
|
|
|
|
|
|
|
from langchain_community.document_loaders.base import BaseLoader
|
2024-06-05 15:20:34 +00:00
|
|
|
from langchain_community.utils.user_agent import get_user_agent
|
2023-12-11 21:53:30 +00:00
|
|
|
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
class AsyncChromiumLoader(BaseLoader):
|
|
|
|
"""Scrape HTML pages from URLs using a
|
|
|
|
headless instance of the Chromium."""
|
|
|
|
|
2024-06-05 15:20:34 +00:00
|
|
|
def __init__(
|
|
|
|
self,
|
|
|
|
urls: List[str],
|
|
|
|
*,
|
|
|
|
headless: bool = True,
|
|
|
|
user_agent: Optional[str] = None,
|
|
|
|
):
|
2024-05-01 05:20:57 +00:00
|
|
|
"""Initialize the loader with a list of URL paths.
|
2023-12-11 21:53:30 +00:00
|
|
|
|
|
|
|
Args:
|
2024-04-17 23:00:28 +00:00
|
|
|
urls: A list of URLs to scrape content from.
|
|
|
|
headless: Whether to run browser in headless mode.
|
2024-06-05 15:20:34 +00:00
|
|
|
user_agent: The user agent to use for the browser
|
2023-12-11 21:53:30 +00:00
|
|
|
|
|
|
|
Raises:
|
|
|
|
ImportError: If the required 'playwright' package is not installed.
|
|
|
|
"""
|
|
|
|
self.urls = urls
|
2024-04-17 23:00:28 +00:00
|
|
|
self.headless = headless
|
2024-06-05 15:20:34 +00:00
|
|
|
self.user_agent = user_agent or get_user_agent()
|
2023-12-11 21:53:30 +00:00
|
|
|
|
|
|
|
try:
|
|
|
|
import playwright # noqa: F401
|
|
|
|
except ImportError:
|
|
|
|
raise ImportError(
|
|
|
|
"playwright is required for AsyncChromiumLoader. "
|
|
|
|
"Please install it with `pip install playwright`."
|
|
|
|
)
|
|
|
|
|
|
|
|
async def ascrape_playwright(self, url: str) -> str:
|
|
|
|
"""
|
|
|
|
Asynchronously scrape the content of a given URL using Playwright's async API.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
url (str): The URL to scrape.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
str: The scraped HTML content or an error message if an exception occurs.
|
|
|
|
|
|
|
|
"""
|
|
|
|
from playwright.async_api import async_playwright
|
|
|
|
|
|
|
|
logger.info("Starting scraping...")
|
|
|
|
results = ""
|
|
|
|
async with async_playwright() as p:
|
2024-04-17 23:00:28 +00:00
|
|
|
browser = await p.chromium.launch(headless=self.headless)
|
2023-12-11 21:53:30 +00:00
|
|
|
try:
|
2024-06-05 15:20:34 +00:00
|
|
|
page = await browser.new_page(user_agent=self.user_agent)
|
2023-12-11 21:53:30 +00:00
|
|
|
await page.goto(url)
|
|
|
|
results = await page.content() # Simply get the HTML content
|
|
|
|
logger.info("Content scraped")
|
|
|
|
except Exception as e:
|
|
|
|
results = f"Error: {e}"
|
|
|
|
await browser.close()
|
|
|
|
return results
|
|
|
|
|
|
|
|
def lazy_load(self) -> Iterator[Document]:
|
|
|
|
"""
|
|
|
|
Lazily load text content from the provided URLs.
|
|
|
|
|
|
|
|
This method yields Documents one at a time as they're scraped,
|
|
|
|
instead of waiting to scrape all URLs before returning.
|
|
|
|
|
|
|
|
Yields:
|
|
|
|
Document: The scraped content encapsulated within a Document object.
|
|
|
|
|
|
|
|
"""
|
|
|
|
for url in self.urls:
|
|
|
|
html_content = asyncio.run(self.ascrape_playwright(url))
|
|
|
|
metadata = {"source": url}
|
|
|
|
yield Document(page_content=html_content, metadata=metadata)
|
2024-05-01 05:20:57 +00:00
|
|
|
|
|
|
|
async def alazy_load(self) -> AsyncIterator[Document]:
|
|
|
|
"""
|
|
|
|
Asynchronously load text content from the provided URLs.
|
|
|
|
|
|
|
|
This method leverages asyncio to initiate the scraping of all provided URLs
|
|
|
|
simultaneously. It improves performance by utilizing concurrent asynchronous
|
|
|
|
requests. Each Document is yielded as soon as its content is available,
|
|
|
|
encapsulating the scraped content.
|
|
|
|
|
|
|
|
Yields:
|
|
|
|
Document: A Document object containing the scraped content, along with its
|
|
|
|
source URL as metadata.
|
|
|
|
"""
|
|
|
|
tasks = [self.ascrape_playwright(url) for url in self.urls]
|
|
|
|
results = await asyncio.gather(*tasks)
|
|
|
|
for url, content in zip(self.urls, results):
|
|
|
|
metadata = {"source": url}
|
|
|
|
yield Document(page_content=content, metadata=metadata)
|