From aad4bff0987378eef06e8ef007f8b5cd6e878bec Mon Sep 17 00:00:00 2001
From: Harrison Chase
Date: Wed, 15 Mar 2023 13:13:21 -0700
Subject: [PATCH] Harrison/headers (#1696)

Co-authored-by: Tim Asp <707699+timothyasp@users.noreply.github.com>
---
 langchain/document_loaders/web_base.py | 36 ++++++++++++++++++++++----
 1 file changed, 31 insertions(+), 5 deletions(-)

diff --git a/langchain/document_loaders/web_base.py b/langchain/document_loaders/web_base.py
index ca59fe61..90616a8c 100644
--- a/langchain/document_loaders/web_base.py
+++ b/langchain/document_loaders/web_base.py
@@ -1,24 +1,50 @@
 """Web base loader class."""
-from typing import Any, List
+import logging
+from typing import Any, List, Optional
 
 import requests
 
 from langchain.docstore.document import Document
 from langchain.document_loaders.base import BaseLoader
 
+logger = logging.getLogger(__file__)
+
+default_header_template = {
+    "User-Agent": "",
+    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
+    ";q=0.8",
+    "Accept-Language": "en-US,en;q=0.5",
+    "Referer": "https://www.google.com/",
+    "DNT": "1",
+    "Connection": "keep-alive",
+    "Upgrade-Insecure-Requests": "1",
+}
+
 
 class WebBaseLoader(BaseLoader):
     """Loader that uses urllib and beautiful soup to load webpages."""
 
-    def __init__(self, web_path: str):
+    def __init__(self, web_path: str, header_template: Optional[dict] = None):
         """Initialize with webpage path."""
         self.web_path = web_path
+        self.session = requests.Session()
 
-    @staticmethod
-    def _scrape(url: str) -> Any:
+        try:
+            from fake_useragent import UserAgent
+
+            headers = header_template or default_header_template
+            headers["User-Agent"] = UserAgent().random
+            self.session.headers = dict(headers)
+        except ImportError:
+            logger.info(
+                "fake_useragent not found, using default user agent."
+                "To get a realistic header for requests, `pip install fake_useragent`."
+            )
+
+    def _scrape(self, url: str) -> Any:
         from bs4 import BeautifulSoup
 
-        html_doc = requests.get(url)
+        html_doc = self.session.get(url)
         soup = BeautifulSoup(html_doc.text, "html.parser")
         return soup
 
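For context, a minimal usage sketch of the loader as patched. This is an illustration, not part of the commit: the target URL and the custom header values are placeholders, and load() is the standard BaseLoader entry point that this hunk does not touch.

    from langchain.document_loaders.web_base import WebBaseLoader

    # With no header_template, the loader copies default_header_template into
    # the shared requests.Session, filling in a random User-Agent when the
    # optional fake_useragent package is installed.
    loader = WebBaseLoader("https://example.com")

    # A caller-supplied template replaces the module-level default. Note that
    # fake_useragent, if installed, still overwrites the "User-Agent" key.
    loader = WebBaseLoader(
        "https://example.com",
        header_template={
            "User-Agent": "my-crawler/0.1",
            "Accept-Language": "en-US,en;q=0.5",
        },
    )
    documents = loader.load()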
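Two review notes on the __init__ hunk. First, headers = header_template or default_header_template binds the module-level dict itself, so the subsequent headers["User-Agent"] = UserAgent().random mutates default_header_template (or the caller's dict) in place. Second, when fake_useragent is missing, the except ImportError branch only logs, so self.session.headers is never set from header_template and the session keeps requests' stock headers. A copy-first variant, sketched below as a suggestion rather than what the commit does, avoids both:

    # Sketch only: copy the template before mutating, and always apply it to
    # the session, so neither the module default nor the caller's dict is
    # changed in place and custom headers survive a missing fake_useragent.
    headers = dict(header_template or default_header_template)
    try:
        from fake_useragent import UserAgent

        headers["User-Agent"] = UserAgent().random
    except ImportError:
        logger.info(
            "fake_useragent not found, using default user agent. "
            "To get a realistic header for requests, `pip install fake_useragent`."
        )
    self.session.headers = headers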