import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
from application.parser.remote.base import BaseRemote


class CrawlerLoader(BaseRemote):
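    """Remote loader that crawls a website starting from a single seed URL.

    It follows links within the same domain, loads each visited page with
    langchain's WebBaseLoader, and stops once `limit` pages have been visited
    or there are no more links to follow.
    """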
    def __init__(self, limit=10):
        from langchain.document_loaders import WebBaseLoader
        self.loader = WebBaseLoader  # Initialize the document loader
        self.limit = limit  # Set the limit for the number of pages to scrape

    def load_data(self, inputs):
        url = inputs
        # Check if the input is a list and, if it is, use the first element
        if isinstance(url, list) and url:
            url = url[0]

        # Check if the URL scheme is provided; if not, assume http
        if not urlparse(url).scheme:
            url = "http://" + url

        visited_urls = set()  # Keep track of URLs that have been visited
        base_url = urlparse(url).scheme + "://" + urlparse(url).hostname  # Extract the base URL
        urls_to_visit = [url]  # List of URLs to be visited, starting with the initial URL
        loaded_content = []  # Store the loaded content from each URL

        # Continue crawling until there are no more URLs to visit
        while urls_to_visit:
            current_url = urls_to_visit.pop(0)  # Get the next URL to visit
            visited_urls.add(current_url)  # Mark the URL as visited

            # Try to load and process the content from the current URL
            try:
                response = requests.get(current_url)  # Fetch the content of the current URL
                response.raise_for_status()  # Raise an exception for HTTP errors
                loader = self.loader([current_url])  # Initialize the document loader for the current URL
                loaded_content.extend(loader.load())  # Load the content and add it to the loaded_content list
            except Exception as e:
                # Print an error message if loading or processing fails and continue with the next URL
                print(f"Error processing URL {current_url}: {e}")
                continue

            # Parse the HTML content to extract all links
            soup = BeautifulSoup(response.text, 'html.parser')
            all_links = [
                urljoin(current_url, a['href'])
                for a in soup.find_all('a', href=True)
                if base_url in urljoin(current_url, a['href'])  # Ensure links are from the same domain
            ]

            # Add new links to the list of URLs to visit if they haven't been visited yet
            urls_to_visit.extend([link for link in all_links if link not in visited_urls])
            urls_to_visit = list(set(urls_to_visit))  # Remove duplicate URLs

            # Stop crawling if the limit of pages to scrape is reached
            if self.limit is not None and len(visited_urls) >= self.limit:
                break

        return loaded_content  # Return the loaded content from all visited URLs
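

if __name__ == "__main__":
    # Minimal usage sketch, assuming the "application" package is importable
    # and that https://example.com stands in for a real site to crawl.
    crawler = CrawlerLoader(limit=5)
    documents = crawler.load_data("https://example.com")
    for doc in documents:
        # WebBaseLoader returns langchain Document objects exposing
        # page_content and a metadata dict (including the source URL).
        print(doc.metadata.get("source"), len(doc.page_content))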