From 2006eb468087c045c46c7d9e1d771e8ab2dfed7b Mon Sep 17 00:00:00 2001 From: Alexandre Flament Date: Fri, 2 Oct 2020 18:13:56 +0200 Subject: [PATCH 1/3] [mod] move extract_text, extract_url to searx.utils --- searx/engines/1337x.py | 3 +- searx/engines/acgsou.py | 3 +- searx/engines/apkmirror.py | 2 +- searx/engines/archlinux.py | 2 +- searx/engines/bing.py | 3 +- searx/engines/btdigg.py | 3 +- searx/engines/deviantart.py | 2 +- searx/engines/digbt.py | 3 +- searx/engines/doku.py | 3 +- searx/engines/duckduckgo.py | 3 +- searx/engines/duckduckgo_definitions.py | 3 +- searx/engines/duckduckgo_images.py | 2 +- searx/engines/duden.py | 3 +- searx/engines/etools.py | 3 +- searx/engines/fdroid.py | 2 +- searx/engines/framalibre.py | 2 +- searx/engines/gentoo.py | 2 +- searx/engines/google.py | 3 +- searx/engines/google_images.py | 3 +- searx/engines/google_videos.py | 2 +- searx/engines/ina.py | 2 +- searx/engines/kickass.py | 3 +- searx/engines/nyaa.py | 3 +- searx/engines/piratebay.py | 3 +- searx/engines/seedpeer.py | 2 +- searx/engines/stackoverflow.py | 2 +- searx/engines/startpage.py | 3 +- searx/engines/tokyotoshokan.py | 3 +- searx/engines/torrentz.py | 3 +- searx/engines/twitter.py | 2 +- searx/engines/wikidata.py | 3 +- searx/engines/www1x.py | 2 +- searx/engines/xpath.py | 75 +------------------------ searx/engines/yahoo.py | 3 +- searx/engines/yahoo_news.py | 3 +- searx/engines/yggtorrent.py | 3 +- searx/engines/youtube_noapi.py | 3 +- searx/utils.py | 74 +++++++++++++++++++++++- tests/unit/test_utils.py | 49 ++++++++++++++-- 39 files changed, 156 insertions(+), 137 deletions(-) diff --git a/searx/engines/1337x.py b/searx/engines/1337x.py index 76a7a1634..9e045bc51 100644 --- a/searx/engines/1337x.py +++ b/searx/engines/1337x.py @@ -1,7 +1,6 @@ from urllib.parse import quote, urljoin from lxml import html -from searx.engines.xpath import extract_text -from searx.utils import get_torrent_size +from searx.utils import extract_text, get_torrent_size url = 'https://1337x.to/' diff --git a/searx/engines/acgsou.py b/searx/engines/acgsou.py index d5d3e3178..7bab64f5b 100644 --- a/searx/engines/acgsou.py +++ b/searx/engines/acgsou.py @@ -11,8 +11,7 @@ from urllib.parse import urlencode from lxml import html -from searx.engines.xpath import extract_text -from searx.utils import get_torrent_size, int_or_zero +from searx.utils import extract_text, get_torrent_size, int_or_zero # engine dependent config categories = ['files', 'images', 'videos', 'music'] diff --git a/searx/engines/apkmirror.py b/searx/engines/apkmirror.py index 4e6dcd486..a8ff499af 100644 --- a/searx/engines/apkmirror.py +++ b/searx/engines/apkmirror.py @@ -11,7 +11,7 @@ from urllib.parse import urlencode from lxml import html -from searx.engines.xpath import extract_text +from searx.utils import extract_text # engine dependent config diff --git a/searx/engines/archlinux.py b/searx/engines/archlinux.py index e2f44b0f5..8f93f4f38 100644 --- a/searx/engines/archlinux.py +++ b/searx/engines/archlinux.py @@ -13,7 +13,7 @@ from urllib.parse import urlencode, urljoin from lxml import html -from searx.engines.xpath import extract_text +from searx.utils import extract_text # engine dependent config categories = ['it'] diff --git a/searx/engines/bing.py b/searx/engines/bing.py index c7b619369..a4fa1db78 100644 --- a/searx/engines/bing.py +++ b/searx/engines/bing.py @@ -17,8 +17,7 @@ import re from urllib.parse import urlencode from lxml import html from searx import logger, utils -from searx.engines.xpath import extract_text -from 
searx.utils import match_language, gen_useragent, eval_xpath +from searx.utils import extract_text, match_language, gen_useragent, eval_xpath logger = logger.getChild('bing engine') diff --git a/searx/engines/btdigg.py b/searx/engines/btdigg.py index 2faade3e2..d73ee23a6 100644 --- a/searx/engines/btdigg.py +++ b/searx/engines/btdigg.py @@ -13,8 +13,7 @@ from lxml import html from operator import itemgetter from urllib.parse import quote, urljoin -from searx.engines.xpath import extract_text -from searx.utils import get_torrent_size +from searx.utils import extract_text, get_torrent_size # engine dependent config categories = ['videos', 'music', 'files'] diff --git a/searx/engines/deviantart.py b/searx/engines/deviantart.py index 2bd21fa5d..b91816daa 100644 --- a/searx/engines/deviantart.py +++ b/searx/engines/deviantart.py @@ -15,7 +15,7 @@ from lxml import html import re from urllib.parse import urlencode -from searx.engines.xpath import extract_text +from searx.utils import extract_text # engine dependent config diff --git a/searx/engines/digbt.py b/searx/engines/digbt.py index e2c0389c6..b1a90fb2f 100644 --- a/searx/engines/digbt.py +++ b/searx/engines/digbt.py @@ -12,8 +12,7 @@ from urllib.parse import urljoin from lxml import html -from searx.engines.xpath import extract_text -from searx.utils import get_torrent_size +from searx.utils import extract_text, get_torrent_size categories = ['videos', 'music', 'files'] diff --git a/searx/engines/doku.py b/searx/engines/doku.py index 513ffda89..e1b10d664 100644 --- a/searx/engines/doku.py +++ b/searx/engines/doku.py @@ -11,8 +11,7 @@ from urllib.parse import urlencode from lxml.html import fromstring -from searx.engines.xpath import extract_text -from searx.utils import eval_xpath +from searx.utils import extract_text, eval_xpath # engine dependent config categories = ['general'] # TODO , 'images', 'music', 'videos', 'files' diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py index fb1ea2b2d..4a42fe7f1 100644 --- a/searx/engines/duckduckgo.py +++ b/searx/engines/duckduckgo.py @@ -16,9 +16,8 @@ from lxml.html import fromstring from json import loads from urllib.parse import urlencode -from searx.engines.xpath import extract_text from searx.poolrequests import get -from searx.utils import match_language, eval_xpath +from searx.utils import extract_text, match_language, eval_xpath # engine dependent config categories = ['general'] diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py index 73154a525..7ce54f056 100644 --- a/searx/engines/duckduckgo_definitions.py +++ b/searx/engines/duckduckgo_definitions.py @@ -13,9 +13,8 @@ import json from urllib.parse import urlencode from lxml import html from re import compile -from searx.engines.xpath import extract_text from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, language_aliases -from searx.utils import html_to_text, match_language +from searx.utils import extract_text, html_to_text, match_language url = 'https://api.duckduckgo.com/'\ + '?{query}&format=json&pretty=0&no_redirect=1&d=1' diff --git a/searx/engines/duckduckgo_images.py b/searx/engines/duckduckgo_images.py index 38e141f8b..27ef7442e 100644 --- a/searx/engines/duckduckgo_images.py +++ b/searx/engines/duckduckgo_images.py @@ -15,12 +15,12 @@ from json import loads from urllib.parse import urlencode -from searx.engines.xpath import extract_text from searx.engines.duckduckgo import ( _fetch_supported_languages, supported_languages_url, 
get_region_code, language_aliases ) from searx.poolrequests import get +from searx.utils import extract_text # engine dependent config categories = ['images'] diff --git a/searx/engines/duden.py b/searx/engines/duden.py index a711f422e..bbe11de9b 100644 --- a/searx/engines/duden.py +++ b/searx/engines/duden.py @@ -11,8 +11,7 @@ from lxml import html, etree import re from urllib.parse import quote, urljoin -from searx.engines.xpath import extract_text -from searx.utils import eval_xpath +from searx.utils import extract_text, eval_xpath from searx import logger categories = ['general'] diff --git a/searx/engines/etools.py b/searx/engines/etools.py index efc102ef6..a0762d1c7 100644 --- a/searx/engines/etools.py +++ b/searx/engines/etools.py @@ -11,8 +11,7 @@ from lxml import html from urllib.parse import quote -from searx.engines.xpath import extract_text -from searx.utils import eval_xpath +from searx.utils import extract_text, eval_xpath categories = ['general'] paging = False diff --git a/searx/engines/fdroid.py b/searx/engines/fdroid.py index a2a5114df..3d37db44e 100644 --- a/searx/engines/fdroid.py +++ b/searx/engines/fdroid.py @@ -11,7 +11,7 @@ from urllib.parse import urlencode from lxml import html -from searx.engines.xpath import extract_text +from searx.utils import extract_text # engine dependent config categories = ['files'] diff --git a/searx/engines/framalibre.py b/searx/engines/framalibre.py index 14b659b5f..e3d056425 100644 --- a/searx/engines/framalibre.py +++ b/searx/engines/framalibre.py @@ -13,7 +13,7 @@ from html import escape from urllib.parse import urljoin, urlencode from lxml import html -from searx.engines.xpath import extract_text +from searx.utils import extract_text # engine dependent config categories = ['it'] diff --git a/searx/engines/gentoo.py b/searx/engines/gentoo.py index b6bc99fab..16b3e692d 100644 --- a/searx/engines/gentoo.py +++ b/searx/engines/gentoo.py @@ -13,7 +13,7 @@ from urllib.parse import urlencode, urljoin from lxml import html -from searx.engines.xpath import extract_text +from searx.utils import extract_text # engine dependent config categories = ['it'] diff --git a/searx/engines/google.py b/searx/engines/google.py index c9faadb6e..83b18a9a0 100644 --- a/searx/engines/google.py +++ b/searx/engines/google.py @@ -21,9 +21,8 @@ Definitions`_. 
from urllib.parse import urlencode, urlparse from lxml import html from flask_babel import gettext -from searx.engines.xpath import extract_text from searx import logger -from searx.utils import match_language, eval_xpath +from searx.utils import match_language, extract_text, eval_xpath logger = logger.getChild('google engine') diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py index 9dd5fad2c..8cfb1e17f 100644 --- a/searx/engines/google_images.py +++ b/searx/engines/google_images.py @@ -28,8 +28,7 @@ from urllib.parse import urlencode, urlparse, unquote from lxml import html from flask_babel import gettext from searx import logger -from searx.utils import eval_xpath -from searx.engines.xpath import extract_text +from searx.utils import extract_text, eval_xpath # pylint: disable=unused-import from searx.engines.google import ( diff --git a/searx/engines/google_videos.py b/searx/engines/google_videos.py index 08af55902..78e1eb1cb 100644 --- a/searx/engines/google_videos.py +++ b/searx/engines/google_videos.py @@ -14,7 +14,7 @@ from datetime import date, timedelta from json import loads from urllib.parse import urlencode from lxml import html -from searx.engines.xpath import extract_text +from searx.utils import extract_text import re # engine dependent config diff --git a/searx/engines/ina.py b/searx/engines/ina.py index cce580273..52c939498 100644 --- a/searx/engines/ina.py +++ b/searx/engines/ina.py @@ -16,7 +16,7 @@ from urllib.parse import urlencode from lxml import html from dateutil import parser from html.parser import HTMLParser -from searx.engines.xpath import extract_text +from searx.utils import extract_text # engine dependent config diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py index af48d990b..90bd33063 100644 --- a/searx/engines/kickass.py +++ b/searx/engines/kickass.py @@ -13,8 +13,7 @@ from lxml import html from operator import itemgetter from urllib.parse import quote, urljoin -from searx.engines.xpath import extract_text -from searx.utils import get_torrent_size, convert_str_to_int +from searx.utils import extract_text, get_torrent_size, convert_str_to_int # engine dependent config categories = ['videos', 'music', 'files'] diff --git a/searx/engines/nyaa.py b/searx/engines/nyaa.py index ed8897ddc..e0a91494f 100644 --- a/searx/engines/nyaa.py +++ b/searx/engines/nyaa.py @@ -11,8 +11,7 @@ from lxml import html from urllib.parse import urlencode -from searx.engines.xpath import extract_text -from searx.utils import get_torrent_size, int_or_zero +from searx.utils import extract_text, get_torrent_size, int_or_zero # engine dependent config categories = ['files', 'images', 'videos', 'music'] diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py index 42866d058..b56b92a24 100644 --- a/searx/engines/piratebay.py +++ b/searx/engines/piratebay.py @@ -13,8 +13,7 @@ from datetime import datetime from operator import itemgetter from urllib.parse import quote, urljoin -from searx.engines.xpath import extract_text -from searx.utils import get_torrent_size +from searx.utils import extract_text, get_torrent_size # engine dependent config categories = ["videos", "music", "files"] diff --git a/searx/engines/seedpeer.py b/searx/engines/seedpeer.py index 3778abe7b..39916da6e 100644 --- a/searx/engines/seedpeer.py +++ b/searx/engines/seedpeer.py @@ -12,7 +12,7 @@ from lxml import html from json import loads from operator import itemgetter from urllib.parse import quote, urljoin -from searx.engines.xpath import extract_text +from 
searx.utils import extract_text url = 'https://seedpeer.me/' diff --git a/searx/engines/stackoverflow.py b/searx/engines/stackoverflow.py index 90e4543d7..c6d58de65 100644 --- a/searx/engines/stackoverflow.py +++ b/searx/engines/stackoverflow.py @@ -12,7 +12,7 @@ from urllib.parse import urlencode, urljoin from lxml import html -from searx.engines.xpath import extract_text +from searx.utils import extract_text # engine dependent config categories = ['it'] diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py index b6d0c24f7..d26c2d105 100644 --- a/searx/engines/startpage.py +++ b/searx/engines/startpage.py @@ -17,9 +17,8 @@ import re from unicodedata import normalize, combining from babel import Locale from babel.localedata import locale_identifiers -from searx.engines.xpath import extract_text from searx.languages import language_codes -from searx.utils import eval_xpath, match_language +from searx.utils import extract_text, eval_xpath, match_language # engine dependent config categories = ['general'] diff --git a/searx/engines/tokyotoshokan.py b/searx/engines/tokyotoshokan.py index 9c8774d7c..9fffba8a6 100644 --- a/searx/engines/tokyotoshokan.py +++ b/searx/engines/tokyotoshokan.py @@ -13,9 +13,8 @@ import re from urllib.parse import urlencode from lxml import html -from searx.engines.xpath import extract_text from datetime import datetime -from searx.utils import get_torrent_size, int_or_zero +from searx.utils import extract_text, get_torrent_size, int_or_zero # engine dependent config categories = ['files', 'videos', 'music'] diff --git a/searx/engines/torrentz.py b/searx/engines/torrentz.py index fcc8c042c..4d3e6fdd7 100644 --- a/searx/engines/torrentz.py +++ b/searx/engines/torrentz.py @@ -15,8 +15,7 @@ import re from urllib.parse import urlencode from lxml import html from datetime import datetime -from searx.engines.xpath import extract_text -from searx.utils import get_torrent_size +from searx.utils import extract_text, get_torrent_size # engine dependent config categories = ['files', 'videos', 'music'] diff --git a/searx/engines/twitter.py b/searx/engines/twitter.py index 549b14e96..6d9bdbb5c 100644 --- a/searx/engines/twitter.py +++ b/searx/engines/twitter.py @@ -15,7 +15,7 @@ from urllib.parse import urlencode, urljoin from lxml import html from datetime import datetime -from searx.engines.xpath import extract_text +from searx.utils import extract_text # engine dependent config categories = ['social media'] diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py index ffa3724fd..c557f4e59 100644 --- a/searx/engines/wikidata.py +++ b/searx/engines/wikidata.py @@ -13,9 +13,8 @@ from searx import logger from searx.poolrequests import get -from searx.engines.xpath import extract_text from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url -from searx.utils import match_language, eval_xpath +from searx.utils import extract_text, match_language, eval_xpath from urllib.parse import urlencode from json import loads diff --git a/searx/engines/www1x.py b/searx/engines/www1x.py index 1cb74dbad..8d691c852 100644 --- a/searx/engines/www1x.py +++ b/searx/engines/www1x.py @@ -12,7 +12,7 @@ from lxml import html from urllib.parse import urlencode, urljoin -from searx.engines.xpath import extract_text +from searx.utils import extract_text # engine dependent config categories = ['images'] diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py index bd97a93a5..a269253d7 100644 --- a/searx/engines/xpath.py +++ b/searx/engines/xpath.py 
@@ -1,7 +1,6 @@ -from urllib.parse import unquote, urlencode, urljoin, urlparse from lxml import html -from lxml.etree import _ElementStringResult, _ElementUnicodeResult -from searx.utils import html_to_text, eval_xpath +from urllib.parse import urlencode +from searx.utils import extract_text, extract_url, eval_xpath search_url = None url_xpath = None @@ -21,76 +20,6 @@ page_size = 1 first_page_num = 1 -''' -if xpath_results is list, extract the text from each result and concat the list -if xpath_results is a xml element, extract all the text node from it - ( text_content() method from lxml ) -if xpath_results is a string element, then it's already done -''' - - -def extract_text(xpath_results): - if type(xpath_results) == list: - # it's list of result : concat everything using recursive call - result = '' - for e in xpath_results: - result = result + extract_text(e) - return result.strip() - elif type(xpath_results) in [_ElementStringResult, _ElementUnicodeResult]: - # it's a string - return ''.join(xpath_results) - else: - # it's a element - text = html.tostring( - xpath_results, encoding='unicode', method='text', with_tail=False - ) - text = text.strip().replace('\n', ' ') - return ' '.join(text.split()) - - -def extract_url(xpath_results, search_url): - if xpath_results == []: - raise Exception('Empty url resultset') - url = extract_text(xpath_results) - - if url.startswith('//'): - # add http or https to this kind of url //example.com/ - parsed_search_url = urlparse(search_url) - url = '{0}:{1}'.format(parsed_search_url.scheme or 'http', url) - elif url.startswith('/'): - # fix relative url to the search engine - url = urljoin(search_url, url) - - # fix relative urls that fall through the crack - if '://' not in url: - url = urljoin(search_url, url) - - # normalize url - url = normalize_url(url) - - return url - - -def normalize_url(url): - parsed_url = urlparse(url) - - # add a / at this end of the url if there is no path - if not parsed_url.netloc: - raise Exception('Cannot parse url') - if not parsed_url.path: - url += '/' - - # FIXME : hack for yahoo - if parsed_url.hostname == 'search.yahoo.com'\ - and parsed_url.path.startswith('/r'): - p = parsed_url.path - mark = p.find('/**') - if mark != -1: - return unquote(p[mark + 3:]).decode() - - return url - - def request(query, params): query = urlencode({'q': query})[2:] diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py index 0133b57b5..3420aa6d5 100644 --- a/searx/engines/yahoo.py +++ b/searx/engines/yahoo.py @@ -13,8 +13,7 @@ from urllib.parse import unquote, urlencode from lxml import html -from searx.engines.xpath import extract_text, extract_url -from searx.utils import match_language, eval_xpath +from searx.utils import extract_text, extract_url, match_language, eval_xpath # engine dependent config categories = ['general'] diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py index 345e4d91f..e9dd5d6db 100644 --- a/searx/engines/yahoo_news.py +++ b/searx/engines/yahoo_news.py @@ -13,12 +13,11 @@ import re from datetime import datetime, timedelta from urllib.parse import urlencode from lxml import html -from searx.engines.xpath import extract_text, extract_url from searx.engines.yahoo import ( parse_url, _fetch_supported_languages, supported_languages_url, language_aliases ) from dateutil import parser -from searx.utils import match_language +from searx.utils import extract_text, extract_url, match_language # engine dependent config categories = ['news'] diff --git a/searx/engines/yggtorrent.py 
b/searx/engines/yggtorrent.py index 37bf3b1d9..ec84d2c6b 100644 --- a/searx/engines/yggtorrent.py +++ b/searx/engines/yggtorrent.py @@ -12,8 +12,7 @@ from lxml import html from operator import itemgetter from datetime import datetime from urllib.parse import quote -from searx.engines.xpath import extract_text -from searx.utils import get_torrent_size +from searx.utils import extract_text, get_torrent_size from searx.poolrequests import get as http_get # engine dependent config diff --git a/searx/engines/youtube_noapi.py b/searx/engines/youtube_noapi.py index fef501458..16c0eddeb 100644 --- a/searx/engines/youtube_noapi.py +++ b/searx/engines/youtube_noapi.py @@ -11,8 +11,7 @@ from functools import reduce from json import loads from urllib.parse import quote_plus -from searx.engines.xpath import extract_text -from searx.utils import list_get +from searx.utils import extract_text, list_get # engine dependent config categories = ['videos', 'music'] diff --git a/searx/utils.py b/searx/utils.py index f74f2ac88..d3969df58 100644 --- a/searx/utils.py +++ b/searx/utils.py @@ -10,9 +10,13 @@ from os.path import splitext, join from io import open from random import choice from html.parser import HTMLParser -from lxml.etree import XPath +from urllib.parse import urljoin, urlparse, unquote + +from lxml import html +from lxml.etree import XPath, _ElementStringResult, _ElementUnicodeResult from babel.core import get_global + from searx import settings from searx.version import VERSION_STRING from searx.languages import language_codes @@ -106,6 +110,74 @@ def html_to_text(html): return s.get_text() +def extract_text(xpath_results): + ''' + if xpath_results is list, extract the text from each result and concat the list + if xpath_results is a xml element, extract all the text node from it + ( text_content() method from lxml ) + if xpath_results is a string element, then it's already done + ''' + if type(xpath_results) == list: + # it's list of result : concat everything using recursive call + result = '' + for e in xpath_results: + result = result + extract_text(e) + return result.strip() + elif type(xpath_results) in [_ElementStringResult, _ElementUnicodeResult]: + # it's a string + return ''.join(xpath_results) + else: + # it's a element + text = html.tostring( + xpath_results, encoding='unicode', method='text', with_tail=False + ) + text = text.strip().replace('\n', ' ') + return ' '.join(text.split()) + + +def extract_url(xpath_results, search_url): + if xpath_results == []: + raise Exception('Empty url resultset') + url = extract_text(xpath_results) + + if url.startswith('//'): + # add http or https to this kind of url //example.com/ + parsed_search_url = urlparse(search_url) + url = '{0}:{1}'.format(parsed_search_url.scheme or 'http', url) + elif url.startswith('/'): + # fix relative url to the search engine + url = urljoin(search_url, url) + + # fix relative urls that fall through the crack + if '://' not in url: + url = urljoin(search_url, url) + + # normalize url + url = normalize_url(url) + + return url + + +def normalize_url(url): + parsed_url = urlparse(url) + + # add a / at this end of the url if there is no path + if not parsed_url.netloc: + raise Exception('Cannot parse url') + if not parsed_url.path: + url += '/' + + # FIXME : hack for yahoo + if parsed_url.hostname == 'search.yahoo.com'\ + and parsed_url.path.startswith('/r'): + p = parsed_url.path + mark = p.find('/**') + if mark != -1: + return unquote(p[mark + 3:]).decode() + + return url + + def dict_subset(d, properties): result = {} 
for k in properties: diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 69f5ef92a..f3a98ad71 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -1,4 +1,7 @@ # -*- coding: utf-8 -*- +import lxml.etree +from lxml import html + from searx.testing import SearxTestCase from searx import utils @@ -16,7 +19,30 @@ class TestUtils(SearxTestCase): self.assertTrue(utils.searx_useragent().startswith('searx')) def test_html_to_text(self): - html = """ + html_str = """ + <a href="/testlink" class="link_access_account"> + <style> + .toto { + color: red; + } + </style> + <span class="toto"> + <span> + <img src="test.jpg" /> + </span> + </span> + <span class="test"> + Test text + </span> + </a> + """ + self.assertIsInstance(utils.html_to_text(html_str), str) + self.assertIsNotNone(utils.html_to_text(html_str)) + self.assertEqual(utils.html_to_text(html_str), "Test text") + + def test_extract_text(self): + html_str = """ <a href="/testlink" class="link_access_account"> <span class="toto"> <span> @@ -28,9 +54,24 @@ class TestUtils(SearxTestCase): """ - self.assertIsInstance(utils.html_to_text(html), str) - self.assertIsNotNone(utils.html_to_text(html)) - self.assertEqual(utils.html_to_text(html), "Test text") + dom = html.fromstring(html_str) + self.assertEqual(utils.extract_text(dom), 'Test text') + self.assertEqual(utils.extract_text(dom.xpath('//span')), 'Test text') + self.assertEqual(utils.extract_text(dom.xpath('//img/@src')), 'test.jpg') + self.assertEqual(utils.extract_text(dom.xpath('//unexistingtag')), '') + + def test_extract_url(self): + def f(html_str, search_url): + return utils.extract_url(html.fromstring(html_str), search_url) + self.assertEqual(f('<span id="42">https://example.com</span>', 'http://example.com/'), 'https://example.com/') + self.assertEqual(f('https://example.com', 'http://example.com/'), 'https://example.com/') + self.assertEqual(f('//example.com', 'http://example.com/'), 'http://example.com/') + self.assertEqual(f('//example.com', 'https://example.com/'), 'https://example.com/') + self.assertEqual(f('/path?a=1', 'https://example.com'), 'https://example.com/path?a=1') + with self.assertRaises(lxml.etree.ParserError): + f('', 'https://example.com') + with self.assertRaises(Exception): + utils.extract_url([], 'https://example.com') def test_html_to_text_invalid(self): html = '
<p><b>Lorem ipsum</i>dolor sit amet</p>'
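Patch 1 is a pure code move: engines now import extract_text and extract_url from searx.utils instead of searx.engines.xpath, and the call sites themselves are unchanged. Below is a minimal sketch of how an engine uses the relocated helpers after this patch; the page snippet, the XPath expressions and base_url are illustrative stand-ins, not code from any real engine:

from lxml import html
from searx.utils import extract_text, extract_url

base_url = 'https://example.com/'  # hypothetical engine base URL
page = '<div class="r"><a href="/doc">A <b>result</b></a></div>'  # stand-in response body
dom = html.fromstring(page)

for result in dom.xpath('//div[@class="r"]'):
    # extract_text concatenates the text nodes of the matched elements
    title = extract_text(result.xpath('.//a'))
    # extract_url resolves the relative href against base_url and normalizes it
    url = extract_url(result.xpath('.//a/@href'), base_url)
    print(title, url)  # -> A result https://example.com/doc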
From c1d10bde0227e2ef714aec67a18d1e8180f1ed27 Mon Sep 17 00:00:00 2001 From: Alexandre Flament Date: Fri, 2 Oct 2020 18:17:01 +0200 Subject: [PATCH 2/3] [mod] searx/utils.py: add docstring --- searx/utils.py | 195 +++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 157 insertions(+), 38 deletions(-) diff --git a/searx/utils.py b/searx/utils.py index d3969df58..23bcbc124 100644 --- a/searx/utils.py +++ b/searx/utils.py @@ -39,12 +39,17 @@ lang_to_lc_cache = dict() def searx_useragent(): + """Return the searx User Agent""" return 'searx/{searx_version} {suffix}'.format( searx_version=VERSION_STRING, suffix=settings['outgoing'].get('useragent_suffix', '')) def gen_useragent(os=None): + """Return a random browser User Agent + + See searx/data/useragents.json + """ return str(useragents['ua'].format(os=os or choice(useragents['os']), version=choice(useragents['versions']))) @@ -99,24 +104,40 @@ class HTMLTextExtractor(HTMLParser): return ''.join(self.result).strip() -def html_to_text(html): - html = html.replace('\n', ' ') - html = ' '.join(html.split()) +def html_to_text(html_str): + """Extract text from an HTML string + + Args: + * html_str (str): string HTML + + Returns: + * str: extracted text + + Examples: + >>> html_to_text('Example <span id="42">#2</span>') + 'Example #2' + + >>> html_to_text('<style>.span { color: red; }</style><span>Example</span>') + 'Example' + """ + html_str = html_str.replace('\n', ' ') + html_str = ' '.join(html_str.split()) s = HTMLTextExtractor() try: - s.feed(html) + s.feed(html_str) except HTMLTextExtractorException: - logger.debug("HTMLTextExtractor: invalid HTML\n%s", html) + logger.debug("HTMLTextExtractor: invalid HTML\n%s", html_str) return s.get_text() def extract_text(xpath_results): - ''' - if xpath_results is list, extract the text from each result and concat the list - if xpath_results is a xml element, extract all the text node from it - ( text_content() method from lxml ) - if xpath_results is a string element, then it's already done - ''' + """Extract text from an lxml result + + * if xpath_results is a list, extract the text from each result and concat the list + * if xpath_results is an xml element, extract all the text nodes from it ( text_content() method from lxml ) + * if xpath_results is a string element, then it's already done + """ if type(xpath_results) == list: # it's list of result : concat everything using recursive call result = '' @@ -135,7 +156,58 @@ def extract_text(xpath_results): return ' '.join(text.split()) +def normalize_url(url): + parsed_url = urlparse(url) + + # add a / at this end of the url if there is no path + if not parsed_url.netloc: + raise Exception('Cannot parse url') + if not parsed_url.path: + url += '/' + + # FIXME : hack for yahoo + if parsed_url.hostname == 'search.yahoo.com'\ + and parsed_url.path.startswith('/r'): + p = parsed_url.path + mark = p.find('/**') + if mark != -1: + return unquote(p[mark + 3:]).decode() + + return url + + def extract_url(xpath_results, search_url): + """Extract and normalize URL from lxml Element + + Args: + * xpath_results (Union[List[html.HtmlElement], html.HtmlElement]): lxml Element(s) + * search_url (str): Base URL + + Example: + >>> def f(s, search_url): + ...     return searx.utils.extract_url(html.fromstring(s), search_url) + >>> f('<span id="42">https://example.com</span>', 'http://example.com/') + 'https://example.com/' + >>> f('https://example.com', 'http://example.com/') + 'https://example.com/' + >>> f('//example.com', 'http://example.com/') + 'http://example.com/' + >>> f('//example.com', 'https://example.com/') + 'https://example.com/' + >>>
f('/path?a=1', 'https://example.com') + 'https://example.com/path?a=1' + >>> f('', 'https://example.com') + raise lxml.etree.ParserError + >>> searx.utils.extract_url([], 'https://example.com') + raise Exception + + Raises: + * Exception + * lxml.etree.ParserError + + Returns: + * str: normalized URL + """ if xpath_results == []: raise Exception('Empty url resultset') url = extract_text(xpath_results) @@ -158,27 +230,15 @@ def extract_url(xpath_results, search_url): return url -def normalize_url(url): - parsed_url = urlparse(url) - - # add a / at this end of the url if there is no path - if not parsed_url.netloc: - raise Exception('Cannot parse url') - if not parsed_url.path: - url += '/' - - # FIXME : hack for yahoo - if parsed_url.hostname == 'search.yahoo.com'\ - and parsed_url.path.startswith('/r'): - p = parsed_url.path - mark = p.find('/**') - if mark != -1: - return unquote(p[mark + 3:]).decode() - - return url - - def dict_subset(d, properties): + """Extract a subset of a dict + + Examples: + >>> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'C']) + {'A': 'a', 'C': 'c'} + >>> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'D']) + {'A': 'a'} + """ result = {} for k in properties: if k in d: @@ -186,8 +246,19 @@ def dict_subset(d, properties): return result -# get element in list or default value def list_get(a_list, index, default=None): + """Get element in list or default value + + Examples: + >>> list_get(['A', 'B', 'C'], 0) + 'A' + >>> list_get(['A', 'B', 'C'], 3) + None + >>> list_get(['A', 'B', 'C'], 3, 'default') + 'default' + >>> list_get(['A', 'B', 'C'], -1) + 'C' + """ if len(a_list) > index: return a_list[index] else: @@ -195,6 +266,21 @@ def list_get(a_list, index, default=None): def get_torrent_size(filesize, filesize_multiplier): + """ + + Args: + * filesize (str): size + * filesize_multiplier (str): TB, GB, ... TiB, GiB ... + + Returns: + * int: number of bytes + + Example: + >>> get_torrent_size('5', 'GB') + 5368709120 + >>> get_torrent_size('3.14', 'MiB') + 3140000 + """ try: filesize = float(filesize) @@ -221,14 +307,18 @@ def get_torrent_size(filesize, filesize_multiplier): def convert_str_to_int(number_str): + """Convert number_str to int or 0 if number_str is not a number.""" if number_str.isdigit(): return int(number_str) else: return 0 -# convert a variable to integer or return 0 if it's not a number def int_or_zero(num): + """Convert num to int or 0. num can be either a str or a list. + If num is a list, the first element is converted to int (or 0 if the list is empty). + If num is a str, see convert_str_to_int + """ if isinstance(num, list): if len(num) < 1: return 0 @@ -237,6 +327,22 @@ def int_or_zero(num): def is_valid_lang(lang): + """Return language code and name if lang describes a language.
+ + Examples: + >>> is_valid_lang('zz') + False + >>> is_valid_lang('uk') + (True, 'uk', 'ukrainian') + >>> is_valid_lang(b'uk') + (True, 'uk', 'ukrainian') + >>> is_valid_lang('en') + (True, 'en', 'english') + >>> searx.utils.is_valid_lang('Español') + (True, 'es', 'spanish') + >>> searx.utils.is_valid_lang('Spanish') + (True, 'es', 'spanish') + """ if isinstance(lang, bytes): lang = lang.decode() is_abbr = (len(lang) == 2) @@ -264,8 +370,8 @@ def _get_lang_to_lc_dict(lang_list): return value -# auxiliary function to match lang_code in lang_list def _match_language(lang_code, lang_list=[], custom_aliases={}): + """auxiliary function to match lang_code in lang_list""" # replace language code with a custom alias if necessary if lang_code in custom_aliases: lang_code = custom_aliases[lang_code] @@ -287,8 +393,8 @@ def _match_language(lang_code, lang_list=[], custom_aliases={}): return _get_lang_to_lc_dict(lang_list).get(lang_code, None) -# get the language code from lang_list that best matches locale_code def match_language(locale_code, lang_list=[], custom_aliases={}, fallback='en-US'): + """get the language code from lang_list that best matches locale_code""" # try to get language from given locale_code language = _match_language(locale_code, lang_list, custom_aliases) if language: @@ -330,6 +436,7 @@ def load_module(filename, module_dir): def to_string(obj): + """Convert obj to its string representation.""" if isinstance(obj, str): return obj if isinstance(obj, Number): @@ -341,13 +448,19 @@ def to_string(obj): def ecma_unescape(s): - """ - python implementation of the unescape javascript function + """Python implementation of the unescape javascript function https://www.ecma-international.org/ecma-262/6.0/#sec-unescape-string https://developer.mozilla.org/fr/docs/Web/JavaScript/Reference/Objets_globaux/unescape + + Examples: + >>> ecma_unescape('%u5409') + '吉' + >>> ecma_unescape('%20') + ' ' + >>> ecma_unescape('%F3') + 'ó' """ - # s = unicode(s) # "%u5409" becomes "吉" s = ecma_unescape4_re.sub(lambda e: chr(int(e.group(1), 16)), s) # "%20" becomes " ", "%F3" becomes "ó" @@ -371,6 +484,11 @@ def get_engine_from_settings(name): def get_xpath(xpath_str): + """Return cached compiled XPath + + There is no thread lock. + Worst case scenario, xpath_str is compiled more than one time. + """ result = xpath_cache.get(xpath_str, None) if result is None: result = XPath(xpath_str) @@ -379,5 +497,6 @@ def get_xpath(xpath_str): def eval_xpath(element, xpath_str): + """Equivalent of element.xpath(xpath_str) but compile xpath_str once for all.""" xpath = get_xpath(xpath_str) return xpath(element) From 8f914a28facec314a2b98b11d3cc1207eb8ee8ab Mon Sep 17 00:00:00 2001 From: Alexandre Flament Date: Sat, 3 Oct 2020 10:02:50 +0200 Subject: [PATCH 3/3] [mod] searx.utils.normalize_url: remove Yahoo hack * The hack for Yahoo URLs is not necessary anymore. 
(see searx.engines.yahoo.parse_url) * Move the URL normalization logic from extract_url into normalize_url --- searx/utils.py | 71 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 43 insertions(+), 28 deletions(-) diff --git a/searx/utils.py b/searx/utils.py index 23bcbc124..0be3c5b00 100644 --- a/searx/utils.py +++ b/searx/utils.py @@ -156,7 +156,45 @@ def extract_text(xpath_results): return ' '.join(text.split()) -def normalize_url(url): +def normalize_url(url, base_url): + """Normalize URL: add protocol, join URL with base_url, add trailing slash if there is no path + + Args: + * url (str): Relative URL + * base_url (str): Base URL, it must be an absolute URL. + + Example: + >>> normalize_url('https://example.com', 'http://example.com/') + 'https://example.com/' + >>> normalize_url('//example.com', 'http://example.com/') + 'http://example.com/' + >>> normalize_url('//example.com', 'https://example.com/') + 'https://example.com/' + >>> normalize_url('/path?a=1', 'https://example.com') + 'https://example.com/path?a=1' + >>> normalize_url('', 'https://example.com') + 'https://example.com/' + >>> normalize_url('/test', '/path') + raise Exception + + Raises: + * Exception + + Returns: + * str: normalized URL + """ + if url.startswith('//'): + # add http or https to this kind of url //example.com/ + parsed_search_url = urlparse(base_url) + url = '{0}:{1}'.format(parsed_search_url.scheme or 'http', url) + elif url.startswith('/'): + # fix relative url to the search engine + url = urljoin(base_url, url) + + # fix relative urls that fall through the crack + if '://' not in url: + url = urljoin(base_url, url) + parsed_url = urlparse(url) # add a / at this end of the url if there is no path @@ -165,23 +203,15 @@ if not parsed_url.path: url += '/' - # FIXME : hack for yahoo - if parsed_url.hostname == 'search.yahoo.com'\ - and parsed_url.path.startswith('/r'): - p = parsed_url.path - mark = p.find('/**') - if mark != -1: - return unquote(p[mark + 3:]).decode() - return url -def extract_url(xpath_results, search_url): +def extract_url(xpath_results, base_url): """Extract and normalize URL from lxml Element Args: * xpath_results (Union[List[html.HtmlElement], html.HtmlElement]): lxml Element(s) - * search_url (str): Base URL + * base_url (str): Base URL Example: >>> def f(s, search_url): @@ -210,24 +240,9 @@ def extract_url(xpath_results, base_url): """ if xpath_results == []: raise Exception('Empty url resultset') - url = extract_text(xpath_results) - - if url.startswith('//'): - # add http or https to this kind of url //example.com/ - parsed_search_url = urlparse(search_url) - url = '{0}:{1}'.format(parsed_search_url.scheme or 'http', url) - elif url.startswith('/'): - # fix relative url to the search engine - url = urljoin(search_url, url) - - # fix relative urls that fall through the crack - if '://' not in url: - url = urljoin(search_url, url) - - # normalize url - url = normalize_url(url) - - return url + url = extract_text(xpath_results) + return normalize_url(url, base_url) def dict_subset(d, properties):
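Applied as a series, the three patches leave searx.utils.normalize_url taking the base URL explicitly, with extract_url reduced to extract_text followed by normalize_url. A short sanity check of the documented behaviour; it assumes a patched searx checkout on PYTHONPATH and uses only the examples given in the docstrings above:

from lxml import html
from searx.utils import extract_url, normalize_url

# protocol-relative URLs inherit the scheme of base_url
assert normalize_url('//example.com', 'https://example.com/') == 'https://example.com/'
# relative paths are joined onto base_url
assert normalize_url('/path?a=1', 'https://example.com') == 'https://example.com/path?a=1'
# an absolute URL without a path only gains a trailing slash
assert normalize_url('https://example.com', 'http://example.com/') == 'https://example.com/'

# extract_url is now text extraction plus the normalization above
dom = html.fromstring('<a href="/doc">doc</a>')
assert extract_url(dom.xpath('//a/@href'), 'https://example.com/') == 'https://example.com/doc'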