From 509afbbb846b98094a9b596b1abb4e193092a3cb Mon Sep 17 00:00:00 2001
From: Markus Heiser
Date: Fri, 31 Mar 2023 17:25:39 +0200
Subject: [PATCH] [fix] engine seznam: fix issues reported by black & pylint

Signed-off-by: Markus Heiser
---
 searx/engines/seznam.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/searx/engines/seznam.py b/searx/engines/seznam.py
index 5588c230f..36a38848a 100644
--- a/searx/engines/seznam.py
+++ b/searx/engines/seznam.py
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
- Seznam
+# lint: pylint
+"""Seznam
+
 """
 
 from urllib.parse import urlencode
@@ -11,7 +12,6 @@ from searx.utils import (
     extract_text,
     eval_xpath_list,
     eval_xpath_getindex,
-    eval_xpath,
 )
 
 # about
@@ -30,8 +30,7 @@ base_url = 'https://search.seznam.cz/'
 
 
 def request(query, params):
-    response_index = get(
-        base_url, headers=params['headers'], raise_for_httperror=True)
+    response_index = get(base_url, headers=params['headers'], raise_for_httperror=True)
     dom = html.fromstring(response_index.text)
 
     url_params = {
@@ -55,9 +54,12 @@ def response(resp):
     results = []
     dom = html.fromstring(resp.content.decode())
 
-    for result_element in eval_xpath_list(dom, '//div[@id="searchpage-root"]//div[@class="Layout--left"]/div[@class="f2c528"]'):
+    for result_element in eval_xpath_list(
+        dom, '//div[@id="searchpage-root"]//div[@class="Layout--left"]/div[@class="f2c528"]'
+    ):
         result_data = eval_xpath_getindex(
-            result_element, './/div[@class="c8774a" or @class="e69e8d a11657"]', 0, default=None)
+            result_element, './/div[@class="c8774a" or @class="e69e8d a11657"]', 0, default=None
+        )
         if result_data is None:
             continue
         title_element = eval_xpath_getindex(result_element, './/h3/a', 0)
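
Note (not part of the patch): the wrapped calls in the last two hunks have the shape black produces once a call no longer fits on one line. Below is a minimal sketch of reproducing that wrapping with black's Python API; the line length of 120 is an assumption for illustration, check the repository's black configuration for the value actually used. The identifiers in the source string (dom, eval_xpath_list) are taken from the hunks above and do not need to be importable, since black only parses the code.

    # Sketch only: feed the pre-patch one-liner to black and print the result.
    # line_length=120 is an assumed value; use the project's configured setting.
    import black

    SRC = (
        "for result_element in eval_xpath_list("
        "dom, '//div[@id=\"searchpage-root\"]//div[@class=\"Layout--left\"]/div[@class=\"f2c528\"]'):\n"
        "    ...\n"
    )

    print(black.format_str(SRC, mode=black.Mode(line_length=120)))
    # Expected: the over-long call is split across lines, with the closing
    # "):" dedented onto its own line, matching the last hunk of the patch.

Running black and pylint over searx/engines/seznam.py with the project's configuration (for example via python -m black and python -m pylint) should show, on the pre-patch code, the kind of findings this patch addresses.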