[mod] pylint all engines without PYLINT_SEARXNG_DISABLE_OPTION

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
pull/3333/head
Authored and committed by Markus Heiser
parent 707d6270c8
commit 8205f170ff

@ -64,6 +64,11 @@ pylint.FILES() {
find . -name searxng.msg
}
+PYLINT_FILES=()
+while IFS= read -r line; do
+PYLINT_FILES+=("$line")
+done <<< "$(pylint.FILES)"
YAMLLINT_FILES=()
while IFS= read -r line; do
YAMLLINT_FILES+=("$line")
@ -77,9 +82,6 @@ PYLINT_SEARXNG_DISABLE_OPTION="\
I,C,R,\
W0105,W0212,W0511,W0603,W0613,W0621,W0702,W0703,W1401,\
E1136"
PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES="traits,supported_languages,language_aliases,logger,categories"
PYLINT_OPTIONS="-m pylint -j 0 --rcfile .pylintrc"
help() {
nvm.help
cat <<EOF
@ -338,12 +340,6 @@ format.python() {
dump_return $?
}
-PYLINT_FILES=()
-while IFS= read -r line; do
-PYLINT_FILES+=("$line")
-done <<< "$(pylint.FILES)"
# shellcheck disable=SC2119
main() {

@ -1,6 +1,7 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
1337x
# pylint: disable=invalid-name
"""1337x
"""
from urllib.parse import quote, urljoin

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
# pylint: disable=invalid-name
"""9GAG (social media)"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Load and initialize the ``engines``, see :py:func:`load_engines` and register
:py:obj:`engine_shortcuts`.

@ -74,7 +74,7 @@ def response(resp):
if number_of_results:
try:
results.append({'number_of_results': int(extract_text(number_of_results))})
-except:
+except: # pylint: disable=bare-except
pass
return results

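A pattern that recurs throughout this commit: W0702 (bare-except) is no longer waived globally through PYLINT_SEARXNG_DISABLE_OPTION, so each intentional bare except now carries an inline waiver at the offending line. A minimal sketch of the mechanism (the surrounding code is invented for illustration; only the pragma syntax is pylint's):

    number_of_results = 'n/a'
    try:
        count = int(number_of_results)
    # the pragma below silences W0702 for this one handler only
    except:  # pylint: disable=bare-except
        count = None

The inline form keeps the waiver next to the code it excuses instead of hiding it in a project-wide option list.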
@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""`Anna's Archive`_ is a free non-profit online shadow library metasearch
engine providing access to a variety of book resources (also via IPFS), created
by a team of anonymous archivists (AnnaArchivist_).

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""APKMirror
"""

@ -1,7 +1,6 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
-"""
-Apple App Store
+"""Apple App Store
"""
from json import loads

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Apple Maps"""
from json import loads

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""
Arch Linux Wiki
~~~~~~~~~~~~~~~

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""The Art Institute of Chicago
Explore thousands of artworks from The Art Institute of Chicago.

@ -1,11 +1,12 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
ArXiV (Scientific preprints)
"""ArXiV (Scientific preprints)
"""
from datetime import datetime
from lxml import etree
from lxml.etree import XPath
from datetime import datetime
from searx.utils import eval_xpath, eval_xpath_list, eval_xpath_getindex
# about
@ -50,7 +51,7 @@ def request(query, params):
# basic search
offset = (params['pageno'] - 1) * number_of_results
-string_args = dict(query=query, offset=offset, number_of_results=number_of_results)
+string_args = {'query': query, 'offset': offset, 'number_of_results': number_of_results}
params['url'] = base_url.format(**string_args)

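The dict() rewrites in this and several later hunks line up with pylint's use-dict-literal check (R1735), which stops being masked once the blanket I,C,R disable from PYLINT_SEARXNG_DISABLE_OPTION no longer applies to the engines. A small sketch of the two spellings (sample values are made up):

    query, offset = 'physics', 0

    # flagged by R1735: building a dict through a keyword-argument call
    string_args = dict(query=query, offset=offset)

    # preferred: the equivalent literal, which also skips a name lookup and a call
    string_args = {'query': query, 'offset': offset}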
@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Ask.com"""
from urllib.parse import urlencode

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Bandcamp (Music)
@website https://bandcamp.com/

@ -1,12 +1,12 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""BASE (Scholar publications)
"""
BASE (Scholar publications)
"""
from datetime import datetime
import re
from urllib.parse import urlencode
from lxml import etree
from datetime import datetime
import re
from searx.utils import searx_useragent
# about
@ -55,13 +55,17 @@ shorcut_dict = {
def request(query, params):
# replace shortcuts with API advanced search keywords
-for key in shorcut_dict.keys():
-query = re.sub(key, shorcut_dict[key], query)
+for key, val in shorcut_dict.items():
+query = re.sub(key, val, query)
# basic search
offset = (params['pageno'] - 1) * number_of_results
-string_args = dict(query=urlencode({'query': query}), offset=offset, hits=number_of_results)
+string_args = {
+'query': urlencode({'query': query}),
+'offset': offset,
+'hits': number_of_results,
+}
params['url'] = base_url.format(**string_args)
@ -99,7 +103,7 @@ def response(resp):
try:
publishedDate = datetime.strptime(date, date_format)
break
-except:
+except: # pylint: disable=bare-except
pass
if publishedDate is not None:

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Bilibili is a Chinese video sharing website.
.. _Bilibili: https://www.bilibili.com

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""This is the implementation of the Bing-WEB engine. Some of this
implementations are shared by other engines:

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Bing-Images: description see :py:obj:`searx.engines.bing`.
"""
# pylint: disable=invalid-name

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Bing-News: description see :py:obj:`searx.engines.bing`.
.. hint::

@ -1,8 +1,7 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
+# pylint: disable=invalid-name
"""Bing-Videos: description see :py:obj:`searx.engines.bing`.
"""
-# pylint: disable=invalid-name
from typing import TYPE_CHECKING
import json

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""BPB refers to ``Bundeszentrale für poltische Bildung``, which is a German
governmental institution aiming to reduce misinformation by providing resources
about politics and history.

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Brave supports the categories listed in :py:obj:`brave_category` (General,
news, videos, images). The support of :py:obj:`paging` and :py:obj:`time range
<time_range_support>` is limited (see remarks).

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""BT4G_ (bt4g.com) is not a tracker and doesn't store any content and only
collects torrent metadata (such as file names and file sizes) and a magnet link
(torrent identifier).

@ -3,8 +3,9 @@
BTDigg (Videos, Music, Files)
"""
-from lxml import html
from urllib.parse import quote, urljoin
+from lxml import html
from searx.utils import extract_text, get_torrent_size
# about
@ -67,7 +68,7 @@ def response(resp):
# convert files to int if possible
try:
files = int(files)
-except:
+except: # pylint: disable=bare-except
files = None
magnetlink = result.xpath('.//div[@class="torrent_magnet"]//a')[0].attrib['href']

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Chefkoch is a German database of recipes.
"""

@ -106,7 +106,7 @@ def init(engine_settings):
if 'command' not in engine_settings:
raise ValueError('engine command : missing configuration key: command')
-global command, working_dir, delimiter, parse_regex, environment_variables
+global command, working_dir, delimiter, parse_regex, environment_variables # pylint: disable=global-statement
command = engine_settings['command']
@ -172,7 +172,7 @@ def _get_results_from_process(results, cmd, pageno):
_command_logger.debug('skipped result:', raw_result)
continue
-if start <= count and count <= end:
+if start <= count and count <= end: # pylint: disable=chained-comparison
result['template'] = result_template
results.append(result)
@ -185,6 +185,7 @@ def _get_results_from_process(results, cmd, pageno):
return_code = process.wait(timeout=timeout)
if return_code != 0:
raise RuntimeError('non-zero return code when running command', cmd, return_code)
+return None
def __get_results_limits(pageno):
@ -230,7 +231,7 @@ def __parse_single_result(raw_result):
elements = raw_result.split(delimiter['chars'], maxsplit=len(delimiter['keys']) - 1)
if len(elements) != len(delimiter['keys']):
return {}
-for i in range(len(elements)):
+for i in range(len(elements)): # pylint: disable=consider-using-enumerate
result[delimiter['keys'][i]] = elements[i]
if parse_regex:

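For readers unfamiliar with the command engine touched above: every line printed by the configured command is split on a delimiter into the configured keys (see __parse_single_result in the hunk). A toy re-implementation, with an invented delimiter config and zip() standing in for the index loop that pylint flagged:

    delimiter = {'chars': '|', 'keys': ['title', 'url', 'content']}

    def parse_single_result(raw_result):
        # split at most len(keys) - 1 times so extra delimiter
        # characters survive inside the last field
        elements = raw_result.split(delimiter['chars'], maxsplit=len(delimiter['keys']) - 1)
        if len(elements) != len(delimiter['keys']):
            return {}
        return dict(zip(delimiter['keys'], elements))

    parse_single_result('Example|https://example.org|a|b')
    # -> {'title': 'Example', 'url': 'https://example.org', 'content': 'a|b'}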
@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""CORE (science)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Cppreference
"""
from lxml import html

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""CrossRef"""
from urllib.parse import urlencode

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Currency convert (DuckDuckGo)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""
Dailymotion (Videos)
~~~~~~~~~~~~~~~~~~~~

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Deepl translation engine"""
from json import loads

@ -45,7 +45,7 @@ def response(resp):
for result in search_res.get('data', []):
if result['type'] == 'track':
title = result['title']
-url = result['link']
+url = result['link'] # pylint: disable=redefined-outer-name
if url.startswith('http://'):
url = 'https' + url[4:]

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Within this module we implement a *demo offline engine*. Do not look to
close to the implementation, its just a simple example. To get in use of this
*demo* engine add the following entry to your engines list in ``settings.yml``:

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Within this module we implement a *demo online engine*. Do not look to
close to the implementation, its just a simple example which queries `The Art
Institute of Chicago <https://www.artic.edu>`_

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""DeStatis
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Deviantart (Images)
"""

@ -26,7 +26,7 @@ results_xpath = './/table[@id="r"]/tr'
https_support = True
-def request(query, params):
+def request(query, params): # pylint: disable=unused-argument
params['url'] = url.format(from_lang=params['from_lang'][2], to_lang=params['to_lang'][2], query=params['query'])
return params
@ -40,7 +40,7 @@ def response(resp):
for k, result in enumerate(eval_xpath(dom, results_xpath)[1:]):
try:
from_result, to_results_raw = eval_xpath(result, './td')
-except:
+except: # pylint: disable=bare-except
continue
to_results = []

@ -37,9 +37,9 @@ def response(resp):
search_res = dom.xpath('.//td[@class="x-item"]')
if not search_res:
-return list()
+return []
-results = list()
+results = []
for result in search_res:
url = urljoin(URL, result.xpath('.//a[@title]/@href')[0])
title = extract_text(result.xpath('.//a[@title]'))

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Docker Hub (IT)
"""

@ -18,7 +18,7 @@ about = {
}
# engine dependent config
-categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
+categories = ['general'] # 'images', 'music', 'videos', 'files'
paging = False
number_of_results = 5
@ -31,8 +31,8 @@ search_url = (
'&{query}'
# fmt: on
)
# TODO '&startRecord={offset}'
# TODO '&maximumRecords={limit}'
# '&startRecord={offset}'
# '&maximumRecords={limit}'
# do search-request
@ -54,7 +54,7 @@ def response(resp):
for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'):
try:
res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
-except:
+except: # pylint: disable=bare-except
continue
if not res_url:
@ -76,7 +76,7 @@ def response(resp):
# append result
results.append({'title': title, 'content': content, 'url': base_url + res_url})
-except:
+except: # pylint: disable=bare-except
continue
if not res_url:

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""
DuckDuckGo Lite
~~~~~~~~~~~~~~~

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""
DuckDuckGo Instant Answer API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""
DuckDuckGo Weather
~~~~~~~~~~~~~~~~~~

@ -1,6 +1,7 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Dummy Offline
# pylint: disable=invalid-name
"""Dummy Offline
"""
@ -14,7 +15,7 @@ about = {
}
-def search(query, request_params):
+def search(query, request_params): # pylint: disable=unused-argument
return [
{
'result': 'this is what you get',

@ -1,6 +1,6 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Dummy
"""Dummy
"""
# about
@ -15,10 +15,10 @@ about = {
# do search-request
-def request(query, params):
+def request(query, params): # pylint: disable=unused-argument
return params
# get response from search-request
-def response(resp):
+def response(resp): # pylint: disable=unused-argument
return []

@ -3,9 +3,10 @@
Ebay (Videos, Music, Files)
"""
+from urllib.parse import quote
from lxml import html
from searx.engines.xpath import extract_text
-from urllib.parse import quote
# about
about = {

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Emojipedia
Emojipedia is an emoji reference website which documents the meaning and

@ -78,12 +78,10 @@ def response(resp):
else:
thumbnail_src = img_src
-url = build_flickr_url(photo['owner'], photo['id'])
-# append result
results.append(
{
-'url': url,
+'url': build_flickr_url(photo['owner'], photo['id']),
'title': photo['title'],
'img_src': img_src,
'thumbnail_src': thumbnail_src,

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Flickr (Images)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Fyyd (podcasts)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
# pylint: disable=invalid-name
"""Genius

@ -107,7 +107,7 @@ def request(query, params):
def response(resp):
# get the base URL for the language in which request was made
language = locale_to_lang_code(resp.search_params['language'])
-base_url = get_lang_urls(language)['base']
+url = get_lang_urls(language)['base']
results = []
@ -116,7 +116,7 @@ def response(resp):
# parse results
for result in dom.xpath(xpath_results):
link = result.xpath(xpath_link)[0]
-href = urljoin(base_url, link.attrib.get('href'))
+href = urljoin(url, link.attrib.get('href'))
title = extract_text(link)
content = extract_text(result.xpath(xpath_content))

@ -1,7 +1,6 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Github (IT)
"""
from urllib.parse import urlencode

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Goodreads (books)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""This is the implementation of the Google WEB engine. Some of this
implementations (manly the :py:obj:`get_google_info`) are shared by other
engines:

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""This is the implementation of the Google Images engine using the internal
Google API used by the Google Go Android app.

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""This is the implementation of the Google News engine.
Google News has a different region handling compared to Google WEB.

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Google Play Apps & Google Play Movies
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""This is the implementation of the Google Scholar engine.
Compared to other Google services the Scholar engine has a simple GET REST-API

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""This is the implementation of the Google Videos engine.
.. admonition:: Content-Security-Policy (CSP)

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Hackernews
"""

@ -1,6 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""IMDB - Internet Movie Database
Retrieves results from a basic search. Advanced search options are not

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Imgur (images)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Internet Archive scholar(science)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Invidious (Videos)
"""

@ -1,4 +1,15 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""The JSON engine is a *generic* engine with which it is possible to configure
engines in the settings.
.. todo::
- The JSON engine needs documentation!!
- The parameters of the JSON engine should be adapted to those of the XPath
engine.
"""
from collections.abc import Iterable
from json import loads
@ -32,32 +43,31 @@ first_page_num = 1
def iterate(iterable):
-if type(iterable) == dict:
-it = iterable.items()
+if isinstance(iterable, dict):
+items = iterable.items()
else:
-it = enumerate(iterable)
-for index, value in it:
+items = enumerate(iterable)
+for index, value in items:
yield str(index), value
def is_iterable(obj):
-if type(obj) == str:
+if isinstance(obj, str):
return False
return isinstance(obj, Iterable)
-def parse(query):
-q = []
+def parse(query): # pylint: disable=redefined-outer-name
+q = [] # pylint: disable=invalid-name
for part in query.split('/'):
if part == '':
continue
-else:
-q.append(part)
+q.append(part)
return q
-def do_query(data, q):
+def do_query(data, q): # pylint: disable=invalid-name
ret = []
if not q:
return ret
@ -87,10 +97,10 @@ def query(data, query_string):
return do_query(data, q)
-def request(query, params):
+def request(query, params): # pylint: disable=redefined-outer-name
query = urlencode({'q': query})[2:]
-fp = {'query': query}
+fp = {'query': query} # pylint: disable=invalid-name
if paging and search_url.find('{pageno}') >= 0:
fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num
@ -115,18 +125,18 @@ def response(resp):
content_filter = html_to_text if content_html_to_text else identity
if results_query:
-rs = query(json, results_query)
-if not len(rs):
+rs = query(json, results_query) # pylint: disable=invalid-name
+if not rs:
return results
for result in rs[0]:
try:
url = query(result, url_query)[0]
title = query(result, title_query)[0]
-except:
+except: # pylint: disable=bare-except
continue
try:
content = query(result, content_query)[0]
-except:
+except: # pylint: disable=bare-except
content = ""
results.append(
{

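The docstring added above calls the JSON engine *generic*: result fields are extracted from the JSON response via '/'-separated query paths handed to parse() and do_query(). A rough, self-contained sketch of that traversal (walk() is an invented stand-in for do_query, which additionally fans out over lists; the sample data is made up):

    def parse(path):
        # 'results/0/title' -> ['results', '0', 'title']
        return [part for part in path.split('/') if part]

    def walk(data, parts):
        # descend one dict key or list index per path component
        for part in parts:
            if isinstance(data, dict):
                data = data[part]
            else:
                data = data[int(part)]
        return data

    doc = {'results': [{'title': 'first hit', 'url': 'https://example.org/'}]}
    walk(doc, parse('results/0/title'))  # -> 'first hit'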
@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Kickass Torrent (Videos, Music, Files)"""
import random

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""This engine uses the Lemmy API (https://lemmy.ml/api/v3/search), which is
documented at `lemmy-js-client`_ / `Interface Search`_. Since Lemmy is
federated, results are from many different, independent lemmy instances, and not

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""lib.rs (packages)"""
from urllib.parse import quote_plus

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Lingva (alternative Google Translate frontend)"""
from json import loads

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""LiveSpace (Videos)
.. hint::

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Mastodon_ is an open source alternative to large social media platforms like
Twitter/X, Facebook, ...

@ -1,6 +1,6 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Material Icons (images)
"""
import re

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""MediathekViewWeb (API)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""The MediaWiki engine is a *generic* engine to **query** Wikimedia wikis by
the `MediaWiki Action API`_. For a `query action`_ all Wikimedia wikis have
endpoints that follow this pattern::

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
""".. sidebar:: info
- :origin:`meilisearch.py <searx/engines/meilisearch.py>`

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""metacpan
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Mixcloud (Music)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""MongoDB_ is a document based database program that handles JSON like data.
Before configuring the ``mongodb`` engine, you must install the dependency
pymongo_.

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Moviepilot is a German movie database, similar to IMDB or TMDB. It doesn't
have any official API, but it uses JSON requests internally to fetch search
results and suggestions, that's being used in this implementation.

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Mozhi (alternative frontend for popular translation engines)"""
import random

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Matrix Rooms Search - a fully-featured, standalone, matrix rooms search service.
Configuration

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""This is the implementation of the Mullvad-Leta meta-search engine.

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Mwmbl_ is a non-profit, ad-free, free-libre and free-lunch search engine with
a focus on useability and speed.

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""MySQL is said to be the most popular open source database. Before enabling
MySQL engine, you must install the package ``mysql-connector-python``.

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""npms.io
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Nyaa.si (Anime Bittorrent tracker)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Odysee_ is a decentralized video hosting platform.
.. _Odysee: https://github.com/OdyseeTeam/odysee-frontend

@ -1,11 +1,11 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Open Semantic Search
"""Open Semantic Search
"""
from dateutil import parser
from json import loads
from urllib.parse import quote
from dateutil import parser
# about
about = {

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""OpenStreetMap (Map)
"""

@ -96,7 +96,7 @@ def response(resp):
title = gettext('{title} (OBSOLETE)').format(title=result['title'])
try:
superseded_url = pdbe_entry_url.format(pdb_id=result['superseded_by'])
-except:
+except: # pylint: disable=bare-except
continue
# since we can't construct a proper body from the response, we'll make up our own

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Peertube and :py:obj:`SepiaSearch <searx.engines.sepiasearch>` do share
(more or less) the same REST API and the schema of the JSON result is identical.

@ -87,7 +87,7 @@ def response(resp):
properties.get('extent')[2],
]
else:
-# TODO: better boundingbox calculation
+# better boundingbox calculation?
boundingbox = [
geojson['coordinates'][1],
geojson['coordinates'][1],

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Pinterest (images)
"""

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""An alternative privacy-friendly YouTube frontend which is efficient by
design. `Pipeds architecture`_ consists of 3 components:

@ -82,14 +82,14 @@ def response(resp):
try:
date = datetime.fromtimestamp(float(result["added"]))
params['publishedDate'] = date
-except:
+except: # pylint: disable=bare-except
pass
# let's try to calculate the torrent size
try:
filesize = get_torrent_size(result["size"], "B")
params['filesize'] = filesize
-except:
+except: # pylint: disable=bare-except
pass
# append result

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Pixiv (images)"""
from urllib.parse import urlencode

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""pkg.go.dev (packages)"""
import re

@ -1,5 +1,4 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-# lint: pylint
"""Podcast Index
"""

Some files were not shown because too many files have changed in this diff.
