"""
 BTDigg (Videos, Music, Files)

 @website      https://btdig.com
 @provide-api  yes (on demand)

 @using-api    no
 @results      HTML (using search portal)
 @stable       no (HTML can change)
 @parse        url, title, content, filesize, files, magnetlink
"""

from lxml import html
from urllib.parse import quote, urljoin
from searx.engines.xpath import extract_text
from searx.utils import get_torrent_size

# engine dependent config
categories = ['videos', 'music', 'files']
paging = True

# search-url
url = 'https://btdig.com'
search_url = url + '/search?q={search_term}&p={pageno}'
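# example of a formatted request URL (BTDigg page numbers are zero-based,
# hence the `pageno - 1` in request() below): https://btdig.com/search?q=ubuntu&p=0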


# do search-request
def request(query, params):
    params['url'] = search_url.format(search_term=quote(query),
                                      pageno=params['pageno'] - 1)

    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    search_res = dom.xpath('//div[@class="one_result"]')

    # return empty list if nothing is found
    if not search_res:
        return []

    # parse results
    for result in search_res:
        link = result.xpath('.//div[@class="torrent_name"]//a')[0]
        href = urljoin(url, link.attrib.get('href'))
        title = extract_text(link)

        excerpt = result.xpath('.//div[@class="torrent_excerpt"]')[0]
        content = html.tostring(excerpt, encoding='unicode', method='text', with_tail=False)
        # it is better to emit <br/> instead of |, but html tags are verboten
        content = content.strip().replace('\n', ' | ')
        content = ' '.join(content.split())

        filesize = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[0]
        filesize_multiplier = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[1]
        files = (result.xpath('.//span[@class="torrent_files"]/text()') or ['1'])[0]
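        # the torrent_size span reads "<value> <unit>" (e.g. "670 MB"); the
        # torrent_files span, when present, holds a plain file count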

        # convert filesize to bytes if possible
        filesize = get_torrent_size(filesize, filesize_multiplier)

        # convert files to int if possible
        try:
            files = int(files)
        except ValueError:
            files = None

        magnetlink = result.xpath('.//div[@class="torrent_magnet"]//a')[0].attrib['href']

        # append result
        results.append({'url': href,
                        'title': title,
                        'content': content,
                        'filesize': filesize,
                        'files': files,
                        'magnetlink': magnetlink,
                        'template': 'torrent.html'})

    return results
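

if __name__ == '__main__':
    # Ad-hoc sketch for manual testing only; it is not part of the searx engine
    # interface. It assumes the `requests` package is available and that a
    # params dict containing a 1-based 'pageno' is enough for request(), which
    # matches how the two functions above use their arguments.
    import requests

    test_params = request('ubuntu', {'pageno': 1})
    test_resp = requests.get(test_params['url'])
    for r in response(test_resp):
        print(r['title'], '-', r['magnetlink'])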