forked from Archives/searxng
Merge branch 'master' into master
commit
eb0abb0825
@ -0,0 +1,44 @@
|
||||
"""
|
||||
Asksteem (general)
|
||||
|
||||
@website https://asksteem.com/
|
||||
@provide-api yes
|
||||
|
||||
@using-api yes
|
||||
@results JSON (https://github.com/Hoxly/asksteem-docs/wiki)
|
||||
@stable yes
|
||||
@parse url, title, content
|
||||
"""
|
||||
|
||||
from json import loads
|
||||
from searx.url_utils import urlencode
|
||||
|
||||
# engine dependent config
categories = ['general']
paging = True
language_support = False
disabled = True

# search-url
search_url = 'https://api.asksteem.com/search?{params}'
result_url = 'https://steemit.com/@{author}/{title}'


# do search-request
def request(query, params):
    """Attach the Asksteem search URL (query text + page number) to *params*."""
    query_string = urlencode({'q': query, 'pg': params['pageno']})
    params['url'] = search_url.format(params=query_string)
    return params
|
||||
|
||||
|
||||
# get response from search-request
def response(resp):
    """Turn the Asksteem JSON payload into a list of searx result dicts."""
    json = loads(resp.text)

    return [
        {
            # Steemit post URL is rebuilt from author + permlink.
            'url': result_url.format(author=hit['author'], title=hit['permlink']),
            'title': hit['title'],
            'content': hit['summary'],
        }
        for hit in json.get('results', [])
    ]
|
@ -0,0 +1,75 @@
|
||||
"""
|
||||
Microsoft Academic (Science)
|
||||
|
||||
@website https://academic.microsoft.com
|
||||
@provide-api yes
|
||||
@using-api no
|
||||
@results JSON
|
||||
@stable no
|
||||
@parse url, title, content
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from json import loads
|
||||
from uuid import uuid4
|
||||
|
||||
from searx.url_utils import urlencode
|
||||
from searx.utils import html_to_text
|
||||
|
||||
# Engine dependent config.  Microsoft Academic is a scholarly-publication
# search engine — the module docstring labels it "(Science)" — so it belongs
# in the 'science' category; 'images' was a copy/paste slip.
categories = ['science']
paging = True
result_url = 'https://academic.microsoft.com/api/search/GetEntityResults?{query}'


def request(query, params):
    """Build the POST request against the (unofficial) Microsoft Academic API.

    Sets url, method, cookies and form data on *params* and returns it.
    ``params`` must already carry ``cookies`` (dict) and ``pageno`` (int >= 1),
    as supplied by the searx engine framework.
    """
    # Fresh identifiers per request; the site issues these client-side, so
    # random UUIDs are accepted — TODO confirm against current endpoint.
    correlation_id = uuid4()
    msacademic = uuid4()
    time_now = datetime.now()

    params['url'] = result_url.format(query=urlencode({'correlationId': correlation_id}))
    params['cookies']['msacademic'] = str(msacademic)
    # 'vhd0H' mimics the Application-Insights user-cookie prefix.
    params['cookies']['ai_user'] = 'vhd0H|{now}'.format(now=str(time_now))
    params['method'] = 'POST'
    params['data'] = {
        # The API expects the query wrapped in '@' markers.
        'Query': '@{query}@'.format(query=query),
        'Limit': 10,
        # Zero-based page offset; searx page numbers start at 1.
        'Offset': params['pageno'] - 1,
        'Filters': '',
        'OrderBy': '',
        'SortAscending': False,
    }

    return params
|
||||
|
||||
|
||||
def response(resp):
    """Map the raw Microsoft Academic JSON into searx result entries."""
    data = loads(resp.text)

    entries = []
    for item in data['results']:
        entries.append({
            'url': _get_url(item),
            # 'dn' is the display name; strip any embedded HTML markup.
            'title': html_to_text(item['e']['dn']),
            'content': html_to_text(_get_content(item)),
        })

    return entries
|
||||
|
||||
|
||||
def _get_url(result):
|
||||
if 's' in result['e']:
|
||||
return result['e']['s'][0]['u']
|
||||
return 'https://academic.microsoft.com/#/detail/{pid}'.format(pid=result['id'])
|
||||
|
||||
|
||||
def _get_content(result):
|
||||
if 'd' in result['e']:
|
||||
content = result['e']['d']
|
||||
if len(content) > 300:
|
||||
return content[:300] + '...'
|
||||
return content
|
||||
|
||||
return ''
|
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
Load Diff
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue