From 3e3672e0790266fc7f2482fdd854d7789a915d4d Mon Sep 17 00:00:00 2001 From: jibe-b Date: Sat, 23 Sep 2017 14:16:06 +0200 Subject: [PATCH 01/10] [add] arxiv engine --- searx/engines/arxiv.py | 73 ++++++++++++++++++++++++++++++++ searx/settings.yml | 6 +++ tests/unit/engines/test_arxiv.py | 58 +++++++++++++++++++++++++ 3 files changed, 137 insertions(+) create mode 100644 searx/engines/arxiv.py create mode 100644 tests/unit/engines/test_arxiv.py diff --git a/searx/engines/arxiv.py b/searx/engines/arxiv.py new file mode 100644 index 000000000..cbeac0089 --- /dev/null +++ b/searx/engines/arxiv.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python + +""" + ArXiV (Scientific preprints) + @website https://axiv.org + @provide-api yes (export.arxiv.org/api/query) + @using-api yes + @results XML-RSS + @stable yes + @parse url, title, publishedDate, content + More info on api: https://arxiv.org/help/api/user-manual +""" + +from lxml import html +from datetime import datetime +from searx.url_utils import urlencode + + +categories = ['science'] + +base_url = 'http://export.arxiv.org/api/query?search_query=all:'\ + + '{query}&start={offset}&max_results={number_of_results}' + +# engine dependent config +number_of_results = 10 + + +def request(query, params): + # basic search + offset = (params['pageno'] - 1) * number_of_results + + string_args = dict(query=query, + offset=offset, + number_of_results=number_of_results) + + params['url'] = base_url.format(**string_args) + + return params + + +def response(resp): + results = [] + + search_results = html.fromstring(resp.text.encode('utf-8')).xpath('//entry') + + for entry in search_results: + title = entry.xpath('.//title')[0].text + + url = entry.xpath('.//id')[0].text + + content = entry.xpath('.//summary')[0].text + + # If a doi is available, add it to the snipppet + try: + doi = entry.xpath('.//link[@title="doi"]')[0].text + content = 'DOI: ' + doi + ' Abstract: ' + content + except: + pass + + if len(content) > 300: + content = content[0:300] + "..." 
+ # TODO: center snippet on query term + + publishedDate = datetime.strptime(entry.xpath('.//published')[0].text, '%Y-%m-%dT%H:%M:%SZ') + + res_dict = {'url': url, + 'title': title, + 'publishedDate': publishedDate, + 'content': content} + + results.append(res_dict) + + return results diff --git a/searx/settings.yml b/searx/settings.yml index 6f44a3b8b..54b2b2e64 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -60,6 +60,12 @@ engines: disabled : True shortcut : ai + - name : arxiv + engine : arxiv + shortcut : arx + categories : science + timeout : 4.0 + - name : base engine : base shortcut : bs diff --git a/tests/unit/engines/test_arxiv.py b/tests/unit/engines/test_arxiv.py new file mode 100644 index 000000000..e51d0f483 --- /dev/null +++ b/tests/unit/engines/test_arxiv.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +from collections import defaultdict +import mock +from searx.engines import arxiv +from searx.testing import SearxTestCase + + +class TestBaseEngine(SearxTestCase): + + def test_request(self): + query = 'test_query' + dicto = defaultdict(dict) + dicto['pageno'] = 1 + params = arxiv.request(query, dicto) + self.assertIn('url', params) + self.assertIn('export.arxiv.org/api/', params['url']) + + def test_response(self): + self.assertRaises(AttributeError, arxiv.response, None) + self.assertRaises(AttributeError, arxiv.response, []) + self.assertRaises(AttributeError, arxiv.response, '') + self.assertRaises(AttributeError, arxiv.response, '[]') + + response = mock.Mock(text=''' +''') + self.assertEqual(arxiv.response(response), []) + + xml_mock = ''' + + ArXiv Query: search_query=all:test_query&id_list=&start=0&max_results=1 + http://arxiv.org/api/1 + 2000-01-21T00:00:00-01:00 + 1 + 0 + 1 + + http://arxiv.org/1 + 2000-01-01T00:00:01Z + 2000-01-01T00:00:01Z + Mathematical proof. + Mathematical formula. + + A. B. + + + + + + + +''' + + response = mock.Mock(text=xml_mock.encode('utf-8')) + results = arxiv.response(response) + self.assertEqual(type(results), list) + self.assertEqual(len(results), 1) + self.assertEqual(results[0]['title'], 'Mathematical proof.') + self.assertEqual(results[0]['content'], 'Mathematical formula.') From 5278fa666c193e5ccb30e7b5f8dddf1b053f97ca Mon Sep 17 00:00:00 2001 From: jibe-b Date: Wed, 27 Sep 2017 16:01:31 +0200 Subject: [PATCH 02/10] [enh] use format to concatenate strings --- searx/engines/arxiv.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/searx/engines/arxiv.py b/searx/engines/arxiv.py index cbeac0089..4b6440cbc 100644 --- a/searx/engines/arxiv.py +++ b/searx/engines/arxiv.py @@ -48,14 +48,16 @@ def response(resp): url = entry.xpath('.//id')[0].text - content = entry.xpath('.//summary')[0].text + content_string = '{doi_content}{abstract_content}' + + abstract = entry.xpath('.//summary')[0].text # If a doi is available, add it to the snipppet try: - doi = entry.xpath('.//link[@title="doi"]')[0].text - content = 'DOI: ' + doi + ' Abstract: ' + content + doi_content = entry.xpath('.//link[@title="doi"]')[0].text + content = content_string.format(doi_content=doi_content, abstract_content=abstract_content) except: - pass + content = content_string.format(abstract_content=abstract_content) if len(content) > 300: content = content[0:300] + "..." 
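Patches 01 and 02 above amount to a thin wrapper around arXiv's Atom export API. For orientation, here is a minimal standalone sketch of the same query flow outside searx (Python 3 with lxml; the URL template mirrors the engine's, but the explicit Atom namespace below replaces the namespace-blind `lxml.html` parsing the patch uses, and error handling is omitted):

```python
# Minimal sketch of the arXiv export API flow used by the engine above.
# Assumes Python 3 and lxml; network errors are not handled.
from urllib.request import urlopen
from lxml import etree

BASE_URL = ('http://export.arxiv.org/api/query?'
            'search_query=all:{query}&start={offset}&max_results={n}')

def search_arxiv(query, pageno=1, n=10):
    url = BASE_URL.format(query=query, offset=(pageno - 1) * n, n=n)
    dom = etree.fromstring(urlopen(url).read())
    # The response is namespaced Atom, so bind the namespace explicitly.
    ns = {'atom': 'http://www.w3.org/2005/Atom'}
    return [{
        'url': e.findtext('atom:id', namespaces=ns),
        'title': e.findtext('atom:title', namespaces=ns),
        'content': (e.findtext('atom:summary', namespaces=ns) or '').strip(),
    } for e in dom.findall('atom:entry', ns)]
```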
From e391b2d970a19cdc39dd550929e91ace4aee8832 Mon Sep 17 00:00:00 2001 From: jibe-b Date: Wed, 27 Sep 2017 16:05:28 +0200 Subject: [PATCH 03/10] [fix] remove .encode for python3 compatibility --- searx/engines/arxiv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/searx/engines/arxiv.py b/searx/engines/arxiv.py index 4b6440cbc..826b77690 100644 --- a/searx/engines/arxiv.py +++ b/searx/engines/arxiv.py @@ -41,7 +41,7 @@ def request(query, params): def response(resp): results = [] - search_results = html.fromstring(resp.text.encode('utf-8')).xpath('//entry') + search_results = html.fromstring(resp.text).xpath('//entry') for entry in search_results: title = entry.xpath('.//title')[0].text From 9c2b7a82f0c515fd1df88ed80349eda7f49e0825 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Wed, 1 Nov 2017 12:28:18 +0100 Subject: [PATCH 04/10] minor fixes of arxiv Closes #1050 --- searx/engines/arxiv.py | 11 ++++++----- tests/unit/engines/test_arxiv.py | 6 +++--- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/searx/engines/arxiv.py b/searx/engines/arxiv.py index 826b77690..5ef84f0c1 100644 --- a/searx/engines/arxiv.py +++ b/searx/engines/arxiv.py @@ -2,7 +2,7 @@ """ ArXiV (Scientific preprints) - @website https://axiv.org + @website https://arxiv.org @provide-api yes (export.arxiv.org/api/query) @using-api yes @results XML-RSS @@ -41,7 +41,8 @@ def request(query, params): def response(resp): results = [] - search_results = html.fromstring(resp.text).xpath('//entry') + dom = html.fromstring(resp.content) + search_results = dom.xpath('//entry') for entry in search_results: title = entry.xpath('.//title')[0].text @@ -49,15 +50,15 @@ def response(resp): url = entry.xpath('.//id')[0].text content_string = '{doi_content}{abstract_content}' - + abstract = entry.xpath('.//summary')[0].text # If a doi is available, add it to the snipppet try: doi_content = entry.xpath('.//link[@title="doi"]')[0].text - content = content_string.format(doi_content=doi_content, abstract_content=abstract_content) + content = content_string.format(doi_content=doi_content, abstract_content=abstract) except: - content = content_string.format(abstract_content=abstract_content) + content = content_string.format(doi_content="", abstract_content=abstract) if len(content) > 300: content = content[0:300] + "..." diff --git a/tests/unit/engines/test_arxiv.py b/tests/unit/engines/test_arxiv.py index e51d0f483..b32c0e605 100644 --- a/tests/unit/engines/test_arxiv.py +++ b/tests/unit/engines/test_arxiv.py @@ -21,11 +21,11 @@ class TestBaseEngine(SearxTestCase): self.assertRaises(AttributeError, arxiv.response, '') self.assertRaises(AttributeError, arxiv.response, '[]') - response = mock.Mock(text=''' + response = mock.Mock(content=b''' ''') self.assertEqual(arxiv.response(response), []) - xml_mock = ''' + xml_mock = b''' ArXiv Query: search_query=all:test_query&id_list=&start=0&max_results=1 http://arxiv.org/api/1 @@ -50,7 +50,7 @@ class TestBaseEngine(SearxTestCase): ''' - response = mock.Mock(text=xml_mock.encode('utf-8')) + response = mock.Mock(content=xml_mock) results = arxiv.response(response) self.assertEqual(type(results), list) self.assertEqual(len(results), 1) From 7de8b43eb2081853ae15b2a52cc0cae43647320b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Wed, 1 Nov 2017 13:28:04 +0100 Subject: [PATCH 05/10] add OpenAIRE engine as a json engine This is the continuation of #1048 created by @jibe-b. 
Closes #1048 --- searx/settings.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/searx/settings.yml b/searx/settings.yml index 54b2b2e64..18b6a5c59 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -415,6 +415,18 @@ engines: shortcut : nt disabled : True + - name : openaire + engine : json_engine + paging : True + search_url : http://api.openaire.eu/search/datasets?format=json&page={pageno}&size=10&title={query} + results_query : response/results/result + url_query : metadata/oaf:entity/oaf:result/children/instance/webresource/url/$ + title_query : metadata/oaf:entity/oaf:result/title/$ + content_query : metadata/oaf:entity/oaf:result/description/$ + categories : science + shortcut : oa + timeout: 5.0 + - name : openstreetmap engine : openstreetmap shortcut : osm From 575159b194440052d7b48aa073d7e03c80799c90 Mon Sep 17 00:00:00 2001 From: jibe-b Date: Fri, 22 Sep 2017 23:43:05 +0200 Subject: [PATCH 06/10] [enh] oa_doi_rewrite plugin broadens doai_rewrite --- searx/plugins/__init__.py | 4 ++-- .../{doai_rewrite.py => oa_doi_rewrite.py} | 17 +++++++++++++++-- searx/preferences.py | 5 ++++- searx/settings.yml | 7 +++++++ searx/templates/oscar/preferences.html | 17 +++++++++++++++++ searx/webapp.py | 15 ++++++++++++++- 6 files changed, 59 insertions(+), 6 deletions(-) rename searx/plugins/{doai_rewrite.py => oa_doi_rewrite.py} (63%) diff --git a/searx/plugins/__init__.py b/searx/plugins/__init__.py index 46c1f8918..4dbcbbd28 100644 --- a/searx/plugins/__init__.py +++ b/searx/plugins/__init__.py @@ -22,7 +22,7 @@ if version_info[0] == 3: logger = logger.getChild('plugins') -from searx.plugins import (doai_rewrite, +from searx.plugins import (oa_doi_rewrite, https_rewrite, infinite_scroll, open_results_on_new_tab, @@ -78,7 +78,7 @@ class PluginStore(): plugins = PluginStore() -plugins.register(doai_rewrite) +plugins.register(oa_doi_rewrite) plugins.register(https_rewrite) plugins.register(infinite_scroll) plugins.register(open_results_on_new_tab) diff --git a/searx/plugins/doai_rewrite.py b/searx/plugins/oa_doi_rewrite.py similarity index 63% rename from searx/plugins/doai_rewrite.py rename to searx/plugins/oa_doi_rewrite.py index b7b814fac..e952c86f5 100644 --- a/searx/plugins/doai_rewrite.py +++ b/searx/plugins/oa_doi_rewrite.py @@ -1,14 +1,19 @@ from flask_babel import gettext import re from searx.url_utils import urlparse, parse_qsl +from flask import request +from searx import settings + regex = re.compile(r'10\.\d{4,9}/[^\s]+') -name = gettext('DOAI rewrite') +name = gettext('Open Access DOI rewrite') description = gettext('Avoid paywalls by redirecting to open-access versions of publications when available') default_on = False preference_section = 'privacy' +doi_resolvers = settings['doi_resolvers'] + def extract_doi(url): match = regex.search(url.path) @@ -21,12 +26,20 @@ def extract_doi(url): return None +def get_doi_resolver(): + doi_resolvers = settings['doi_resolvers'] + doi_resolver = request.args.get('doi_resolver', request.preferences.get_value('doi_resolver'))[0] + if doi_resolver not in doi_resolvers: + doi_resolvers = settings['default_doi_resolver'] + return doi_resolvers[doi_resolver] + + def on_result(request, search, result): doi = extract_doi(result['parsed_url']) if doi and len(doi) < 50: for suffix in ('/', '.pdf', '/full', '/meta', '/abstract'): if doi.endswith(suffix): doi = doi[:-len(suffix)] - result['url'] = 'http://doai.io/' + doi + result['url'] = get_doi_resolver() + doi result['parsed_url'] = urlparse(result['url']) return True diff 
--git a/searx/preferences.py b/searx/preferences.py index c2c649eea..00a3fd683 100644 --- a/searx/preferences.py +++ b/searx/preferences.py @@ -15,6 +15,7 @@ LANGUAGE_CODES = [l[0] for l in languages] LANGUAGE_CODES.append('all') DISABLED = 0 ENABLED = 1 +DOI_RESOLVERS = [r for r in settings['doi_resolvers'].keys()] class MissingArgumentException(Exception): @@ -266,7 +267,9 @@ class Preferences(object): 'results_on_new_tab': MapSetting(False, map={'0': False, '1': True, 'False': False, - 'True': True})} + 'True': True}), + 'doi_resolver': MultipleChoiceSetting(['oadoi.org'], choices=DOI_RESOLVERS), + } self.engines = EnginesSetting('engines', choices=engines) self.plugins = PluginsSetting('plugins', choices=plugins) diff --git a/searx/settings.yml b/searx/settings.yml index 18b6a5c59..ec3e6b469 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -712,3 +712,10 @@ locales: tr : Türkçe (Turkish) uk : українська мова (Ukrainian) zh : 中文 (Chinese) + +doi_resolvers : + oadoi.org : 'https://oadoi.org/' + doi.org : 'https://doi.org/' + doai.io : 'http://doai.io/' + +default_doi_resolver : 'oadoi.org' diff --git a/searx/templates/oscar/preferences.html b/searx/templates/oscar/preferences.html index f4b2c63ea..dcca3cba7 100644 --- a/searx/templates/oscar/preferences.html +++ b/searx/templates/oscar/preferences.html @@ -223,6 +223,23 @@ {% endfor %} +
+                        <div class="panel panel-default">
+                            <div class="panel-heading">
+                                <h3 class="panel-title">{{ _('OA DOI rewrite') }}</h3>
+                            </div>
+                            <div class="panel-body">
+                                <div class="col-xs-6 col-sm-4 col-md-6">
+                                    {{ _('Avoid paywalls by redirecting to open-access versions of publications when available') }}
+                                </div>
+                                <div class="col-xs-6 col-sm-4 col-md-6">
+                                    <select class="form-control" id="doi_resolver" name="doi_resolver">
+                                        {% for doi_resolver_name, doi_resolver_url in doi_resolvers.items() %}
+                                        <option value="{{ doi_resolver_name }}" {% if doi_resolver_name == current_doi_resolver %}selected="selected"{% endif %}>{{ doi_resolver_name }} : {{ doi_resolver_url }}</option>
+                                        {% endfor %}
+                                    </select>
+                                </div>
+                            </div>
+                        </div>
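The heart of the plugin this patch introduces is the DOI detection in oa_doi_rewrite.py. It can be exercised in isolation with a sketch that mirrors `extract_doi` above, substituting the stdlib `urllib.parse` for `searx.url_utils` (the example URLs use the DOI handbook's sample DOI):

```python
# Standalone check of the plugin's DOI detection (mirrors extract_doi above;
# only the stdlib urllib.parse import differs from the searx code).
import re
from urllib.parse import urlparse, parse_qsl

regex = re.compile(r'10\.\d{4,9}/[^\s]+')

def extract_doi(url):
    # DOIs usually sit in the URL path ...
    match = regex.search(url.path)
    if match:
        return match.group(0)
    # ... but may also hide in a query parameter.
    for _, value in parse_qsl(url.query):
        match = regex.search(value)
        if match:
            return match.group(0)
    return None

print(extract_doi(urlparse('https://doi.org/10.1000/182')))                 # 10.1000/182
print(extract_doi(urlparse('https://example.org/lookup?doi=10.1000/182')))  # 10.1000/182
print(extract_doi(urlparse('https://example.org/no-doi-here')))             # None
```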
diff --git a/searx/webapp.py b/searx/webapp.py index dd93395ee..9542b7814 100644 --- a/searx/webapp.py +++ b/searx/webapp.py @@ -164,6 +164,14 @@ def get_locale(): return locale +def get_doi_resolver(): + doi_resolvers = settings['doi_resolvers'] + doi_resolver = request.args.get('doi_resolver', request.preferences.get_value('doi_resolver'))[0] + if doi_resolver not in doi_resolvers: + doi_resolvers = settings['default_doi_resolver'] + return doi_resolver + + # code-highlighter @app.template_filter('code_highlighter') def code_highlighter(codelines, language=None): @@ -695,6 +703,8 @@ def preferences(): shortcuts={y: x for x, y in engine_shortcuts.items()}, themes=themes, plugins=plugins, + doi_resolvers=settings['doi_resolvers'], + current_doi_resolver=get_doi_resolver(), allowed_plugins=allowed_plugins, theme=get_current_theme_name(), preferences_url_params=request.preferences.get_as_url_params(), @@ -839,7 +849,10 @@ def config(): 'autocomplete': settings['search']['autocomplete'], 'safe_search': settings['search']['safe_search'], 'default_theme': settings['ui']['default_theme'], - 'version': VERSION_STRING}) + 'version': VERSION_STRING, + 'doi_resolvers': [r for r in search['doi_resolvers']], + 'default_doi_resolver': settings['default_doi_resolver'], + }) @app.errorhandler(404) From 1b10abfc92afed36627c7052b89d2de5e7f537ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Wed, 1 Nov 2017 13:58:48 +0100 Subject: [PATCH 07/10] minor fixes of doi resolver Closes #1047 --- searx/plugins/oa_doi_rewrite.py | 9 ++++---- searx/preferences.py | 2 +- searx/templates/oscar/preferences.html | 29 +++++++++++--------------- searx/webapp.py | 11 ++-------- 4 files changed, 19 insertions(+), 32 deletions(-) diff --git a/searx/plugins/oa_doi_rewrite.py b/searx/plugins/oa_doi_rewrite.py index e952c86f5..b62ef0b1e 100644 --- a/searx/plugins/oa_doi_rewrite.py +++ b/searx/plugins/oa_doi_rewrite.py @@ -1,7 +1,6 @@ from flask_babel import gettext import re from searx.url_utils import urlparse, parse_qsl -from flask import request from searx import settings @@ -26,12 +25,12 @@ def extract_doi(url): return None -def get_doi_resolver(): +def get_doi_resolver(args, preference_doi_resolver): doi_resolvers = settings['doi_resolvers'] - doi_resolver = request.args.get('doi_resolver', request.preferences.get_value('doi_resolver'))[0] + doi_resolver = args.get('doi_resolver', preference_doi_resolver)[0] if doi_resolver not in doi_resolvers: doi_resolvers = settings['default_doi_resolver'] - return doi_resolvers[doi_resolver] + return doi_resolver def on_result(request, search, result): @@ -40,6 +39,6 @@ def on_result(request, search, result): for suffix in ('/', '.pdf', '/full', '/meta', '/abstract'): if doi.endswith(suffix): doi = doi[:-len(suffix)] - result['url'] = get_doi_resolver() + doi + result['url'] = get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')) + doi result['parsed_url'] = urlparse(result['url']) return True diff --git a/searx/preferences.py b/searx/preferences.py index 00a3fd683..1a143db6b 100644 --- a/searx/preferences.py +++ b/searx/preferences.py @@ -15,7 +15,7 @@ LANGUAGE_CODES = [l[0] for l in languages] LANGUAGE_CODES.append('all') DISABLED = 0 ENABLED = 1 -DOI_RESOLVERS = [r for r in settings['doi_resolvers'].keys()] +DOI_RESOLVERS = list(settings['doi_resolvers']) class MissingArgumentException(Exception): diff --git a/searx/templates/oscar/preferences.html b/searx/templates/oscar/preferences.html index dcca3cba7..5f85a9af6 100644 --- 
a/searx/templates/oscar/preferences.html
+++ b/searx/templates/oscar/preferences.html
@@ -118,6 +118,18 @@
             {{ preferences_item_footer(info, label, rtl) }}
+
+            {% set label = _('Open Access DOI resolver') %}
+            {% set info = _('Redirect to open-access versions of publications when available (plugin required)') %}
+            {{ preferences_item_header(info, label, rtl) }}
+                <select class="form-control" id="doi_resolver" name="doi_resolver">
+                    {% for doi_resolver_name, doi_resolver_url in doi_resolvers.items() %}
+                    <option value="{{ doi_resolver_name }}"
+                            {% if doi_resolver_name == current_doi_resolver %}selected="selected"{% endif %}>
+                        {{ doi_resolver_name }} : {{ doi_resolver_url }}</option>
+                    {% endfor %}
+                </select>
+            {{ preferences_item_footer(info, label, rtl) }}
@@ -223,23 +235,6 @@
                 {% endfor %}
-                        <div class="panel panel-default">
-                            <div class="panel-heading">
-                                <h3 class="panel-title">{{ _('OA DOI rewrite') }}</h3>
-                            </div>
-                            <div class="panel-body">
-                                <div class="col-xs-6 col-sm-4 col-md-6">
-                                    {{ _('Avoid paywalls by redirecting to open-access versions of publications when available') }}
-                                </div>
-                                <div class="col-xs-6 col-sm-4 col-md-6">
-                                    <select class="form-control" id="doi_resolver" name="doi_resolver">
-                                        {% for doi_resolver_name, doi_resolver_url in doi_resolvers.items() %}
-                                        <option value="{{ doi_resolver_name }}" {% if doi_resolver_name == current_doi_resolver %}selected="selected"{% endif %}>{{ doi_resolver_name }} : {{ doi_resolver_url }}</option>
-                                        {% endfor %}
-                                    </select>
-                                </div>
-                            </div>
-                        </div>
diff --git a/searx/webapp.py b/searx/webapp.py index 9542b7814..f81747325 100644 --- a/searx/webapp.py +++ b/searx/webapp.py @@ -66,6 +66,7 @@ from searx.search import SearchWithPlugins, get_search_query_from_webapp from searx.query import RawTextQuery from searx.autocomplete import searx_bang, backends as autocomplete_backends from searx.plugins import plugins +from searx.plugins.oa_doi_rewrite import get_doi_resolver from searx.preferences import Preferences, ValidationException from searx.answerers import answerers from searx.url_utils import urlencode, urlparse, urljoin @@ -164,14 +165,6 @@ def get_locale(): return locale -def get_doi_resolver(): - doi_resolvers = settings['doi_resolvers'] - doi_resolver = request.args.get('doi_resolver', request.preferences.get_value('doi_resolver'))[0] - if doi_resolver not in doi_resolvers: - doi_resolvers = settings['default_doi_resolver'] - return doi_resolver - - # code-highlighter @app.template_filter('code_highlighter') def code_highlighter(codelines, language=None): @@ -704,7 +697,7 @@ def preferences(): themes=themes, plugins=plugins, doi_resolvers=settings['doi_resolvers'], - current_doi_resolver=get_doi_resolver(), + current_doi_resolver=get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')), allowed_plugins=allowed_plugins, theme=get_current_theme_name(), preferences_url_params=request.preferences.get_as_url_params(), From df0d915806b6e4488099130cd1d7fb1775fe475c Mon Sep 17 00:00:00 2001 From: jibe-b Date: Fri, 22 Sep 2017 22:09:33 +0200 Subject: [PATCH 08/10] [add] pubmed engine --- searx/engines/pubmed.py | 101 +++++++++++++++++++++++++++++++++++ searx/settings.yml | 6 +++ searx/url_utils.py | 2 + tests/unit/engines/pubmed.py | 37 +++++++++++++ 4 files changed, 146 insertions(+) create mode 100644 searx/engines/pubmed.py create mode 100644 tests/unit/engines/pubmed.py diff --git a/searx/engines/pubmed.py b/searx/engines/pubmed.py new file mode 100644 index 000000000..abb59d2ed --- /dev/null +++ b/searx/engines/pubmed.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python + +""" + PubMed (Scholar publications) + @website https://www.ncbi.nlm.nih.gov/pubmed/ + @provide-api yes (https://www.ncbi.nlm.nih.gov/home/develop/api/) + @using-api yes + @results XML + @stable yes + @parse url, title, publishedDate, content + More info on api: https://www.ncbi.nlm.nih.gov/books/NBK25501/ +""" + +from lxml import etree +from datetime import datetime +from searx.url_utils import urlencode, urlopen + + +categories = ['science'] + +base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'\ + + '?db=pubmed&{query}&retstart={offset}&retmax={hits}' + +# engine dependent config +number_of_results = 10 +pubmed_url = 'https://www.ncbi.nlm.nih.gov/pubmed/' + + +def request(query, params): + # basic search + offset = (params['pageno'] - 1) * number_of_results + + string_args = dict(query=urlencode({'term': query}), + offset=offset, + hits=number_of_results) + + params['url'] = base_url.format(**string_args) + + return params + + +def response(resp): + results = [] + + # First retrieve notice of each result + pubmed_retrieve_api_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?'\ + + 'db=pubmed&retmode=xml&id={pmids_string}' + + # handle Python2 vs Python3 management of bytes and strings + try: + pmids_results = etree.XML(resp.text.encode('utf-8')) + except AttributeError: + pmids_results = etree.XML(resp.text) + + pmids = pmids_results.xpath('//eSearchResult/IdList/Id') + pmids_string = '' + + for item in pmids: + 
pmids_string += item.text + ',' + + retrieve_notice_args = dict(pmids_string=pmids_string) + + retrieve_url_encoded = pubmed_retrieve_api_url.format(**retrieve_notice_args) + + search_results_xml = urlopen(retrieve_url_encoded).read() + search_results = etree.XML(search_results_xml).xpath('//PubmedArticleSet/PubmedArticle/MedlineCitation') + + for entry in search_results: + title = entry.xpath('.//Article/ArticleTitle')[0].text + + pmid = entry.xpath('.//PMID')[0].text + url = pubmed_url + pmid + + try: + content = entry.xpath('.//Abstract/AbstractText')[0].text + except: + content = 'No abstract is available for this publication.' + + # If a doi is available, add it to the snipppet + try: + doi = entry.xpath('.//ELocationID[@EIdType="doi"]')[0].text + content = 'DOI: ' + doi + ' Abstract: ' + content + except: + pass + + if len(content) > 300: + content = content[0:300] + "..." + # TODO: center snippet on query term + + publishedDate = datetime.strptime(entry.xpath('.//DateCreated/Year')[0].text + + '-' + entry.xpath('.//DateCreated/Month')[0].text + + '-' + entry.xpath('.//DateCreated/Day')[0].text, '%Y-%m-%d') + + res_dict = {'url': url, + 'title': title, + 'publishedDate': publishedDate, + 'content': content} + + results.append(res_dict) + + return results diff --git a/searx/settings.yml b/searx/settings.yml index ec3e6b469..8ec5173f5 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -460,6 +460,12 @@ engines: url: https://pirateproxy.red/ timeout : 3.0 + - name : pubmed + engine : pubmed + shortcut : pub + categories: science + oa_first : false + - name : qwant engine : qwant shortcut : qw diff --git a/searx/url_utils.py b/searx/url_utils.py index dcafc3ba8..5e9e29190 100644 --- a/searx/url_utils.py +++ b/searx/url_utils.py @@ -3,6 +3,7 @@ from sys import version_info if version_info[0] == 2: from urllib import quote, quote_plus, unquote, urlencode from urlparse import parse_qs, parse_qsl, urljoin, urlparse, urlunparse, ParseResult + from urllib2 import urlopen else: from urllib.parse import ( parse_qs, @@ -16,6 +17,7 @@ else: urlunparse, ParseResult ) + from urllib.request import urlopen __export__ = (parse_qs, diff --git a/tests/unit/engines/pubmed.py b/tests/unit/engines/pubmed.py new file mode 100644 index 000000000..370efe067 --- /dev/null +++ b/tests/unit/engines/pubmed.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +from collections import defaultdict +import mock +from searx.engines import pubmed +from searx.testing import SearxTestCase + + +class TestPubmedEngine(SearxTestCase): + + def test_request(self): + query = 'test_query' + dicto = defaultdict(dict) + dicto['pageno'] = 1 + params = pubmed.request(query, dicto) + self.assertIn('url', params) + self.assertIn('eutils.ncbi.nlm.nih.gov/', params['url']) + self.assertIn('term', params['url']) + + def test_response(self): + self.assertRaises(AttributeError, pubmed.response, None) + self.assertRaises(AttributeError, pubmed.response, []) + self.assertRaises(AttributeError, pubmed.response, '') + self.assertRaises(AttributeError, pubmed.response, '[]') + + response = mock.Mock(text='') + self.assertEqual(pubmed.response(response), []) + + xml_mock = """110 +1 + +""" + + response = mock.Mock(text=xml_mock.encode('utf-8')) + results = pubmed.response(response) + self.assertEqual(type(results), list) + self.assertEqual(len(results), 1) + self.assertEqual(results[0]['content'], 'No abstract is available for this publication.') From d20bba6dc74ded16556acf2a404d01ec47455ca6 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Wed, 1 Nov 2017 14:20:47 +0100 Subject: [PATCH 09/10] minor fixes of pubmed engine Closes #1045 --- searx/engines/pubmed.py | 17 +++++++---------- searx/settings.yml | 2 +- searx/url_utils.py | 2 -- 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/searx/engines/pubmed.py b/searx/engines/pubmed.py index abb59d2ed..6451f1467 100644 --- a/searx/engines/pubmed.py +++ b/searx/engines/pubmed.py @@ -11,9 +11,11 @@ More info on api: https://www.ncbi.nlm.nih.gov/books/NBK25501/ """ +from flask_babel import gettext from lxml import etree from datetime import datetime -from searx.url_utils import urlencode, urlopen +from searx.url_utils import urlencode +from searx.poolrequests import get categories = ['science'] @@ -46,12 +48,7 @@ def response(resp): pubmed_retrieve_api_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?'\ + 'db=pubmed&retmode=xml&id={pmids_string}' - # handle Python2 vs Python3 management of bytes and strings - try: - pmids_results = etree.XML(resp.text.encode('utf-8')) - except AttributeError: - pmids_results = etree.XML(resp.text) - + pmids_results = etree.XML(resp.content) pmids = pmids_results.xpath('//eSearchResult/IdList/Id') pmids_string = '' @@ -62,7 +59,7 @@ def response(resp): retrieve_url_encoded = pubmed_retrieve_api_url.format(**retrieve_notice_args) - search_results_xml = urlopen(retrieve_url_encoded).read() + search_results_xml = get(retrieve_url_encoded).content search_results = etree.XML(search_results_xml).xpath('//PubmedArticleSet/PubmedArticle/MedlineCitation') for entry in search_results: @@ -74,12 +71,12 @@ def response(resp): try: content = entry.xpath('.//Abstract/AbstractText')[0].text except: - content = 'No abstract is available for this publication.' 
+ content = gettext('No abstract is available for this publication.') # If a doi is available, add it to the snipppet try: doi = entry.xpath('.//ELocationID[@EIdType="doi"]')[0].text - content = 'DOI: ' + doi + ' Abstract: ' + content + content = 'DOI: {doi} Abstract: {content}'.format(doi=doi, content=content) except: pass diff --git a/searx/settings.yml b/searx/settings.yml index 8ec5173f5..067a842e4 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -464,7 +464,7 @@ engines: engine : pubmed shortcut : pub categories: science - oa_first : false + timeout : 3.0 - name : qwant engine : qwant diff --git a/searx/url_utils.py b/searx/url_utils.py index 5e9e29190..dcafc3ba8 100644 --- a/searx/url_utils.py +++ b/searx/url_utils.py @@ -3,7 +3,6 @@ from sys import version_info if version_info[0] == 2: from urllib import quote, quote_plus, unquote, urlencode from urlparse import parse_qs, parse_qsl, urljoin, urlparse, urlunparse, ParseResult - from urllib2 import urlopen else: from urllib.parse import ( parse_qs, @@ -17,7 +16,6 @@ else: urlunparse, ParseResult ) - from urllib.request import urlopen __export__ = (parse_qs, From 5954a8e16a64a369072a7487f62b6396a451ae5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=83=C2=A9mi=20V=C3=83=C2=A1nyi?= Date: Wed, 1 Nov 2017 16:50:27 +0100 Subject: [PATCH 10/10] minor fix of BASE engine --- searx/engines/base.py | 2 +- tests/unit/engines/test_base.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/searx/engines/base.py b/searx/engines/base.py index ff006a3bc..be0b7d247 100755 --- a/searx/engines/base.py +++ b/searx/engines/base.py @@ -73,7 +73,7 @@ def request(query, params): def response(resp): results = [] - search_results = etree.XML(resp.text) + search_results = etree.XML(resp.content) for entry in search_results.xpath('./result/doc'): content = "No description available" diff --git a/tests/unit/engines/test_base.py b/tests/unit/engines/test_base.py index e008b034c..b5da5bde7 100644 --- a/tests/unit/engines/test_base.py +++ b/tests/unit/engines/test_base.py @@ -21,10 +21,10 @@ class TestBaseEngine(SearxTestCase): self.assertRaises(AttributeError, base.response, '') self.assertRaises(AttributeError, base.response, '[]') - response = mock.Mock(text='') + response = mock.Mock(content=b'') self.assertEqual(base.response(response), []) - xml_mock = """ + xml_mock = b""" 0 @@ -83,7 +83,7 @@ class TestBaseEngine(SearxTestCase): """ - response = mock.Mock(text=xml_mock.encode('utf-8')) + response = mock.Mock(content=xml_mock) results = base.response(response) self.assertEqual(type(results), list) self.assertEqual(len(results), 1)
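Taken together, patches 08 and 09 leave the PubMed engine making two round trips to NCBI's E-utilities: `esearch` turns the query into PubMed IDs, then `efetch` expands those IDs into citation records. A condensed standalone sketch of that flow (using the stdlib `urllib` instead of `searx.poolrequests`; element names follow the E-utilities XML):

```python
# Condensed sketch of the two-step E-utilities flow used by the pubmed engine:
# esearch maps a query to PubMed IDs, efetch expands IDs to citation records.
# Assumes Python 3 and lxml; no error handling.
from urllib.parse import urlencode
from urllib.request import urlopen
from lxml import etree

EUTILS = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils'

def search_pubmed(term, hits=10):
    # Step 1: esearch returns the matching PMIDs.
    query = urlencode({'db': 'pubmed', 'term': term, 'retmax': hits})
    search_xml = etree.XML(urlopen('{}/esearch.fcgi?{}'.format(EUTILS, query)).read())
    pmids = search_xml.xpath('//eSearchResult/IdList/Id/text()')
    if not pmids:
        return []
    # Step 2: efetch resolves the PMIDs to full article records.
    fetch_url = '{}/efetch.fcgi?db=pubmed&retmode=xml&id={}'.format(EUTILS, ','.join(pmids))
    records = etree.XML(urlopen(fetch_url).read())
    return [{'pmid': e.findtext('.//PMID'),
             'title': e.findtext('.//Article/ArticleTitle')}
            for e in records.xpath('//PubmedArticleSet/PubmedArticle/MedlineCitation')]
```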