Merge pull request #1075 from kvch/finish-jibe-b-engines

Finish PRs of @jibe-b: pubmed, oa_doi_rewrite, openaire, arxiv
commit 3d50b0288d
Adam Tauber, 2017-11-01 21:27:57 +01:00, committed by GitHub
12 changed files with 343 additions and 10 deletions

searx/engines/arxiv.py Normal file

@@ -0,0 +1,76 @@
#!/usr/bin/env python

"""
 ArXiV (Scientific preprints)
 @website     https://arxiv.org
 @provide-api yes (export.arxiv.org/api/query)
 @using-api   yes
 @results     XML-RSS
 @stable      yes
 @parse       url, title, publishedDate, content
 More info on api: https://arxiv.org/help/api/user-manual
"""

from lxml import html
from datetime import datetime
from searx.url_utils import urlencode


categories = ['science']

base_url = 'http://export.arxiv.org/api/query?search_query=all:'\
           + '{query}&start={offset}&max_results={number_of_results}'

# engine dependent config
number_of_results = 10


def request(query, params):
    # basic search
    offset = (params['pageno'] - 1) * number_of_results

    string_args = dict(query=query,
                       offset=offset,
                       number_of_results=number_of_results)

    params['url'] = base_url.format(**string_args)

    return params


def response(resp):
    results = []

    dom = html.fromstring(resp.content)
    search_results = dom.xpath('//entry')

    for entry in search_results:
        title = entry.xpath('.//title')[0].text

        url = entry.xpath('.//id')[0].text

        content_string = '{doi_content}{abstract_content}'

        abstract = entry.xpath('.//summary')[0].text

        # If a doi is available, add it to the snippet
        try:
            doi_content = entry.xpath('.//link[@title="doi"]')[0].text
            content = content_string.format(doi_content=doi_content, abstract_content=abstract)
        except IndexError:
            content = content_string.format(doi_content="", abstract_content=abstract)

        if len(content) > 300:
            content = content[0:300] + "..."
        # TODO: center snippet on query term

        publishedDate = datetime.strptime(entry.xpath('.//published')[0].text, '%Y-%m-%dT%H:%M:%SZ')

        res_dict = {'url': url,
                    'title': title,
                    'publishedDate': publishedDate,
                    'content': content}

        results.append(res_dict)

    return results
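
Taken together, request() and response() are the entire engine contract: searx calls request() to fill in params['url'], fetches that URL, and hands the response object to response(). A minimal offline sketch of that round trip, using a hypothetical canned Atom feed in place of a live export.arxiv.org reply:

# Hypothetical walk-through of the engine contract (not part of the PR).
from searx.engines import arxiv

params = arxiv.request('quantum computing', {'pageno': 2})
# pageno 2 with number_of_results = 10 yields start=10 in the query string
print(params['url'])

class FakeResponse:
    # stands in for the requests response searx would pass to response()
    content = b'''<feed xmlns="http://www.w3.org/2005/Atom">
      <entry>
        <id>http://arxiv.org/abs/0000.00000</id>
        <title>Example title</title>
        <summary>Example abstract.</summary>
        <published>2017-01-01T00:00:00Z</published>
      </entry>
    </feed>'''

for result in arxiv.response(FakeResponse()):
    print(result['title'], result['url'])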

searx/engines/base.py

@@ -73,7 +73,7 @@ def request(query, params):
def response(resp):
    results = []

-    search_results = etree.XML(resp.text)
+    search_results = etree.XML(resp.content)

    for entry in search_results.xpath('./result/doc'):
        content = "No description available"

searx/engines/pubmed.py Normal file

@@ -0,0 +1,98 @@
#!/usr/bin/env python

"""
 PubMed (Scholar publications)
 @website     https://www.ncbi.nlm.nih.gov/pubmed/
 @provide-api yes (https://www.ncbi.nlm.nih.gov/home/develop/api/)
 @using-api   yes
 @results     XML
 @stable      yes
 @parse       url, title, publishedDate, content
 More info on api: https://www.ncbi.nlm.nih.gov/books/NBK25501/
"""

from flask_babel import gettext
from lxml import etree
from datetime import datetime
from searx.url_utils import urlencode
from searx.poolrequests import get


categories = ['science']

base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'\
           + '?db=pubmed&{query}&retstart={offset}&retmax={hits}'

# engine dependent config
number_of_results = 10
pubmed_url = 'https://www.ncbi.nlm.nih.gov/pubmed/'


def request(query, params):
    # basic search
    offset = (params['pageno'] - 1) * number_of_results

    string_args = dict(query=urlencode({'term': query}),
                       offset=offset,
                       hits=number_of_results)

    params['url'] = base_url.format(**string_args)

    return params


def response(resp):
    results = []

    # First retrieve notice of each result
    pubmed_retrieve_api_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?'\
                              + 'db=pubmed&retmode=xml&id={pmids_string}'

    pmids_results = etree.XML(resp.content)
    pmids = pmids_results.xpath('//eSearchResult/IdList/Id')
    pmids_string = ''

    for item in pmids:
        pmids_string += item.text + ','

    retrieve_notice_args = dict(pmids_string=pmids_string)

    retrieve_url_encoded = pubmed_retrieve_api_url.format(**retrieve_notice_args)

    search_results_xml = get(retrieve_url_encoded).content
    search_results = etree.XML(search_results_xml).xpath('//PubmedArticleSet/PubmedArticle/MedlineCitation')

    for entry in search_results:
        title = entry.xpath('.//Article/ArticleTitle')[0].text

        pmid = entry.xpath('.//PMID')[0].text
        url = pubmed_url + pmid

        try:
            content = entry.xpath('.//Abstract/AbstractText')[0].text
        except IndexError:
            content = gettext('No abstract is available for this publication.')

        # If a doi is available, add it to the snippet
        try:
            doi = entry.xpath('.//ELocationID[@EIdType="doi"]')[0].text
            content = 'DOI: {doi} Abstract: {content}'.format(doi=doi, content=content)
        except IndexError:
            pass

        if len(content) > 300:
            content = content[0:300] + "..."
        # TODO: center snippet on query term

        publishedDate = datetime.strptime(entry.xpath('.//DateCreated/Year')[0].text
                                          + '-' + entry.xpath('.//DateCreated/Month')[0].text
                                          + '-' + entry.xpath('.//DateCreated/Day')[0].text, '%Y-%m-%d')

        res_dict = {'url': url,
                    'title': title,
                    'publishedDate': publishedDate,
                    'content': content}

        results.append(res_dict)

    return results
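
PubMed needs two round trips where arXiv needs one: esearch.fcgi first resolves the query to a list of PMIDs, then response() calls efetch.fcgi (through searx.poolrequests.get) to pull the actual records. A sketch of the URLs involved, with a made-up query and made-up PMIDs:

# Illustration of the two-step NCBI E-utilities flow (URLs only, no network calls).
from searx.engines import pubmed

params = pubmed.request('aspirin', {'pageno': 1})
print(params['url'])
# https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=aspirin&retstart=0&retmax=10

# response() then formats an efetch URL from the PMIDs it got back, e.g.:
pmids_string = '29000000,29000001,'  # hypothetical IDs; note the trailing comma the loop produces
efetch_url = ('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?'
              'db=pubmed&retmode=xml&id={pmids_string}').format(pmids_string=pmids_string)
print(efetch_url)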

searx/plugins/__init__.py

@@ -22,7 +22,7 @@ if version_info[0] == 3:
logger = logger.getChild('plugins')

-from searx.plugins import (doai_rewrite,
+from searx.plugins import (oa_doi_rewrite,
                           https_rewrite,
                           infinite_scroll,
                           open_results_on_new_tab,
@@ -78,7 +78,7 @@ class PluginStore():
plugins = PluginStore()

-plugins.register(doai_rewrite)
+plugins.register(oa_doi_rewrite)
plugins.register(https_rewrite)
plugins.register(infinite_scroll)
plugins.register(open_results_on_new_tab)

searx/plugins/doai_rewrite.py → searx/plugins/oa_doi_rewrite.py

@@ -1,14 +1,18 @@
from flask_babel import gettext
import re
from searx.url_utils import urlparse, parse_qsl
+from searx import settings

regex = re.compile(r'10\.\d{4,9}/[^\s]+')

-name = gettext('DOAI rewrite')
+name = gettext('Open Access DOI rewrite')
description = gettext('Avoid paywalls by redirecting to open-access versions of publications when available')
default_on = False
preference_section = 'privacy'

+doi_resolvers = settings['doi_resolvers']
+

def extract_doi(url):
    match = regex.search(url.path)
@@ -21,12 +25,20 @@ def extract_doi(url):
    return None


+def get_doi_resolver(args, preference_doi_resolver):
+    doi_resolvers = settings['doi_resolvers']
+    doi_resolver = args.get('doi_resolver', preference_doi_resolver)[0]
+    if doi_resolver not in doi_resolvers:
+        doi_resolver = settings['default_doi_resolver']
+    return doi_resolver
+
+
def on_result(request, search, result):
    doi = extract_doi(result['parsed_url'])
    if doi and len(doi) < 50:
        for suffix in ('/', '.pdf', '/full', '/meta', '/abstract'):
            if doi.endswith(suffix):
                doi = doi[:-len(suffix)]
-        result['url'] = 'http://doai.io/' + doi
+        result['url'] = get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')) + doi
        result['parsed_url'] = urlparse(result['url'])
    return True
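
End to end, on_result() extracts a DOI from the result URL, strips known suffixes, and prepends whichever resolver the user picked (falling back to default_doi_resolver). A small sketch of that pipeline with a hypothetical paywalled URL, outside the plugin's request/search plumbing:

# Hypothetical walk-through of the rewrite logic (no searx request object needed).
import re
from searx.url_utils import urlparse

regex = re.compile(r'10\.\d{4,9}/[^\s]+')

url = urlparse('https://journals.example.org/doi/10.1000/xyz123.pdf')
doi = regex.search(url.path).group(0)          # '10.1000/xyz123.pdf'

for suffix in ('/', '.pdf', '/full', '/meta', '/abstract'):
    if doi.endswith(suffix):
        doi = doi[:-len(suffix)]               # '10.1000/xyz123'

resolver = 'https://oadoi.org/'                # what get_doi_resolver returns by default
print(resolver + doi)                          # https://oadoi.org/10.1000/xyz123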

searx/preferences.py

@@ -15,6 +15,7 @@ LANGUAGE_CODES = [l[0] for l in languages]
LANGUAGE_CODES.append('all')

DISABLED = 0
ENABLED = 1
+DOI_RESOLVERS = list(settings['doi_resolvers'])


class MissingArgumentException(Exception):
@@ -266,7 +267,9 @@ class Preferences(object):
            'results_on_new_tab': MapSetting(False, map={'0': False,
                                                         '1': True,
                                                         'False': False,
-                                                         'True': True})}
+                                                         'True': True}),
+            'doi_resolver': MultipleChoiceSetting(['oadoi.org'], choices=DOI_RESOLVERS),
+        }

        self.engines = EnginesSetting('engines', choices=engines)
        self.plugins = PluginsSetting('plugins', choices=plugins)
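
The new 'doi_resolver' entry reuses the existing MultipleChoiceSetting machinery: ['oadoi.org'] as the default selection and the resolver names from settings.yml as the allowed values. A simplified stand-in showing the validation behaviour such a setting provides (illustrative class, not searx's actual implementation):

# Simplified stand-in for a multiple-choice preference (hypothetical, for illustration).
class SimpleMultipleChoiceSetting:
    def __init__(self, default, choices):
        self.value = default
        self.choices = choices

    def parse(self, data):
        # reject any submitted value that is not a known choice
        selected = [v for v in data.split(',') if v]
        for v in selected:
            if v not in self.choices:
                raise ValueError('invalid choice: ' + v)
        self.value = selected

setting = SimpleMultipleChoiceSetting(['oadoi.org'], choices=['oadoi.org', 'doi.org', 'doai.io'])
setting.parse('doi.org')
print(setting.value)  # ['doi.org']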

searx/settings.yml

@@ -60,6 +60,12 @@ engines:
    disabled : True
    shortcut : ai

+  - name : arxiv
+    engine : arxiv
+    shortcut : arx
+    categories : science
+    timeout : 4.0
+
  - name : base
    engine : base
    shortcut : bs
@@ -409,6 +415,18 @@ engines:
    shortcut : nt
    disabled : True

+  - name : openaire
+    engine : json_engine
+    paging : True
+    search_url : http://api.openaire.eu/search/datasets?format=json&page={pageno}&size=10&title={query}
+    results_query : response/results/result
+    url_query : metadata/oaf:entity/oaf:result/children/instance/webresource/url/$
+    title_query : metadata/oaf:entity/oaf:result/title/$
+    content_query : metadata/oaf:entity/oaf:result/description/$
+    categories : science
+    shortcut : oa
+    timeout: 5.0
+
  - name : openstreetmap
    engine : openstreetmap
    shortcut : osm
@@ -442,6 +460,12 @@ engines:
    url: https://pirateproxy.red/
    timeout : 3.0

+  - name : pubmed
+    engine : pubmed
+    shortcut : pub
+    categories: science
+    timeout : 3.0
+
  - name : qwant
    engine : qwant
    shortcut : qw
@@ -694,3 +718,10 @@ locales:
    tr : Türkçe (Turkish)
    uk : українська мова (Ukrainian)
    zh : 中文 (Chinese)
+
+doi_resolvers :
+  oadoi.org : 'https://oadoi.org/'
+  doi.org : 'https://doi.org/'
+  doai.io : 'http://doai.io/'
+
+default_doi_resolver : 'oadoi.org'
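
The openaire entry above needs no dedicated engine module: the generic json_engine walks the parsed JSON response along '/'-separated paths. In OpenAIRE's XML-derived JSON, '$' is a literal key holding an element's text, so the trailing '/$' in the queries is an ordinary lookup. A simplified sketch of the traversal idea (the real json_engine handles more cases; the data below is illustrative):

# Simplified '/'-separated JSON path lookup, in the spirit of searx's json_engine.
def query_json(data, path):
    for part in path.split('/'):
        if isinstance(data, list):  # descend into list results one element at a time
            data = data[0]
        data = data[part]
    return data

doc = {'response': {'results': {'result': [
    {'metadata': {'oaf:entity': {'oaf:result': {'title': {'$': 'Open data set'}}}}},
]}}}

records = query_json(doc, 'response/results/result')
print(query_json(records[0], 'metadata/oaf:entity/oaf:result/title/$'))  # Open data set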

searx/templates/oscar/preferences.html

@@ -118,6 +118,18 @@
                <option value="0" {% if not results_on_new_tab %}selected="selected"{% endif %}>{{ _('Off')}}</option>
            </select>
            {{ preferences_item_footer(info, label, rtl) }}
+
+            {% set label = _('Open Access DOI resolver') %}
+            {% set info = _('Redirect to open-access versions of publications when available (plugin required)') %}
+            {{ preferences_item_header(info, label, rtl) }}
+            <select class="form-control" id='doi_resolver' name='doi_resolver'>
+                {% for doi_resolver_name, doi_resolver_url in doi_resolvers.items() %}
+                <option value="{{ doi_resolver_name }}" {% if doi_resolver_name == current_doi_resolver %}selected="selected"{% endif %}>
+                    {{ doi_resolver_name }} - {{ doi_resolver_url }}
+                </option>
+                {% endfor %}
+            </select>
+            {{ preferences_item_footer(info, label, rtl) }}
        </div>
    </fieldset>
</div>

searx/webapp.py

@@ -66,6 +66,7 @@ from searx.search import SearchWithPlugins, get_search_query_from_webapp
from searx.query import RawTextQuery
from searx.autocomplete import searx_bang, backends as autocomplete_backends
from searx.plugins import plugins
+from searx.plugins.oa_doi_rewrite import get_doi_resolver
from searx.preferences import Preferences, ValidationException
from searx.answerers import answerers
from searx.url_utils import urlencode, urlparse, urljoin
@@ -695,6 +696,8 @@ def preferences():
                  shortcuts={y: x for x, y in engine_shortcuts.items()},
                  themes=themes,
                  plugins=plugins,
+                  doi_resolvers=settings['doi_resolvers'],
+                  current_doi_resolver=get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')),
                  allowed_plugins=allowed_plugins,
                  theme=get_current_theme_name(),
                  preferences_url_params=request.preferences.get_as_url_params(),
@@ -839,7 +842,10 @@ def config():
                    'autocomplete': settings['search']['autocomplete'],
                    'safe_search': settings['search']['safe_search'],
                    'default_theme': settings['ui']['default_theme'],
-                    'version': VERSION_STRING})
+                    'version': VERSION_STRING,
+                    'doi_resolvers': [r for r in settings['doi_resolvers']],
+                    'default_doi_resolver': settings['default_doi_resolver'],
+                    })
@app.errorhandler(404)
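
With these fields exposed, API consumers can discover the configured resolvers from the public /config endpoint instead of scraping the preferences page. A hedged sketch of a client-side check (the instance URL is made up; the keys are the ones added above):

# Hypothetical client querying the /config endpoint of a local searx instance.
import requests

config = requests.get('http://localhost:8888/config').json()
print(config['doi_resolvers'])         # e.g. ['oadoi.org', 'doi.org', 'doai.io']
print(config['default_doi_resolver'])  # e.g. 'oadoi.org'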

tests/unit/engines/test_pubmed.py Normal file

@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
from collections import defaultdict
import mock
from searx.engines import pubmed
from searx.testing import SearxTestCase


class TestPubmedEngine(SearxTestCase):

    def test_request(self):
        query = 'test_query'
        dicto = defaultdict(dict)
        dicto['pageno'] = 1
        params = pubmed.request(query, dicto)
        self.assertIn('url', params)
        self.assertIn('eutils.ncbi.nlm.nih.gov/', params['url'])
        self.assertIn('term', params['url'])

    def test_response(self):
        self.assertRaises(AttributeError, pubmed.response, None)
        self.assertRaises(AttributeError, pubmed.response, [])
        self.assertRaises(AttributeError, pubmed.response, '')
        self.assertRaises(AttributeError, pubmed.response, '[]')

        # response() reads resp.content (bytes), so the mocks provide content, not text
        response = mock.Mock(content=b'<PubmedArticleSet></PubmedArticleSet>')
        self.assertEqual(pubmed.response(response), [])

        xml_mock = b"""<eSearchResult><Count>1</Count><RetMax>1</RetMax><RetStart>0</RetStart><IdList>
<Id>1</Id>
</IdList></eSearchResult>
"""

        response = mock.Mock(content=xml_mock)
        results = pubmed.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['content'], 'No abstract is available for this publication.')

tests/unit/engines/test_arxiv.py Normal file

@@ -0,0 +1,58 @@
# -*- coding: utf-8 -*-
from collections import defaultdict
import mock
from searx.engines import arxiv
from searx.testing import SearxTestCase


class TestArxivEngine(SearxTestCase):

    def test_request(self):
        query = 'test_query'
        dicto = defaultdict(dict)
        dicto['pageno'] = 1
        params = arxiv.request(query, dicto)
        self.assertIn('url', params)
        self.assertIn('export.arxiv.org/api/', params['url'])

    def test_response(self):
        self.assertRaises(AttributeError, arxiv.response, None)
        self.assertRaises(AttributeError, arxiv.response, [])
        self.assertRaises(AttributeError, arxiv.response, '')
        self.assertRaises(AttributeError, arxiv.response, '[]')

        response = mock.Mock(content=b'''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"></feed>''')
        self.assertEqual(arxiv.response(response), [])

        xml_mock = b'''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
  <title type="html">ArXiv Query: search_query=all:test_query&amp;id_list=&amp;start=0&amp;max_results=1</title>
  <id>http://arxiv.org/api/1</id>
  <updated>2000-01-21T00:00:00-01:00</updated>
  <opensearch:totalResults xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">1</opensearch:totalResults>
  <opensearch:startIndex xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">0</opensearch:startIndex>
  <opensearch:itemsPerPage xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">1</opensearch:itemsPerPage>
  <entry>
    <id>http://arxiv.org/1</id>
    <updated>2000-01-01T00:00:01Z</updated>
    <published>2000-01-01T00:00:01Z</published>
    <title>Mathematical proof.</title>
    <summary>Mathematical formula.</summary>
    <author>
      <name>A. B.</name>
    </author>
    <link href="http://arxiv.org/1" rel="alternate" type="text/html"/>
    <link title="pdf" href="http://arxiv.org/1" rel="related" type="application/pdf"/>
    <category term="math.QA" scheme="http://arxiv.org/schemas/atom"/>
    <category term="1" scheme="http://arxiv.org/schemas/atom"/>
  </entry>
</feed>
'''

        response = mock.Mock(content=xml_mock)
        results = arxiv.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['title'], 'Mathematical proof.')
        self.assertEqual(results[0]['content'], 'Mathematical formula.')

tests/unit/engines/test_base.py

@@ -21,10 +21,10 @@ class TestBaseEngine(SearxTestCase):
        self.assertRaises(AttributeError, base.response, '')
        self.assertRaises(AttributeError, base.response, '[]')

-        response = mock.Mock(text='<response></response>')
+        response = mock.Mock(content=b'<response></response>')
        self.assertEqual(base.response(response), [])

-        xml_mock = """<?xml version="1.0"?>
+        xml_mock = b"""<?xml version="1.0"?>
<response>
    <lst name="responseHeader">
        <int name="status">0</int>
@@ -83,7 +83,7 @@ class TestBaseEngine(SearxTestCase):
    </result>
</response>"""

-        response = mock.Mock(text=xml_mock.encode('utf-8'))
+        response = mock.Mock(content=xml_mock)
        results = base.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 1)