[fix] pep8

Thomas Pointhuber 2014-12-16 17:26:16 +01:00
parent 91f9973227
commit a508d540ac
6 changed files with 35 additions and 17 deletions

View File

@@ -57,12 +57,16 @@ def response(resp):
         link = result.xpath('.//div[@class="newstitle"]/a')[0]
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
-        contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
+        contentXPath = result.xpath('.//div[@class="sn_txt"]/div'
+                                    '//span[@class="sn_snip"]//text()')
         if contentXPath is not None:
             content = escape(' '.join(contentXPath))

         # parse publishedDate
-        publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
+        publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
+                                          '//span[contains(@class,"sn_ST")]'
+                                          '//span[contains(@class,"sn_tm")]'
+                                          '//text()')
         if publishedDateXPath is not None:
             publishedDate = escape(' '.join(publishedDateXPath))
@@ -74,7 +78,8 @@ def response(resp):
             timeNumbers = re.findall(r'\d+', publishedDate)
             publishedDate = datetime.now()\
                 - timedelta(hours=int(timeNumbers[0]))
-        elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
+        elif re.match("^[0-9]+ hour(s|),"
+                      " [0-9]+ minute(s|) ago$", publishedDate):
             timeNumbers = re.findall(r'\d+', publishedDate)
             publishedDate = datetime.now()\
                 - timedelta(hours=int(timeNumbers[0]))\
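A note on the wrapping used in both hunks above: adjacent string literals in Python are concatenated at compile time, so splitting the XPath inside the call parentheses changes only the layout, not the expression lxml receives. A minimal sketch of the equivalence:

# adjacent literals inside parentheses join into one string
xpath_one_line = './/div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()'
xpath_wrapped = ('.//div[@class="sn_txt"]/div'
                 '//span[@class="sn_snip"]//text()')
assert xpath_one_line == xpath_wrapped  # identical string, just wrapped for PEP8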

View File

@@ -22,10 +22,17 @@ api_key = None
 # search-url
 url = 'http://www.faroo.com/'
-search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
+search_url = url + 'api?{query}'\
+    '&start={offset}'\
+    '&length={number_of_results}'\
+    '&l={language}'\
+    '&src={categorie}'\
+    '&i=false'\
+    '&f=json'\
+    '&key={api_key}'  # noqa

 search_category = {'general': 'web',
                    'news': 'news'}

 # do search-request
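Outside of parentheses, as with this URL template, the continuation has to be explicit, so the wrapped literals are joined with trailing backslashes and the remaining long-line warning is silenced with # noqa. A small sketch with a shortened template and made-up values, only to show that a single string comes out of it:

# shortened, hypothetical version of the template above
search_url = 'http://www.faroo.com/' + 'api?{query}'\
    '&start={offset}'\
    '&length={number_of_results}'
print(search_url.format(query='q=searx', offset=1, number_of_results=10))
# -> http://www.faroo.com/api?q=searx&start=1&length=10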
@@ -80,8 +87,8 @@ def response(resp):
     # parse results
     for result in search_res['results']:
         if result['news']:
-            # timestamp (how many milliseconds have passed between now and the beginning of 1970)
-            publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)
+            # timestamp (milliseconds since 1970)
+            publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)  # noqa

             # append news result
             results.append({'url': result['url'],
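For context on the division: the API delivers the result date as milliseconds since 1970, while datetime.datetime.fromtimestamp expects seconds, hence the /1000.0. Standalone sketch with an arbitrary example value:

import datetime

millis = 1418745976000  # arbitrary example: milliseconds since 1970
published = datetime.datetime.fromtimestamp(millis / 1000.0)
print(published.isoformat())  # local-time datetime for the result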

View File

@@ -9,7 +9,7 @@
 # @stable yes (but deprecated)
 # @parse url, title, img_src

-from urllib import urlencode,unquote
+from urllib import urlencode, unquote
 from json import loads

 # engine dependent config

View File

@ -1,8 +1,8 @@
## Kickass Torrent (Videos, Music, Files) ## Kickass Torrent (Videos, Music, Files)
# #
# @website https://kickass.so # @website https://kickass.so
# @provide-api no (nothing found) # @provide-api no (nothing found)
# #
# @using-api no # @using-api no
# @results HTML (using search portal) # @results HTML (using search portal)
# @stable yes (HTML can change) # @stable yes (HTML can change)
@@ -13,7 +13,6 @@ from cgi import escape
 from urllib import quote
 from lxml import html
 from operator import itemgetter
-from dateutil import parser

 # engine dependent config
 categories = ['videos', 'music', 'files']
@@ -33,7 +32,8 @@ def request(query, params):
     params['url'] = search_url.format(search_term=quote(query),
                                       pageno=params['pageno'])

-    # FIX: SSLError: hostname 'kickass.so' doesn't match either of '*.kickass.to', 'kickass.to'
+    # FIX: SSLError: hostname 'kickass.so'
+    # doesn't match either of '*.kickass.to', 'kickass.to'
     params['verify'] = False

     return params
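The retained workaround relies on the engine's request params being forwarded to python-requests, where verify=False turns off TLS certificate verification and so avoids the hostname-mismatch SSLError mentioned in the comment. A roughly equivalent direct call, with the URL simplified for illustration:

import requests

# illustration only; searx builds the real URL from search_url
resp = requests.get('https://kickass.so/', verify=False)  # skip certificate check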

View File

@@ -20,7 +20,12 @@ guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'

 # search-url
 url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}'
+search_url = url + 'search?{query}'\
+    '&facet=model'\
+    '&limit=20'\
+    '&offset={offset}'\
+    '&linked_partitioning=1'\
+    '&client_id={client_id}'  # noqa

 # do search-request
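Same backslash wrapping as in the Faroo engine; the joined literals still form one template that is later filled with str.format. A sketch with hypothetical values (the 'q' parameter name and client id are placeholders for illustration):

from urllib import urlencode  # Python 2, matching these engines

search_url = 'https://api.soundcloud.com/' + 'search?{query}'\
    '&limit=20'\
    '&offset={offset}'\
    '&client_id={client_id}'  # same wrapping style, shortened here

# hypothetical values, only to show the placeholder substitution
request_url = search_url.format(query=urlencode({'q': 'searx'}),
                                offset=0,
                                client_id='<client id>')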

View File

@@ -20,7 +20,8 @@ paging = True
 language_support = True

 # search-url
-search_url = 'https://search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'
+base_url = 'https://search.yahoo.com/'
+search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'

 # specific xpath variables
 results_xpath = '//div[@class="res"]'
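Splitting base_url out of search_url keeps the line within the length limit; the next hunk prefixes it back on when the request URL is built. A small sketch with made-up arguments:

from urllib import urlencode  # Python 2, as in the engine code

base_url = 'https://search.yahoo.com/'
search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'

# hypothetical request parameters, just to show how the pieces recombine
url = base_url + search_url.format(query=urlencode({'p': 'searx'}),
                                   offset=1,
                                   lang='en')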
@@ -57,9 +58,9 @@ def request(query, params):
     else:
         language = params['language'].split('_')[0]

-    params['url'] = search_url.format(offset=offset,
-                                      query=urlencode({'p': query}),
-                                      lang=language)
+    params['url'] = base_url + search_url.format(offset=offset,
+                                                 query=urlencode({'p': query}),
+                                                 lang=language)

     # TODO required?
     params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\