2014-03-04 12:11:04 +00:00
|
|
|
#!/usr/bin/env python
|
|
|
|
|
|
|
|
from urllib import urlencode
|
|
|
|
from json import loads
|
2014-03-14 08:55:04 +00:00
|
|
|
from datetime import datetime, timedelta
|
2014-03-04 12:11:04 +00:00
|
|
|
|
|
|
|
# searx engine metadata: categories this engine contributes results to
categories = ['news']

# base URL of the (deprecated) Google AJAX Search API
url = 'https://ajax.googleapis.com/'

# news-search endpoint template; {offset}, {query} and {language} are
# filled in by request() below (rsz=large -> 8 results per page)
search_url = url + 'ajax/services/search/news?v=2.0&start={offset}&rsz=large&safe=off&filter=off&{query}&hl={language}' # noqa

# engine capabilities advertised to searx
paging = True
language_support = True
|
|
|
|
|
|
|
|
|
|
|
|
def request(query, params):
    """Build the outgoing Google News search request.

    :param query: plain-text search terms
    :param params: searx request-parameter dict ('pageno', 'language', ...);
                   'url' is filled in place
    :returns: the same params dict, with 'url' set
    """
    # eight results per page (rsz=large) -> offset of the first result
    start = (params['pageno'] - 1) * 8

    # map searx locale codes (xx_YY) onto Google's dash form (xx-YY);
    # 'all' falls back to en-US
    lang = params['language']
    lang = 'en-US' if lang == 'all' else lang.replace('_', '-')

    params['url'] = search_url.format(
        offset=start,
        query=urlencode({'q': query}),
        language=lang,
    )

    return params
|
|
|
|
|
|
|
|
|
|
|
|
def response(resp):
    """Parse a Google News AJAX API reply into searx result dicts.

    :param resp: HTTP response object whose ``text`` is the JSON API answer
    :returns: list of dicts with 'url', 'title', 'publishedDate' (a
              timezone-naive UTC datetime) and 'content'; empty list when
              the API reported no results
    """
    results = []

    search_res = loads(resp.text)

    # the API answers with "responseData": null on errors, so the .get()
    # default never applies -- coalesce explicitly before the inner lookup
    response_data = search_res.get('responseData') or {}
    if not response_data.get('results'):
        return []

    for result in response_data['results']:
        # publishedDate looks like "Mon, 10 Mar 2014 16:26:15 -0700".
        # Python 2's strptime has no working "%z", so parse the first five
        # fields as a naive datetime ...
        date_parts = result['publishedDate'].split(None)
        publishedDate = datetime.strptime(' '.join(date_parts[0:5]),
                                          "%a, %d %b %Y %H:%M:%S")

        # ... then apply the trailing "+HHMM"/"-HHMM" offset by hand
        # (local = utc + offset, hence utc = local - offset) so dates from
        # different publishers are comparable
        if len(date_parts) > 5:
            offset = date_parts[5]
            if len(offset) == 5 and offset[0] in '+-' and offset[1:].isdigit():
                delta = timedelta(hours=int(offset[1:3]),
                                  minutes=int(offset[3:5]))
                publishedDate -= delta if offset[0] == '+' else -delta

        results.append({'url': result['unescapedUrl'],
                        'title': result['titleNoFormatting'],
                        'publishedDate': publishedDate,
                        'content': result['content']})

    return results
|