[mod] simple theme: drop img_src from default results

Using both img_src AND thumbnail in the default results makes no sense (only a
thumbnail is needed).  The current state is rather confusing, because img_src is
rendered like a thumbnail (small) while thumbnail is rendered like an image
(large).

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
pull/3492/head
Author: Markus Heiser <markus.heiser@darmarit.de>
parent 0f2f52f0b5
commit 916739d6b4
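
For context, a minimal sketch (not part of this commit) of how an engine builds a default result after the change. The function name, base_url handling and XPath expressions below are illustrative assumptions; only the field naming follows the convention this commit enforces, and the helpers are the same searx.utils functions already used in the hunks below.

    # Hypothetical example engine snippet, not taken from any file touched by this commit.
    # A default result carries a 'thumbnail' (rendered small by the simple theme);
    # 'img_src' stays reserved for image-like result templates.
    from searx.utils import extract_text, eval_xpath_getindex

    def build_default_result(item, base_url):
        # 'item' is an lxml element for one result, 'base_url' the engine's site root
        return {
            'url': base_url + eval_xpath_getindex(item, './/a/@href', 0),
            'title': extract_text(eval_xpath_getindex(item, './/h4', 0)),
            'content': extract_text(eval_xpath_getindex(item, './/p', 0, default='')),
            'thumbnail': eval_xpath_getindex(item, './/img/@src', 0, default=None),
        }
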

@ -133,7 +133,7 @@ def _get_result(item):
'publisher': extract_text(eval_xpath(item, './/div[contains(@class, "text-sm")]')),
'authors': [extract_text(eval_xpath(item, './/div[contains(@class, "italic")]'))],
'content': extract_text(eval_xpath(item, './/div[contains(@class, "text-xs")]')),
'img_src': item.xpath('.//img/@src')[0],
'thumbnail': item.xpath('.//img/@src')[0],
}

@ -53,8 +53,8 @@ def response(resp):
url = base_url + link.attrib.get('href') + '#downloads'
title = extract_text(link)
img_src = base_url + eval_xpath_getindex(result, './/img/@src', 0)
res = {'url': url, 'title': title, 'img_src': img_src}
thumbnail = base_url + eval_xpath_getindex(result, './/img/@src', 0)
res = {'url': url, 'title': title, 'thumbnail': thumbnail}
results.append(res)

@ -47,7 +47,7 @@ def response(resp):
'url': result['trackViewUrl'],
'title': result['trackName'],
'content': result['description'],
'img_src': result['artworkUrl100'],
'thumbnail': result['artworkUrl100'],
'publishedDate': parse(result['currentVersionReleaseDate']),
'author': result['sellerName'],
}

@ -66,7 +66,7 @@ def response(resp):
"title": item['title'],
"content": item['abstract'],
"publishedDate": pubdate_original,
# "img_src": item.get('image_url') or None, # these are not thumbs / to large
# "thumbnail": item.get('image_url') or None, # these are not thumbs / to large
"metadata": ' | '.join(metadata),
}
)

@ -68,7 +68,7 @@ def response(resp):
thumbnail = result.xpath('.//div[@class="art"]/img/@src')
if thumbnail:
new_result['img_src'] = thumbnail[0]
new_result['thumbnail'] = thumbnail[0]
result_id = parse_qs(urlparse(link.get('href')).query)["search_item_id"][0]
itemtype = extract_text(result.xpath('.//div[@class="itemtype"]')).lower()

@ -130,7 +130,7 @@ def response(resp):
'url': url,
'title': title,
'content': content,
'img_src': thumbnail,
'thumbnail': thumbnail,
'metadata': metadata,
}
)

@ -40,9 +40,9 @@ def response(resp):
json_resp = resp.json()
for result in json_resp['teaser']:
img_src = None
thumbnail = None
if result['teaser']['image']:
img_src = base_url + result['teaser']['image']['sources'][-1]['url']
thumbnail = base_url + result['teaser']['image']['sources'][-1]['url']
metadata = result['extension']['overline']
authors = ', '.join(author['name'] for author in result['extension'].get('authors', []))
@ -58,7 +58,7 @@ def response(resp):
'url': base_url + result['teaser']['link']['url'],
'title': result['teaser']['title'],
'content': result['teaser']['text'],
'img_src': img_src,
'thumbnail': thumbnail,
'publishedDate': publishedDate,
'metadata': metadata,
}

@ -296,14 +296,14 @@ def _parse_search(resp):
content_tag = eval_xpath_getindex(result, './/div[contains(@class, "snippet-description")]', 0, default='')
pub_date_raw = eval_xpath(result, 'substring-before(.//div[contains(@class, "snippet-description")], "-")')
img_src = eval_xpath_getindex(result, './/img[contains(@class, "thumb")]/@src', 0, default='')
thumbnail = eval_xpath_getindex(result, './/img[contains(@class, "thumb")]/@src', 0, default='')
item = {
'url': url,
'title': extract_text(title_tag),
'content': extract_text(content_tag),
'publishedDate': _extract_published_date(pub_date_raw),
'img_src': img_src,
'thumbnail': thumbnail,
}
video_tag = eval_xpath_getindex(
@ -324,7 +324,7 @@ def _parse_search(resp):
)
item['publishedDate'] = _extract_published_date(pub_date_raw)
else:
item['img_src'] = eval_xpath_getindex(video_tag, './/img/@src', 0, default='')
item['thumbnail'] = eval_xpath_getindex(video_tag, './/img/@src', 0, default='')
result_list.append(item)
@ -351,7 +351,7 @@ def _parse_news(json_resp):
'publishedDate': _extract_published_date(result['age']),
}
if result['thumbnail'] is not None:
item['img_src'] = result['thumbnail']['src']
item['thumbnail'] = result['thumbnail']['src']
result_list.append(item)
return result_list

@ -47,7 +47,7 @@ def response(resp):
'url': base_url + ("_/" if is_official else "r/") + item.get("slug", ""),
'title': item.get("name"),
'content': item.get("short_description"),
'img_src': item["logo_url"].get("large") or item["logo_url"].get("small"),
'thumbnail': item["logo_url"].get("large") or item["logo_url"].get("small"),
'package_name': item.get("name"),
'maintainer': item["publisher"].get("name"),
'publishedDate': parser.parse(item.get("updated_at") or item.get("created_at")),

@ -47,8 +47,8 @@ def response(resp):
+ ' - '
+ extract_text(app.xpath('./div/div/span[@class="package-license"]')).strip()
)
app_img_src = app.xpath('./img[@class="package-icon"]/@src')[0]
thumbnail = app.xpath('./img[@class="package-icon"]/@src')[0]
results.append({'url': app_url, 'title': app_title, 'content': app_content, 'img_src': app_img_src})
results.append({'url': app_url, 'title': app_title, 'content': app_content, 'thumbnail': thumbnail})
return results

@ -50,7 +50,7 @@ def parse_lyric(hit):
'url': hit['result']['url'],
'title': hit['result']['full_title'],
'content': content,
'img_src': hit['result']['song_art_image_thumbnail_url'],
'thumbnail': hit['result']['song_art_image_thumbnail_url'],
}
if timestamp:
result.update({'publishedDate': datetime.fromtimestamp(timestamp)})
@ -68,7 +68,7 @@ def parse_artist(hit):
'url': hit['result']['url'],
'title': hit['result']['name'],
'content': '',
'img_src': hit['result']['image_url'],
'thumbnail': hit['result']['image_url'],
}
return result
@ -84,7 +84,7 @@ def parse_album(hit):
return {
'url': res['url'],
'title': res['full_title'],
'img_src': res['cover_art_url'],
'thumbnail': res['cover_art_url'],
'content': content.strip(),
}

@ -50,7 +50,7 @@ def response(resp):
'url': item.get('html_url'),
'title': item.get('full_name'),
'content': ' / '.join(content),
'img_src': item.get('owner', {}).get('avatar_url'),
'thumbnail': item.get('owner', {}).get('avatar_url'),
'package_name': item.get('name'),
# 'version': item.get('updated_at'),
'maintainer': item.get('owner', {}).get('login'),

@ -48,7 +48,7 @@ def response(resp):
{
'url': base_url + extract_text(eval_xpath(result, url_xpath)),
'title': extract_text(eval_xpath(result, title_xpath)),
'img_src': extract_text(eval_xpath(result, thumbnail_xpath)),
'thumbnail': extract_text(eval_xpath(result, thumbnail_xpath)),
'content': extract_text(eval_xpath(result, info_text_xpath)),
'metadata': extract_text(eval_xpath(result, author_xpath)),
}

@ -365,17 +365,17 @@ def response(resp):
logger.debug('ignoring item from the result_xpath list: missing content of title "%s"', title)
continue
img_src = content_nodes[0].xpath('.//img/@src')
if img_src:
img_src = img_src[0]
if img_src.startswith('data:image'):
thumbnail = content_nodes[0].xpath('.//img/@src')
if thumbnail:
thumbnail = thumbnail[0]
if thumbnail.startswith('data:image'):
img_id = content_nodes[0].xpath('.//img/@id')
if img_id:
img_src = data_image_map.get(img_id[0])
thumbnail = data_image_map.get(img_id[0])
else:
img_src = None
thumbnail = None
results.append({'url': url, 'title': title, 'content': content, 'img_src': img_src})
results.append({'url': url, 'title': title, 'content': content, 'thumbnail': thumbnail})
except Exception as e: # pylint: disable=broad-except
logger.error(e, exc_info=True)

@ -165,14 +165,14 @@ def response(resp):
# "https://lh3.googleusercontent.com/DjhQh7DMszk.....z=-p-h100-w100"
# These URL are long but not personalized (double checked via tor).
img_src = extract_text(result.xpath('preceding-sibling::a/figure/img/@src'))
thumbnail = extract_text(result.xpath('preceding-sibling::a/figure/img/@src'))
results.append(
{
'url': href,
'title': title,
'content': content,
'img_src': img_src,
'thumbnail': thumbnail,
}
)

@ -64,13 +64,13 @@ def response_movies(resp):
title = extract_text(eval_xpath(div_2, './div[@title]'))
metadata = extract_text(eval_xpath(div_2, './div[@class]'))
img = eval_xpath(div_1, './/img')[0]
img_src = img.get('src')
thumbnail = img.get('src')
results.append(
{
"url": url,
"title": title,
"content": sec_name,
"img_src": img_src,
"thumbnail": thumbnail,
'metadata': metadata,
'template': 'videos.html',
}

@ -107,8 +107,8 @@ def response(resp):
# parse results
for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'):
img_src = eval_xpath_getindex(result, './/img/@src', 0, None)
if img_src is None:
thumbnail = eval_xpath_getindex(result, './/img/@src', 0, None)
if thumbnail is None:
continue
title = extract_text(eval_xpath_getindex(result, './/a/h3[1]', 0))
@ -124,7 +124,7 @@ def response(resp):
'title': title,
'content': content,
'author': pub_info,
'thumbnail': img_src,
'thumbnail': thumbnail,
'template': 'videos.html',
}
)

@ -90,7 +90,7 @@ def response(resp):
"title": title,
"url": href_base.format(category=categ, entry_id=entry_id),
"content": content,
"img_src": image_url,
"thumbnail": image_url,
}
)

@ -91,7 +91,7 @@ def _get_communities(json):
'url': result['community']['actor_id'],
'title': result['community']['title'],
'content': markdown_to_text(result['community'].get('description', '')),
'img_src': result['community'].get('icon', result['community'].get('banner')),
'thumbnail': result['community'].get('icon', result['community'].get('banner')),
'publishedDate': datetime.strptime(counts['published'][:19], '%Y-%m-%dT%H:%M:%S'),
'metadata': metadata,
}
@ -120,9 +120,9 @@ def _get_posts(json):
for result in json["posts"]:
user = result['creator'].get('display_name', result['creator']['name'])
img_src = None
thumbnail = None
if result['post'].get('thumbnail_url'):
img_src = result['post']['thumbnail_url'] + '?format=webp&thumbnail=208'
thumbnail = result['post']['thumbnail_url'] + '?format=webp&thumbnail=208'
metadata = (
f"&#x25B2; {result['counts']['upvotes']} &#x25BC; {result['counts']['downvotes']}"
@ -140,7 +140,7 @@ def _get_posts(json):
'url': result['post']['ap_id'],
'title': result['post']['name'],
'content': content,
'img_src': img_src,
'thumbnail': thumbnail,
'publishedDate': datetime.strptime(result['post']['published'][:19], '%Y-%m-%dT%H:%M:%S'),
'metadata': metadata,
}

@ -44,7 +44,7 @@ def response(resp):
'url': r_url,
'title': result['name'],
'iframe_src': iframe_src.format(url=r_url),
'img_src': result['pictures']['medium'],
'thumbnail': result['pictures']['medium'],
'publishedDate': publishedDate,
'content': result['user']['name'],
}

@ -104,11 +104,11 @@ def response(resp):
item['metadata'] = html_to_text(result.get('meta_short', ''))
if result.get('image'):
item['img_src'] = image_url.format(image_id=result['image'], filename=result['image_filename'])
item['thumbnail'] = image_url.format(image_id=result['image'], filename=result['image_filename'])
else:
item['url'] = result['url']
item['content'] = ', '.join([result['class'], result['info'], result['more']])
item['img_src'] = result['image']
item['thumbnail'] = result['image']
results.append(item)

@ -178,7 +178,7 @@ def response(resp):
continue
url, osm, geojson = get_url_osm_geojson(result)
img_src = get_thumbnail(get_img_src(result))
thumbnail = get_thumbnail(get_img_src(result))
links, link_keys = get_links(result, user_language)
data = get_data(result, user_language, link_keys)
@ -191,7 +191,7 @@ def response(resp):
'url': url,
'osm': osm,
'geojson': geojson,
'img_src': img_src,
'thumbnail': thumbnail,
'links': links,
'data': data,
'type': get_tag_label(result.get('category'), result.get('type', ''), user_language),

@ -65,18 +65,18 @@ def construct_body(result):
page='',
year=result['release_year'],
)
img_src = pdbe_preview_url.format(pdb_id=result['pdb_id'])
thumbnail = pdbe_preview_url.format(pdb_id=result['pdb_id'])
except KeyError:
content = None
img_src = None
thumbnail = None
# construct url for preview image
try:
img_src = pdbe_preview_url.format(pdb_id=result['pdb_id'])
thumbnail = pdbe_preview_url.format(pdb_id=result['pdb_id'])
except KeyError:
img_src = None
thumbnail = None
return [title, content, img_src]
return [title, content, thumbnail]
def response(resp):
@ -106,16 +106,16 @@ def response(resp):
)
# obsoleted entries don't have preview images
img_src = None
thumbnail = None
else:
title, content, img_src = construct_body(result)
title, content, thumbnail = construct_body(result)
results.append(
{
'url': pdbe_entry_url.format(pdb_id=result['pdb_id']),
'title': title,
'content': content,
'img_src': img_src,
'thumbnail': thumbnail,
}
)

@ -151,7 +151,7 @@ def response(resp):
elif piped_filter == 'music_songs':
item["template"] = "default.html"
item["img_src"] = result.get("thumbnail", "")
item["thumbnail"] = result.get("thumbnail", "")
item["content"] = result.get("uploaderName", "") or ""
results.append(item)

@ -162,7 +162,7 @@ def parse_search_query(json_results):
result = {
'url': item['link'],
'title': item['title'],
'img_src': item['image'],
'thumbnail': item['image'],
'content': '',
'metadata': item.get('source'),
}
@ -244,7 +244,7 @@ def response(resp):
'url': item.get('link'),
'content': '',
'metadata': ' / '.join(metadata),
'img_src': item.get('image'),
'thumbnail': item.get('image'),
}
)
@ -257,7 +257,7 @@ def response(resp):
'url': item.get('link'),
'content': item.get('description', ''),
'metadata': ' / '.join(metadata),
'img_src': item.get('image'),
'thumbnail': item.get('image'),
}
)

@ -242,15 +242,15 @@ def parse_web_api(resp):
if pub_date is not None:
pub_date = datetime.fromtimestamp(pub_date)
news_media = item.get('media', [])
img_src = None
thumbnail = None
if news_media:
img_src = news_media[0].get('pict', {}).get('url', None)
thumbnail = news_media[0].get('pict', {}).get('url', None)
results.append(
{
'title': title,
'url': res_url,
'publishedDate': pub_date,
'img_src': img_src,
'thumbnail': thumbnail,
}
)

@ -114,7 +114,7 @@ def response(resp):
{
'url': url,
'title': result['name'],
'img_src': result.get('favicon', '').replace("http://", "https://"),
'thumbnail': result.get('favicon', '').replace("http://", "https://"),
'content': ' | '.join(content),
'metadata': ' | '.join(metadata),
'iframe_src': result['url_resolved'].replace("http://", "https://"),

@ -133,7 +133,7 @@ def response(resp):
)
if mtype in ['image'] and subtype in ['bmp', 'gif', 'jpeg', 'png']:
item['img_src'] = url
item['thumbnail'] = url
results.append(item)

@ -22,7 +22,7 @@ base_url = "https://www.rottentomatoes.com"
results_xpath = "//search-page-media-row"
url_xpath = "./a[1]/@href"
title_xpath = "./a/img/@alt"
img_src_xpath = "./a/img/@src"
thumbnail_xpath = "./a/img/@src"
release_year_xpath = "concat('From ', string(./@releaseyear))"
score_xpath = "concat('Score: ', string(./@tomatometerscore))"
cast_xpath = "concat('Starring ', string(./@cast))"
@ -52,7 +52,7 @@ def response(resp):
'url': extract_text(eval_xpath(result, url_xpath)),
'title': extract_text(eval_xpath(result, title_xpath)),
'content': ', '.join(content),
'img_src': extract_text(eval_xpath(result, img_src_xpath)),
'thumbnail': extract_text(eval_xpath(result, thumbnail_xpath)),
}
)

@ -77,8 +77,7 @@ def response(resp):
{
'url': url + 'structure/' + result['id'],
'title': result['label'],
# 'thumbnail': thumbnail,
'img_src': thumbnail,
'thumbnail': thumbnail,
'content': html_to_text(content),
}
)

@ -94,9 +94,9 @@ def response(resp):
'publishedDate': parser.parse(result['last_modified']),
'iframe_src': "https://w.soundcloud.com/player/?url=" + uri,
}
img_src = result['artwork_url'] or result['user']['avatar_url']
if img_src:
res['img_src'] = img_src
thumbnail = result['artwork_url'] or result['user']['avatar_url']
if thumbnail:
res['thumbnail'] = thumbnail
results.append(res)
return results

@ -135,7 +135,7 @@ title_xpath = None
'''`XPath selector`_ of result's ``title``.'''
thumbnail_xpath = False
'''`XPath selector`_ of result's ``img_src``.'''
'''`XPath selector`_ of result's ``thumbnail``.'''
suggestion_xpath = ''
'''`XPath selector`_ of result's ``suggestion``.'''
@ -266,7 +266,7 @@ def response(resp): # pylint: disable=too-many-branches
if thumbnail_xpath:
thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
if len(thumbnail_xpath_result) > 0:
tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)
tmp_result['thumbnail'] = extract_url(thumbnail_xpath_result, search_url)
# add alternative cached url if available
if cached_xpath:

@ -77,9 +77,9 @@ def response(resp):
url = parse_url(url)
title = extract_text(result.xpath('.//h4/a'))
content = extract_text(result.xpath('.//p'))
img_src = eval_xpath_getindex(result, './/img/@data-src', 0, None)
thumbnail = eval_xpath_getindex(result, './/img/@data-src', 0, None)
item = {'url': url, 'title': title, 'content': content, 'img_src': img_src}
item = {'url': url, 'title': title, 'content': content, 'thumbnail': thumbnail}
pub_date = extract_text(result.xpath('.//span[contains(@class,"s-time")]'))
ago = AGO_RE.search(pub_date)

@ -52,11 +52,11 @@ def response(resp):
if description is not None:
content = markdown_to_text(description['text'])
img_src = None
thumbnail = None
if result['display']['images']:
img_src = result['display']['images'][0]
thumbnail = result['display']['images'][0]
elif result['content']['details']['images']:
img_src = result['content']['details']['images'][0]['resizableImageUrl']
thumbnail = result['content']['details']['images'][0]['resizableImageUrl']
url = result['display']['source']['sourceRecipeUrl']
if 'www.yummly.com/private' in url:
@ -67,7 +67,7 @@ def response(resp):
'url': url,
'title': result['display']['displayName'],
'content': content,
'img_src': img_src,
'thumbnail': thumbnail,
'metadata': gettext('Language') + f": {result['locale'].split('-')[0]}",
}
)

@ -141,9 +141,12 @@ def _parse_result(item) -> Dict[str, Any]:
"authors": [extract_text(author) for author in author_elements],
"publisher": _text(item, './/a[@title="Publisher"]'),
"type": _text(item, './/div[contains(@class, "property__file")]//div[contains(@class, "property_value")]'),
"img_src": _text(item, './/img[contains(@class, "cover")]/@data-src'),
}
thumbnail = _text(item, './/img[contains(@class, "cover")]/@data-src')
if not thumbnail.startswith('/'):
result["thumbnail"] = thumbnail
year = _text(item, './/div[contains(@class, "property_year")]//div[contains(@class, "property_value")]')
if year:
result["publishedDate"] = datetime.strptime(year, '%Y')

@ -281,24 +281,12 @@ article[data-vim-selected].category-social {
color: var(--color-result-description-highlight-font);
}
img {
&.thumbnail {
.ltr-float-left();
padding-top: 0.6rem;
.ltr-padding-right(1rem);
width: 20rem;
height: unset; // remove heigth value that was needed for lazy loading
}
&.image {
.ltr-float-left();
padding-top: 0.6rem;
.ltr-padding-right(1rem);
width: 7rem;
max-height: 7rem;
object-fit: scale-down;
object-position: right top;
}
img.thumbnail {
.ltr-float-left();
padding-top: 0.6rem;
.ltr-padding-right(1rem);
width: 7rem;
height: unset; // remove height value that was needed for lazy loading
}
.break {
@ -394,6 +382,16 @@ article[data-vim-selected].category-social {
padding: 10px 0 0 0;
}
.result-videos {
img.thumbnail {
.ltr-float-left();
padding-top: 0.6rem;
.ltr-padding-right(1rem);
width: 20rem;
height: unset; // remove height value that was needed for lazy loading
}
}
.result-videos .content {
overflow: hidden;
}

@ -25,8 +25,7 @@
<span class="url_o{{loop.index}}"><span class="url_i{{loop.index}}">{{- part -}}</span></span>
{%- endfor %}
{{- result_close_link() -}}
{%- if result.img_src %}{{ result_open_link(result.url) }}<img class="image" src="{{ image_proxify(result.img_src) }}" title="{{ result.title|striptags }}" loading="lazy" width="200" height="200">{{ result_close_link() }}{% endif -%}
{%- if result.thumbnail %}{{ result_open_link(result.url) }}<img class="thumbnail" src="{{ image_proxify(result.thumbnail) }}" title="{{ result.title|striptags }}" loading="lazy" width="200" height="200">{{ result_close_link() }}{% endif -%}
{%- if result.thumbnail %}{{ result_open_link(result.url) }}<img class="thumbnail" src="{{ image_proxify(result.thumbnail) }}" title="{{ result.title|striptags }}" loading="lazy">{{ result_close_link() }}{% endif -%}
<h3>{{ result_link(result.url, result.title|safe) }}</h3>
{%- endmacro -%}
