diff --git a/cps/metadata_provider/amazon.py b/cps/metadata_provider/amazon.py
index 558edebc..5c74cf71 100644
--- a/cps/metadata_provider/amazon.py
+++ b/cps/metadata_provider/amazon.py
@@ -19,15 +19,20 @@
 import concurrent.futures
 import requests
 from bs4 import BeautifulSoup as BS  # requirement
+from typing import List, Optional
 try:
     import cchardet #optional for better speed
 except ImportError:
     pass
 
+from cps import logger
 from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata
 #from time import time
 from operator import itemgetter
 
+log = logger.create()
+
+
 class Amazon(Metadata):
     __name__ = "Amazon"
     __id__ = "amazon"
@@ -46,12 +51,16 @@ class Amazon(Metadata):
 
     def search(
         self, query: str, generic_cover: str = "", locale: str = "en"
-    ):
+    ) -> Optional[List[MetaRecord]]:
         #timer=time()
-        def inner(link,index)->[dict,int]:
-            with self.session as session:
-                r = session.get(f"https://www.amazon.com/{link}")
-                r.raise_for_status()
+        def inner(link,index) -> tuple[dict,int]:
+            with self.session as session:
+                try:
+                    r = session.get(f"https://www.amazon.com/{link}")
+                    r.raise_for_status()
+                except Exception as e:
+                    log.warning(e)
+                    return
                 long_soup = BS(r.text, "lxml")  #~4sec :/
                 soup2 = long_soup.find("div", attrs={"cel_widget_id": "dpx-books-ppd_csm_instrumentation_wrapper"})
                 if soup2 is None:
@@ -107,11 +116,15 @@ class Amazon(Metadata):
 
         val = list()
         if self.active:
-            results = self.session.get(
-                f"https://www.amazon.com/s?k={query.replace(' ', '+')}&i=digital-text&sprefix={query.replace(' ', '+')}"
-                f"%2Cdigital-text&ref=nb_sb_noss",
-                headers=self.headers)
-            results.raise_for_status()
+            try:
+                results = self.session.get(
+                    f"https://www.amazon.com/s?k={query.replace(' ', '+')}&i=digital-text&sprefix={query.replace(' ', '+')}"
+                    f"%2Cdigital-text&ref=nb_sb_noss",
+                    headers=self.headers)
+                results.raise_for_status()
+            except Exception as e:
+                log.warning(e)
+                return None
             soup = BS(results.text, 'html.parser')
             links_list = [next(filter(lambda i: "digital-text" in i["href"], x.findAll("a")))["href"] for x in
                           soup.findAll("div", attrs={"data-component-type": "s-search-result"})]
diff --git a/cps/metadata_provider/comicvine.py b/cps/metadata_provider/comicvine.py
index 56618d4b..b4d8d34c 100644
--- a/cps/metadata_provider/comicvine.py
+++ b/cps/metadata_provider/comicvine.py
@@ -21,8 +21,11 @@
 from typing import Dict, List, Optional
 from urllib.parse import quote
 
 import requests
+from cps import logger
 from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata
 
+log = logger.create()
+
 class ComicVine(Metadata):
     __name__ = "ComicVine"
@@ -46,10 +49,15 @@
         if title_tokens:
             tokens = [quote(t.encode("utf-8")) for t in title_tokens]
             query = "%20".join(tokens)
-            result = requests.get(
-                f"{ComicVine.BASE_URL}{query}{ComicVine.QUERY_PARAMS}",
-                headers=ComicVine.HEADERS,
-            )
+            try:
+                result = requests.get(
+                    f"{ComicVine.BASE_URL}{query}{ComicVine.QUERY_PARAMS}",
+                    headers=ComicVine.HEADERS,
+                )
+                result.raise_for_status()
+            except Exception as e:
+                log.warning(e)
+                return None
             for result in result.json()["results"]:
                 match = self._parse_search_result(
                     result=result, generic_cover=generic_cover, locale=locale
diff --git a/cps/metadata_provider/google.py b/cps/metadata_provider/google.py
index fbb68965..98fadd37 100644
--- a/cps/metadata_provider/google.py
+++ b/cps/metadata_provider/google.py
@@ -22,9 +22,12 @@
 from urllib.parse import quote
 
 import requests
+from cps import logger
 from cps.isoLanguages import get_lang3, get_language_name
 from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata
 
+log = logger.create()
+
 
 class Google(Metadata):
     __name__ = "Google"
@@ -45,7 +48,12 @@ class Google(Metadata):
         if title_tokens:
             tokens = [quote(t.encode("utf-8")) for t in title_tokens]
             query = "+".join(tokens)
-            results = requests.get(Google.SEARCH_URL + query)
+            try:
+                results = requests.get(Google.SEARCH_URL + query)
+                results.raise_for_status()
+            except Exception as e:
+                log.warning(e)
+                return None
             for result in results.json().get("items", []):
                 val.append(
                     self._parse_search_result(
diff --git a/cps/metadata_provider/lubimyczytac.py b/cps/metadata_provider/lubimyczytac.py
index 814a785e..e4abe9db 100644
--- a/cps/metadata_provider/lubimyczytac.py
+++ b/cps/metadata_provider/lubimyczytac.py
@@ -27,9 +27,12 @@
 from html2text import HTML2Text
 from lxml.html import HtmlElement, fromstring, tostring
 from markdown2 import Markdown
+from cps import logger
 from cps.isoLanguages import get_language_name
 from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata
 
+log = logger.create()
+
 SYMBOLS_TO_TRANSLATE = (
     "öÖüÜóÓőŐúÚéÉáÁűŰíÍąĄćĆęĘłŁńŃóÓśŚźŹżŻ",
     "oOuUoOoOuUeEaAuUiIaAcCeElLnNoOsSzZzZ",
@@ -112,7 +115,12 @@
         self, query: str, generic_cover: str = "", locale: str = "en"
     ) -> Optional[List[MetaRecord]]:
         if self.active:
-            result = requests.get(self._prepare_query(title=query))
+            try:
+                result = requests.get(self._prepare_query(title=query))
+                result.raise_for_status()
+            except Exception as e:
+                log.warning(e)
+                return None
             root = fromstring(result.text)
             lc_parser = LubimyCzytacParser(root=root, metadata=self)
             matches = lc_parser.parse_search_results()
@@ -200,7 +208,12 @@
     def parse_single_book(
         self, match: MetaRecord, generic_cover: str, locale: str
     ) -> MetaRecord:
-        response = requests.get(match.url)
+        try:
+            response = requests.get(match.url)
+            response.raise_for_status()
+        except Exception as e:
+            log.warning(e)
+            return None
         self.root = fromstring(response.text)
         match.cover = self._parse_cover(generic_cover=generic_cover)
         match.description = self._parse_description()
diff --git a/cps/metadata_provider/scholar.py b/cps/metadata_provider/scholar.py
index b0c10b66..7feb0ee9 100644
--- a/cps/metadata_provider/scholar.py
+++ b/cps/metadata_provider/scholar.py
@@ -28,8 +28,12 @@ try:
 except FakeUserAgentError:
     raise ImportError("No module named 'scholarly'")
 
+from cps import logger
 from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata
 
+log = logger.create()
+
+
 class scholar(Metadata):
     __name__ = "Google Scholar"
     __id__ = "googlescholar"
@@ -44,7 +48,11 @@ class scholar(Metadata):
         if title_tokens:
             tokens = [quote(t.encode("utf-8")) for t in title_tokens]
             query = " ".join(tokens)
-            scholar_gen = itertools.islice(scholarly.search_pubs(query), 10)
+            try:
+                scholar_gen = itertools.islice(scholarly.search_pubs(query), 10)
+            except Exception as e:
+                log.warning(e)
+                return None
             for result in scholar_gen:
                 match = self._parse_search_result(
                     result=result, generic_cover="", locale=locale
diff --git a/cps/search_metadata.py b/cps/search_metadata.py
index d72273f6..d02667d5 100644
--- a/cps/search_metadata.py
+++ b/cps/search_metadata.py
@@ -130,6 +130,6 @@ def metadata_search():
                 if active.get(c.__id__, True)
             }
             for future in concurrent.futures.as_completed(meta):
-                data.extend([asdict(x) for x in future.result()])
+                data.extend([asdict(x) for x in future.result() if x])
     # log.info({'Time elapsed {}'.format(current_milli_time()-start)})
     return Response(json.dumps(data), mimetype="application/json")
diff --git a/cps/static/js/get_meta.js b/cps/static/js/get_meta.js
index 6db1a261..43a40fa6 100644
--- a/cps/static/js/get_meta.js
+++ b/cps/static/js/get_meta.js
@@ -92,14 +92,19 @@ $(function () {
             data: {"query": keyword},
             dataType: "json",
             success: function success(data) {
-                $("#meta-info").html("");
-                data.forEach(function(book) {
-                    var $book = $(templates.bookResult(book));
-                    $book.find("img").on("click", function () {
-                        populateForm(book);
+                if (data.length) {
+                    $("#meta-info").html("");
+                    data.forEach(function(book) {
+                        var $book = $(templates.bookResult(book));
+                        $book.find("img").on("click", function () {
+                            populateForm(book);
+                        });
+                        $("#book-list").append($book);
                     });
-                    $("#book-list").append($book);
-                });
+                }
+                else {
+                    $("#meta-info").html("<p class=\"text-danger\">" + msg.no_result + "!</p>" + $("#meta-info")[0].innerHTML)
+                }
             },
             error: function error() {
                 $("#meta-info").html("<p class=\"text-danger\">" + msg.search_error + "!</p>" + $("#meta-info")[0].innerHTML);