use logger instead of logging (#8225)

# What
- Use `logger` instead of using logging directly.

- Issue: None
- Dependencies: None
- Tag maintainer: @baskaryan
- Twitter handle: @MlopsJ
shibuiwilliam 2023-07-25 22:55:30 +09:00 committed by GitHub
parent afc55a4fee
commit bed8eb978e
5 changed files with 20 additions and 14 deletions
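
Why it matters: `logging.warning(...)` and friends log through the root logger, so applications embedding LangChain cannot filter or redirect these messages separately. With `logging.getLogger(__name__)`, records carry the emitting module's name and can be tuned per package. A minimal sketch, assuming the touched modules live under the `langchain` package as their imports suggest (the exact logger names are an assumption):

```python
import logging

# Send log records somewhere visible with a default threshold.
logging.basicConfig(level=logging.WARNING)

# Turn up verbosity for everything under the "langchain" namespace only
# (assumed package prefix; the modules in this PR import from `langchain.*`).
logging.getLogger("langchain").setLevel(logging.DEBUG)

# ...or silence one noisy subtree without touching the rest of the application.
logging.getLogger("langchain.vectorstores").setLevel(logging.ERROR)
```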

View File

@@ -19,6 +19,8 @@ from langchain.callbacks.tracers.schemas import (
 from langchain.schema.messages import get_buffer_string
 from langchain.utils import raise_for_status_with_text
 
+logger = logging.getLogger(__name__)
+
 
 def get_headers() -> Dict[str, Any]:
     """Get the headers for the LangChain API."""
@@ -137,7 +139,7 @@ class LangChainTracerV1(BaseTracer):
             )
             raise_for_status_with_text(response)
         except Exception as e:
-            logging.warning(f"Failed to persist run: {e}")
+            logger.warning(f"Failed to persist run: {e}")
 
     def _persist_session(
         self, session_create: TracerSessionV1Base
@@ -151,7 +153,7 @@ class LangChainTracerV1(BaseTracer):
             )
             session = TracerSessionV1(id=r.json()["id"], **session_create.dict())
         except Exception as e:
-            logging.warning(f"Failed to create session, using default session: {e}")
+            logger.warning(f"Failed to create session, using default session: {e}")
             session = TracerSessionV1(id=1, **session_create.dict())
         return session
@@ -166,7 +168,7 @@ class LangChainTracerV1(BaseTracer):
             tracer_session = TracerSessionV1(**r.json()[0])
         except Exception as e:
             session_type = "default" if not session_name else session_name
-            logging.warning(
+            logger.warning(
                 f"Failed to load {session_type} session, using empty session: {e}"
             )
             tracer_session = TracerSessionV1(id=1)

View File

@@ -11,6 +11,8 @@ from typing import Any, Dict, Iterator, List, Optional
 from langchain.docstore.document import Document
 from langchain.document_loaders.base import BaseLoader
 
+logger = logging.getLogger(__name__)
+
 
 class EverNoteLoader(BaseLoader):
     """EverNote Loader.
@@ -72,7 +74,7 @@ class EverNoteLoader(BaseLoader):
             return html2text.html2text(content).strip()
         except ImportError as e:
-            logging.error(
+            logger.error(
                 "Could not import `html2text`. Although it is not a required package "
                 "to use Langchain, using the EverNote loader requires `html2text`. "
                 "Please install `html2text` via `pip install html2text` and try again."
@@ -133,7 +135,7 @@ class EverNoteLoader(BaseLoader):
         try:
             from lxml import etree
         except ImportError as e:
-            logging.error(
+            logger.error(
                 "Could not import `lxml`. Although it is not a required package to use "
                 "Langchain, using the EverNote loader requires `lxml`. Please install "
                 "`lxml` via `pip install lxml` and try again."
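
Both EverNote hunks follow the same optional-dependency pattern: try the import, log install instructions through the module logger, and propagate the `ImportError`. A minimal sketch of that pattern (the `require_html2text` helper and the re-raise are illustrative; only the logging call appears in the hunks above):

```python
import logging

logger = logging.getLogger(__name__)


def require_html2text():
    """Import an optional dependency, logging a helpful message if it is absent."""
    try:
        import html2text
    except ImportError:
        logger.error(
            "Could not import `html2text`. "
            "Please install it via `pip install html2text` and try again."
        )
        raise
    return html2text
```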

View File

@@ -203,7 +203,7 @@ class OneDriveLoader(BaseLoader, BaseModel):
         for object_id in self.object_ids if self.object_ids else [""]:
             file = drive.get_item(object_id)
             if not file:
-                logging.warning(
+                logger.warning(
                     "There isn't a file with "
                     f"object_id {object_id} in drive {drive}."
                 )

View File

@@ -66,7 +66,7 @@ def _check_redis_module_exist(client: RedisType, required_modules: List[dict]) -
         "Please head to https://redis.io/docs/stack/search/quick_start/"
         "to know more about installing the RediSearch module within Redis Stack."
     )
-    logging.error(error_message)
+    logger.error(error_message)
     raise ValueError(error_message)

View File

@@ -14,6 +14,8 @@ from langchain.embeddings.base import Embeddings
 from langchain.schema import Document
 from langchain.vectorstores.base import VectorStore, VectorStoreRetriever
 
+logger = logging.getLogger(__name__)
+
 
 class Vectara(VectorStore):
     """Implementation of Vector Store using Vectara.
@@ -51,12 +53,12 @@ class Vectara(VectorStore):
             or self._vectara_corpus_id is None
             or self._vectara_api_key is None
         ):
-            logging.warning(
+            logger.warning(
                 "Can't find Vectara credentials, customer_id or corpus_id in "
                 "environment."
             )
         else:
-            logging.debug(f"Using corpus id {self._vectara_corpus_id}")
+            logger.debug(f"Using corpus id {self._vectara_corpus_id}")
         self._session = requests.Session()  # to reuse connections
         adapter = requests.adapters.HTTPAdapter(max_retries=3)
         self._session.mount("http://", adapter)
@@ -96,7 +98,7 @@ class Vectara(VectorStore):
             headers=self._get_post_headers(),
         )
         if response.status_code != 200:
-            logging.error(
+            logger.error(
                 f"Delete request failed for doc_id = {doc_id} with status code "
                 f"{response.status_code}, reason {response.reason}, text "
                 f"{response.text}"
@@ -152,7 +154,7 @@ class Vectara(VectorStore):
         doc_ids = []
         for inx, file in enumerate(files_list):
             if not os.path.exists(file):
-                logging.error(f"File {file} does not exist, skipping")
+                logger.error(f"File {file} does not exist, skipping")
                 continue
             md = metadatas[inx] if metadatas else {}
             files: dict = {
@@ -170,14 +172,14 @@ class Vectara(VectorStore):
             if response.status_code == 409:
                 doc_id = response.json()["document"]["documentId"]
-                logging.info(
+                logger.info(
                     f"File {file} already exists on Vectara (doc_id={doc_id}), skipping"
                 )
             elif response.status_code == 200:
                 doc_id = response.json()["document"]["documentId"]
                 doc_ids.append(doc_id)
             else:
-                logging.info(f"Error indexing file {file}: {response.json()}")
+                logger.info(f"Error indexing file {file}: {response.json()}")
 
         return doc_ids
@@ -290,7 +292,7 @@ class Vectara(VectorStore):
         )
         if response.status_code != 200:
-            logging.error(
+            logger.error(
                 "Query failed %s",
                 f"(code {response.status_code}, reason {response.reason}, details "
                 f"{response.text})",