From df8ab09ca2b5f069b6f1a7a1cb79c0a1ddfce9cd Mon Sep 17 00:00:00 2001
From: Alexander Borzunov
Date: Thu, 24 Aug 2023 00:41:40 +0400
Subject: [PATCH] Hide excess key message (#476)

Before:

```python
Aug 23 23:51:31.394 [INFO] Loaded Maykeye/TinyLLama-v0 block 0, _IncompatibleKeys(missing_keys=[], unexpected_keys=['self_attn.rotary_emb.inv_freq'])
```

After:

```python
Aug 23 23:51:31.394 [INFO] Loaded Maykeye/TinyLLama-v0 block 0
```

Hiding this message because the excess keys in Llama-based models are expected as of the latest transformers release.
---
 src/petals/server/from_pretrained.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/petals/server/from_pretrained.py b/src/petals/server/from_pretrained.py
index c774a02..73956fe 100644
--- a/src/petals/server/from_pretrained.py
+++ b/src/petals/server/from_pretrained.py
@@ -74,7 +74,8 @@ def load_pretrained_block(
             param = param.to(torch_dtype)
         set_module_tensor_to_device(block, param_name, "cpu", value=param, dtype=param.dtype)
 
-    logger.info(f"Loaded {model_name} block {block_index}, {report}")
+    logger.info(f"Loaded {model_name} block {block_index}")
+    logger.debug(f"Details: {report}")
     return block
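For context, a minimal sketch of the INFO-vs-DEBUG split this patch adopts, using the standard Python `logging` module rather than Petals' actual logger setup (the `model_name`, `block_index`, and `report` values below are illustrative stand-ins copied from the log lines above, not live output):

```python
import logging

logging.basicConfig(format="%(asctime)s [%(levelname)s] %(message)s", level=logging.INFO)
logger = logging.getLogger("petals.server.from_pretrained")

# Illustrative stand-ins for the values used in the patched function.
model_name, block_index = "Maykeye/TinyLLama-v0", 0
report = "_IncompatibleKeys(missing_keys=[], unexpected_keys=['self_attn.rotary_emb.inv_freq'])"

logger.info(f"Loaded {model_name} block {block_index}")  # printed at the default INFO level
logger.debug(f"Details: {report}")                       # suppressed unless DEBUG is enabled

# Operators who still want the key report can raise verbosity:
logger.setLevel(logging.DEBUG)
logger.debug(f"Details: {report}")                       # now printed
```

Petals routes its logs through hivemind's logging utilities, so the exact setup differs from this sketch, but the level semantics are the same: the summary stays visible by default, and the `_IncompatibleKeys` report is only shown when debug logging is enabled.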