Hide excess key message (#476)
Before:

```python
Aug 23 23:51:31.394 [INFO] Loaded Maykeye/TinyLLama-v0 block 0, _IncompatibleKeys(missing_keys=[], unexpected_keys=['self_attn.rotary_emb.inv_freq'])
```

After:

```python
Aug 23 23:51:31.394 [INFO] Loaded Maykeye/TinyLLama-v0 block 0
```

Hiding this message because excess keys in Llama-based models are expected as of the latest transformers release.
parent a14ae7334d · commit df8ab09ca2
```diff
@@ -74,7 +74,8 @@ def load_pretrained_block(
             param = param.to(torch_dtype)
         set_module_tensor_to_device(block, param_name, "cpu", value=param, dtype=param.dtype)
 
-    logger.info(f"Loaded {model_name} block {block_index}, {report}")
+    logger.info(f"Loaded {model_name} block {block_index}")
+    logger.debug(f"Details: {report}")
     return block
```
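For reference, the logging pattern this diff adopts can be reproduced with the minimal sketch below. `load_block_weights` and its arguments are illustrative stand-ins, not the actual Petals loader; the assumption here is that `report` comes from `torch.nn.Module.load_state_dict(..., strict=False)`, which returns the `_IncompatibleKeys` named tuple seen in the old log line.

```python
import logging

from torch import nn

logger = logging.getLogger(__name__)


def load_block_weights(block: nn.Module, state_dict: dict, model_name: str, block_index: int) -> nn.Module:
    # strict=False tolerates unexpected keys such as 'self_attn.rotary_emb.inv_freq',
    # which recent transformers releases compute on the fly instead of storing.
    report = block.load_state_dict(state_dict, strict=False)

    # Keep the INFO line short; the full _IncompatibleKeys report is only
    # useful when debugging, so it is demoted to DEBUG.
    logger.info(f"Loaded {model_name} block {block_index}")
    logger.debug(f"Details: {report}")
    return block
```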