From a14ae7334d9d9a59730e11eb5d0ccec22e425bed Mon Sep 17 00:00:00 2001
From: Artem Chumachenko
Date: Wed, 23 Aug 2023 20:21:28 +0400
Subject: [PATCH] Update peft to 0.5.0 version (#475)

Update peft to 0.5.0
---
 setup.cfg                | 4 ++--
 src/petals/utils/peft.py | 9 +++++----
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/setup.cfg b/setup.cfg
index 69dcc6e..118b446 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -33,7 +33,7 @@ python_requires = >=3.8
 install_requires =
     torch>=1.12
     bitsandbytes==0.41.1
-    accelerate>=0.20.3,<0.21.0
+    accelerate>=0.22.0
     huggingface-hub>=0.11.1,<1.0.0
     tokenizers>=0.13.3
     transformers>=4.31.0,<5.0.0  # if you change this, please also change version assert in petals/__init__.py
@@ -46,7 +46,7 @@ install_requires =
     cpufeature>=0.2.0
     packaging>=20.9
     sentencepiece>=0.1.99
-    peft==0.4.0
+    peft>=0.5.0
     safetensors>=0.3.1
     Dijkstar>=2.6.0

diff --git a/src/petals/utils/peft.py b/src/petals/utils/peft.py
index da25623..c7e3d05 100644
--- a/src/petals/utils/peft.py
+++ b/src/petals/utils/peft.py
@@ -10,8 +10,9 @@ import transformers
 from accelerate import init_empty_weights
 from hivemind.utils.logging import get_logger
 from huggingface_hub import HfFileSystem, get_hf_file_metadata, hf_hub_url
+from peft.config import PeftConfig
 from peft.tuners import lora
-from peft.utils import COMMON_LAYERS_PATTERN, CONFIG_NAME, SAFETENSORS_WEIGHTS_NAME, PeftConfig
+from peft.utils import COMMON_LAYERS_PATTERN, CONFIG_NAME, SAFETENSORS_WEIGHTS_NAME
 from safetensors import safe_open
 from safetensors.torch import load_file
 from transformers.utils import get_file_from_repo
@@ -155,15 +156,15 @@ class AdapterContextMixin:
 using_adapter = AdapterContextMixin.using_adapter
 
 
-class LoraLinear(lora.Linear, AdapterContextMixin):
+class LoraLinear(AdapterContextMixin, lora.Linear):
     """LoRA linear layer that uses adapter selected via using_adapter"""
 
 
-class LoraLinear8bitLt(lora.Linear8bitLt, AdapterContextMixin):
+class LoraLinear8bitLt(AdapterContextMixin, lora.Linear8bitLt):
     """LoRA linear 8-bit with outliers that uses adapter selected via using_adapter"""
 
 
-class LoraLinear4bit(lora.Linear4bit, AdapterContextMixin):
+class LoraLinear4bit(AdapterContextMixin, lora.Linear4bit):
     """LoRA linear 4-bit that uses adapter selected via using_adapter"""
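
Note on the import change: as the diff shows, peft 0.5.0 no longer re-exports
PeftConfig from peft.utils; it is imported from peft.config instead. A minimal
compatibility sketch, hypothetical and not part of this patch, for code that
has to run against both version ranges:

    # Hypothetical shim (not used by Petals, which pins peft>=0.5.0):
    try:
        from peft.config import PeftConfig  # peft >= 0.5.0 keeps the class here
    except ImportError:
        from peft.utils import PeftConfig  # peft < 0.5.0 re-exported it from utils

Since setup.cfg now requires peft>=0.5.0, the patch imports directly from
peft.config rather than carrying a shim like this.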
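
Note on the flipped base-class order: Python resolves attributes left to right
along the MRO, so the mixin's active_adapter property only takes precedence
over an active_adapter attribute defined by peft's own layer classes when
AdapterContextMixin is listed first. A minimal, self-contained sketch of the
mechanism (standalone Python with stand-in classes, not peft's real internals):

    class Base:
        # Stands in for a peft 0.5.0 LoRA layer that defines active_adapter
        # at class level and assigns it during __init__.
        active_adapter = "default"

        def __init__(self):
            self.active_adapter = "default"

    class Mixin:
        # Stands in for AdapterContextMixin: the adapter comes from shared context.
        _context_adapter = "adapter-from-context"

        @property
        def active_adapter(self):
            return type(self)._context_adapter

        @active_adapter.setter
        def active_adapter(self, value):
            pass  # ignore assignments made by the base layer's __init__

    class MixinLast(Base, Mixin):  # pre-patch order
        pass

    class MixinFirst(Mixin, Base):  # patched order
        pass

    assert MixinLast().active_adapter == "default"  # Base's plain attribute shadows the property
    assert MixinFirst().active_adapter == "adapter-from-context"  # the property wins the MRO lookup

With the old order, lora.Linear came first in the MRO, so its own
active_adapter could shadow the mixin's property and the using_adapter()
context would not be respected; listing the mixin first restores the
context-driven adapter selection.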