Update peft to 0.5.0 version (#475)

Update peft to 0.5.0
pull/476/head
Artem Chumachenko 9 months ago committed by GitHub
parent a9b0e9ff1a
commit a14ae7334d

@@ -33,7 +33,7 @@ python_requires = >=3.8
 install_requires =
     torch>=1.12
     bitsandbytes==0.41.1
-    accelerate>=0.20.3,<0.21.0
+    accelerate>=0.22.0
     huggingface-hub>=0.11.1,<1.0.0
     tokenizers>=0.13.3
     transformers>=4.31.0,<5.0.0  # if you change this, please also change version assert in petals/__init__.py
@@ -46,7 +46,7 @@ install_requires =
     cpufeature>=0.2.0
     packaging>=20.9
     sentencepiece>=0.1.99
-    peft==0.4.0
+    peft>=0.5.0
     safetensors>=0.3.1
     Dijkstar>=2.6.0
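The comment on the transformers pin above refers to a runtime check in petals/__init__.py that must stay in sync with setup.cfg. A minimal sketch of what such a check can look like, assuming it uses packaging.version (the exact bounds and error message in the real file may differ):

from packaging import version

import transformers

# Keep these bounds in sync with the transformers pin in setup.cfg.
assert version.parse("4.31.0") <= version.parse(transformers.__version__) < version.parse("5.0.0"), (
    "Please install a supported transformers version: pip install 'transformers>=4.31.0,<5.0.0'"
)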

@@ -10,8 +10,9 @@ import transformers
 from accelerate import init_empty_weights
 from hivemind.utils.logging import get_logger
 from huggingface_hub import HfFileSystem, get_hf_file_metadata, hf_hub_url
+from peft.config import PeftConfig
 from peft.tuners import lora
-from peft.utils import COMMON_LAYERS_PATTERN, CONFIG_NAME, SAFETENSORS_WEIGHTS_NAME, PeftConfig
+from peft.utils import COMMON_LAYERS_PATTERN, CONFIG_NAME, SAFETENSORS_WEIGHTS_NAME
 from safetensors import safe_open
 from safetensors.torch import load_file
 from transformers.utils import get_file_from_repo
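The only import-level change is where PeftConfig comes from: with peft 0.5.0 it is imported from peft.config, whereas the code previously relied on the re-export from peft.utils. A small compatibility sketch, only relevant if one wanted to support both ranges (Petals itself now simply requires peft>=0.5.0):

try:
    # peft >= 0.5.0
    from peft.config import PeftConfig
except ImportError:
    # older peft releases re-exported PeftConfig from peft.utils
    from peft.utils import PeftConfig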
@@ -155,15 +156,15 @@ class AdapterContextMixin:
 using_adapter = AdapterContextMixin.using_adapter

-class LoraLinear(lora.Linear, AdapterContextMixin):
+class LoraLinear(AdapterContextMixin, lora.Linear):
     """LoRA linear layer that uses adapter selected via using_adapter"""

-class LoraLinear8bitLt(lora.Linear8bitLt, AdapterContextMixin):
+class LoraLinear8bitLt(AdapterContextMixin, lora.Linear8bitLt):
     """LoRA linear 8-bit with outliers that uses adapter selected via using_adapter"""

-class LoraLinear4bit(lora.Linear4bit, AdapterContextMixin):
+class LoraLinear4bit(AdapterContextMixin, lora.Linear4bit):
     """LoRA linear 4-bit that uses adapter selected via using_adapter"""
