"""
|
|
LLaMA intermediate layer
|
|
Based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
|
|
See commit history for authorship.
|
|
"""
|
|
from typing import Optional, Tuple
|
|
|
|
import torch
|
|
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaModel
|
|
|
|
|
|
class WrappedLlamaBlock(LlamaDecoderLayer):
    def forward(
        self,
        hidden_states: torch.Tensor,
        *args,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        batch_size, seq_length, _ = hidden_states.shape
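
        # The incoming cache (`layer_past`) arrives in Bloom-style layout:
        # keys are (batch * heads, head_dim, seq_len), values are
        # (batch * heads, seq_len, head_dim). Track how many positions are
        # already cached, then convert to LLaMA's (batch, heads, seq_len,
        # head_dim) layout before running the stock decoder layer.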
        seq_length_with_past = seq_length
        past_key_values_length = 0

        past_key_value = layer_past
        if past_key_value is not None:
            past_key_values_length = past_key_value[0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length
            past_key_value = self._reorder_cache_from_bloom_to_llama(past_key_value, batch_size, past_key_values_length)
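
        # Position ids default to the range [cache_len, cache_len + seq_len),
        # so rotary embeddings line up with the tokens already in the cache.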
        if position_ids is None:
            device = hidden_states.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=hidden_states.device
            )
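        # `_prepare_decoder_attention_mask` does not use `self` in the
        # transformers versions this targets, so it is called unbound with
        # None; it expands the 2-D padding mask into the 4-D causal mask.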
        attention_mask = LlamaModel._prepare_decoder_attention_mask(
            None, attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length
        )
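
        # Delegate to the unmodified LlamaDecoderLayer forward pass with the
        # converted cache, mask, and position ids.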
        outputs = super().forward(
            hidden_states,
            *args,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            use_cache=use_cache,
            **kwargs,
        )
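
        # When caching, the decoder layer appends the present key/value pair
        # as the last element of `outputs`; convert it back to Bloom-style
        # layout so the cache round-trips through this wrapper unchanged.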
        if use_cache:
            present_key_value = outputs[-1]
            present_key_value = self._reorder_cache_from_llama_to_bloom(
                present_key_value, batch_size, seq_length_with_past
            )
            outputs = outputs[:-1] + (present_key_value,)

        return outputs
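
    # Shape bookkeeping for the two cache layouts handled below:
    #   Bloom-style: key (batch * heads, head_dim, seq_len), value (batch * heads, seq_len, head_dim)
    #   LLaMA-style: key and value both (batch, heads, seq_len, head_dim)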
    def _reorder_cache_from_bloom_to_llama(
        self, key_value: Tuple[torch.Tensor], batch_size: int, seq_length: int
    ) -> Tuple[torch.Tensor]:
        key_states, value_states = key_value
        key_states = key_states.permute(0, 2, 1)
        key_states = key_states.view(
            batch_size, self.self_attn.num_key_value_heads, seq_length, self.self_attn.head_dim
        )
        value_states = value_states.view(*key_states.shape)
        return (key_states, value_states)

    def _reorder_cache_from_llama_to_bloom(
        self, key_value: Tuple[torch.Tensor], batch_size: int, seq_length: int
    ) -> Tuple[torch.Tensor]:
        key_states, value_states = key_value
        value_states = value_states.view(
            batch_size * self.self_attn.num_key_value_heads, seq_length, self.self_attn.head_dim
        )
        key_states = key_states.view(*value_states.shape)
        key_states = key_states.permute(0, 2, 1)
        return (key_states, value_states)
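
# Usage sketch (an assumption, not part of this module: it presumes a
# transformers release, e.g. the 4.31.x line, whose LlamaDecoderLayer and
# LlamaModel._prepare_decoder_attention_mask match the hooks used above;
# the config values are illustrative only):
#
#     from transformers import LlamaConfig
#
#     config = LlamaConfig(
#         hidden_size=256, intermediate_size=512,
#         num_attention_heads=4, num_key_value_heads=4, num_hidden_layers=1,
#     )
#     block = WrappedLlamaBlock(config)
#     hidden = torch.randn(1, 8, config.hidden_size)
#     hidden_out, bloom_cache = block(hidden, use_cache=True)
#     # `bloom_cache` can be fed back in via `layer_past` on the next call;
#     # no reshaping is needed on the caller's side.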