Crystalcareai committed
Commit 764032e
1 Parent(s): e4c23d7

Update modeling_quiet.py

Files changed (1)
modeling_quiet.py  +57 -57
modeling_quiet.py CHANGED
@@ -37,7 +37,7 @@ import transformers
 
 from transformers.activations import ACT2FN
 from transformers.cache_utils import Cache, DynamicCache
-from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
+from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask # _prepare_4d_causal_attention_mask_for_sdpa
 from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
 from transformers.modeling_utils import PreTrainedModel
 from transformers.utils import (
@@ -58,62 +58,62 @@ logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "QuietConfig"
 
 
-# def _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, inputs_embeds, past_key_values_length):
-#     # Compute the attention mask correctly
-#     bsz, tgt_len = input_shape
-
-#     # Create a 4D attention mask from a 2D tensor mask.
-#     # The shape of the output attention mask is (batch_size, 1, tgt_len, src_len)
-#     # The values are either 0 or 1, where 0 means padding and 1 means non-padding.
-#     combined_attention_mask = None
-#     if attention_mask is not None:
-#         # What if attention_mask is not None and has a shape of (batch_size, 1, tgt_len, src_len)
-#         # In this case, we can just use it directly.
-#         if attention_mask.dim() == 4:
-#             combined_attention_mask = attention_mask
-#         # What if attention_mask is not None and has a shape of (batch_size, 1, tgt_len)
-#         # In this case, we need to expand it to (batch_size, 1, tgt_len, src_len)
-#         elif attention_mask.dim() == 3:
-#             expanded_attn_mask = attention_mask[:, None, :, :]
-#             combined_attention_mask = expanded_attn_mask
-#         # What if attention_mask is not None and has a shape of (batch_size, tgt_len)
-#         # In this case, we need to expand it to (batch_size, 1, tgt_len, src_len)
-#         elif attention_mask.dim() == 2:
-#             # Provided a padding mask of dimensions [batch_size, seq_length]
-#             # - if the model is a decoder, apply a causal mask in addition to the padding mask
-#             # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
-#             if past_key_values_length > 0:
-#                 attention_mask = attention_mask.to(dtype=torch.long)
-#                 attention_mask = attention_mask[:, past_key_values_length:]
-#             expanded_attn_mask = attention_mask[:, None, None, :]
-#             combined_attention_mask = expanded_attn_mask
-#         else:
-#             raise ValueError(
-#                 "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
-#                     input_shape, attention_mask.shape
-#                 )
-#             )
-
-#     # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
-#     # masked positions, this operation will create a tensor which is 0.0 for
-#     # positions we want to attend and -10000.0 for masked positions.
-#     # Since we are adding it to the raw scores before the softmax, this is
-#     # effectively the same as removing these entirely.
-#     if combined_attention_mask is not None:
-#         # Ensure the attention mask values are within a reasonable range
-#         combined_attention_mask = combined_attention_mask.clamp(min=0, max=1)
-
-#         # Convert the attention mask to bfloat16
-#         combined_attention_mask = combined_attention_mask.to(torch.bfloat16)
-
-#         # Normalize the attention mask values to be between 0 and 1
-#         combined_attention_mask = (1.0 - combined_attention_mask) * -10000.0
-#     else:
-#         combined_attention_mask = torch.zeros(
-#             (bsz, 1, tgt_len, tgt_len), dtype=torch.bfloat16, device=inputs_embeds.device
-#         )
-
-#     return combined_attention_mask
+def _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, inputs_embeds, past_key_values_length):
+    # Compute the attention mask correctly
+    bsz, tgt_len = input_shape
+
+    # Create a 4D attention mask from a 2D tensor mask.
+    # The shape of the output attention mask is (batch_size, 1, tgt_len, src_len)
+    # The values are either 0 or 1, where 0 means padding and 1 means non-padding.
+    combined_attention_mask = None
+    if attention_mask is not None:
+        # What if attention_mask is not None and has a shape of (batch_size, 1, tgt_len, src_len)
+        # In this case, we can just use it directly.
+        if attention_mask.dim() == 4:
+            combined_attention_mask = attention_mask
+        # What if attention_mask is not None and has a shape of (batch_size, 1, tgt_len)
+        # In this case, we need to expand it to (batch_size, 1, tgt_len, src_len)
+        elif attention_mask.dim() == 3:
+            expanded_attn_mask = attention_mask[:, None, :, :]
+            combined_attention_mask = expanded_attn_mask
+        # What if attention_mask is not None and has a shape of (batch_size, tgt_len)
+        # In this case, we need to expand it to (batch_size, 1, tgt_len, src_len)
+        elif attention_mask.dim() == 2:
+            # Provided a padding mask of dimensions [batch_size, seq_length]
+            # - if the model is a decoder, apply a causal mask in addition to the padding mask
+            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
+            if past_key_values_length > 0:
+                attention_mask = attention_mask.to(dtype=torch.long)
+                attention_mask = attention_mask[:, past_key_values_length:]
+            expanded_attn_mask = attention_mask[:, None, None, :]
+            combined_attention_mask = expanded_attn_mask
+        else:
+            raise ValueError(
+                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
+                    input_shape, attention_mask.shape
+                )
+            )
+
+    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+    # masked positions, this operation will create a tensor which is 0.0 for
+    # positions we want to attend and -10000.0 for masked positions.
+    # Since we are adding it to the raw scores before the softmax, this is
+    # effectively the same as removing these entirely.
+    if combined_attention_mask is not None:
+        # Ensure the attention mask values are within a reasonable range
+        combined_attention_mask = combined_attention_mask.clamp(min=0, max=1)
+
+        # Convert the attention mask to bfloat16
+        combined_attention_mask = combined_attention_mask.to(torch.bfloat16)
+
+        # Normalize the attention mask values to be between 0 and 1
+        combined_attention_mask = (1.0 - combined_attention_mask) * -10000.0
+    else:
+        combined_attention_mask = torch.zeros(
+            (bsz, 1, tgt_len, tgt_len), dtype=torch.bfloat16, device=inputs_embeds.device
+        )
+
+    return combined_attention_mask
 
 
 # Copied from transformers.models.llama.modeling_llama._get_unpad_data
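In effect, the commit stops importing _prepare_4d_causal_attention_mask_for_sdpa from transformers.modeling_attn_mask_utils and reinstates the previously commented-out local implementation. Below is a minimal sketch of how the reinstated helper behaves for the common 2D padding-mask case with no KV cache; the shapes and values are illustrative assumptions, not part of the commit, and it presumes the function is importable from modeling_quiet.py.

```python
import torch

# Hypothetical usage, not part of the commit: exercise the reinstated
# helper with a 2D padding mask and an empty KV cache.
from modeling_quiet import _prepare_4d_causal_attention_mask_for_sdpa

bsz, tgt_len = 2, 4                                 # assumed toy sizes
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])       # 1 = attend, 0 = padding
inputs_embeds = torch.zeros(bsz, tgt_len, 8)        # only its .device is read

mask = _prepare_4d_causal_attention_mask_for_sdpa(
    attention_mask, (bsz, tgt_len), inputs_embeds, past_key_values_length=0
)

print(mask.shape)  # torch.Size([2, 1, 1, 4]) -- broadcast over query positions
print(mask.dtype)  # torch.bfloat16
# Attended positions come back as 0.0 and padded positions as -10000.0,
# an additive bias applied to the raw attention scores before the softmax.
```

Note that, despite its name, the 2D branch of this local version only broadcasts the padding mask to (bsz, 1, 1, src_len); unlike the upstream SDPA helper it replaces, it does not add a lower-triangular causal component itself.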