return dict
modeling_hf_nomic_bert.py CHANGED
@@ -1058,6 +1058,7 @@ class NomicBertModel(NomicBertPreTrainedModel):
         position_ids=None,
         token_type_ids=None,
         attention_mask=None,
+        return_dict=None,
     ):
         if token_type_ids is None:
             token_type_ids = torch.zeros_like(input_ids)
@@ -1066,7 +1067,7 @@ class NomicBertModel(NomicBertPreTrainedModel):
         hidden_states = self.emb_drop(hidden_states)
 
         attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.shape)
-        sequence_output = self.encoder(hidden_states, attention_mask=attention_mask)
+        sequence_output = self.encoder(hidden_states, attention_mask=attention_mask, return_dict=return_dict)
 
         pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
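
The commit threads a new return_dict keyword through NomicBertModel.forward into the encoder call. For reference, a minimal usage sketch of the changed entry point; the checkpoint name, tokenizer call, and trust_remote_code loading are assumptions about the surrounding repo, not part of this commit:

# Hypothetical usage sketch (not part of the commit): exercising the new
# return_dict kwarg end to end. The checkpoint name is an assumption about
# which Hub repo ships this modeling file.
import torch
from transformers import AutoModel, AutoTokenizer

model_name = "nomic-ai/nomic-bert-2048"  # assumed checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
model.eval()

inputs = tokenizer("Nomic BERT handles long contexts.", return_tensors="pt")
with torch.no_grad():
    # forward() now accepts return_dict and forwards it to self.encoder.
    outputs = model(**inputs, return_dict=True)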