Remove import that no longer exists in Transformers

#2
by Rocketknight1 (HF staff) - opened
Files changed (1)
  1. modeling_minimax_text_01.py +0 -4
modeling_minimax_text_01.py CHANGED
@@ -22,7 +22,6 @@ from transformers.modeling_outputs import (
     SequenceClassifierOutputWithPast,
 )
 from transformers.modeling_utils import PreTrainedModel
-from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_13
 from transformers.utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
@@ -42,9 +41,6 @@ if is_flash_attn_2_available():
 # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
 # It means that the function will not be traced through and simply appear as a node in the graph.
 if is_torch_fx_available():
-    if not is_torch_greater_or_equal_than_1_13:
-        import torch.fx
-
     _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
 
 from .configuration_minimax_text_01 import MiniMaxText01Config
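
For context on the line that stays behind: `torch.fx.wrap` registers a function as a leaf for symbolic tracing, so it shows up as a single node in the FX graph rather than being traced through, exactly as the comment in the diff describes. Below is a minimal, standalone sketch of that mechanism; `make_mask` and `Toy` are made-up names for illustration, not code from this repo.

```python
import torch
import torch.fx


def make_mask(size):
    # Ordinary eager code that symbolic tracing should not step into.
    return torch.tril(torch.ones(size, size))


# Must be called at module level: tracing will now record make_mask as a single
# call_function node (a "leaf") instead of tracing through its body.
make_mask = torch.fx.wrap(make_mask)


class Toy(torch.nn.Module):
    def forward(self, x):
        return x * make_mask(x.size(0))


gm = torch.fx.symbolic_trace(Toy())
print(gm.graph)  # make_mask appears as one opaque node in the graph
```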
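On the removed import itself: `is_torch_greater_or_equal_than_1_13` no longer exists in `transformers.pytorch_utils`, so importing it unconditionally breaks loading this custom code on recent Transformers releases. If the file ever needed to support both old and new Transformers instead of simply dropping the guard, a guarded import is one option. The sketch below is an illustration, not part of this PR, and it recreates the flag roughly the way Transformers used to define it.

```python
try:
    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_13
except ImportError:
    import torch
    from packaging import version

    # Local fallback: compare against torch's base version (dropping the
    # "+cuXXX" suffix), approximately mirroring the helper's old definition.
    _torch_base = version.parse(version.parse(torch.__version__).base_version)
    is_torch_greater_or_equal_than_1_13 = _torch_base >= version.parse("1.13")
```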