czczup committed
Commit 11f1ccb
1 Parent(s): 4fd4126

Upload folder using huggingface_hub

Files changed (1)
  1. modeling_internvl_chat.py +3 -1
modeling_internvl_chat.py CHANGED
@@ -38,7 +38,7 @@ class InternVLChatModel(PreTrainedModel):
     _supports_flash_attn_2 = True
     _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'InternLM2DecoderLayer']
 
-    def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None):
+    def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
         super().__init__(config)
 
         assert version_cmp(transformers.__version__, '4.36.2', 'ge')
@@ -50,6 +50,8 @@ class InternVLChatModel(PreTrainedModel):
         self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
         self.downsample_ratio = config.downsample_ratio
         self.ps_version = config.ps_version
+        config.vision_config.use_flash_attn = True if use_flash_attn else False
+        config.llm_config.attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
 
         logger.info(f'num_image_token: {self.num_image_token}')
         logger.info(f'ps_version: {self.ps_version}')
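
The practical effect of this change is that flash attention can now be toggled when the model is instantiated, instead of always being on. Below is a minimal loading sketch, assuming the standard from_pretrained flow in which keyword arguments not absorbed by the config are forwarded to the model's __init__; the checkpoint path and dtype are illustrative placeholders, not part of this commit.

import torch
from transformers import AutoModel

# Placeholder repo id; substitute the InternVL-Chat checkpoint that ships this file.
path = 'OpenGVLab/InternVL-Chat-V1-5'

# Assumption: use_flash_attn is not a config field, so from_pretrained forwards it
# to InternVLChatModel.__init__. Passing False selects the 'eager' attention path
# for the LLM and disables flash attention in the vision tower, per the two lines
# added in this commit.
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=False,
    trust_remote_code=True,
).eval()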