czczup committed
Commit 8403246
1 Parent(s): 0b5b3f3

Upload folder using huggingface_hub

Files changed (2):
  1. README.md (+5 -0)
  2. modeling_internvl_chat.py (+2 -1)
README.md CHANGED
@@ -83,6 +83,7 @@ model = AutoModel.from_pretrained(
     path,
     torch_dtype=torch.bfloat16,
     low_cpu_mem_usage=True,
+    use_flash_attn=True,
     trust_remote_code=True).eval().cuda()
 ```
 
@@ -97,6 +98,7 @@ model = AutoModel.from_pretrained(
     torch_dtype=torch.bfloat16,
     load_in_8bit=True,
     low_cpu_mem_usage=True,
+    use_flash_attn=True,
     trust_remote_code=True).eval()
 ```
 
@@ -111,6 +113,7 @@ model = AutoModel.from_pretrained(
     torch_dtype=torch.bfloat16,
     load_in_4bit=True,
     low_cpu_mem_usage=True,
+    use_flash_attn=True,
     trust_remote_code=True).eval()
 ```
 
@@ -153,6 +156,7 @@ model = AutoModel.from_pretrained(
     path,
     torch_dtype=torch.bfloat16,
     low_cpu_mem_usage=True,
+    use_flash_attn=True,
     trust_remote_code=True,
     device_map=device_map).eval()
 ```
@@ -248,6 +252,7 @@ model = AutoModel.from_pretrained(
     path,
     torch_dtype=torch.bfloat16,
     low_cpu_mem_usage=True,
+    use_flash_attn=True,
     trust_remote_code=True).eval().cuda()
 tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
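All five README hunks make the same change: `use_flash_attn=True` is now passed explicitly in every `from_pretrained` example. For context, a minimal usage sketch assembled from the updated bf16 loading hunk; the checkpoint ID assigned to `path` is a placeholder, since the diff does not show which model repository the README belongs to:

```python
import torch
from transformers import AutoModel, AutoTokenizer

# Placeholder checkpoint ID; the diff does not name the model repository.
path = 'OpenGVLab/InternVL-Chat-V1-5'

# use_flash_attn=True is forwarded to the remote-code InternVLChatModel;
# with the companion change in modeling_internvl_chat.py below, it degrades
# to eager attention when flash-attn is not installed instead of erroring.
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    trust_remote_code=True).eval().cuda()

tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
```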
 
modeling_internvl_chat.py CHANGED
@@ -18,7 +18,7 @@ from transformers.utils import ModelOutput, logging
 
 from .configuration_internvl_chat import InternVLChatConfig
 from .conversation import get_conv_template
-from .modeling_intern_vit import InternVisionModel
+from .modeling_intern_vit import InternVisionModel, has_flash_attn
 from .modeling_internlm2 import InternLM2ForCausalLM
 
 logger = logging.get_logger(__name__)
@@ -50,6 +50,7 @@ class InternVLChatModel(PreTrainedModel):
         self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
         self.downsample_ratio = config.downsample_ratio
         self.ps_version = config.ps_version
+        use_flash_attn = use_flash_attn if has_flash_attn else False
         config.vision_config.use_flash_attn = True if use_flash_attn else False
         config.llm_config.attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
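The new guard turns `use_flash_attn` into a request rather than a hard requirement: if flash-attn is unavailable at runtime, the model falls back to eager attention instead of failing. The diff does not show how `has_flash_attn` is defined in `modeling_intern_vit.py`; a plausible sketch, assuming the common guarded-import pattern, is:

```python
# Hypothetical sketch of the flag imported above; the real definition lives
# in modeling_intern_vit.py and is not part of this diff.
try:
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func  # noqa: F401
    has_flash_attn = True
except ImportError:
    # flash-attn is not installed (or was built against a different CUDA
    # toolkit); InternVLChatModel then forces use_flash_attn to False.
    has_flash_attn = False
```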