czczup committed · Commit 4177355 · verified · Parent(s): e1bd7d6

Upload folder using huggingface_hub

Files changed (1): README.md (+41 −5)

README.md CHANGED
@@ -76,6 +76,7 @@ We also welcome you to experience the InternVL2 series models in our [online dem
 > Please use transformers==4.37.2 to ensure the model works normally.
 
 ```python
+import math
 import numpy as np
 import torch
 import torchvision.transforms as T
@@ -163,17 +164,44 @@ def load_image(image_file, input_size=448, max_num=6):
     return pixel_values
 
 
+def split_model(model_name):
+    device_map = {}
+    world_size = torch.cuda.device_count()
+    num_layers = {'InternVL2-8B': 32, 'InternVL2-26B': 48,
+                  'InternVL2-40B': 60, 'InternVL2-Llama3-76B': 80}[model_name]
+    # Since the first GPU will be used for ViT, treat it as half a GPU.
+    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
+    num_layers_per_gpu = [num_layers_per_gpu] * world_size
+    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
+    layer_cnt = 0
+    for i, num_layer in enumerate(num_layers_per_gpu):
+        for j in range(num_layer):
+            device_map[f'language_model.model.layers.{layer_cnt}'] = i
+            layer_cnt += 1
+    device_map['vision_model'] = 0
+    device_map['mlp1'] = 0
+    device_map['language_model.model.tok_embeddings'] = 0
+    device_map['language_model.model.embed_tokens'] = 0
+    device_map['language_model.output'] = 0
+    device_map['language_model.model.norm'] = 0
+    device_map['language_model.lm_head'] = 0
+    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
+
+    return device_map
+
+
 path = 'OpenGVLab/InternVL2-40B'
-# You need to set device_map='auto' to use multiple GPUs for inference.
-import os
-os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
+device_map = split_model('InternVL2-40B')
+print(device_map)
+# If you set `load_in_8bit=True`, you will need one 80GB GPU.
+# If you set `load_in_8bit=False`, you will need at least two 80GB GPUs.
 model = AutoModel.from_pretrained(
     path,
     torch_dtype=torch.bfloat16,
+    load_in_8bit=True,
     low_cpu_mem_usage=True,
     trust_remote_code=True,
-    device_map='auto').eval()
-
+    device_map=device_map).eval()
 tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
 # set the max number of tiles in `max_num`
 pixel_values = load_image('./examples/image1.jpg', max_num=6).to(torch.bfloat16).cuda()
@@ -317,6 +345,10 @@ print(f'User: {question}')
 print(f'Assistant: {response}')
 ```
 
+## Finetune
+
+SWIFT from the ModelScope community supports fine-tuning InternVL (image and video); please check [this link](https://github.com/modelscope/swift/blob/main/docs/source_en/Multi-Modal/internvl-best-practice.md) for more details.
+
 ## Deployment
 
 ### LMDeploy
@@ -575,6 +607,10 @@ InternVL 2.0 is a family of multimodal large language models of various sizes
 
 For sample code, please [click here](#quick-start).
 
+## Fine-tuning
+
+SWIFT from the ModelScope community supports fine-tuning InternVL (image and video); please check [this link](https://github.com/modelscope/swift/blob/main/docs/source_en/Multi-Modal/internvl-best-practice.md) for more details.
+
 ## Deployment
 
 ### LMDeploy
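
The `split_model` helper this commit adds pins the vision tower (`vision_model`, `mlp1`), the embeddings, the final norm, the output head, and the last decoder layer to GPU 0, and spreads the remaining decoder layers across all GPUs, counting GPU 0 as only half a device because the ViT already occupies it. Below is a minimal sketch of the resulting layout on a hypothetical two-GPU machine; `preview_split` is an illustrative stand-in that takes `world_size` as a parameter instead of calling `torch.cuda.device_count()`, so it runs on a CPU-only box:

```python
import math

def preview_split(num_layers: int, world_size: int) -> dict:
    """Recompute the commit's layer-to-GPU layout without touching CUDA."""
    # GPU 0 counts as half a device because it also hosts the ViT.
    per_gpu = math.ceil(num_layers / (world_size - 0.5))
    per_gpu = [per_gpu] * world_size
    per_gpu[0] = math.ceil(per_gpu[0] * 0.5)
    layout, layer_cnt = {}, 0
    for gpu, n in enumerate(per_gpu):
        for _ in range(n):
            if layer_cnt < num_layers:   # the README version instead writes
                layout[layer_cnt] = gpu  # a few harmless extra keys here
                layer_cnt += 1
    layout[num_layers - 1] = 0  # last decoder layer is moved back to GPU 0
    return layout

# InternVL2-40B has 60 decoder layers; preview a two-GPU split.
layout = preview_split(60, 2)
print({gpu: sum(1 for g in layout.values() if g == gpu) for gpu in (0, 1)})
# {0: 21, 1: 39} -- GPU 0 gets a half share of layers plus ViT and embeddings.
```

Moving the last decoder layer back to GPU 0 is presumably so the final hidden states land on the same device as the norm and output head, avoiding an extra cross-device transfer during generation.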
 
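The diff elides the README lines between `load_image` and the final `print` calls of the quick-start. For orientation, here is a minimal single-turn sketch under the assumption that the model's remote code exposes InternVL's usual `chat(tokenizer, pixel_values, question, generation_config)` method; the prompt text and `max_new_tokens` value are illustrative choices, and `model`, `tokenizer`, and `pixel_values` come from the snippet in the diff above:

```python
# Single-image, single-turn conversation (continues the quick-start above);
# `<image>` marks where the image tiles are inserted into the prompt.
generation_config = dict(max_new_tokens=1024, do_sample=False)
question = '<image>\nPlease describe the image shortly.'  # hypothetical prompt
response = model.chat(tokenizer, pixel_values, question, generation_config)
print(f'User: {question}')
print(f'Assistant: {response}')
```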