ValueError: At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`.

#11 opened by assa8945

Can anyone help?

```python
from lmdeploy import pipeline, TurbomindEngineConfig
from lmdeploy.vl import load_image

model = 'OpenGVLab/InternVL2-40B'
image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
response = pipe(('describe this image', image))
print(response.text)
```

It fails with:

```
{
    "name": "ValueError",
    "message": "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`.",
    "stack": "---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[2], line 6
      4 model = 'OpenGVLab/InternVL2-40B'
      5 image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
----> 6 pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
      7 response = pipe(('describe this image', image))
      8 print(response.text)

File /scratch/xiangrui/conda_envs/sol_envs/diffusers/lib/python3.8/site-packages/lmdeploy/api.py:89, in pipeline(model_path, model_name, backend_config, chat_template_config, log_level, **kwargs)
     86 else:
     87     tp = 1 if backend_config is None else backend_config.tp
---> 89 return pipeline_class(model_path,
     90                       model_name=model_name,
     91                       backend=backend,
     92                       backend_config=backend_config,
     93                       chat_template_config=chat_template_config,
     94                       tp=tp,
     95                       **kwargs)

File /scratch/xiangrui/conda_envs/sol_envs/diffusers/lib/python3.8/site-packages/lmdeploy/serve/vl_async_engine.py:21, in VLAsyncEngine.__init__(self, model_path, **kwargs)
     19 vision_config = kwargs.pop('vision_config', None)
     20 backend_config = kwargs.get('backend_config', None)
---> 21 self.vl_encoder = ImageEncoder(model_path,
     22                                vision_config,
     23                                backend_config=backend_config)
     24 super().__init__(model_path, **kwargs)
     25 if self.model_name == 'base':

File /scratch/xiangrui/conda_envs/sol_envs/diffusers/lib/python3.8/site-packages/lmdeploy/vl/engine.py:85, in ImageEncoder.__init__(self, model_path, vision_config, backend_config)
     80 def __init__(self,
     81              model_path: str,
     82              vision_config: VisionConfig = None,
     83              backend_config: Optional[Union[TurbomindEngineConfig,
     84                                             PytorchEngineConfig]] = None):
---> 85     self.model = load_vl_model(model_path, backend_config=backend_config)
     86     if vision_config is None:
     87         vision_config = VisionConfig()

File /scratch/xiangrui/conda_envs/sol_envs/diffusers/lib/python3.8/site-packages/lmdeploy/vl/model/builder.py:55, in load_vl_model(model_path, with_llm, backend_config)
     53     if module.match(hf_config):
     54         logger.info(f'matching vision model: {name}')
---> 55         return module(**kwargs)
     56 except Exception:
     57     logger.error(f'matching vision model: {name} failed')

File /scratch/xiangrui/conda_envs/sol_envs/diffusers/lib/python3.8/site-packages/lmdeploy/vl/model/base.py:31, in VisonModel.__init__(self, model_path, with_llm, max_memory, hf_config)
     29     _, hf_config = get_model_arch(model_path)
     30 self.hf_config = hf_config
---> 31 self.build_model()

File /scratch/xiangrui/conda_envs/sol_envs/diffusers/lib/python3.8/site-packages/lmdeploy/vl/model/internvl.py:99, in InternVLVisionModel.build_model(self)
     97 from accelerate import load_checkpoint_and_dispatch
     98 with disable_logging():
---> 99     load_checkpoint_and_dispatch(
    100         model=model,
    101         checkpoint=self.model_path,
    102         device_map='auto' if not self.with_llm else {'': 'cpu'},
    103         max_memory=self.max_memory,
    104         no_split_module_classes=['InternVisionEncoderLayer'],
    105         dtype=torch.half)
    107 # We need eval mode to freeze the weights in model, thus,
    108 # avoid randomness in inference.
    109 self.model = model.eval()

File /scratch/xiangrui/conda_envs/sol_envs/diffusers/lib/python3.8/site-packages/accelerate/big_modeling.py:613, in load_checkpoint_and_dispatch(model, checkpoint, device_map, max_memory, no_split_module_classes, offload_folder, offload_buffers, dtype, offload_state_dict, skip_keys, preload_module_classes, force_hooks, strict)
    611 if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
    612     offload_state_dict = True
--> 613 load_checkpoint_in_model(
    614     model,
    615     checkpoint,
    616     device_map=device_map,
    617     offload_folder=offload_folder,
    618     dtype=dtype,
    619     offload_state_dict=offload_state_dict,
    620     offload_buffers=offload_buffers,
    621     strict=strict,
    622 )
    623 if device_map is None:
    624     return model

File /scratch/xiangrui/conda_envs/sol_envs/diffusers/lib/python3.8/site-packages/accelerate/utils/modeling.py:1677, in load_checkpoint_in_model(model, checkpoint, device_map, offload_folder, dtype, offload_state_dict, offload_buffers, keep_in_fp32_modules, offload_8bit_bnb, strict)
   1674     check_tied_parameters_on_same_device(tied_params, device_map)
   1676 if offload_folder is None and device_map is not None and "disk" in device_map.values():
-> 1677     raise ValueError(
   1678         "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`."
   1679     )
   1680 elif offload_folder is not None and device_map is not None and "disk" in device_map.values():
   1681     os.makedirs(offload_folder, exist_ok=True)

ValueError: At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`."
}
```
OpenGVLab org

Hello, thank you for using the model. May I ask which version of lmdeploy you are using? Maybe try the newer v1.6.2.
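
For context, the traceback shows that `accelerate` assigned part of the vision encoder to disk under `device_map='auto'` because it could not find enough GPU (or CPU) memory, and disk offload requires an `offload_folder`, which lmdeploy's internal `load_checkpoint_and_dispatch` call does not pass. If upgrading alone does not help, spreading the model over more GPUs with tensor parallelism is a common workaround, since it gives both the LLM and the vision encoder more memory to map into. A minimal sketch, assuming a machine with two sufficiently large GPUs (`tp` is the tensor-parallelism degree of `TurbomindEngineConfig`; adjust it to your hardware):

```python
from lmdeploy import pipeline, TurbomindEngineConfig
from lmdeploy.vl import load_image

# Shard InternVL2-40B across two GPUs (tp should be a power of two) so
# that neither the language model nor the vision encoder spills to disk.
pipe = pipeline(
    'OpenGVLab/InternVL2-40B',
    backend_config=TurbomindEngineConfig(session_len=8192, tp=2),
)

image = load_image(
    'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
response = pipe(('describe this image', image))
print(response.text)
```

Alternatively, a smaller variant such as `OpenGVLab/InternVL2-8B` may fit on a single GPU with the original snippet unchanged.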

opengvlab-admin changed discussion status to closed
