fix compatibility issue for transformers 4.46+
- README.md +3 -2
- configuration_internvl_chat.py +2 -2
README.md CHANGED

````diff
@@ -13,11 +13,12 @@ tags:
 - custom_code
 base_model: OpenGVLab/InternVL2-2B
 base_model_relation: quantized
+new_version: OpenGVLab/InternVL2_5-2B-AWQ
 ---
 
 # InternVL2-2B-AWQ
 
-[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[🆕 Blog\]](https://internvl.github.io/blog/) [\[📜 InternVL 1.0
+[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[🆕 Blog\]](https://internvl.github.io/blog/) [\[📜 InternVL 1.0\]](https://arxiv.org/abs/2312.14238) [\[📜 InternVL 1.5\]](https://arxiv.org/abs/2404.16821) [\[📜 Mini-InternVL\]](https://arxiv.org/abs/2410.16261)
 
 [\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 中文解读\]](https://zhuanlan.zhihu.com/p/706547971) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
 
@@ -42,7 +43,7 @@ LMDeploy supports the following NVIDIA GPU for W4A16 inference:
 Before proceeding with the quantization and inference, please ensure that lmdeploy is installed.
 
 ```shell
-pip install lmdeploy
+pip install lmdeploy>=0.5.3
 ```
 
 This article comprises the following sections:
````
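Since the hunk above pins `lmdeploy>=0.5.3`, here is a minimal sketch of running this AWQ model through LMDeploy's pipeline API; the prompt and image URL are illustrative examples, not part of this commit.

```python
# Minimal sketch: W4A16 inference with LMDeploy (assumes lmdeploy>=0.5.3 as
# pinned above; prompt and image URL are illustrative).
from lmdeploy import pipeline, TurbomindEngineConfig
from lmdeploy.vl import load_image

# model_format='awq' tells the TurboMind backend to load the 4-bit AWQ weights.
pipe = pipeline('OpenGVLab/InternVL2-2B-AWQ',
                backend_config=TurbomindEngineConfig(model_format='awq'))

image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
response = pipe(('describe this image', image))
print(response.text)
```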
configuration_internvl_chat.py CHANGED

```diff
@@ -39,11 +39,11 @@ class InternVLChatConfig(PretrainedConfig):
         super().__init__(**kwargs)
 
         if vision_config is None:
-            vision_config = {}
+            vision_config = {'architectures': ['InternVisionModel']}
             logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')
 
         if llm_config is None:
-            llm_config = {}
+            llm_config = {'architectures': ['InternLM2ForCausalLM']}
             logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
 
         self.vision_config = InternVisionConfig(**vision_config)
```
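For context on why seeding `architectures` fixes transformers 4.46+: this config file dispatches the language-model config class on `llm_config['architectures'][0]` a few lines below the hunk, and the likely failure mode is that newer transformers releases construct `InternVLChatConfig` with `llm_config=None`, so the old empty-dict default raised a `KeyError`. A minimal sketch of verifying the patched defaults (assumes network access; `trust_remote_code=True` pulls this repo's custom config class):

```python
# Minimal sketch of checking the patched defaults (assumes transformers>=4.46;
# trust_remote_code=True loads this repo's custom InternVLChatConfig).
from transformers import AutoConfig

config = AutoConfig.from_pretrained('OpenGVLab/InternVL2-2B-AWQ',
                                    trust_remote_code=True)

# With the seeded 'architectures' keys, instantiating the sub-configs no longer
# hits a KeyError, and each sub-config reports its model class:
print(config.vision_config.architectures)  # expected: ['InternVisionModel']
print(config.llm_config.architectures)     # expected: ['InternLM2ForCausalLM']
```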