fix compatibility issue for transformers 4.46+
Files changed:
- README.md +9 -2
- configuration_internvl_chat.py +2 -2
- modeling_intern_vit.py +1 -0
README.md
CHANGED

````diff
@@ -13,11 +13,12 @@ tags:
 - custom_code
 base_model: OpenGVLab/InternVL2-26B
 base_model_relation: quantized
+new_version: OpenGVLab/InternVL2_5-26B-AWQ
 ---
 
 # InternVL2-26B-AWQ
 
-[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[🆕 Blog\]](https://internvl.github.io/blog/) [\[📜 InternVL 1.0\]](https://arxiv.org/abs/2312.14238) [\[📜 InternVL 1.5\]](https://arxiv.org/abs/2404.16821)
+[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[🆕 Blog\]](https://internvl.github.io/blog/) [\[📜 InternVL 1.0\]](https://arxiv.org/abs/2312.14238) [\[📜 InternVL 1.5\]](https://arxiv.org/abs/2404.16821) [\[📜 Mini-InternVL\]](https://arxiv.org/abs/2410.16261)
 
 [\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 中文解读\]](https://zhuanlan.zhihu.com/p/706547971) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
@@ -42,7 +43,7 @@ LMDeploy supports the following NVIDIA GPU for W4A16 inference:
 Before proceeding with the quantization and inference, please ensure that lmdeploy is installed.
 
 ```shell
-pip install lmdeploy
+pip install lmdeploy>=0.5.3
 ```
 
 This article comprises the following sections:
@@ -123,6 +124,12 @@ This project is released under the MIT license, while InternLM2 is licensed under the Apache-2.0 license.
 If you find this project useful in your research, please consider citing:
 
 ```BibTeX
+@article{gao2024mini,
+  title={Mini-internvl: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance},
+  author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others},
+  journal={arXiv preprint arXiv:2410.16261},
+  year={2024}
+}
 @article{chen2023internvl,
   title={InternVL: Scaling up Vision Foundation Models and Aligning for Generic Visual-Linguistic Tasks},
   author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and Li, Bin and Luo, Ping and Lu, Tong and Qiao, Yu and Dai, Jifeng},
````
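A note on the README change: the Quick Start now pins lmdeploy to 0.5.3 or newer. Beware that in most shells `>=` must be quoted, i.e. `pip install 'lmdeploy>=0.5.3'`, or the `>` is treated as output redirection. For orientation, here is a minimal inference sketch against this checkpoint; `pipeline`, `TurbomindEngineConfig`, and `load_image` are standard lmdeploy APIs, but the sample image URL is only an illustrative asset, and the repo's own Quick Start section remains the authoritative reference.

```python
# Minimal sketch: run the W4A16 (AWQ) checkpoint with lmdeploy's
# TurboMind backend. Assumes lmdeploy >= 0.5.3, per the pin above.
from lmdeploy import pipeline, TurbomindEngineConfig
from lmdeploy.vl import load_image

# model_format='awq' selects the 4-bit AWQ weight loader (W4A16 inference).
pipe = pipeline(
    'OpenGVLab/InternVL2-26B-AWQ',
    backend_config=TurbomindEngineConfig(model_format='awq'),
)

# Illustrative image URL; any reachable image works here.
image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
response = pipe(('describe this image', image))
print(response.text)
```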
configuration_internvl_chat.py
CHANGED

```diff
@@ -39,11 +39,11 @@ class InternVLChatConfig(PretrainedConfig):
         super().__init__(**kwargs)
 
         if vision_config is None:
-            vision_config = {}
+            vision_config = {'architectures': ['InternVisionModel']}
             logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')
 
         if llm_config is None:
-            llm_config = {}
+            llm_config = {'architectures': ['InternLM2ForCausalLM']}
             logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
 
         self.vision_config = InternVisionConfig(**vision_config)
```
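This is the substantive part of the fix. The commit title points at transformers 4.46+, and the change suggests that, from that release on, sub-configs rebuilt from a plain empty dict lose the `architectures` entry that the loading path consults to resolve the concrete model classes; seeding the defaults restores that hint. A quick sanity check, as a sketch (assumes transformers >= 4.46, network access to the Hub, and willingness to execute the repo's custom code):

```python
# Sketch: verify the patched defaults survive a config round trip.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    'OpenGVLab/InternVL2-26B-AWQ',
    trust_remote_code=True,  # the config class lives in this repo
)
print(config.vision_config.architectures)  # expected: ['InternVisionModel']
print(config.llm_config.architectures)     # expected: ['InternLM2ForCausalLM']
```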
modeling_intern_vit.py
CHANGED

```diff
@@ -3,6 +3,7 @@
 # Copyright (c) 2024 OpenGVLab
 # Licensed under The MIT License [see LICENSE for details]
 # --------------------------------------------------------
+
 from typing import Optional, Tuple, Union
 
 import torch
```