fix compatibility issue for transformers 4.46+
Browse files- configuration_intern_vit.py +1 -0
- configuration_internvl_chat.py +2 -2
- conversation.py +15 -17
- modeling_internvl_chat.py +5 -6
configuration_intern_vit.py
CHANGED
@@ -3,6 +3,7 @@
|
|
3 |
# Copyright (c) 2024 OpenGVLab
|
4 |
# Licensed under The MIT License [see LICENSE for details]
|
5 |
# --------------------------------------------------------
|
|
|
6 |
import os
|
7 |
from typing import Union
|
8 |
|
|
|
3 |
# Copyright (c) 2024 OpenGVLab
|
4 |
# Licensed under The MIT License [see LICENSE for details]
|
5 |
# --------------------------------------------------------
|
6 |
+
|
7 |
import os
|
8 |
from typing import Union
|
9 |
|
configuration_internvl_chat.py
CHANGED
@@ -46,10 +46,10 @@ class InternVLChatConfig(PretrainedConfig):
|
|
46 |
logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
|
47 |
|
48 |
self.vision_config = InternVisionConfig(**vision_config)
|
49 |
-
if llm_config['architectures'][0] == 'LlamaForCausalLM':
|
50 |
self.llm_config = LlamaConfig(**llm_config)
|
51 |
else:
|
52 |
-
raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
|
53 |
self.use_backbone_lora = use_backbone_lora
|
54 |
self.use_llm_lora = use_llm_lora
|
55 |
self.select_layer = select_layer
|
|
|
46 |
logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
|
47 |
|
48 |
self.vision_config = InternVisionConfig(**vision_config)
|
49 |
+
if llm_config.get('architectures')[0] == 'LlamaForCausalLM':
|
50 |
self.llm_config = LlamaConfig(**llm_config)
|
51 |
else:
|
52 |
+
raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))
|
53 |
self.use_backbone_lora = use_backbone_lora
|
54 |
self.use_llm_lora = use_llm_lora
|
55 |
self.select_layer = select_layer
|
conversation.py
CHANGED
@@ -3,11 +3,13 @@ Conversation prompt templates.
|
|
3 |
|
4 |
We kindly request that you import fastchat instead of copying this file if you wish to use it.
|
5 |
If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
|
|
|
|
|
6 |
"""
|
7 |
|
8 |
import dataclasses
|
9 |
from enum import IntEnum, auto
|
10 |
-
from typing import Dict, List, Tuple  # NOTE(review): line truncated in this diff capture — original import list lost; replacement hunk adds `Union`
|
11 |
|
12 |
|
13 |
class SeparatorStyle(IntEnum):
|
@@ -344,12 +346,6 @@ register_conv_template(
|
|
344 |
roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
|
345 |
sep_style=SeparatorStyle.MPT,
|
346 |
sep='<|im_end|>',
|
347 |
-
stop_token_ids=[
|
348 |
-
2,
|
349 |
-
6,
|
350 |
-
7,
|
351 |
-
8,
|
352 |
-
],
|
353 |
stop_str='<|endoftext|>',
|
354 |
)
|
355 |
)
|
@@ -365,11 +361,6 @@ register_conv_template(
|
|
365 |
roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
|
366 |
sep_style=SeparatorStyle.MPT,
|
367 |
sep='<|im_end|>',
|
368 |
-
stop_token_ids=[
|
369 |
-
2,
|
370 |
-
92543,
|
371 |
-
92542
|
372 |
-
]
|
373 |
)
|
374 |
)
|
375 |
|
@@ -384,10 +375,17 @@ register_conv_template(
|
|
384 |
roles=('<|user|>\n', '<|assistant|>\n'),
|
385 |
sep_style=SeparatorStyle.MPT,
|
386 |
sep='<|end|>',
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
391 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
392 |
)
|
393 |
)
|
|
|
3 |
|
4 |
We kindly request that you import fastchat instead of copying this file if you wish to use it.
|
5 |
If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
|
6 |
+
|
7 |
+
Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
|
8 |
"""
|
9 |
|
10 |
import dataclasses
|
11 |
from enum import IntEnum, auto
|
12 |
+
from typing import Dict, List, Tuple, Union
|
13 |
|
14 |
|
15 |
class SeparatorStyle(IntEnum):
|
|
|
346 |
roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
|
347 |
sep_style=SeparatorStyle.MPT,
|
348 |
sep='<|im_end|>',
|
|
|
|
|
|
|
|
|
|
|
|
|
349 |
stop_str='<|endoftext|>',
|
350 |
)
|
351 |
)
|
|
|
361 |
roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
|
362 |
sep_style=SeparatorStyle.MPT,
|
363 |
sep='<|im_end|>',
|
|
|
|
|
|
|
|
|
|
|
364 |
)
|
365 |
)
|
366 |
|
|
|
375 |
roles=('<|user|>\n', '<|assistant|>\n'),
|
376 |
sep_style=SeparatorStyle.MPT,
|
377 |
sep='<|end|>',
|
378 |
+
)
|
379 |
+
)
|
380 |
+
|
381 |
+
|
382 |
+
register_conv_template(
|
383 |
+
Conversation(
|
384 |
+
name='internvl2_5',
|
385 |
+
system_template='<|im_start|>system\n{system_message}',
|
386 |
+
system_message='你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
|
387 |
+
roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
|
388 |
+
sep_style=SeparatorStyle.MPT,
|
389 |
+
sep='<|im_end|>\n',
|
390 |
)
|
391 |
)
|
modeling_internvl_chat.py
CHANGED
@@ -3,6 +3,7 @@
|
|
3 |
# Copyright (c) 2024 OpenGVLab
|
4 |
# Licensed under The MIT License [see LICENSE for details]
|
5 |
# --------------------------------------------------------
|
|
|
6 |
import warnings
|
7 |
from typing import Any, List, Optional, Tuple, Union
|
8 |
|
@@ -233,7 +234,7 @@ class InternVLChatModel(PreTrainedModel):
|
|
233 |
model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
|
234 |
input_ids = model_inputs['input_ids'].to(self.device)
|
235 |
attention_mask = model_inputs['attention_mask'].to(self.device)
|
236 |
-
eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
|
237 |
generation_config['eos_token_id'] = eos_token_id
|
238 |
generation_output = self.generate(
|
239 |
pixel_values=pixel_values,
|
@@ -242,7 +243,7 @@ class InternVLChatModel(PreTrainedModel):
|
|
242 |
**generation_config
|
243 |
)
|
244 |
responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
|
245 |
-
responses = [response.split(template.sep)[0].strip() for response in responses]
|
246 |
return responses
|
247 |
|
248 |
def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
|
@@ -261,7 +262,7 @@ class InternVLChatModel(PreTrainedModel):
|
|
261 |
|
262 |
template = get_conv_template(self.template)
|
263 |
template.system_message = self.system_message
|
264 |
-
eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
|
265 |
|
266 |
history = [] if history is None else history
|
267 |
for (old_question, old_answer) in history:
|
@@ -290,7 +291,7 @@ class InternVLChatModel(PreTrainedModel):
|
|
290 |
**generation_config
|
291 |
)
|
292 |
response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
|
293 |
-
response = response.split(template.sep)[0].strip()
|
294 |
history.append((question, response))
|
295 |
if return_history:
|
296 |
return response, history
|
@@ -310,7 +311,6 @@ class InternVLChatModel(PreTrainedModel):
|
|
310 |
visual_features: Optional[torch.FloatTensor] = None,
|
311 |
generation_config: Optional[GenerationConfig] = None,
|
312 |
output_hidden_states: Optional[bool] = None,
|
313 |
-
return_dict: Optional[bool] = None,
|
314 |
**generate_kwargs,
|
315 |
) -> torch.LongTensor:
|
316 |
|
@@ -338,7 +338,6 @@ class InternVLChatModel(PreTrainedModel):
|
|
338 |
attention_mask=attention_mask,
|
339 |
generation_config=generation_config,
|
340 |
output_hidden_states=output_hidden_states,
|
341 |
-
return_dict=return_dict,
|
342 |
use_cache=True,
|
343 |
**generate_kwargs,
|
344 |
)
|
|
|
3 |
# Copyright (c) 2024 OpenGVLab
|
4 |
# Licensed under The MIT License [see LICENSE for details]
|
5 |
# --------------------------------------------------------
|
6 |
+
|
7 |
import warnings
|
8 |
from typing import Any, List, Optional, Tuple, Union
|
9 |
|
|
|
234 |
model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
|
235 |
input_ids = model_inputs['input_ids'].to(self.device)
|
236 |
attention_mask = model_inputs['attention_mask'].to(self.device)
|
237 |
+
eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
|
238 |
generation_config['eos_token_id'] = eos_token_id
|
239 |
generation_output = self.generate(
|
240 |
pixel_values=pixel_values,
|
|
|
243 |
**generation_config
|
244 |
)
|
245 |
responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
|
246 |
+
responses = [response.split(template.sep.strip())[0].strip() for response in responses]
|
247 |
return responses
|
248 |
|
249 |
def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
|
|
|
262 |
|
263 |
template = get_conv_template(self.template)
|
264 |
template.system_message = self.system_message
|
265 |
+
eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
|
266 |
|
267 |
history = [] if history is None else history
|
268 |
for (old_question, old_answer) in history:
|
|
|
291 |
**generation_config
|
292 |
)
|
293 |
response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
|
294 |
+
response = response.split(template.sep.strip())[0].strip()
|
295 |
history.append((question, response))
|
296 |
if return_history:
|
297 |
return response, history
|
|
|
311 |
visual_features: Optional[torch.FloatTensor] = None,
|
312 |
generation_config: Optional[GenerationConfig] = None,
|
313 |
output_hidden_states: Optional[bool] = None,
|
|
|
314 |
**generate_kwargs,
|
315 |
) -> torch.LongTensor:
|
316 |
|
|
|
338 |
attention_mask=attention_mask,
|
339 |
generation_config=generation_config,
|
340 |
output_hidden_states=output_hidden_states,
|
|
|
341 |
use_cache=True,
|
342 |
**generate_kwargs,
|
343 |
)
|