args=Namespace(checkpoint='pretrained/InternVL2-2B-AWQ', task='reasoning-image-val', outputs_dir='pretrained/InternVL2-2B-AWQ/eval_mm_niah/reasoning-image-val', num_gpus_per_rank=2)
Start evaluation on task reasoning-image-val
language_model.model.layers.0 4
language_model.model.layers.1 4
language_model.model.layers.2 4
language_model.model.layers.3 4
language_model.model.layers.4 4
language_model.model.layers.5 4
language_model.model.layers.6 4
language_model.model.layers.7 4
language_model.model.layers.8 4
language_model.model.layers.9 4
language_model.model.layers.10 4
language_model.model.layers.11 4
language_model.model.layers.12 4
language_model.model.layers.13 4
language_model.model.layers.14 4
language_model.model.layers.15 4
language_model.model.layers.16 4
language_model.model.layers.17 4
language_model.model.layers.18 4
language_model.model.layers.19 4
language_model.model.layers.20 4
language_model.model.layers.21 4
language_model.model.layers.22 4
language_model.model.layers.23 4
vision_model.encoder.layers.0 0
vision_model.encoder.layers.1 0
vision_model.encoder.layers.2 0
vision_model.encoder.layers.3 0
vision_model.encoder.layers.4 0
vision_model.encoder.layers.5 0
vision_model.encoder.layers.6 0
vision_model.encoder.layers.7 0
vision_model.encoder.layers.8 0
vision_model.encoder.layers.9 0
vision_model.encoder.layers.10 0
vision_model.encoder.layers.11 0
vision_model.encoder.layers.12 0
vision_model.encoder.layers.13 0
vision_model.encoder.layers.14 0
vision_model.encoder.layers.15 0
vision_model.encoder.layers.16 0
vision_model.encoder.layers.17 0
vision_model.encoder.layers.18 0
vision_model.encoder.layers.19 0
vision_model.encoder.layers.20 0
vision_model.encoder.layers.21 0
vision_model.encoder.layers.22 0
vision_model.encoder.layers.23 0
vision_model.embeddings 0
mlp1 0
language_model.model.tok_embeddings 4
language_model.model.norm 4
language_model.output 4
language_model.model.embed_tokens 4
language_model.lm_head 4
The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.
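For reference, the module-to-device placement printed above corresponds to an explicit device_map dict passed when loading the checkpoint. The snippet below is only a hypothetical sketch of such loading code, not the evaluation script's own implementation; the module names and device indices are taken from the printout, while the Auto classes, dtype, and everything else are assumptions.

    import torch
    from transformers import AutoModel, AutoTokenizer

    path = 'pretrained/InternVL2-2B-AWQ'

    # Mirror the placement printed above: vision modules on device 0,
    # language-model modules on device 4.
    device_map = {f'language_model.model.layers.{i}': 4 for i in range(24)}
    device_map.update({f'vision_model.encoder.layers.{i}': 0 for i in range(24)})
    device_map.update({
        'vision_model.embeddings': 0,
        'mlp1': 0,
        'language_model.model.tok_embeddings': 4,
        'language_model.model.norm': 4,
        'language_model.output': 4,
        'language_model.model.embed_tokens': 4,
        'language_model.lm_head': 4,
    })

    # trust_remote_code only takes effect on the Auto* classes, which is why
    # passing it elsewhere triggers the warning seen above.
    model = AutoModel.from_pretrained(
        path,
        torch_dtype=torch.float16,
        device_map=device_map,
        trust_remote_code=True,
    ).eval()
    tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)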
Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]
Processing InternVL2-2B-AWQ_reasoning-image-val.jsonl: 20%|██ | 26/130 [00:18<01:15, 1.38it/s]
slurmstepd: error: *** STEP 3758685.0 ON HOST-10-140-66-148 CANCELLED AT 2024-08-07T19:05:21 ***
Traceback (most recent call last):
  File "/mnt/hwfile/wangweiyun/workspace_zyc/VLM-Dev/eval/mm_niah/eval_mm_niah.py", line 256, in <module>
    main(args)
  File "/mnt/hwfile/wangweiyun/workspace_zyc/VLM-Dev/eval/mm_niah/eval_mm_niah.py", line 186, in main
    outputs = model.chat(
  File "/mnt/hwfile/wangweiyun/workspace_zyc/VLM-Dev/internvl/model/internvl_chat/modeling_internvl_chat.py", line 385, in chat
    generation_output = self.generate(
  File "/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "/mnt/hwfile/wangweiyun/workspace_zyc/VLM-Dev/internvl/model/internvl_chat/modeling_internvl_chat.py", line 452, in generate
    outputs = self.language_model.generate(
  File "/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl/lib/python3.10/site-packages/transformers/generation/utils.py", line 1479, in generate
    return self.greedy_search(
  File "/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl/lib/python3.10/site-packages/transformers/generation/utils.py", line 2341, in greedy_search
    outputs = self(
  File "/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
Traceback (most recent call last):
  File "/mnt/hwfile/wangweiyun/workspace_zyc/VLM-Dev/eval/mm_niah/eval_mm_niah.py", line 256, in <module>
    main(args)
  File "/mnt/hwfile/wangweiyun/workspace_zyc/VLM-Dev/eval/mm_niah/eval_mm_niah.py", line 171, in main
    curr_pixel_values = load_image(img, dynamic_image_size=False)
  File "/mnt/hwfile/wangweiyun/workspace_zyc/VLM-Dev/eval/mm_niah/eval_mm_niah.py", line 31, in load_image
    image = Image.open(image_file).convert('RGB')
  File "/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl/lib/python3.10/site-packages/PIL/Image.py", line 3274, in open
    filename = os.path.realpath(os.fspath(fp))
  File "/mnt/petrelfs/wangweiyun/miniconda3/envs/internvl/lib/python3.10/posixpath.py", line 395, in realpath
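The frame at eval_mm_niah.py, line 31 shows only that load_image opens the file with PIL and converts it to RGB before the log is cut off by the cancellation. A minimal, hypothetical sketch of such a helper follows; the input size, normalization constants, and the handling of dynamic_image_size are assumptions, not the evaluation script's actual implementation.

    import torchvision.transforms as T
    from PIL import Image

    IMAGENET_MEAN = (0.485, 0.456, 0.406)
    IMAGENET_STD = (0.229, 0.224, 0.225)

    def load_image(image_file, dynamic_image_size=True, input_size=448):
        # Open the file and convert to RGB, as in the frame at eval_mm_niah.py:31.
        image = Image.open(image_file).convert('RGB')
        transform = T.Compose([
            T.Resize((input_size, input_size)),
            T.ToTensor(),
            T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
        ])
        # The real helper presumably tiles the image when dynamic_image_size=True;
        # this sketch returns a single resized tile regardless.
        return transform(image).unsqueeze(0)  # pixel_values of shape (1, 3, H, W)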