doubility123 committed on
Commit
c5b2dc5
1 Parent(s): de65778

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,141 @@
{
  "candidate_resolutions": [
    [384, 384],
    [384, 768],
    [768, 384],
    [384, 1152],
    [1152, 384],
    [384, 1536],
    [1536, 384],
    [768, 768],
    [384, 1920],
    [1920, 384],
    [384, 2304],
    [2304, 384],
    [768, 1152],
    [1152, 768],
    [384, 2688],
    [2688, 384],
    [384, 3072],
    [3072, 384],
    [768, 1536],
    [1536, 768],
    [384, 3456],
    [3456, 384],
    [1152, 1152]
  ],
  "global_view_pos": "head",
  "language_config": {
    "architectures": [
      "DeepseekV2ForCausalLM"
    ],
    "auto_map": {
      "AutoConfig": "configuration_deepseek.DeepseekV2Config",
      "AutoModel": "modeling_deepseek.DeepseekV2Model",
      "AutoModelForCausalLM": "modeling_deepseek.DeepseekV2ForCausalLM"
    },
    "first_k_dense_replace": 1,
    "hidden_size": 2048,
    "intermediate_size": 10944,
    "lm_head": true,
    "max_position_embeddings": 4096,
    "model_type": "deepseek_v2",
    "moe_intermediate_size": 1408,
    "n_group": 1,
    "n_routed_experts": 64,
    "n_shared_experts": 2,
    "num_attention_heads": 16,
    "num_experts_per_tok": 6,
    "num_hidden_layers": 27,
    "num_key_value_heads": 16,
    "q_lora_rank": null,
    "rm_head": false,
    "topk_group": 1,
    "topk_method": "greedy",
    "torch_dtype": "bfloat16"
  },
  "model_type": "deepseek_vl_v2",
  "projector_config": {
    "model_type": "mlp_projector"
  },
  "tile_tag": "2D",
  "torch_dtype": "bfloat16",
  "transformers_version": "4.38.2",
  "vision_config": {
    "layers": 27,
    "mlp_ratio": 3.7362,
    "model_name": "siglip_so400m_patch14_384",
    "model_type": "vision",
    "patch_size": 14,
    "width": 1152
  }
}
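The config describes a DeepSeek-VL2 checkpoint: a DeepSeek-V2 MoE language backbone (27 hidden layers, hidden size 2048, 64 routed experts with 6 active per token plus 2 shared) behind a SigLIP-SO400M vision tower and an MLP projector. As a minimal sketch, the file can be inspected without downloading any weights or trusting the repo's custom model code; the repo id below is a placeholder, not this repository's actual name:

import json
from huggingface_hub import hf_hub_download

# Placeholder repo id; substitute the actual namespace/name of this checkpoint.
path = hf_hub_download("your-namespace/deepseek-vl2-checkpoint", "config.json")
with open(path) as f:
    cfg = json.load(f)

print(cfg["model_type"])                           # deepseek_vl_v2
print(cfg["language_config"]["n_routed_experts"])  # 64 routed MoE experts
print(cfg["vision_config"]["model_name"])          # siglip_so400m_patch14_384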
model-00001-of-000004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1bff387143793c7786707473eae522e4f0a24ae41d29d86532d0b5b9d3e5cab7
size 8591541424
model-00002-of-000004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7693542b1ba539e0f12db0c25210bdaf7aaeb298f1663be7142bd41316663842
size 8590731304
model-00003-of-000004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9a0eab9f1918617e991346617ca9222bc928218f418577384004b6dadd5ed25
size 8591771352
model-00004-of-000004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:20a550172ad5761a9a6bad8543968ea421f2963f33d5d4a2822cfeca1b6542fd
size 6523397144
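The four shard entries above are Git LFS pointer files, not the weights themselves: a version line, the SHA-256 of the real payload (oid), and its size in bytes (about 32 GB across the four shards). Since the oid is the hash of the actual file, a downloaded shard can be verified locally; a minimal sketch, with the local path assumed:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so a multi-gigabyte shard never has to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# oid copied from the pointer for shard 4 above; the path is a local assumption.
expected = "20a550172ad5761a9a6bad8543968ea421f2963f33d5d4a2822cfeca1b6542fd"
assert sha256_of("model-00004-of-000004.safetensors") == expected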
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
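Although the diff is not rendered, this file follows the standard sharded-safetensors index layout: a "metadata" block with the total byte size and a "weight_map" from each tensor name to the shard that stores it. A minimal sketch for checking how tensors are spread across the four shards, assuming the file has been downloaded locally:

import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # total parameter bytes across shards
print(Counter(index["weight_map"].values()))  # number of tensors stored per shard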
 
processor_config.json ADDED
@@ -0,0 +1,116 @@
{
  "add_special_token": false,
  "candidate_resolutions": [
    [384, 384],
    [384, 768],
    [768, 384],
    [384, 1152],
    [1152, 384],
    [384, 1536],
    [1536, 384],
    [768, 768],
    [384, 1920],
    [1920, 384],
    [384, 2304],
    [2304, 384],
    [768, 1152],
    [1152, 768],
    [384, 2688],
    [2688, 384],
    [384, 3072],
    [3072, 384],
    [768, 1536],
    [1536, 768],
    [384, 3456],
    [3456, 384],
    [1152, 1152]
  ],
  "downsample_ratio": 2,
  "ignore_id": -100,
  "image_mean": [0.5, 0.5, 0.5],
  "image_std": [0.5, 0.5, 0.5],
  "image_token": "<image>",
  "mask_prompt": false,
  "normalize": true,
  "pad_token": "<\uff5c\u2581pad\u2581\uff5c>",
  "patch_size": 14,
  "processor_class": "DeepseekVLV2Processor",
  "sft_format": "deepseek"
}
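Every candidate resolution is a multiple of the 384-pixel SigLIP tile, so an input image is matched to one of these grids, cut into 384x384 tiles, and normalized with mean = std = 0.5 per channel, which maps [0, 1] pixels to [-1, 1]. The actual matching rule lives in DeepseekVLV2Processor; the sketch below uses an assumed best-fit heuristic purely for illustration:

import numpy as np

# Subset of candidate_resolutions above; the selection rule is an assumption,
# not DeepseekVLV2Processor's actual implementation.
CANDIDATES = [(384, 384), (384, 768), (768, 384), (768, 768), (1152, 1152)]

def pick_resolution(width, height, candidates=CANDIDATES):
    def score(cand):
        cw, ch = cand
        scale = min(cw / width, ch / height, 1.0)     # fit without upscaling
        fitted_area = (scale * width) * (scale * height)
        return (-fitted_area, cw * ch)                # keep most area, waste least
    return min(candidates, key=score)

print(pick_resolution(1024, 512))                     # -> (1152, 1152) for this subset

# Normalization with mean = std = 0.5 maps [0, 1] pixels to [-1, 1].
tile = np.random.rand(384, 384, 3)                    # dummy RGB tile in [0, 1]
normalized = (tile - np.full(3, 0.5)) / np.full(3, 0.5)
assert -1.0 <= normalized.min() and normalized.max() <= 1.0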
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@
{
  "additional_special_tokens": [
    {
      "content": "<|User|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|Assistant|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    }
  ],
  "bos_token": {
    "content": "<|begin▁of▁sentence|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|end▁of▁sentence|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|▁pad▁|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
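The pad token here appears to be the same string as the escaped "<\uff5c\u2581pad\u2581\uff5c>" in processor_config.json: fullwidth vertical bars (U+FF5C) around "▁pad▁" (U+2581), not ASCII pipes. A quick check of that assumption:

import json

# Decoding the \uff5c / \u2581 escapes from processor_config.json yields the
# pad token shown above: fullwidth bars around "▁pad▁", not ASCII "|".
escaped = '"<\\uff5c\\u2581pad\\u2581\\uff5c>"'
print(json.loads(escaped))   # <｜▁pad▁｜>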
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,110 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "100000": {"content": "<|begin▁of▁sentence|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": true},
    "100001": {"content": "<|end▁of▁sentence|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": true},
    "100002": {"content": "<|▁pad▁|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100003": {"content": "<image>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100004": {"content": "<|ref|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100005": {"content": "<|/ref|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100006": {"content": "<|det|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100007": {"content": "<|/det|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100008": {"content": "<|grounding|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100009": {"content": "<|User|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100010": {"content": "<|Assistant|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
  },
  "additional_special_tokens": ["<|User|>", "<|Assistant|>"],
  "bos_token": "<|begin▁of▁sentence|>",
  "chat_template": "{%- set found_item = false -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set found_item = true -%}\n {%- endif -%}\n{%- endfor -%}\n{%- if not found_item -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response:\\n'}}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|end▁of▁sentence|>",
  "legacy": true,
  "model_max_length": 100000000,
  "pad_token": "<|▁pad▁|>",
  "processor_class": "DeepseekVLV2Processor",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": null,
  "use_default_system_prompt": false
}
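Two details stand out: added_tokens_decoder pins the vision and chat control tokens to ids 100000 through 100010, and the chat_template is an Alpaca-style "### Instruction / ### Response" format that injects a DeepSeek Coder system preamble when no system message is supplied. A minimal usage sketch, with a placeholder repo id:

from transformers import AutoTokenizer

# Placeholder repo id; substitute the actual location of this checkpoint.
tok = AutoTokenizer.from_pretrained("your-namespace/deepseek-vl2-checkpoint")
print(tok.convert_tokens_to_ids("<image>"))   # 100003 per added_tokens_decoder

messages = [{"role": "user", "content": "Describe this image. <image>"}]
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)   # coder preamble + "### Instruction:\n...\n### Response:\n"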