pain committed
Commit b3caf09 • 1 parent: 37d7242

Upload 7 files

arabertv2-vit-B-16-siglib-mscoco/arabertv2-vit-B-16-siglib/heads_of_the_model_arabertv2-ViT-B-16-SigLIP-512-200_.pickle ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78c10ff89b117694d0bc80999e622c11b8c1bb5604c109066d4ef7dc19b0f021
+size 2362569
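
The pickle file is tracked with Git LFS, so the diff records only its hash and size (about 2.3 MB); the filename suggests it holds the projection heads trained on top of the AraBERTv2 and ViT-B-16-SigLIP encoders. A minimal sketch of inspecting it, assuming it deserializes with the standard library; nothing about the object's structure is visible from the pointer, so treat the shape of `heads` as an assumption:

import pickle

# Assumption: the file contains ordinary picklable objects (e.g. head
# weights); the LFS pointer above only confirms the hash and size.
with open("heads_of_the_model_arabertv2-ViT-B-16-SigLIP-512-200_.pickle", "rb") as f:
    heads = pickle.load(f)

print(type(heads))  # inspect what was stored before relying on it
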
config.json ADDED
@@ -0,0 +1,24 @@
+{
+  "_name_or_path": "aubmindlab/bert-base-arabertv2",
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.31.0",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 64000
+}
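
config.json describes a standard 12-layer, 12-head BERT encoder (hidden size 768) initialized from aubmindlab/bert-base-arabertv2, with the 64,000-token AraBERTv2 vocabulary. A minimal sketch of reading these hyperparameters with transformers; "user/repo" is a placeholder, since the commit page does not show the repository id:

from transformers import AutoConfig

# "user/repo" stands in for this repository's id on the Hub.
config = AutoConfig.from_pretrained("user/repo")
print(config.model_type)   # "bert"
print(config.hidden_size)  # 768
print(config.vocab_size)   # 64000
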
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
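
special_tokens_map.json lists the usual BERT control tokens. Once the tokenizer files in this commit are loaded, these become attributes of the tokenizer object; a short sketch, again with a placeholder repo id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/repo")  # placeholder repo id
print(tok.cls_token, tok.sep_token, tok.pad_token, tok.unk_token, tok.mask_token)
# [CLS] [SEP] [PAD] [UNK] [MASK]
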
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51246f83f9acfa26401d151b8726bd7b0e6726db7d03aa7689ed6d08adb4d7fa
+size 541040640
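
tf_model.h5 (~541 MB, also an LFS pointer) holds TensorFlow weights matching the BertModel architecture in config.json. A sketch of loading them with the same placeholder repo id; from_tf=True would convert the checkpoint for PyTorch use:

from transformers import TFBertModel

# TensorFlow weights, as the tf_model.h5 filename indicates.
model = TFBertModel.from_pretrained("user/repo")  # placeholder repo id

# PyTorch users can convert the TF checkpoint on the fly:
# from transformers import BertModel
# pt_model = BertModel.from_pretrained("user/repo", from_tf=True)
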
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "max_len": 512,
+  "model_max_length": 512,
+  "never_split": [
+    "+ك",
+    "+كما",
+    "ك+",
+    "+وا",
+    "+ين",
+    "و+",
+    "+كن",
+    "+ان",
+    "+هم",
+    "+ة",
+    "[بريد]",
+    "لل+",
+    "+ي",
+    "+ت",
+    "+ن",
+    "س+",
+    "ل+",
+    "[مستخدم]",
+    "+كم",
+    "+ا",
+    "ب+",
+    "ف+",
+    "+نا",
+    "+ها",
+    "+ون",
+    "+هما",
+    "ال+",
+    "+ه",
+    "+هن",
+    "+ات",
+    "[رابط]"
+  ],
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
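
The never_split list protects AraBERTv2's segmentation markers (prefix/suffix fragments such as "ال+" and "+ها") and anonymization placeholders ([بريد] "email", [مستخدم] "user", [رابط] "link") from BertTokenizer's basic punctuation splitting, so pre-segmented input survives tokenization intact. A sketch, assuming the placeholder repo id as before:

from transformers import BertTokenizer

tok = BertTokenizer.from_pretrained("user/repo")  # placeholder repo id

# Because "ال+" is in never_split, the basic tokenizer will not break it
# at the "+"; it reaches the WordPiece step as a single unit.
print(tok.tokenize("ال+ كتاب"))
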
vocab.txt ADDED
The diff for this file is too large to render. See raw diff