emanuelaboros committed
Commit f7d8a91 · verified · 1 parent: 60133e3

Upload tokenizer

Files changed (3)
  1. tokenizer.json +0 -0
  2. tokenizer_config.json +8 -5
  3. vocab.txt +0 -0
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -8,7 +8,7 @@
       "single_word": false,
       "special": true
     },
-    "2": {
+    "1": {
       "content": "[UNK]",
       "lstrip": false,
       "normalized": false,
@@ -16,7 +16,7 @@
       "single_word": false,
       "special": true
     },
-    "3": {
+    "2": {
       "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
@@ -24,7 +24,7 @@
       "single_word": false,
       "special": true
     },
-    "4": {
+    "3": {
       "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
@@ -32,7 +32,7 @@
       "single_word": false,
       "special": true
     },
-    "5": {
+    "4": {
       "content": "[MASK]",
       "lstrip": false,
       "normalized": false,
@@ -43,12 +43,15 @@
     },
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
   "do_lower_case": false,
   "mask_token": "[MASK]",
+  "max_len": 512,
   "model_max_length": 512,
+  "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "strip_accents": null,
+  "strip_accents": false,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
   "unk_token": "[UNK]"
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff
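
For reference (not part of the commit): with BertTokenizer a token's id is simply its line index in vocab.txt, which is why the id keys in added_tokens_decoder above shift when vocab.txt is reordered. A small sketch, assuming vocab.txt from this revision is available locally:

    # Assumption: vocab.txt from this commit is in the working directory.
    with open("vocab.txt", encoding="utf-8") as f:
        vocab = {line.rstrip("\n"): idx for idx, line in enumerate(f)}

    for t in ("[UNK]", "[CLS]", "[SEP]", "[MASK]"):
        print(t, vocab.get(t))   # expected 1, 2, 3, 4, matching tokenizer_config.json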