ksyang committed on
Commit 24248cc
1 Parent(s): f6efd52

Upload tokenizer

Files changed (3):
  1. special_tokens_map.json +6 -13
  2. tokenizer.json +40 -13
  3. tokenizer_config.json +35 -12
special_tokens_map.json CHANGED
@@ -1,29 +1,22 @@
 {
   "bos_token": {
-    "content": "<s>",
+    "content": "<|begin_of_text|>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "</s>",
+    "content": "<|end_of_text|>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "pad_token": {
-    "content": "<unk>",
+    "content": "<|pad|>",
     "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   }
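Taken together, this file swaps the Llama-2-style specials (<s>, </s>, <unk>) for pipe-delimited names, turns normalization off for all of them, and drops unk_token entirely. A minimal sketch of checking the new mapping after this commit, assuming a standard transformers environment; the repo id below is a hypothetical placeholder, not the actual repository name:

from transformers import AutoTokenizer

# "ksyang/model" is a placeholder repo id for illustration only.
tok = AutoTokenizer.from_pretrained("ksyang/model")

print(tok.bos_token)  # <|begin_of_text|>
print(tok.eos_token)  # <|end_of_text|>
print(tok.pad_token)  # <|pad|>
print(tok.unk_token)  # None, since unk_token was removed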
tokenizer.json CHANGED
@@ -5,29 +5,56 @@
   "added_tokens": [
     {
       "id": 0,
-      "content": "<unk>",
+      "content": "<|pad|>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true,
+      "normalized": false,
       "special": true
     },
     {
       "id": 1,
-      "content": "<s>",
+      "content": "<|begin_of_text|>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true,
+      "normalized": false,
       "special": true
     },
     {
       "id": 2,
-      "content": "</s>",
+      "content": "<|end_of_text|>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 5,
+      "content": "<|eot_id|>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 6,
+      "content": "<|start_header_id|>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 7,
+      "content": "<|end_header_id|>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
       "special": true
     }
   ],
@@ -101,21 +128,21 @@
   "model": {
     "type": "BPE",
     "dropout": null,
-    "unk_token": "<unk>",
+    "unk_token": null,
     "continuing_subword_prefix": null,
     "end_of_word_suffix": null,
     "fuse_unk": true,
     "byte_fallback": true,
     "ignore_merges": false,
     "vocab": {
-      "<unk>": 0,
-      "<s>": 1,
-      "</s>": 2,
+      "<|pad|>": 0,
+      "<|begin_of_text|>": 1,
+      "<|end_of_text|>": 2,
       "\t": 3,
       "▁▁": 4,
-      "<reserved_2>": 5,
-      "<reserved_3>": 6,
-      "<reserved_4>": 7,
+      "<|eot_id|>": 5,
+      "<|start_header_id|>": 6,
+      "<|end_header_id|>": 7,
       "\n": 8,
       "<h2>": 9,
       "<h2/>": 10,
tokenizer_config.json CHANGED
@@ -4,41 +4,64 @@
   "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
-      "content": "<unk>",
+      "content": "<|pad|>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
     "1": {
-      "content": "<s>",
+      "content": "<|begin_of_text|>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
     "2": {
-      "content": "</s>",
+      "content": "<|end_of_text|>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "5": {
+      "content": "<|eot_id|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "6": {
+      "content": "<|start_header_id|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "7": {
+      "content": "<|end_header_id|>",
+      "lstrip": false,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
-  "bos_token": "<s>",
-  "chat_template": "{% for message in messages %}{% if loop.first %}{{ bos_token }}{% endif %}{% if message['role'] == 'user' %}{{ 'Human: ' + message['content'] + '\n\nAssistant: ' + eos_token }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token }}{% endif %}{% endfor %}",
+  "bos_token": "<|begin_of_text|>",
+  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "</s>",
+  "eos_token": "<|end_of_text|>",
   "legacy": true,
   "model_max_length": 200000,
-  "pad_token": "<unk>",
+  "pad_token": "<|pad|>",
   "padding_side": "right",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
-  "tokenizer_class": "LlamaTokenizer",
-  "unk_token": "<unk>",
+  "tokenizer_class": "PreTrainedTokenizerFast",
   "use_default_system_prompt": false
 }
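The new chat_template is the Llama-3-style header format, and the switch from LlamaTokenizer to PreTrainedTokenizerFast means the tokenizer now loads from tokenizer.json alone rather than a SentencePiece model file. A sketch of rendering the template via apply_chat_template, again with a placeholder repo id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ksyang/model")  # placeholder repo id

messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi! How can I help?"},
    {"role": "user", "content": "Summarize this commit."},
]

# tokenize=False returns the rendered string; add_generation_prompt=True
# appends the assistant header so the model continues from there.
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# <|begin_of_text|><|start_header_id|>user<|end_header_id|>
#
# Hello!<|eot_id|><|start_header_id|>assistant<|end_header_id|>
# ...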