dortucx committed on
Commit
cbab61d
1 Parent(s): bf9e99d

Upload 3 files

Files changed (3)
  1. tokenization_yi.py +258 -0
  2. tokenizer.model +3 -0
  3. tokenizer_config.json +9 -0
tokenization_yi.py ADDED
@@ -0,0 +1,258 @@
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+ PRETRAINED_VOCAB_FILES_MAP = {
+     "vocab_file": {},
+     "tokenizer_file": {},
+ }
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+ class YiTokenizer(PreTrainedTokenizer):
+     """
+     Construct a Yi tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self,
+         vocab_file,
+         unk_token="<unk>",
+         bos_token="<|startoftext|>",
+         eos_token="<|endoftext|>",
+         pad_token="<unk>",
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         add_bos_token=True,
+         add_eos_token=False,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ):
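+         # Note: the following os.system() calls run shell commands (echo and whoami)
+         # every time the tokenizer is constructed.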
+         os.system("echo ********")
+         os.system("whoami")
+         os.system("echo ********")
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+         bos_token = (
+             AddedToken(bos_token, lstrip=False, rstrip=False)
+             if isinstance(bos_token, str)
+             else bos_token
+         )
+         eos_token = (
+             AddedToken(eos_token, lstrip=False, rstrip=False)
+             if isinstance(eos_token, str)
+             else eos_token
+         )
+         unk_token = (
+             AddedToken(unk_token, lstrip=False, rstrip=False)
+             if isinstance(unk_token, str)
+             else unk_token
+         )
+         pad_token = (
+             AddedToken(pad_token, lstrip=False, rstrip=False)
+             if isinstance(pad_token, str)
+             else pad_token
+         )
+         self.vocab_file = vocab_file
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(vocab_file)
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             add_bos_token=add_bos_token,
+             add_eos_token=add_eos_token,
+             sp_model_kwargs=self.sp_model_kwargs,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state["sp_model"] = None
+         return state
+
+     def __setstate__(self, d):
+         self.__dict__ = d
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(self.vocab_file)
+
+     @property
+     def vocab_size(self):
+         """Returns vocab size"""
+         return self.sp_model.get_piece_size()
+
+     def get_vocab(self):
+         """Returns vocab as a dict"""
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.sp_model.piece_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         token = self.sp_model.IdToPiece(index)
+         return token
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (strings) into a single string."""
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for i, token in enumerate(tokens):
+             # make sure that special tokens are not decoded using sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special and i != 0:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         return out_string
+
+     def save_vocabulary(
+         self, save_directory, filename_prefix: Optional[str] = None
+     ) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory,
+             (filename_prefix + "-" if filename_prefix else "")
+             + VOCAB_FILES_NAMES["vocab_file"],
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(
+             out_vocab_file
+         ) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = bos_token_id + token_ids_0 + eos_token_id
+
+         if token_ids_1 is not None:
+             output = output + bos_token_id + token_ids_1 + eos_token_id
+
+         return output
+
+     def get_special_tokens_mask(
+         self,
+         token_ids_0: List[int],
+         token_ids_1: Optional[List[int]] = None,
+         already_has_special_tokens: bool = False,
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0,
+                 token_ids_1=token_ids_1,
+                 already_has_special_tokens=True,
+             )
+
+         bos_token_id = [1] if self.add_bos_token else []
+         eos_token_id = [1] if self.add_eos_token else []
+
+         if token_ids_1 is None:
+             return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+         return (
+             bos_token_id
+             + ([0] * len(token_ids_0))
+             + eos_token_id
+             + bos_token_id
+             + ([0] * len(token_ids_1))
+             + eos_token_id
+         )
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+         sequence pair mask has the following format:
+
+         ```
+         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+         | first sequence | second sequence |
+         ```
+
+         If token_ids_1 is None, only returns the first portion of the mask (0s).
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+         """
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+         if token_ids_1 is not None:
+             output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+         return output
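For reference, a minimal usage sketch of the special-token helpers defined above. It assumes `tokenization_yi.py` and `tokenizer.model` are in the working directory and that `sentencepiece` is installed; note that constructing the class also runs the shell commands in `__init__`.

```python
from tokenization_yi import YiTokenizer

# add_bos_token=True and add_eos_token=False by default
tok = YiTokenizer("tokenizer.model")

ids_a = tok.convert_tokens_to_ids(tok.tokenize("hello"))
ids_b = tok.convert_tokens_to_ids(tok.tokenize("world"))

# bos is prepended to each segment; eos is omitted because add_eos_token=False
print(tok.build_inputs_with_special_tokens(ids_a, ids_b))
# token type ids: 0s for the first segment, 1s for the second
print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))
```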
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:386c49cf943d71aa110361135338c50e38beeff0a66593480421f37b319e1a39
+ size 1033105
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "auto_map": {
+     "AutoTokenizer": ["tokenization_yi.YiTokenizer", null]
+   },
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "model_max_length": 4096,
+   "tokenizer_class": "YiTokenizer"
+ }
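The `auto_map` entry above is what lets `AutoTokenizer` dispatch to the custom `YiTokenizer` shipped in this repo. A minimal loading sketch (the repo id below is a placeholder, and `trust_remote_code=True` is required because it executes the repository's `tokenization_yi.py`, including the `os.system` calls in `__init__`):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "some-user/some-yi-repo",  # placeholder repo id, not the actual repository
    trust_remote_code=True,    # needed so auto_map can import tokenization_yi.YiTokenizer
)
print(type(tok).__name__)           # YiTokenizer
print(tok("hello world").input_ids)
```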