Crystalcareai committed on
Commit 75688f0
1 Parent(s): bfb1b9e

Upload tokenization_gemmoe.py

Files changed (1)
  1. tokenization_gemmoe.py +313 -0
tokenization_gemmoe.py ADDED
@@ -0,0 +1,313 @@
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Gemmoe."""
import os
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from transformers.utils import logging

from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer


if TYPE_CHECKING:
    pass

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}

SPIECE_UNDERLINE = "▁"

class GemmoeTokenizer(PreTrainedTokenizer):
    """
    Construct a Gemmoe tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset, as
    there is no padding token in the original model.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<bos>"`):
            The beginning-of-sequence token that was used during pretraining. Can be used as a sequence classifier
            token.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<eos>"`):
            The end-of-sequence token.
        pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<pad>"`):
            A special token used to make arrays of tokens the same size for batching purposes. It will then be
            ignored by attention mechanisms or loss computation.
        sp_model_kwargs (`Dict[str, Any]`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other
            things, to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite, samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether or not to add a `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
            extra spaces.
        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
            Whether or not the default system prompt for Gemmoe should be used.
        spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to add spaces between special tokens.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<bos>",
        eos_token="<eos>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        clean_up_tokenization_spaces=False,
        use_default_system_prompt=False,
        spaces_between_special_tokens=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token

        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.use_default_system_prompt = use_default_system_prompt

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            sp_model_kwargs=self.sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            use_default_system_prompt=use_default_system_prompt,
            spaces_between_special_tokens=spaces_between_special_tokens,
            **kwargs,
        )
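
    # Minimal usage sketch (the "tokenizer.model" path below is an illustrative assumption,
    # not something shipped in this commit):
    #
    #     tok = GemmoeTokenizer("tokenizer.model")
    #     enc = tok("Hello world")             # add_bos_token=True prepends <bos> by default
    #     text = tok.decode(enc["input_ids"])  # no <eos> is appended unless add_eos_token=True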

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text, **kwargs):
        """
        Returns a tokenized string. The Gemmoe tokenizer never adds a prefix space.
        """
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        spaces_between_special_tokens: bool = False,
        **kwargs,
    ) -> str:
        sub_texts = []
        current_sub_text = []
        for ids in token_ids:
            if skip_special_tokens and ids in self.all_special_ids:
                continue
            if ids in self._added_tokens_decoder:
                if current_sub_text:
                    sub_texts.append(self.sp_model.decode(current_sub_text))
                sub_texts.append(self._added_tokens_decoder[ids].content)
                current_sub_text = []
            else:
                current_sub_text.append(ids)
        if current_sub_text:
            sub_texts.append(self.sp_model.decode(current_sub_text))
        if spaces_between_special_tokens:
            sub_texts = " ".join(sub_texts)
        else:
            sub_texts = "".join(sub_texts)
        return sub_texts

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self._added_tokens_encoder:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
        output = bos_token_id + token_ids_0 + eos_token_id
        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id
        return output
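
    # With the defaults (add_bos_token=True, add_eos_token=False) this gives, for example:
    #     build_inputs_with_special_tokens([10, 11])   -> [bos_token_id, 10, 11]
    #     build_inputs_with_special_tokens([10], [20]) -> [bos_token_id, 10, bos_token_id, 20]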

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (
            bos_token_id
            + ([0] * len(token_ids_0))
            + eos_token_id
            + bos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )
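
    # Example with the defaults: get_special_tokens_mask([10, 11]) -> [1, 0, 0]
    # (the leading 1 marks the prepended <bos>; there is no trailing 1 because add_eos_token=False).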

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is None, only the first portion of the mask (0s) is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
        return output

    def _build_conversation_input_ids(self, conversation: List[List[int]]) -> List[int]:
        input_ids = []
        for i, history in enumerate(conversation):
            if i % 2 == 0:
                input_ids.extend(
                    [self.bos_token_id, self.convert_tokens_to_ids("<start_of_turn>")]
                    + history
                    + [self.convert_tokens_to_ids("<end_of_turn>")]
                )
            else:
                input_ids.extend(
                    [
                        self.bos_token_id,
                        self.convert_tokens_to_ids("<start_of_turn>"),
                        self.convert_tokens_to_ids("model"),
                    ]
                    + history
                    + [self.convert_tokens_to_ids("<end_of_turn>\n")]
                )
        input_ids.append(self.eos_token_id)
        return input_ids
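
    # Resulting layout for a two-turn (user, model) conversation, shown symbolically:
    #     [<bos>, <start_of_turn>, *user_ids, <end_of_turn>,
    #      <bos>, <start_of_turn>, model, *model_ids, <end_of_turn>\n, <eos>]
    # This sketch assumes "<start_of_turn>", "<end_of_turn>", "model" and "<end_of_turn>\n"
    # each resolve to a single id in the accompanying tokenizer.model.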