aframson committed on
Commit 7772942
1 Parent(s): 8024d3d
Files changed (1)
  1. tokenizeConfig.py +262 -0
tokenizeConfig.py ADDED
@@ -0,0 +1,262 @@
# Copyright (c) 2023, Baichuan Intelligent Technology. All rights reserved.

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {},
    "tokenizer_file": {},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}


class OBTokenzier(PreTrainedTokenizer):
    """
    Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        bos_token = (
            AddedToken(bos_token, lstrip=False, rstrip=False)
            if isinstance(bos_token, str)
            else bos_token
        )
        eos_token = (
            AddedToken(eos_token, lstrip=False, rstrip=False)
            if isinstance(eos_token, str)
            else eos_token
        )
        unk_token = (
            AddedToken(unk_token, lstrip=False, rstrip=False)
            if isinstance(unk_token, str)
            else unk_token
        )
        pad_token = (
            AddedToken(pad_token, lstrip=False, rstrip=False)
            if isinstance(pad_token, str)
            else pad_token
        )
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            sp_model_kwargs=self.sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Returns a tokenized string."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for i, token in enumerate(tokens):
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special and i != 0:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def _encode(self, text):
        """Encodes a string of text into a list of token ids (no special tokens added)."""
        tokens = self._tokenize(text)
        ids = self._convert_token_to_id(tokens)
        return ids

    def _decode(self, ids):
        """Decodes a list of token ids back into a plain string."""
        tokens = self._convert_id_to_token(ids)
        text = self.convert_tokens_to_string(tokens)
        return text

    def save_vocabulary(
        self, save_directory, filename_prefix: Optional[str] = None
    ) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "")
            + VOCAB_FILES_NAMES["vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(
            out_vocab_file
        ) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True,
            )

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (
            bos_token_id
            + ([0] * len(token_ids_0))
            + eos_token_id
            + bos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is None, only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of ids.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)

        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)

        return output
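
For orientation, here is a minimal usage sketch of the tokenizer added in this commit. It is illustrative only and not part of the committed file: the "tokenizer.model" path and the sample text are placeholders, and it assumes a trained SentencePiece model file and a transformers version compatible with this class are available.

# Illustrative sketch, not part of the commit. The model path and sample text
# are placeholders; a trained SentencePiece model file is assumed to exist.
from tokenizeConfig import OBTokenzier

tokenizer = OBTokenzier(vocab_file="tokenizer.model")

print(tokenizer.vocab_size)                  # size of the SentencePiece vocabulary
tokens = tokenizer.tokenize("Hello world")   # SentencePiece pieces for the input text
ids = tokenizer._encode("Hello world")       # helper defined above: raw piece ids, no BOS/EOS
text = tokenizer._decode(ids)                # helper defined above: ids back to a plain string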