rajammanabrolu committed
Commit
cb4eb60
1 Parent(s): 7466ad2

Delete tiktoken.py

Files changed (1)
  1. tiktoken.py +0 -359
tiktoken.py DELETED
@@ -1,359 +0,0 @@
- # Copyright 2022 MosaicML LLM Foundry authors
- # SPDX-License-Identifier: Apache-2.0
-
- from functools import lru_cache
- from typing import Any, Dict, List, Optional, Tuple
-
- from transformers import PreTrainedTokenizer
-
- DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible."""
-
-
- # Taken from
- # https://github.com/huggingface/transformers/blob/8aca43bdb3cb9a5020f6d57589d85679dc873b1c/src/transformers/models/gpt2/tokenization_gpt2.py#L62-L84
- @lru_cache()
- def bytes_to_unicode():
-     """Returns a mapping between utf-8 bytes and unicode strings.
-
-     We specifically avoid mapping to whitespace/control characters that the
-     bpe code barfs on.
-
-     The reversible bpe codes work on unicode strings. This means you need a
-     large # of unicode characters in your vocab if you want to avoid UNKs. When
-     you're at something like a 10B token dataset you end up needing around 5K
-     for decent coverage. This is a significant percentage of your normal, say,
-     32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and
-     unicode strings.
-     """
-     bs = (list(range(ord('!'), ord('~') + 1)) +
-           list(range(ord('¡'), ord('¬') + 1)) +
-           list(range(ord('®'), ord('ÿ') + 1)))
-     cs = bs[:]
-     n = 0
-     for b in range(2**8):
-         if b not in bs:
-             bs.append(b)
-             cs.append(2**8 + n)
-             n += 1
-     cs = [chr(n) for n in cs]
-     return dict(zip(bs, cs))
-
-
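(Editor's note, not part of the deleted file: a minimal sketch of the guarantee this function provides, assuming only bytes_to_unicode as defined above.)

# bytes_to_unicode() is a bijection over all 256 byte values, mapping each to
# a distinct printable character, so decoding can invert it exactly.
byte_encoder = bytes_to_unicode()
assert len(byte_encoder) == 256 and len(set(byte_encoder.values())) == 256
byte_decoder = {v: k for k, v in byte_encoder.items()}
assert all(byte_decoder[byte_encoder[b]] == b for b in range(256))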
- class TiktokenTokenizerWrapper(PreTrainedTokenizer):
-     """A thin wrapper around tiktoken to make it compatible with Hugging Face
-     tokenizers.
-
-     See Hugging Face for further documentation on general tokenizer methods.
-     """
-
-     model_input_names = ['input_ids', 'attention_mask']
-
-     def __init__(self,
-                  model_name: Optional[str] = None,
-                  encoding_name: Optional[str] = None,
-                  add_bos_token: bool = False,
-                  add_eos_token: bool = False,
-                  use_default_system_prompt: bool = False,
-                  unk_token: Optional[str] = '<|endoftext|>',
-                  eos_token: Optional[str] = '<|endoftext|>',
-                  bos_token: Optional[str] = '<|endoftext|>',
-                  pad_token: Optional[str] = None,
-                  errors: str = 'replace',
-                  **kwargs: Any):
-         """Constructor creates a tiktoken tokenizer to use as the underlying
-         tokenizer.
-
-         Args:
-             model_name (Optional[str], optional): The name of the model to load from tiktoken. Defaults to None.
-                 Either model_name or encoding_name must be set, but not both.
-             encoding_name (Optional[str], optional): The name of the encoding to load from tiktoken. Defaults to None.
-                 Either model_name or encoding_name must be set, but not both.
-             add_bos_token (bool, optional): Whether to add bos tokens. Defaults to False.
-             add_eos_token (bool, optional): Whether to add eos tokens. Defaults to False.
-             use_default_system_prompt (bool, optional): Use the default system prompt or not. Defaults to False.
-             unk_token (Optional[str], optional): The unk token. Defaults to '<|endoftext|>'.
-             eos_token (Optional[str], optional): The eos token. Defaults to '<|endoftext|>'.
-             bos_token (Optional[str], optional): The bos token. Defaults to '<|endoftext|>'.
-             pad_token (Optional[str], optional): The pad token. Defaults to None.
-             errors (str, optional): Paradigm to follow when decoding bytes to UTF-8. See
-                 [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
-                 Defaults to `"replace"`.
-         """
-         try:
-             import tiktoken
-         except ImportError:
-             raise ImportError(
-                 'You need to install tiktoken to use TiktokenTokenizerWrapper.')
-
-         # Workaround to make the tiktoken Encoding picklable.
-         # https://github.com/huggingface/datasets/issues/5536#issuecomment-1682309347
-         # There is an open PR from HF to add this to tiktoken: https://github.com/openai/tiktoken/pull/181
-         import copyreg
-         import functools
-
-         from tiktoken import Encoding  # type: ignore (thirdParty)
-
-         def pickle_Encoding(enc: Encoding):
-             return (functools.partial(Encoding,
-                                       enc.name,
-                                       pat_str=enc._pat_str,
-                                       mergeable_ranks=enc._mergeable_ranks,
-                                       special_tokens=enc._special_tokens), ())
-
-         copyreg.pickle(Encoding, pickle_Encoding)
-
-         if model_name is not None and encoding_name is not None:
-             raise ValueError(
-                 'You need to specify either model_name or encoding_name, not both.'
-             )
-
-         self.model_name = model_name
-         self.encoding_name = encoding_name
-
-         if self.model_name is not None:
-             self.encoding = tiktoken.encoding_for_model(  # type: ignore (thirdParty)
-                 self.model_name)
-         elif self.encoding_name is not None:
-             self.encoding = tiktoken.get_encoding(  # type: ignore (thirdParty)
-                 self.encoding_name)
-         else:
-             raise ValueError(
-                 'You need to specify either model_name or encoding_name.')
-
-         self.add_bos_token = add_bos_token
-         self.add_eos_token = add_eos_token
-         self.use_default_system_prompt = use_default_system_prompt
-
-         self.byte_encoder = bytes_to_unicode()
-         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
-         self.errors = errors
-
-         self.decoder: Dict[int, str] = {}
-         for i in range(self.encoding.n_vocab):
-             try:
-                 self.encoding.decode_single_token_bytes(i)
-             except KeyError:
-                 continue
-             # Taken from
-             # https://gist.github.com/xenova/a452a6474428de0182b17605a98631ee
-             decoding = ''.join([
-                 bytes_to_unicode()[ord(char)] for char in
-                 self.encoding.decode_single_token_bytes(i).decode('latin-1')
-             ])
-             self.decoder[i] = decoding
-
-         self.encoder: Dict[str, int] = {}
-         for i in range(self.encoding.n_vocab):
-             if i in self.decoder:
-                 self.encoder[self.decoder[i]] = i
-
-         super().__init__(model_name=model_name,
-                          encoding_name=encoding_name,
-                          add_bos_token=add_bos_token,
-                          add_eos_token=add_eos_token,
-                          use_default_system_prompt=use_default_system_prompt,
-                          unk_token=unk_token,
-                          eos_token=eos_token,
-                          bos_token=bos_token,
-                          pad_token=pad_token,
-                          errors=errors,
-                          **kwargs)
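(Editor's note: a hedged sketch, not part of the deleted file, of what the copyreg workaround above buys; encoding_name='gpt2' is an arbitrary illustrative choice.)

import pickle

# Once __init__ has registered pickle_Encoding with copyreg, the otherwise
# unpicklable tiktoken Encoding round-trips through pickle.
tok = TiktokenTokenizerWrapper(encoding_name='gpt2')
enc_copy = pickle.loads(pickle.dumps(tok.encoding))
assert enc_copy.name == tok.encoding.name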
-
-     @property
-     def vocab_size(self) -> int:
-         """Returns vocab size."""
-         return self.encoding.n_vocab
-
-     @property
-     def is_fast(self) -> bool:
-         return False
-
-     @property
-     def default_chat_template(self):
-         """ChatML template for user/assistant conversations.
-
-         Pinning the default ChatML template in case the defaults change.
-         """
-         template = (
-             "{% if messages[0]['role'] == 'system' %}"
-             '{% set loop_messages = messages[1:] %}'
-             "{% set system_message = messages[0]['content'] %}"
-             "{% elif USE_DEFAULT_PROMPT == true and not 'system' in messages[0]['role'] %}"
-             '{% set loop_messages = messages %}'
-             "{% set system_message = 'DEFAULT_SYSTEM_PROMPT' %}"
-             '{% else %}'
-             '{% set loop_messages = messages %}'
-             '{% set system_message = false %}'
-             '{% endif %}'
-             '{% for message in loop_messages %}'
-             '{% if loop.index0 == 0 %}'
-             '{% if system_message != false %}'
-             "{{ '<|im_start|>system\n' + system_message.strip() + '<|im_end|>\n' }}"
-             '{% endif %}'
-             "{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}"
-             '{% else %}'
-             "{{ '\n' + '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}"
-             '{% endif %}'
-             '{% if (add_generation_prompt == true and loop.last) %}'
-             "{{ '\n' + '<|im_start|>' + 'assistant' + '\n' }}"
-             '{% endif %}'
-             '{% endfor %}')
-         template = template.replace(
-             'USE_DEFAULT_PROMPT',
-             'true' if self.use_default_system_prompt else 'false')
-         template = template.replace('DEFAULT_SYSTEM_PROMPT',
-                                     DEFAULT_SYSTEM_PROMPT)
-         return template
-
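(Editor's note: an illustrative rendering of the pinned ChatML template, not from the original file. It assumes tiktoken is installed and a transformers version that still falls back to default_chat_template inside apply_chat_template; encoding_name='gpt2' is arbitrary.)

# Render the template for a one-turn conversation with a generation prompt.
tok = TiktokenTokenizerWrapper(encoding_name='gpt2')
messages = [{'role': 'user', 'content': 'Hi!'}]
print(tok.apply_chat_template(messages, tokenize=False,
                              add_generation_prompt=True))
# Roughly: <|im_start|>user\nHi!<|im_end|>\n<|im_start|>assistant\n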
-     def get_vocab(self) -> Dict[str, int]:
-         """Returns vocab as a dict."""
-         # As far as I can tell, we don't require get_vocab to completely work,
-         # but when using additional_special_tokens, Hugging Face determines the
-         # next token index to add with len(self.get_vocab()), so we need the
-         # _size_ of this dictionary to be correct.
-         vocab_clone = self.encoder.copy()
-         extra_id_index = 0
-         candidate_extra_id = f'<extra_id_{extra_id_index}>'
-         indices_to_fill_in = {i for i in range(self.vocab_size)} - set(
-             vocab_clone.values())
-
-         # Add enough indices to make get_vocab() the right length.
-         for index_to_add in indices_to_fill_in:
-             # Make sure we don't overwrite a token that already exists.
-             while candidate_extra_id in vocab_clone:
-                 extra_id_index += 1
-                 candidate_extra_id = f'<extra_id_{extra_id_index}>'
-
-             # Add the filler token at this index.
-             vocab_clone[candidate_extra_id] = index_to_add
-
-         return vocab_clone
-
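(Editor's note: a quick check, not from the original file, of the size guarantee the comment above describes; encoding_name='gpt2' is arbitrary.)

# The <extra_id_N> fillers pad the dict so len(get_vocab()) == vocab_size
# even when the tiktoken vocab has ids that do not decode.
tok = TiktokenTokenizerWrapper(encoding_name='gpt2')
assert len(tok.get_vocab()) == tok.vocab_size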
-     def _tokenize(self, text: str) -> List[str]:
-         """Returns a tokenized string."""
-         if not isinstance(text, str):
-             raise ValueError(
-                 f'Expected a string input to _tokenize but got {type(text)}.')
-
-         tokens = [
-             self.decoder[t]
-             for t in self.encoding.encode(text, allowed_special='all')
-         ]
-
-         return tokens
-
-     def _convert_token_to_id(self, token: str) -> Optional[int]:
-         """Converts a token (str) into an id using the vocab."""
-         return self.encoder.get(token, self.encoder.get(self.unk_token))
-
-     def _convert_id_to_token(self, index: int) -> Optional[str]:
-         """Converts an index (integer) into a token (str) using the vocab."""
-         # For token ids that fall in a gap in the tokenizer's ids, or beyond
-         # the range of the tokenizer, we return the empty string. This matches
-         # the behavior of Hugging Face fast tokenizers, but not slow tokenizers.
-         return self.decoder.get(index, '')
-
-     def convert_tokens_to_string(self, tokens: List[str]) -> str:
-         """Converts a sequence of tokens (strings) into a single string."""
-         text = ''.join(tokens)
-         text = bytearray([self.byte_decoder[c] for c in text
-                          ]).decode('utf-8', errors=self.errors)
-         return text
-
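(Editor's note: an illustrative round-trip, not from the original file. _tokenize maps text to the byte-level token strings built in __init__, and convert_tokens_to_string inverts that mapping.)

# Byte-level tokens should round-trip back to the original text.
tok = TiktokenTokenizerWrapper(encoding_name='gpt2')
text = 'Hello, world!'
assert tok.convert_tokens_to_string(tok._tokenize(text)) == text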
-     def build_inputs_with_special_tokens(
-             self,
-             token_ids_0: List[int],
-             token_ids_1: Optional[List[int]] = None) -> List[int]:
-         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
-         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
-
-         output = bos_token_id + token_ids_0 + eos_token_id
-
-         if token_ids_1 is not None:
-             output = output + bos_token_id + token_ids_1 + eos_token_id
-
-         return output
-
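(Editor's note: a hedged sketch, not from the original file. With the flags enabled, ordinary encode() calls route through this method and bracket the ids with the configured special tokens.)

# encode() brackets ids with bos/eos when the corresponding flags are set.
tok = TiktokenTokenizerWrapper(encoding_name='gpt2',
                               add_bos_token=True,
                               add_eos_token=True)
ids = tok.encode('hi')
assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id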
-     def get_special_tokens_mask(
-             self,
-             token_ids_0: List[int],
-             token_ids_1: Optional[List[int]] = None,
-             already_has_special_tokens: bool = False) -> List[int]:
-         """Retrieves sequence ids from a token list that has no special tokens
-         added. This method is called when adding special tokens using the
-         tokenizer `prepare_for_model` or `encode_plus` methods.
-
-         Function copied from
-         https://github.com/huggingface/transformers/blob/e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/src/transformers/models/gpt2/tokenization_gpt2.py#L265-L295
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
-                 Whether or not the token list is already formatted with special tokens for the model.
-
-         Returns:
-             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
-         """
-         if already_has_special_tokens:
-             return super().get_special_tokens_mask(
-                 token_ids_0=token_ids_0,
-                 token_ids_1=token_ids_1,
-                 already_has_special_tokens=True)
-
-         bos_token_id = [1] if self.add_bos_token else []
-         eos_token_id = [1] if self.add_eos_token else []
-
-         if token_ids_1 is None:
-             return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
-         return (bos_token_id + ([0] * len(token_ids_0)) + eos_token_id +
-                 bos_token_id + ([0] * len(token_ids_1)) + eos_token_id)
-
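(Editor's note: a small illustration, not from the original file, of the mask this method returns; encoding_name='gpt2' is arbitrary.)

# With add_bos_token=True, the mask flags the prepended bos position.
tok = TiktokenTokenizerWrapper(encoding_name='gpt2', add_bos_token=True)
assert tok.get_special_tokens_mask([11, 22]) == [1, 0, 0]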
-     def create_token_type_ids_from_sequences(
-             self,
-             token_ids_0: List[int],
-             token_ids_1: Optional[List[int]] = None) -> List[int]:
-         sep = [self.sep_token_id]
-
-         if token_ids_1 is None:
-             return len(token_ids_0 + sep) * [0]
-         return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
-
-     def save_vocabulary(self,
-                         save_directory: str,
-                         filename_prefix: Optional[str] = None) -> Tuple[str]:
-
-         # Ignore the type below to keep the original signature.
-         # We are knowingly breaking the signature here, although not 100% certain
-         # it doesn't have side effects.
-         # There is some code in huggingface that calls this function to get the
-         # vocab files, but it doesn't seem to access them (or at least checks
-         # for their existence before accessing them).
-         return (None, None)  # type: ignore
-
-     def sanitize_special_tokens(self) -> int:
-         """Make sure that all the special tokens attributes of the tokenizer
-         (`tokenizer.mask_token`, `tokenizer.cls_token`, etc.) are in the
-         vocabulary.
-
-         Add the missing ones to the vocabulary if needed.
-
-         Return:
-             `int`: The number of tokens added in the vocabulary during the operation.
-         """
-         actual_new_tokens = []
-         for token in self.all_special_tokens_extended:
-             encoded = self.encoding.encode(token, allowed_special='all')
-             if len(encoded) > 1:
-                 actual_new_tokens.append(token)
-
-         return self.add_tokens(actual_new_tokens, special_tokens=True)
-
-
- TiktokenTokenizerWrapper.register_for_auto_class()
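(Editor's note: for reference, a minimal usage sketch of the wrapper this commit deletes. It assumes tiktoken is installed; the model and encoding names are arbitrary examples.)

# The wrapper is driven by either an OpenAI model name or an encoding name,
# then behaves like any slow Hugging Face tokenizer.
tok = TiktokenTokenizerWrapper(model_name='gpt-4')  # or encoding_name='cl100k_base'
ids = tok('Hello, world!')['input_ids']
print(ids)
print(tok.decode(ids))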