ksridhar committed on
Commit 95f740d
Parent: 8e8a936

Upload processor

merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_center_crop",
+     "crop_size",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "do_convert_rgb",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "auto_map": {
+     "AutoProcessor": "processing_jat.JatProcessor"
+   },
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "processor_class": "JatProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
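
The `auto_map` entry above points `AutoProcessor` at the custom `JatProcessor` class shipped in `processing_jat.py` (added below); the remaining keys configure a standard `CLIPImageProcessor` (224x224 resize and center crop, bicubic resampling since `resample` is 3, a `rescale_factor` of 1/255, and CLIP normalization statistics). A minimal loading sketch, not part of this commit; the repository id is a placeholder:

    from transformers import AutoProcessor

    # trust_remote_code=True is needed so AutoProcessor follows the "auto_map" entry
    # and imports JatProcessor from processing_jat.py instead of a built-in class.
    processor = AutoProcessor.from_pretrained("<this-repo-id>", trust_remote_code=True)
    print(type(processor).__name__)  # expected: JatProcessor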
processing_jat.py ADDED
@@ -0,0 +1,408 @@
+ import copy
+ import warnings
+ from typing import Any, Dict, List, Optional, Union
+
+ import torch
+ import torchvision.transforms.functional as F
+ from transformers import BatchEncoding
+ from transformers.processing_utils import ProcessorMixin
+
+
+ def to_tensor(x):
+     """
+     Convert a nested structure of numpy arrays or tensors (including lists and tuples of them)
+     into a tensor. Assumes that all nested structures can be converted into a tensor directly.
+
+     :param x: Nested structure containing numpy arrays, tensors, lists, or tuples
+     :return: torch.Tensor
+     """
+     with warnings.catch_warnings():
+         # Convert specific warning to an error
+         warnings.filterwarnings(
+             "error",
+             category=UserWarning,
+             message=".*Creating a tensor from a list of numpy.ndarrays is extremely slow.*",
+         )
+         try:
+             return torch.Tensor(x)
+         except Exception:
+             if isinstance(x, list):
+                 return torch.stack([to_tensor(item) for item in x])
+             else:
+                 raise TypeError("Unsupported type for conversion to tensor")
+
+
+ def truncate(
+     encoding: Dict[str, List[List[Any]]], max_length: int, truncation_side: str = "right", preserve: bool = False
+ ) -> Dict[str, List[List[Any]]]:
+     """
+     Truncate the sequences in the encoding to the specified maximum length.
+
+     This function is designed to process a batch of sequences represented in the encoding dictionary.
+     Depending on the chosen strategy, sequences are either truncated with the residual data discarded, or the
+     residual data is preserved and added to the batch as new sequences.
+
+     Args:
+         encoding (`Mapping`):
+             A dictionary where each key-value pair consists of a feature name and its corresponding batch of sequences.
+             The sequences are expected to be lists.
+         max_length (`int`):
+             The maximum allowable length for the sequences.
+         truncation_side (`str`, *optional*):
+             The strategy to use for truncation. Can be `"left"` or `"right"`. Defaults to `"right"`.
+         preserve (`bool`, *optional*):
+             Whether to preserve the residual data by adding it as new sequences in the batch. Defaults to `False`.
+
+     Returns:
+         `Dict[str, List[List[Any]]]`:
+             A dictionary with the same keys as the input `encoding`, containing the truncated batch of sequences.
+             If `preserve` is set to `True`, the batch size may increase due to the addition of new sequences formed
+             from the residual data.
+
+     Example:
+
+         >>> encoding = {'feature1': [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]}
+         >>> truncate(encoding, 3, preserve=False)
+         {'feature1': [[1, 2, 3], [6, 7, 8]]}
+
+         >>> truncate(encoding, 3, preserve=True)
+         {'feature1': [[1, 2, 3], [4, 5], [6, 7, 8], [9, 10]]}
+     """
+     truncated_encoding = {}
+
+     for key, sequences in encoding.items():
+         if not all(isinstance(seq, list) for seq in sequences):
+             raise TypeError(f"All sequences under key {key} should be of type list.")
+
+         truncated_sequences = []
+
+         for seq in sequences:
+             if len(seq) <= max_length:
+                 truncated_sequences.append(seq)
+                 continue
+
+             if preserve:  # truncate and append the residual as new sequences
+                 if truncation_side == "right":
+                     truncated_sequences.extend([seq[i : i + max_length] for i in range(0, len(seq), max_length)])
+                 elif truncation_side == "left":
+                     n = len(seq) // max_length + int(len(seq) % max_length > 0)
+                     low, high = len(seq) - n * max_length, len(seq)
+                     truncated_sequences.extend(
+                         [seq[max(0, i - max_length) : i] for i in range(high, low, -max_length)]
+                     )
+                 else:
+                     raise ValueError(f"Invalid truncation_side: {truncation_side}")
+             else:  # simply truncate the sequence
+                 if truncation_side == "right":
+                     truncated_sequences.append(seq[:max_length])
+                 elif truncation_side == "left":
+                     truncated_sequences.append(seq[-max_length:])
+                 else:
+                     raise ValueError(f"Invalid truncation_side: {truncation_side}")
+
+         truncated_encoding[key] = truncated_sequences
+
+     return truncated_encoding
+
+
+ def pad(encoding: Dict[str, List[List[Any]]], target_length: int) -> Dict[str, List[List[Any]]]:
+     """
+     Pad the sequences in the encoding to the specified target length.
+
+     This function is designed to process a batch of sequences represented in the encoding dictionary.
+     The padding value is the first element of each sequence.
+
+     Args:
+         encoding (`Mapping`):
+             A dictionary where each key-value pair consists of a feature name and its corresponding batch of sequences.
+             The sequences are expected to be lists.
+         target_length (`int`):
+             The desired length for the sequences.
+
+     Returns:
+         `Dict[str, List[List[Any]]]`:
+             A dictionary with the same keys as the input `encoding`, containing the padded batch of sequences.
+             An additional key `attention_mask` is added to the dictionary to indicate the positions of the non-padding
+             elements with 1s and the padding elements with 0s. If the input `encoding` already contains an
+             `attention_mask` key, the corresponding mask will be updated such that the original masking is preserved,
+             and the newly added padding elements will be masked with 0s. In other words, the resulting
+             `attention_mask` is a logical "AND" between the provided mask and the mask created due to padding, ensuring
+             that any element masked originally remains masked.
+
+     Example:
+
+         >>> encoding = {'feature1': [[1, 2], [3, 4, 5]]}
+         >>> pad(encoding, 4)
+         {'feature1': [[1, 2, 1, 1], [3, 4, 5, 3]], 'attention_mask': [[1, 1, 0, 0], [1, 1, 1, 0]]}
+
+         >>> encoding = {'feature1': [[1, 2], [3, 4, 5]], "attention_mask": [[1, 0], [0, 1, 1]]}
+         >>> pad(encoding, 4)
+         {'feature1': [[1, 2, 1, 1], [3, 4, 5, 3]], 'attention_mask': [[1, 0, 0, 0], [0, 1, 1, 0]]}
+     """
+     padded_encoding = {}
+
+     for key, sequences in encoding.items():
+         if not all(isinstance(seq, (list, torch.Tensor)) for seq in sequences):
+             raise TypeError(f"All sequences under key {key} should be of type list or tensor.")
+         if key == "attention_mask":  # attention_mask is handled separately
+             continue
+
+         padded_sequences = []
+         pad_mask = []
+
+         for seq in sequences:
+             pad_len = target_length - len(seq)
+             padded_seq = list(seq) + [seq[0]] * max(0, pad_len)
+             mask = [1] * len(seq) + [0] * max(0, pad_len)
+
+             padded_sequences.append(padded_seq)
+             pad_mask.append(mask)
+
+         padded_encoding[key] = padded_sequences
+
+     if "attention_mask" in encoding:
+         padded_encoding["attention_mask"] = [
+             [a * (b[i] if i < len(b) else 0) for i, a in enumerate(row)]
+             for row, b in zip(pad_mask, encoding["attention_mask"])
+         ]
+     else:
+         padded_encoding["attention_mask"] = pad_mask
+
+     return padded_encoding
+
+
+ class JatProcessor(ProcessorMixin):
+     r"""
+     JAT processor which wraps a CLIP image processor and a GPT-2 tokenizer into a single processor.
+
+     [`JatProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`GPT2TokenizerFast`]. See the
+     [`~JatProcessor.__call__`] and [`~JatProcessor.decode`] for more information.
+
+     Args:
+         image_processor ([`AutoImageProcessor`]):
+             The image processor is a required input.
+         tokenizer ([`AutoTokenizer`]):
+             The tokenizer is a required input.
+     """
+
+     attributes = ["image_processor", "tokenizer"]
+     image_processor_class = "AutoImageProcessor"
+     tokenizer_class = "AutoTokenizer"
+
+     DONT_TRUNCATE_OR_PAD = {"pixel_values"}  # keys to pass through without truncation or padding
+
+     def __init__(self, image_processor, tokenizer):
+         super().__init__(image_processor, tokenizer)
+         self.current_processor = self.image_processor
+
+     def _truncate_and_pad(
+         self,
+         encoding: dict,
+         padding: Union[bool, str],
+         truncation: Union[bool, str],
+         truncation_side: str = "right",
+         max_length: Optional[int] = None,
+     ) -> dict:
+         # If max_length is not provided, use the maximum length accepted by the model.
+         if max_length is None:
+             max_length = self.tokenizer.model_max_length
+
+         # Exclude keys that we don't want to truncate or pad.
+         excluded = {key: value for key, value in encoding.items() if key in self.DONT_TRUNCATE_OR_PAD}
+         encoding = {key: value for key, value in encoding.items() if key not in self.DONT_TRUNCATE_OR_PAD}
+
+         # Apply truncation
+         if truncation in [True, "lossy"]:
+             encoding = truncate(encoding, max_length, truncation_side, preserve=False)
+         elif truncation == "preserve":
+             encoding = truncate(encoding, max_length, truncation_side, preserve=True)
+         elif truncation in [False, "do_not_truncate"]:
+             pass
+         else:
+             raise ValueError("Invalid truncation strategy: " + str(truncation))
+
+         # Apply padding
+         if padding in [True, "longest"]:
+             target_length = max(len(seq) for sequences in encoding.values() for seq in sequences)
+             encoding = pad(encoding, target_length)
+         elif padding == "max_length":
+             encoding = pad(encoding, max_length)
+         elif padding in [False, "do_not_pad"]:
+             pass
+         else:
+             raise ValueError("Invalid padding strategy: " + str(padding))
+
+         # Add back the excluded keys.
+         encoding.update(excluded)
+
+         # Particular case: convert image_observations to a tensor here, as the format used
+         # (list of tensors) is not properly handled by the BatchEncoding class.
+         if "image_observations" in encoding:
+             encoding["image_observations"] = to_tensor(encoding["image_observations"])
+
+         return encoding
+
+     def __call__(
+         self,
+         text=None,
+         images=None,
+         continuous_observations=None,
+         discrete_observations=None,
+         text_observations=None,
+         image_observations=None,
+         continuous_actions=None,
+         discrete_actions=None,
+         rewards=None,
+         return_tensors=None,
+         **kwargs,
+     ):
+         """
+         Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
+         and `kwargs` arguments to GPT2TokenizerFast's [`~GPT2TokenizerFast.__call__`] if `text` is not `None` to encode
+         the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
+         CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
+         of the above two methods for more information.
+
+         Args:
+             text (`str`, `List[str]`, `List[List[str]]`):
+                 The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+                 (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+                 `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+             images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`,
+             `List[np.ndarray]`, `List[torch.Tensor]`):
+                 The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+                 tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is
+                 the number of channels, H and W are image height and width.
+             continuous_observations (`List[List[List[float]]]`):
+                 The continuous observations or batch of continuous observations to be encoded.
+             discrete_observations (`List[List[List[int]]]`):
+                 The discrete observations or batch of discrete observations to be encoded.
+             text_observations (`List[List[str]]`):
+                 The text observations or batch of text observations to be encoded.
+             image_observations (`List[List[PIL.Image.Image]]`, `List[List[np.ndarray]]`, `List[List[torch.Tensor]]`):
+                 The image observations or batch of image observations to be encoded.
+             continuous_actions (`List[List[List[float]]]`):
+                 The continuous actions or batch of continuous actions to be encoded.
+             discrete_actions (`List[List[int]]`):
+                 The discrete actions or batch of discrete actions to be encoded.
+             rewards (`List[List[float]]`):
+                 The rewards or batch of rewards to be encoded.
+             return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                 If set, will return tensors of a particular framework. Acceptable values are:
+
+                 - `'tf'`: Return TensorFlow `tf.constant` objects.
+                 - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                 - `'np'`: Return NumPy `np.ndarray` objects.
+                 - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+         Returns:
+             [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+             - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+             - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+               `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+               `None`).
+             - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
+         """
+         # We truncate and pad ourselves, so we pass padding=False and truncation=False to the tokenizer.
+         padding = kwargs.pop("padding", False)
+         truncation = kwargs.pop("truncation", False)
+         truncation_side = kwargs.pop("truncation_side", "right")
+         max_length = kwargs.pop("max_length", None)
+
+         # Ensure that the input is batched
+         if text is not None and not isinstance(text, list):
+             text = [text]
+
+         encoding = {}
+         if text is not None:
+             encoding["input_ids"] = self.tokenizer(text, **kwargs)["input_ids"]
+         if images is not None:
+             encoding["pixel_values"] = self.image_processor(images, **kwargs).pixel_values
+         if continuous_observations is not None:
+             encoding["continuous_observations"] = copy.deepcopy(continuous_observations)
+         if discrete_observations is not None:
+             encoding["discrete_observations"] = copy.deepcopy(discrete_observations)
+         if text_observations is not None:
+             if "discrete_observations" not in encoding:
+                 raise ValueError("discrete_observations must be provided if text_observations is provided")
+             for batch_idx, sequence in enumerate(text_observations):
+                 encoded_text = self.tokenizer(sequence, max_length=64, padding="max_length")["input_ids"]
+                 for timestep, text_tokens in enumerate(encoded_text):
+                     encoding["discrete_observations"][batch_idx][timestep].extend(text_tokens)
+         if image_observations is not None:
+             image_observations = [[(F.to_tensor(im) - 0.5) / 0.5 for im in ep] for ep in image_observations]
+             encoding["image_observations"] = image_observations
+         if continuous_actions is not None:
+             encoding["continuous_actions"] = copy.deepcopy(continuous_actions)
+         if discrete_actions is not None:
+             encoding["discrete_actions"] = copy.deepcopy(discrete_actions)
+
+         if rewards is not None:
+             encoding["rewards"] = [[float(r) for r in ep] for ep in rewards]
+
+         # Handle the image+text case: reduce max_length, since the image and text tokens will be concatenated.
+         if text is not None and images is not None:
+             if max_length is None:
+                 max_length = self.tokenizer.model_max_length
+             max_length -= (224 // 16) ** 2  # subtract the number of image tokens
+         elif (
+             continuous_observations is not None
+             or discrete_observations is not None
+             or text_observations is not None
+             or image_observations is not None
+         ):
+             if max_length is None:
+                 max_length = self.tokenizer.model_max_length
+             max_length //= 2  # observations and actions are interleaved
+
+         encoding = self._truncate_and_pad(encoding, padding, truncation, truncation_side, max_length)
+
+         return BatchEncoding(encoding, tensor_type=return_tensors)
+
+     def batch_decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to GPT2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+         refer to the docstring of this method for more information.
+         """
+         return self.tokenizer.batch_decode(*args, **kwargs)
+
+     def decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to GPT2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+         the docstring of this method for more information.
+         """
+         return self.tokenizer.decode(*args, **kwargs)
+
+     def pad(self, *args, **kwargs):
+         # Collate a list of per-sample feature dicts into a single batch: stack features that are already
+         # tensors; otherwise pad the list features via _truncate_and_pad (no truncation) and build the mask.
+         inputs = args[0]
+         keys = [key for key in inputs[0].keys() if inputs[0][key] is not None]
+         inputs = {key: [arg[key] for arg in inputs] for key in keys}
+         elmt = next(iter(inputs.values()))
+         if isinstance(elmt[0], torch.Tensor) and not isinstance(elmt, torch.Tensor):
+             encoding = {key: torch.stack(inputs[key]) for key in inputs.keys()}
+         else:
+             encoding = self._truncate_and_pad(
+                 inputs, padding=kwargs.get("padding", False), truncation=False, max_length=kwargs.get("max_length")
+             )
+
+         return BatchEncoding(encoding, tensor_type=kwargs.get("return_tensors"))
+
+     @property
+     def model_input_names(self):
+         return [
+             "input_ids",
+             "attention_mask",
+             "pixel_values",
+             "continuous_observations",
+             "discrete_observations",
+             "image_observations",
+             "continuous_actions",
+             "discrete_actions",
+             "rewards",
+         ]
+
+
+ JatProcessor.register_for_auto_class("AutoProcessor")
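
For reference, a usage sketch of the processor defined above (not part of the commit; the repository id and episode shapes are illustrative). With `model_max_length` set to 40 in `tokenizer_config.json` below, `__call__` caps episodes at 40 // 2 = 20 interleaved timesteps, so a 50-step episode is split into three batch rows when `truncation="preserve"`:

    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("<this-repo-id>", trust_remote_code=True)  # placeholder id

    # One episode of 50 timesteps with 4-dim observations, 2-dim actions and scalar rewards.
    continuous_observations = [[[0.1, -0.2, 0.3, 0.0] for _ in range(50)]]
    continuous_actions = [[[0.5, -0.5] for _ in range(50)]]
    rewards = [[1.0 for _ in range(50)]]

    batch = processor(
        continuous_observations=continuous_observations,
        continuous_actions=continuous_actions,
        rewards=rewards,
        truncation="preserve",   # split overlong episodes into extra rows instead of discarding data
        padding="max_length",    # pad every resulting row to max_length timesteps
        return_tensors="pt",
    )
    # batch["continuous_observations"].shape -> (3, 20, 4); the padded tail of the
    # final, shorter row is marked with zeros in batch["attention_mask"].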
processor_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "auto_map": {
+     "AutoProcessor": "processing_jat.JatProcessor"
+   },
+   "processor_class": "JatProcessor"
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "auto_map": {
+     "AutoProcessor": "processing_jat.JatProcessor"
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_input_names": [
+     "input_ids",
+     "attention_mask"
+   ],
+   "model_max_length": 40,
+   "pad_token": "<|endoftext|>",
+   "processor_class": "JatProcessor",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
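
A small sketch of what these settings imply once the tokenizer is loaded (not part of the commit; the repository id is a placeholder): GPT-2's `<|endoftext|>` token doubles as BOS, EOS, UNK and PAD, and text is capped at 40 tokens, which `JatProcessor.__call__` halves to 20 timesteps when observations are present.

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("<this-repo-id>")  # placeholder id
    assert tokenizer.pad_token == tokenizer.eos_token == "<|endoftext|>"
    assert tokenizer.model_max_length == 40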
vocab.json ADDED
The diff for this file is too large to render. See raw diff