{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 71,
   "id": "d7f2bdb5-95c2-4a57-80e8-8f1a30a138b0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of files 20 after adding ./c4_cleaned glob *73*.gz\n",
      "Number of files 39 after adding ./c4_cleaned glob *47*.gz\n",
      "Number of files 60 after adding ./c4_cleaned glob *12*.gz\n",
      "Number of files 79 after adding ./c4_cleaned glob *29*.gz\n",
      "Number of files 97 after adding ./c4_cleaned glob *74*.gz\n",
      "Number of files 116 after adding ./c4_cleaned glob *26*.gz\n",
      "Number of files 135 after adding ./c4_cleaned glob *54*.gz\n",
      "Number of files 154 after adding ./c4_cleaned glob *68*.gz\n",
      "Number of files 172 after adding ./c4_cleaned glob *57*.gz\n",
      "Number of files 189 after adding ./c4_cleaned glob *46*.gz\n",
      "Number of files 206 after adding ./c4_cleaned glob *35*.gz\n",
      "Number of files 226 after adding ./c4_cleaned glob *13*.gz\n",
      "Number of files 242 after adding ./c4_cleaned glob *41*.gz\n",
      "Number of files 259 after adding ./c4_cleaned glob *52*.gz\n",
      "Number of files 276 after adding ./c4_cleaned glob *63*.gz\n",
      "Number of files 292 after adding ./c4_cleaned glob *85*.gz\n",
      "Number of files 309 after adding ./c4_cleaned glob *81*.gz\n",
      "Number of files 326 after adding ./c4_cleaned glob *96*.gz\n",
      "Number of files 526 after adding ./nrc_uniq_cleaned_20210223 glob *.gz\n",
      "Number of files 726 after adding ./nu_uniq_cleaned_20210225 glob *.gz\n",
      "726\n",
      "Got 690 training files and 5.0 % 36 validation files\n"
     ]
    }
   ],
   "source": [
    "data_files = []\n",
    "data_dir=\".\"\n",
    "def train_val_files():\n",
    "    import glob\n",
    "    import random\n",
    "    SEED = 12345\n",
    "\n",
    "    def add_jsonlines_dir(path, filespec):\n",
    "        global data_files\n",
    "        data_files += glob.glob(f\"{path}/{filespec}\")\n",
    "        data_files = list(set(data_files))\n",
    "        print(f\"Number of files {len(data_files)} after adding {path} glob {filespec}\")\n",
    "\n",
    "    # add_jsonlines_dir(f\"{data_dir}/oscar_nl_cleaned\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*73*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*47*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*12*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*29*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*74*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*26*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*54*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*68*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*57*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*46*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*35*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*13*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*41*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*52*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*63*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*85*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*81*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/c4_cleaned\", \"*96*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/nrc_uniq_cleaned_20210223\", \"*.gz\")\n",
    "    add_jsonlines_dir(f\"{data_dir}/nu_uniq_cleaned_20210225\", \"*.gz\")\n",
    "    random.Random(SEED).shuffle(data_files)\n",
    "\n",
    "    total = len(data_files)\n",
    "    print(total)\n",
    "    perc = 0.05\n",
    "    val_size = int(perc * total)\n",
    "    train_size = total - val_size\n",
    "    train = data_files[:train_size]\n",
    "    val = data_files[train_size:]\n",
    "    print(f\"Got {len(train)} training files and {perc*100} % {len(val)} validation files\")\n",
    "\n",
    "    assert list(set(train) & set(val)) == [], \"Train overlaps with test\"\n",
    "\n",
    "    return train, val\n",
    "\n",
    "train, val = train_val_files()"
   ]
  },
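  {
   "cell_type": "markdown",
   "id": "split-reproducibility-note",
   "metadata": {},
   "source": [
    "With the file list sorted before the seeded shuffle, the split is deterministic: calling `train_val_files()` again reproduces the same partition. A minimal sketch of that check (it re-globs the same directories and re-prints the counts):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "split-reproducibility-check",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: the seeded shuffle over a sorted file list makes the split\n",
    "# reproducible, so a second call must return identical train/val lists.\n",
    "data_files = []  # reset the module-level accumulator before re-globbing\n",
    "train2, val2 = train_val_files()\n",
    "assert train2 == train and val2 == val, \"train/val split is not reproducible\""
   ]
  },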
  {
   "cell_type": "code",
   "execution_count": 72,
   "id": "66a923c6-1c7e-4ac2-9aec-e75c572104dd",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using custom data configuration default-ce92ec7dc3732df4\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading and preparing dataset json/default (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /home/yeb/.cache/huggingface/datasets/json/default-ce92ec7dc3732df4/0.0.0/793d004298099bd3c4e61eb7878475bcf1dc212bf2e34437d85126758720d7f9...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "0 tables [00:00, ? tables/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "0 tables [00:00, ? tables/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dataset json downloaded and prepared to /home/yeb/.cache/huggingface/datasets/json/default-ce92ec7dc3732df4/0.0.0/793d004298099bd3c4e61eb7878475bcf1dc212bf2e34437d85126758720d7f9. Subsequent calls will reuse this data.\n"
     ]
    }
   ],
   "source": [
    "from datasets import load_dataset\n",
    "datasets = load_dataset('json', data_files={'train': train, 'validation': val})"
   ]
  },
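  {
   "cell_type": "markdown",
   "id": "peek-example-note",
   "metadata": {},
   "source": [
    "A quick peek at one raw example (a sanity-check sketch; the fields `url`, `timestamp` and `text` are what the tokenization step below reports as columns):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "peek-example",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the first training example to confirm the expected JSON fields.\n",
    "datasets[\"train\"][0]"
   ]
  },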
  {
   "cell_type": "code",
   "execution_count": 73,
   "id": "4a6d6009-00e7-4b30-b577-6805dd849b8a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Num examples = 21153916\n"
     ]
    }
   ],
   "source": [
    "print(f\"Num examples = {len(datasets['train'])}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "id": "c6186d88-4296-4d1d-b7cd-d0196f0b0f97",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import (\n",
    "    CONFIG_MAPPING,\n",
    "    FLAX_MODEL_FOR_MASKED_LM_MAPPING,\n",
    "    BatchEncoding,\n",
    "    FlaxT5ForConditionalGeneration,\n",
    "    T5ForConditionalGeneration,\n",
    "    HfArgumentParser,\n",
    "    PreTrainedTokenizerBase,\n",
    "    T5Config,\n",
    "    T5TokenizerFast,\n",
    "    TrainingArguments,\n",
    "    is_tensorboard_available,\n",
    "    set_seed,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "id": "10d90997-6eb6-4399-b1a7-8a858ae4738c",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Start tokenization, remove_column_names = ['url', 'timestamp', 'text']\n"
     ]
    }
   ],
   "source": [
    "tokenizer = T5TokenizerFast.from_pretrained(\"./t5-base-dutch\")\n",
    "\n",
    "def tokenize_function(examples):\n",
    "    return tokenizer(examples['text'], return_attention_mask=False)\n",
    "\n",
    "column_names = datasets[\"train\"].column_names\n",
    "print(f\"Start tokenization, remove_column_names = {column_names}\")\n",
    "\n",
    "tokenized_datasets = datasets.map(\n",
    "    tokenize_function,\n",
    "    batched=True,\n",
    "    num_proc=96,\n",
    "    remove_columns=column_names,\n",
    "    load_from_cache_file=True,\n",
    ")\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "id": "de7983e1-775d-4ee3-bf66-681f731501fb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "21153916"
      ]
     },
     "execution_count": 76,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(tokenized_datasets[\"train\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "id": "5721ad35-8373-4999-8ac5-02c6f759373f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Expanded_inputs_length: 141, targets_length: 29\n",
      "Start group_texts\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "https://symbolize.stripped_domain/r/?trace=https://symbolize.stripped_domain/r/?trace=503811,5cca55,7fe2dabc120f,7fe2dabc120f,90641f90b85f&map=&map= \n",
      " \n",
      "*** SIGTERM received by PID 47670 (TID 47670) on cpu 70 from PID 33223; stack trace: ***\n",
      "*** SIGTERM received by PID 47686 (TID 47686) on cpu 71 from PID 33223; stack trace: ***\n",
      "https://symbolize.stripped_domain/r/?trace=56a4e1,7fe2dabc120f&map= \n",
      "https://symbolize.stripped_domain/r/?trace=*** SIGTERM received by PID 47673 (TID 47673) on cpu 16 from PID 33223; stack trace: ***\n",
      "56a682,7fe2dabc120f,7fdfb4cf751f,90b3ff&map= \n",
      "*** SIGTERM received by PID 47665 (TID 47665) on cpu 67 from PID 33223; stack trace: ***\n",
      "PC: @           0x503811  (unknown)  (unknown)\n",
      "PC: @           0x56a4e1  (unknown)  _PyEval_EvalFrameDefault\n",
      "PC: @           0x5cca55  (unknown)  (unknown)\n",
      "    @     0x7fde2703b800        976  (unknown)\n",
      "    @     0x7fde2703b800        976  (unknown)\n",
      "    @     0x7fe2dabc1210  (unknown)  (unknown)\n",
      "    @ ... and at least 1 more frames\n",
      "https://symbolize.stripped_domain/r/?trace=    @     0x7fe2dabc1210  852927808  (unknown)\n",
      "56a4e1,7fde2703b7ff,7fe2dabc120f&map=2a762cd764e70bc90ae4c7f9747c08d7:7fde1a0f9000-7fde2737a280 \n",
      "E0710 11:59:41.025238   47673 coredump_hook.cc:250] RAW: Remote crash gathering disabled for SIGTERM.\n",
      "    @     0x7fde2703b800        976  (unknown)\n",
      "    @     0x7fe2dabc1210  850855568  (unknown)\n",
      "    @           0x90b860  (unknown)  (unknown)\n",
      "https://symbolize.stripped_domain/r/?trace=5cca55,7fde2703b7ff,7fe2dabc120f,90b85f&map=2a762cd764e70bc90ae4c7f9747c08d7:7fde1a0f9000-7fde2737a280 \n",
      "E0710 11:59:41.030755   47686 coredump_hook.cc:250] RAW: Remote crash gathering disabled for SIGTERM.\n",
      "    @           0x906420  (unknown)  (unknown)\n",
      "https://symbolize.stripped_domain/r/?trace=503811,7fde2703b7ff,7fe2dabc120f,90641f&map=2a762cd764e70bc90ae4c7f9747c08d7:7fde1a0f9000-7fde2737a280 \n",
      "E0710 11:59:41.033184   47670 coredump_hook.cc:250] RAW: Remote crash gathering disabled for SIGTERM.\n",
      "E0710 11:59:41.033730   47673 process_state.cc:771] RAW: Raising signal 15 with default behavior\n",
      "PC: @           0x56a682  (unknown)  _PyEval_EvalFrameDefault\n",
      "    @     0x7fde2703b800        976  (unknown)\n",
      "    @     0x7fe2dabc1210  (unknown)  (unknown)\n",
      "    @     0x7fdfb4cf7520  (unknown)  (unknown)\n",
      "E0710 11:59:41.057700   47670 process_state.cc:771] RAW: Raising signal 15 with default behavior\n",
      "E0710 11:59:41.063730   47686 process_state.cc:771] RAW: Raising signal 15 with default behavior\n",
      "    @           0x90b400  (unknown)  (unknown)\n",
      "https://symbolize.stripped_domain/r/?trace=56a682,7fde2703b7ff,7fe2dabc120f,7fdfb4cf751f,90b3ff&map=2a762cd764e70bc90ae4c7f9747c08d7:7fde1a0f9000-7fde2737a280 \n",
      "E0710 11:59:41.064237   47665 coredump_hook.cc:250] RAW: Remote crash gathering disabled for SIGTERM.\n",
      "E0710 11:59:41.091833   47665 process_state.cc:771] RAW: Raising signal 15 with default behavior\n"
     ]
    }
   ],
   "source": [
    "def compute_input_and_target_lengths(inputs_length, noise_density, mean_noise_span_length):\n",
    "    \"\"\"This function is copy of `random_spans_helper <https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2466>`__ .\n",
    "\n",
    "    Training parameters to avoid padding with random_spans_noise_mask.\n",
    "    When training a model with random_spans_noise_mask, we would like to set the other\n",
    "    training hyperparmeters in a way that avoids padding.\n",
    "    This function helps us compute these hyperparameters.\n",
    "    We assume that each noise span in the input is replaced by extra_tokens_per_span_inputs sentinel tokens,\n",
    "    and each non-noise span in the targets is replaced by extra_tokens_per_span_targets sentinel tokens.\n",
    "    This function tells us the required number of tokens in the raw example (for split_tokens())\n",
    "    as well as the length of the encoded targets. Note that this function assumes\n",
    "    the inputs and targets will have EOS appended and includes that in the reported length.\n",
    "\n",
    "    Args:\n",
    "        inputs_length: an integer - desired length of the tokenized inputs sequence\n",
    "        noise_density: a float\n",
    "        mean_noise_span_length: a float\n",
    "    Returns:\n",
    "        tokens_length: length of original text in tokens\n",
    "        targets_length: an integer - length in tokens of encoded targets sequence\n",
    "    \"\"\"\n",
    "\n",
    "    def _tokens_length_to_inputs_length_targets_length(tokens_length):\n",
    "        num_noise_tokens = int(round(tokens_length * noise_density))\n",
    "        num_nonnoise_tokens = tokens_length - num_noise_tokens\n",
    "        num_noise_spans = int(round(num_noise_tokens / mean_noise_span_length))\n",
    "        # inputs contain all nonnoise tokens, sentinels for all noise spans\n",
    "        # and one EOS token.\n",
    "        _input_length = num_nonnoise_tokens + num_noise_spans + 1\n",
    "        _output_length = num_noise_tokens + num_noise_spans + 1\n",
    "        return _input_length, _output_length\n",
    "\n",
    "    tokens_length = inputs_length\n",
    "\n",
    "    while _tokens_length_to_inputs_length_targets_length(tokens_length + 1)[0] <= inputs_length:\n",
    "        tokens_length += 1\n",
    "\n",
    "    inputs_length, targets_length = _tokens_length_to_inputs_length_targets_length(tokens_length)\n",
    "\n",
    "    # minor hack to get the targets length to be equal to inputs length\n",
    "    # which is more likely to have been set to a nice round number.\n",
    "    if noise_density == 0.5 and targets_length > inputs_length:\n",
    "        tokens_length -= 1\n",
    "        targets_length -= 1\n",
    "    return tokens_length, targets_length\n",
    "\n",
    "# T5-like span masked language modeling will fuse consecutively masked tokens to a single sentinel token.\n",
    "# To ensure that the input length is `max_seq_length`, we need to increase the maximum length\n",
    "# according to `mlm_probability` and `mean_noise_span_length`. We can also define the label length accordingly.\n",
    "expanded_inputs_length, targets_length = compute_input_and_target_lengths(\n",
    "    inputs_length=128,\n",
    "    noise_density=0.15,\n",
    "    mean_noise_span_length=3.0,\n",
    ")\n",
    "\n",
    "print(f\"Expanded_inputs_length: {expanded_inputs_length}, targets_length: {targets_length}\")\n",
    "print(f\"Start group_texts\")\n",
    "\n",
    "# Main data processing function that will concatenate all texts from our dataset and generate chunks of expanded_inputs_length.\n",
    "def group_texts(examples):\n",
    "    # Concatenate all texts.\n",
    "    concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}\n",
    "    total_length = len(concatenated_examples[list(examples.keys())[0]])\n",
    "    # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can\n",
    "    # customize this part to your needs.\n",
    "    if total_length >= expanded_inputs_length:\n",
    "        total_length = (total_length // expanded_inputs_length) * expanded_inputs_length\n",
    "    # Split by chunks of max_len.\n",
    "    result = {\n",
    "        k: [t[i : i + expanded_inputs_length] for i in range(0, total_length, expanded_inputs_length)]\n",
    "        for k, t in concatenated_examples.items()\n",
    "    }\n",
    "    return result\n",
    "\n",
    "# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a\n",
    "# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value\n",
    "# might be slower to preprocess.\n",
    "#\n",
    "# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:\n",
    "# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map\n",
    "grouped_datasets = tokenized_datasets.map(\n",
    "    group_texts,\n",
    "    batched=True,\n",
    "    batch_size=200,\n",
    "    num_proc=96,\n",
    "    load_from_cache_file=True,\n",
    ")\n"
   ]
  },
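  {
   "cell_type": "markdown",
   "id": "length-check-note",
   "metadata": {},
   "source": [
    "The reported lengths can be checked by hand: with `tokens_length = 141`, `noise_density = 0.15` and `mean_noise_span_length = 3.0` there are `round(141 * 0.15) = 21` noise tokens grouped into `round(21 / 3) = 7` spans, so the inputs hold the `141 - 21 = 120` non-noise tokens plus 7 sentinels plus EOS (128 tokens), and the targets hold `21 + 7 + 1 = 29` tokens. A minimal sketch:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "length-check-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Recompute the printed lengths by hand for tokens_length = 141.\n",
    "tokens_length = 141\n",
    "num_noise_tokens = int(round(tokens_length * 0.15))   # 21\n",
    "num_noise_spans = int(round(num_noise_tokens / 3.0))  # 7\n",
    "inputs_len = tokens_length - num_noise_tokens + num_noise_spans + 1  # 128\n",
    "targets_len = num_noise_tokens + num_noise_spans + 1                 # 29\n",
    "assert (inputs_len, targets_len) == (128, 29)"
   ]
  },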
  {
   "cell_type": "code",
   "execution_count": 78,
   "id": "f37e7559-fcc1-436b-a4ee-45adb856869e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "86438919"
      ]
     },
     "execution_count": 78,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "examples = len(grouped_datasets[\"train\"])\n",
    "examples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "id": "21aac2aa-9dc2-4b7a-8c46-62cfa47f18a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "it = iter(grouped_datasets[\"train\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "id": "011a6a07-5fe0-441a-b032-79cf8664b5c5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': [256, 3, 20, 18452, 6690, 7757, 1286, 43, 10, 4942, 1286, 80, 12, 4782, 5442, 39, 5385, 33, 4, 5, 3, 2924, 117, 5669, 228, 21, 193, 9030, 511, 24, 11, 5, 665, 165, 4218, 7, 26, 264, 1528, 35, 105, 3, 19653, 12, 9661, 17156, 13955, 4, 132, 5, 611, 959, 961, 146, 6522, 7757, 1286, 89, 7500, 9716, 11, 5, 4868, 107, 13604, 12, 12836, 13368, 11, 611, 959, 4, 3, 69, 99, 12, 13132, 6690, 590, 5, 1803, 1867, 69, 7, 924, 10, 1762, 4, 3, 69, 538, 489, 14, 1149, 16, 3, 11384, 199, 116, 399, 4782, 291, 3, 6, 237, 13, 2629, 3, 8987, 291, 4, 69, 5, 3, 27, 72, 20, 325, 3, 2924, 133, 21, 105, 9030, 10, 1149, 242, 16, 144, 13572, 11, 9, 13401, 20, 7951, 8, 165, 4218, 4, 5, 1910]}\n"
     ]
    }
   ],
   "source": [
    "print(next(it))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "id": "f20d3da2-0132-4ecc-b9b9-c2b5ec06f031",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokens = next(it)['input_ids']\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "id": "2bad87cd-06e1-4c52-b2d6-d61fcb96e35d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "141"
      ]
     },
     "execution_count": 82,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "id": "4e0f573a-0abc-4f8f-b59a-a281fb306425",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"werden volgens getuigen vergezeld door een boomlange bodyguard. ook hing er een gordijntje om de tafel, zodat beyoncé in alle rust van de show kon genieten. volgens de bron verliet knowles pas om 03.30 uur's ochtends de hippe club.</s> utrecht - in de schouwburg van utrecht gaat vrijdagavond de musical 'joseph and the amazing technicolor dreamcoat' in première. voor het eerst in nederland. een voorloper van het geesteskind van andrew lloyd webber werd al in 1967 voor het eerst op een school in groot-brittannië uitgeprobeerd. twaalf jaar later werd het in\""
      ]
     },
     "execution_count": 83,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer.decode(tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "id": "ab853c1b-0e0f-4ae8-b1cb-053f76a7d9d7",
   "metadata": {},
   "outputs": [
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m/tmp/ipykernel_33223/1050159500.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mwhile\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mexample\u001b[0m \u001b[0;34m:=\u001b[0m \u001b[0mnext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mit\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      2\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexample\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'input_ids'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m141\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      3\u001b[0m         \u001b[0;32mcontinue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      4\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m         \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexample\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/datasets/src/datasets/arrow_dataset.py\u001b[0m in \u001b[0;36m__iter__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m   1264\u001b[0m         \u001b[0moutput_all_columns\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_output_all_columns\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1265\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mindex\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_rows\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1266\u001b[0;31m             yield self._getitem(\n\u001b[0m\u001b[1;32m   1267\u001b[0m                 \u001b[0mindex\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1268\u001b[0m                 \u001b[0mformat_type\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mformat_type\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/datasets/src/datasets/arrow_dataset.py\u001b[0m in \u001b[0;36m_getitem\u001b[0;34m(self, key, format_type, format_columns, output_all_columns, format_kwargs)\u001b[0m\n\u001b[1;32m   1507\u001b[0m         \u001b[0mformat_kwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mformat_kwargs\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mformat_kwargs\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1508\u001b[0m         \u001b[0mformatter\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_formatter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mformat_type\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mformat_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1509\u001b[0;31m         \u001b[0mpa_subtable\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mquery_table\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindices\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_indices\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_indices\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1510\u001b[0m         formatted_output = format_table(\n\u001b[1;32m   1511\u001b[0m             \u001b[0mpa_subtable\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mformatter\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mformatter\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mformat_columns\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mformat_columns\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput_all_columns\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moutput_all_columns\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/datasets/src/datasets/formatting/formatting.py\u001b[0m in \u001b[0;36mquery_table\u001b[0;34m(table, key, indices)\u001b[0m\n\u001b[1;32m    369\u001b[0m     \u001b[0;31m# Query the main table\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    370\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mindices\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 371\u001b[0;31m         \u001b[0mpa_subtable\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_query_table\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtable\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    372\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    373\u001b[0m         \u001b[0mpa_subtable\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_query_table_with_indices_mapping\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtable\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindices\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mindices\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/datasets/src/datasets/formatting/formatting.py\u001b[0m in \u001b[0;36m_query_table\u001b[0;34m(table, key)\u001b[0m\n\u001b[1;32m     77\u001b[0m     \"\"\"\n\u001b[1;32m     78\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 79\u001b[0;31m         \u001b[0;32mreturn\u001b[0m \u001b[0mtable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfast_slice\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0mtable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_rows\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     80\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mslice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     81\u001b[0m         \u001b[0mkey\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindices\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_rows\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/datasets/src/datasets/table.py\u001b[0m in \u001b[0;36mfast_slice\u001b[0;34m(self, offset, length)\u001b[0m\n\u001b[1;32m    127\u001b[0m         \u001b[0;32melif\u001b[0m \u001b[0moffset\u001b[0m \u001b[0;34m>=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_offsets\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mlength\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mlength\u001b[0m \u001b[0;34m<=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    128\u001b[0m             \u001b[0;32mreturn\u001b[0m \u001b[0mpa\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_batches\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mschema\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_schema\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 129\u001b[0;31m         \u001b[0mi\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_interpolation_search\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_offsets\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moffset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    130\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mlength\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mlength\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0moffset\u001b[0m \u001b[0;34m>=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_offsets\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    131\u001b[0m             \u001b[0mbatches\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_batches\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/datasets/src/datasets/table.py\u001b[0m in \u001b[0;36m_interpolation_search\u001b[0;34m(arr, x)\u001b[0m\n\u001b[1;32m     84\u001b[0m     \u001b[0;32mwhile\u001b[0m \u001b[0mi\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0mj\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0marr\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m<=\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0marr\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     85\u001b[0m         \u001b[0mk\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mi\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mj\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0marr\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m//\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0marr\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0marr\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 86\u001b[0;31m         \u001b[0;32mif\u001b[0m \u001b[0marr\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m<=\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0marr\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mk\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     87\u001b[0m             \u001b[0;32mreturn\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     88\u001b[0m         \u001b[0;32melif\u001b[0m \u001b[0marr\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "while (example := next(it, None)) is not None:\n",
    "    if len(example['input_ids']) == 141:\n",
    "        continue\n",
    "    else:\n",
    "        print(example)\n",
    "        break"
   ]
  },
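  {
   "cell_type": "markdown",
   "id": "filter-length-note",
   "metadata": {},
   "source": [
    "The interrupted loop above scans all 86M rows one by one in the main process. A faster sketch of the same length audit, assuming `Dataset.filter` supports `num_proc` like the `map` calls above:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "filter-length-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: collect chunks whose length deviates from expanded_inputs_length,\n",
    "# using multiprocessing instead of a single-process Python loop.\n",
    "deviating = grouped_datasets[\"train\"].filter(\n",
    "    lambda example: len(example[\"input_ids\"]) != expanded_inputs_length,\n",
    "    num_proc=96,\n",
    ")\n",
    "print(f\"{len(deviating)} of {len(grouped_datasets['train'])} chunks deviate from {expanded_inputs_length} tokens\")"
   ]
  },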
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f71a0f6b-3b60-4dd5-a9af-0ef43aadc6a1",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}