Maurice Weber committed
Commit 7f1cb5c
1 parent: 19f2e7e

subsamples 10B, 100B, 1T

Files changed (2):
  1. .gitignore +32 -0
  2. RedPajama-Data-V2.py +93 -59
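
This commit wires three fixed-size subsample configs (sample-10B, sample-100B, sample-1T) into the loading script. A minimal sketch of how they would be consumed, assuming the script is published as the Hugging Face dataset repo togethercomputer/RedPajama-Data-V2 (the repo id is not shown on this page) and that the single split is named "train":

    from datasets import load_dataset

    # Stream the smallest subsample: one shard per snapshot/language/partition,
    # instead of the 5000 shards of the full dataset.
    ds = load_dataset(
        "togethercomputer/RedPajama-Data-V2",
        name="sample-10B",
        split="train",      # split name assumed
        streaming=True,     # avoids materializing the full download up front
    )
    row = next(iter(ds))
    # each record exposes doc_id, raw_content, meta and quality_signals (see _info below)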
.gitignore ADDED
@@ -0,0 +1,32 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+*.pyc
+*.DS_Store
+
+# data folders
+data/*
+!data/.gitkeep
+
+# notebooks
+notebooks/*
+.ipynb_checkpoints
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# ides
+.idea/
+.vscode/
+
+# distribution
+*.egg-info/
+dist/
+build/
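
Note the data/* and !data/.gitkeep pair: git ignores everything inside data/ but re-includes the .gitkeep placeholder, so the otherwise-empty directory stays tracked.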
RedPajama-Data-V2.py CHANGED
@@ -15,13 +15,12 @@
 # Lint as: python3
 """RedPajama V2: Quality annotated Web Text Documents."""
 
+import gzip
 import json
-
-import datasets
 import traceback
-import gzip
 from typing import List
 
+import datasets
 import pyarrow.parquet as pq
 
 logger = datasets.logging.get_logger(__name__)
@@ -30,10 +29,11 @@ _DESCRIPTION = """\
 RedPajama V2: an Open Dataset for Training Large Language Models
 """
 
-_URL_BASE = 'https://data.together.xyz/redpajama-data-v2/v1.0.0'
+_URL_BASE = "https://data.together.xyz/redpajama-data-v2/v1.0.0"
 _LANGUAGES = ("en", "de", "fr", "es", "it")
 _MISSING_FILES_PATTERN = "urls/missing-{component}.txt"
 _NUM_SHARDS = 5000
+_SUBSAMPLE_FILE_COUNTS = {"sample-10B": 1, "sample-100B": 10, "sample-1T": 100}
 
 _CC_SNAPSHOT_IDS = (
     "2014-15",
@@ -119,7 +119,7 @@ _CC_SNAPSHOT_IDS = (
     "2022-40",
     "2022-49",
     "2023-06",
-    "2023-14"
+    "2023-14",
 )
 
 
@@ -138,19 +138,34 @@ class RedPajamaDataV2Config(datasets.BuilderConfig):
 
 
 class RedPajamaV2(datasets.GeneratorBasedBuilder):
-    """ RedPajama V2: Quality annotated Web Text Documents. """
+    """RedPajama V2: Quality annotated Web Text Documents."""
 
     BUILDER_CONFIGS = [
         RedPajamaDataV2Config(
-            name='sample',
+            name="sample",
             version=datasets.Version("1.0.0", ""),
             description=f"RedPajamaV2 Sample",
         ),
         RedPajamaDataV2Config(
-            name='default',
+            name="sample-10B",
+            version=datasets.Version("1.0.0", ""),
+            description=f"RedPajamaV2 Sample with 10B tokens",
+        ),
+        RedPajamaDataV2Config(
+            name="sample-100B",
+            version=datasets.Version("1.0.0", ""),
+            description=f"RedPajamaV2 Sample with 100B tokens",
+        ),
+        RedPajamaDataV2Config(
+            name="sample-1T",
+            version=datasets.Version("1.0.0", ""),
+            description=f"RedPajamaV2 Sample with 1T tokens",
+        ),
+        RedPajamaDataV2Config(
+            name="default",
             version=datasets.Version("1.0.0", ""),
             description=f"RedPajamaV2",
-        )
+        ),
     ]
 
     def _info(self):
@@ -161,7 +176,7 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
                     "raw_content": datasets.Value("string"),
                     "doc_id": datasets.Value("string"),
                     "meta": datasets.Value("string"),
-                    "quality_signals": datasets.Value("string")
+                    "quality_signals": datasets.Value("string"),
                 }
             ),
             supervised_keys=None,
@@ -177,24 +192,30 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
 
         # fetch documents
         logger.info(f"Downloading {len(sample_base_tags)} documents files.")
-        documents_files = dl_manager.download({
-            base_tag: f"sample/documents/{base_tag}.json.gz"
-            for base_tag in sample_base_tags
-        })
+        documents_files = dl_manager.download(
+            {
+                base_tag: f"sample/documents/{base_tag}.json.gz"
+                for base_tag in sample_base_tags
+            }
+        )
 
         # fetch quality signals
         logger.info(f"Downloading {len(sample_base_tags)} quality signals files.")
-        quality_signals_files = dl_manager.download({
-            base_tag: f"sample/quality_signals/{base_tag}.signals.json.gz"
-            for base_tag in sample_base_tags
-        })
+        quality_signals_files = dl_manager.download(
+            {
+                base_tag: f"sample/quality_signals/{base_tag}.signals.json.gz"
+                for base_tag in sample_base_tags
+            }
+        )
 
         # fetch ids of duplicates
         logger.info(f"Downloading {len(sample_base_tags)} duplicates ids files.")
-        duplicates_ids_files = dl_manager.download({
-            base_tag: f"sample/duplicates/{base_tag}.duplicates.parquet"
-            for base_tag in sample_base_tags
-        })
+        duplicates_ids_files = dl_manager.download(
+            {
+                base_tag: f"sample/duplicates/{base_tag}.duplicates.parquet"
+                for base_tag in sample_base_tags
+            }
+        )
 
         return [
             datasets.SplitGenerator(
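
The download calls in this hunk are only re-wrapped, not changed in behavior: DownloadManager.download accepts a nested structure (here a dict keyed by base tag) and returns the same structure with every remote path swapped for a local one, which is why the files can later be looked up per base_tag in _generate_examples.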
@@ -203,31 +224,41 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
                     "base_tags": sample_base_tags,
                     "documents_files": documents_files,
                     "quality_signals_files": quality_signals_files,
-                    "duplicates_ids_files": duplicates_ids_files
-                }
+                    "duplicates_ids_files": duplicates_ids_files,
+                },
             )
         ]
 
     def _split_generators_full(self, dl_manager):
-        snapshots = getattr(self.config, 'snapshots', _CC_SNAPSHOT_IDS)
-        languages = getattr(self.config, 'languages', _LANGUAGES)
-        partition = getattr(self.config, 'partition', 'all')
-
-        if partition == 'all':
-            partitions = ['head', 'middle', 'tail']
-        elif partition == 'head_middle':
-            partitions = ['head', 'middle']
-        elif partition == 'tail':
+        snapshots = getattr(self.config, "snapshots", _CC_SNAPSHOT_IDS)
+        languages = getattr(self.config, "languages", _LANGUAGES)
+        partition = getattr(self.config, "partition", "all")
+
+        if self.config.name in ("sample-10B", "sample-100B", "sample-1T"):
+            partition = "head_middle"
+            languages = _LANGUAGES
+            snapshots = _CC_SNAPSHOT_IDS
+            num_shards = _SUBSAMPLE_FILE_COUNTS[self.config.name]
+        else:
+            num_shards = _NUM_SHARDS
+
+        if partition == "all":
+            partitions = ["head", "middle", "tail"]
+        elif partition == "head_middle":
+            partitions = ["head", "middle"]
+        elif partition == "tail":
             partitions = [partition]
         else:
-            raise ValueError(f'invalid partition: {partition}')
+            raise ValueError(f"invalid partition: {partition}")
 
         # fetch list of missing files (e.g., missing duplicates or corrupted documents and
         # quality signal files)
-        missing_files_paths = dl_manager.download_and_extract({
-            component: _MISSING_FILES_PATTERN.format(component=component)
-            for component in ("documents", "signals", "duplicates")
-        })
+        missing_files_paths = dl_manager.download_and_extract(
+            {
+                component: _MISSING_FILES_PATTERN.format(component=component)
+                for component in ("documents", "signals", "duplicates")
+            }
+        )
 
         missing_files = {}
         for component, missing_file in missing_files_paths.items():
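
The subsample branch above pins the partition to "head_middle" and swaps _NUM_SHARDS for the config's entry in _SUBSAMPLE_FILE_COUNTS; the loop in the next hunk then expands those choices into file tags. A hypothetical standalone rendering of that selection (the helper name is made up; the constants are the ones defined in this script):

    def subsample_base_tags(config_name: str) -> list:
        num_shards = _SUBSAMPLE_FILE_COUNTS[config_name]  # e.g. 1 for "sample-10B"
        return [
            f"{snapshot}/{n:04d}/{lang}_{part}"
            for lang in _LANGUAGES
            for snapshot in _CC_SNAPSHOT_IDS
            for part in ("head", "middle")  # subsamples always use head_middle
            for n in range(num_shards)
        ]

    # subsample_base_tags("sample-10B")[0] == "2014-15/0000/en_head"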
@@ -243,7 +274,7 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
         for lang in languages:
             for snapshot in snapshots:
                 for part in partitions:
-                    for n in range(_NUM_SHARDS):
+                    for n in range(num_shards):
                         base_tag = f"{snapshot}/{n:04d}/{lang}_{part}"
                         base_tags.append(base_tag)
 
@@ -281,20 +312,19 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
                     "base_tags": base_tags,
                     "documents_files": documents_files,
                     "quality_signals_files": quality_signals_files,
-                    "duplicates_ids_files": duplicates_ids_files
-                }
+                    "duplicates_ids_files": duplicates_ids_files,
+                },
             )
         ]
 
     def _split_generators(self, dl_manager):
-        if self.config.name.endswith("sample"):
+        if self.config.name == "sample":
            return self._split_generators_sample(dl_manager)
 
        return self._split_generators_full(dl_manager)
 
    def _generate_examples(
-        self, base_tags, documents_files, quality_signals_files,
-        duplicates_ids_files
+        self, base_tags, documents_files, quality_signals_files, duplicates_ids_files
     ):
         key = 0
         for base_tag in base_tags:
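
The sample check is also tightened from name.endswith("sample") to an exact name == "sample", making explicit that the new subsample configs take the _split_generators_full path, where they reuse the full download logic with the reduced shard count.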
@@ -305,9 +335,7 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
             if doc_file is None:
                 continue
 
-            for sample in self.__get_generator(
-                base_tag, doc_file, qs_file, dupe_file
-            ):
+            for sample in self.__get_generator(base_tag, doc_file, qs_file, dupe_file):
                 yield key, sample
                 key += 1
 
@@ -325,19 +353,17 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
                 try:
                     yield self.handle_record("tail", doc_id, doc, None, None)
                 except Exception as e:
-                    logger.warning(f'failed handling row {row} in {doc_file}')
+                    logger.warning(f"failed handling row {row} in {doc_file}")
                     traceback.print_exc()
                     continue
 
         except gzip.BadGzipFile as e:
             # skip broken gzip files
-            print(f'BadGzipFile: {doc_file, qs_file}')
+            print(f"BadGzipFile: {doc_file, qs_file}")
             traceback.print_exc()
             return
 
-    def _handle_head_middle(
-        self, base_tag, doc_file, qs_file, dupe_file
-    ):
+    def _handle_head_middle(self, base_tag, doc_file, qs_file, dupe_file):
         if qs_file is None:
             yield from self._handle_tail(base_tag, doc_file, None, None)
             return
@@ -345,11 +371,13 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
         # load duplicates
         try:
             with open(dupe_file, "rb") as df:
-                duplicates = set(pq.read_table(
-                    df, columns=["doc_id"], use_pandas_metadata=False
-                )["doc_id"].to_pylist())
+                duplicates = set(
+                    pq.read_table(df, columns=["doc_id"], use_pandas_metadata=False)[
+                        "doc_id"
+                    ].to_pylist()
+                )
         except Exception as e:
-            logger.warning(f'no duplicate ids found for {base_tag}')
+            logger.warning(f"no duplicate ids found for {base_tag}")
             duplicates = set()
 
         try:
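
Per shard, the duplicate ids live in a parquet file with a doc_id column; reading only that column and materializing it as a set makes the later per-document duplicate check an O(1) membership test. A minimal standalone sketch of the lookup (the file path is hypothetical):

    import pyarrow.parquet as pq

    with open("en_head.duplicates.parquet", "rb") as df:
        table = pq.read_table(df, columns=["doc_id"], use_pandas_metadata=False)
    duplicates = set(table["doc_id"].to_pylist())

    is_duplicate = "some-doc-id" in duplicates  # the flag passed to handle_record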
@@ -360,16 +388,22 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
 
                 try:
                     yield self.handle_record(
-                        "head_middle", doc_id, doc, qs, is_duplicate=doc_id in duplicates
+                        part="head_middle",
+                        doc_id=doc_id,
+                        doc=doc,
+                        qs=qs,
+                        is_duplicate=doc_id in duplicates,
                     )
                 except Exception as e:
-                    logger.warning(f'failed handling row {row} in {doc_file} ({qs_file})')
+                    logger.warning(
+                        f"failed handling row {row} in {doc_file} ({qs_file})"
+                    )
                     traceback.print_exc()
                     continue
 
         except gzip.BadGzipFile as e:
             # skip broken gzip files
-            print(f'BadGzipFile: {doc_file, qs_file}')
+            print(f"BadGzipFile: {doc_file, qs_file}")
             traceback.print_exc()
             return
 
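
For the default config, the getattr calls in _split_generators_full suggest that snapshots, languages and partition can be overridden at load time. A sketch, assuming load_dataset forwards extra keyword arguments to RedPajamaDataV2Config as is usual for builder configs (repo id and split name assumed as above):

    from datasets import load_dataset

    ds = load_dataset(
        "togethercomputer/RedPajama-Data-V2",
        name="default",
        partition="head_middle",           # skip the tail partition
        snapshots=["2023-06", "2023-14"],  # restrict to two CC crawls
        languages=["en"],
        split="train",
        streaming=True,
    )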