orionweller committed
Commit 16caab6 (1 parent: 2d644c2)

Add files using upload-large-folder tool
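For reference, "upload-large-folder" is huggingface_hub's resumable uploader for very large directories; it lands a folder as a series of commits like this one. A minimal sketch of the equivalent Python call follows; the repo id and folder path are placeholders, not taken from this commit:

    # Sketch: resumable upload of a large local folder to a dataset repo.
    # "user/dataset-name" and "./train" are assumed placeholders.
    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_large_folder(
        repo_id="user/dataset-name",
        folder_path="./train",
        repo_type="dataset",  # repo_type must be given for upload_large_folder
    )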

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +24 -0
  2. train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  3. train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  4. train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds +3 -0
  5. train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds +3 -0
  6. train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  7. train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  8. train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds +3 -0
  9. train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds +3 -0
  10. train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds +3 -0
  11. train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds +3 -0
  12. train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds +3 -0
  13. train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds +3 -0
  14. train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds +3 -0
  15. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10088-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  16. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10088-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  17. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15756-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  18. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15756-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  19. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16029-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  20. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16029-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19694-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19694-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23976-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23976-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25441-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25441-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25596-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25596-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25868-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25868-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28216-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33222-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33222-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35344-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35344-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40037-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40037-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40429-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40429-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42982-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42982-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_45938-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46922-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46922-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47784-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47784-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49494-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49494-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49999-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49999-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes CHANGED
@@ -27475,3 +27475,27 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_51753-tokenized-chun
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_21725-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_52192-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3274-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_44309-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_21725-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_11843-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_91981-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_3274-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_44309-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_2399-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_47280-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_91981-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_15460-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_11843-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ef84cf11eec20671b7e04bc6043279f5b331efb1cbe3f8b315d309a89971fde
+ size 67108854
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:477d57be04a0b354d1e7bf53b7a921815e25390a1f0d482cf7d31c9caf1fb646
+ size 67107088
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5da97ebb129f3f3b98e5654458845900d375150da646919058ec655608c6af39
+ size 67108086
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf9eccb384458aa6e39f4e2532cf480c6c5c41a04b96e1896bba97b3803623bc
+ size 67108402
train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1996bc08cc8de604af4f7af034e6cf8a33ff0bd4b66f3850575bf7692cb6048a
+ size 67107874
train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bede9c4291c9d480c6959fdbb8c0d035a368941bc1f96da20522151a2b0b74f
+ size 67106911
train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d28b99323f8c99e5865500bfa4ce40cacb2feef19babff2e0c312d02348d9de1
+ size 67107334
train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c1f9dcda878dbd5f2fc006721f5bd98cc82e8c7378b68ba8a786f1e9446873
+ size 67108747
train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:878d2b8560c4341da5cdeea7f47ea9f5ef568b24057c285dcc321e89bdf9c80d
+ size 67107924
train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66eb9832c1b56da0f0104f8068aebe44a364130e067c42c60ac5f823ce676475
+ size 67108545
train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f832a45589cc0cebb927f8aa4bac7fa8a08662233e33c43fca87bab8e43e2989
+ size 67108535
train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9fb6675db33359446e21dd90cb67efddb0a74d8ab2ec7d23968b0c8479a46410
+ size 67107678
train/algebraic-stack/algebraic_stack_train_0009-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53959342efd52630aff806b57d436d88addd1ea292884780bf95b6256c01de82
+ size 2683591
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10088-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108038, "hashes": {}}, "samples": 44763, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47988077, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9531372, "hashes": {}}, "samples": 6240, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6780756, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10088-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36741453,
+ "num_truncated_tokens": 36712213
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15756-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108015, "hashes": {}}, "samples": 43885, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47467258, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11566947, "hashes": {}}, "samples": 7724, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8179947, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15756-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37740240,
+ "num_truncated_tokens": 37711244
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16029-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107942, "hashes": {}}, "samples": 44990, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47938911, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8606109, "hashes": {}}, "samples": 5575, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6094300, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16029-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36292321,
+ "num_truncated_tokens": 36263769
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19694-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106973, "hashes": {}}, "samples": 43963, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47796753, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13878501, "hashes": {}}, "samples": 8901, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9848780, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19694-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38856660,
+ "num_truncated_tokens": 38825686
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23976-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108375, "hashes": {}}, "samples": 43095, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47356973, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18351771, "hashes": {}}, "samples": 11793, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13004430, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23976-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41031197,
+ "num_truncated_tokens": 40996840
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25441-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108528, "hashes": {}}, "samples": 44463, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47895894, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8762164, "hashes": {}}, "samples": 5924, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6268224, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25441-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36376026,
+ "num_truncated_tokens": 36349112
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25596-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107237, "hashes": {}}, "samples": 44098, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47955887, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11826333, "hashes": {}}, "samples": 7874, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8439375, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25596-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37858395,
+ "num_truncated_tokens": 37828137
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25868-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108098, "hashes": {}}, "samples": 43018, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47485082, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19119094, "hashes": {}}, "samples": 12070, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13518093, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25868-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41408363,
+ "num_truncated_tokens": 41373436
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28216-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37906552,
+ "num_truncated_tokens": 37877119
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33222-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108568, "hashes": {}}, "samples": 44519, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47818874, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8577850, "hashes": {}}, "samples": 5768, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6122382, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33222-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36287031,
+ "num_truncated_tokens": 36259529
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35344-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108084, "hashes": {}}, "samples": 44075, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47691236, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10218646, "hashes": {}}, "samples": 6733, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7277686, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35344-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37090814,
+ "num_truncated_tokens": 37062912
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40037-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107397, "hashes": {}}, "samples": 42503, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47618790, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20972096, "hashes": {}}, "samples": 13584, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14832129, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40037-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42303626,
+ "num_truncated_tokens": 42266852
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40429-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107672, "hashes": {}}, "samples": 44422, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47894644, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9628802, "hashes": {}}, "samples": 6416, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6889417, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40429-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36794951,
+ "num_truncated_tokens": 36765837
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42982-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107928, "hashes": {}}, "samples": 43261, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47834694, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16767343, "hashes": {}}, "samples": 10811, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11941066, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42982-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40263936,
+ "num_truncated_tokens": 40230537
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_45938-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107992, "hashes": {}}, "samples": 42849, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47602264, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20583236, "hashes": {}}, "samples": 13157, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14706382, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46922-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107554, "hashes": {}}, "samples": 42723, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47517726, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19786210, "hashes": {}}, "samples": 12710, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13979497, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46922-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41730972,
+ "num_truncated_tokens": 41695333
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47784-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108699, "hashes": {}}, "samples": 44819, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47995740, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 6568605, "hashes": {}}, "samples": 4419, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4697026, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47784-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 35314855,
+ "num_truncated_tokens": 35289216
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49494-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107415, "hashes": {}}, "samples": 43488, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47580121, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17110656, "hashes": {}}, "samples": 10970, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12149381, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49494-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40423644,
+ "num_truncated_tokens": 40389632
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49999-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107163, "hashes": {}}, "samples": 44670, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47688845, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11472575, "hashes": {}}, "samples": 7412, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8158299, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49999-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37678298,
+ "num_truncated_tokens": 37647605
+ }