orionweller committed on
Commit
07c69bf
1 Parent(s): f61c8db

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +45 -0
  2. train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  3. train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  4. train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  5. train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  6. train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  7. train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  8. train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  9. train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  10. train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  11. train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds +3 -0
  12. train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  13. train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  14. train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  15. train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  16. train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  17. train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  18. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1062-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  19. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1062-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  20. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11442-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11442-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_131-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13347-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15545-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18926-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18926-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19346-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19346-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19369-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19369-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19705-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21731-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21731-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22461-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23053-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23053-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23186-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2458-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2458-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25872-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25872-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26387-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26387-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2794-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2794-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28543-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30189-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30189-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31108-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31108-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes CHANGED
@@ -13671,3 +13671,48 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22345-tokenized-c
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78511-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77573-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2535-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49216-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38181-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77461-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49216-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_131-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23186-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79016-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70069-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42489-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42489-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89408-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_51754-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89408-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_51754-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25872-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25872-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_85359-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_55332-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_55332-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37119-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38588-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_85359-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37119-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15545-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_38588-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42664-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73937-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4134-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28543-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53bd89b90a43036a4d041889a23f16a8d5a356bea5373e956c4dbd3557ad1e85
+ size 67108610
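
Each .mds shard in this commit is stored as a Git LFS pointer: the repository itself records only the version, oid (SHA-256 of the real file), and size fields shown above, while the binary payload lives in LFS storage. Below is a minimal sketch of checking a downloaded shard against its pointer; the local path is a placeholder and the pointer values are copied from shard.00000.mds above.

```python
import hashlib
import os

def verify_lfs_pointer(local_file: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size recorded in its LFS pointer."""
    if os.path.getsize(local_file) != expected_size:
        return False
    sha = hashlib.sha256()
    with open(local_file, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

# Placeholder local path; oid and size taken from the pointer above.
print(verify_lfs_pointer(
    "shard.00000.mds",
    "53bd89b90a43036a4d041889a23f16a8d5a356bea5373e956c4dbd3557ad1e85",
    67108610,
))
```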
train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d3c580158cd1a3c2c359cf26b7e122c3ae30598789e36926f669b295778b2b8
+ size 67108824
train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b79cd11d92e28a4888c8dd5a85be4c7839c818ce65af6d00d740b2e407432ef
+ size 67107391
train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f3b649155f1a5775b89f496d07130c6daa3e5073e14f26180bf01c3fb05998f
+ size 67107749
train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a0d0b512dfadb0a6d9b129bbd1e2d0297f1733d232184482ca698d633832e7f
+ size 67108614
train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d774285c11d5a3167c62a0dfc34ae2bd87f7d3dd52313b541a4a837bbf823817
+ size 67107840
train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3210cf06062337c169e53d721ac4a61b1b659ff2e5087579e731c18d766b25d
+ size 67108645
train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c56064cb00b591083b0631cdc07f3cf857745c6ed2f15997ff6ed595c89fac6
+ size 67107507
train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ce2ebb7f7733e46e93cc73133ccca73ec3eaa541d054be86d252f5123ce889b
+ size 67108479
train/arxiv/arxiv_0020-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9abc7af988ba3b5c49ca394d6f32755aac4a5822cfe4d398c17c0c52d9e51a4
+ size 65634509
train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dfc5c638c012388f01bcc4307aa9242352bfd60946c81646f77058e49d2aa73
+ size 67107929
train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffb5fbc45988f2d13a69038f080bfe8cbda8e08f6edec8c0720bea260e1efbe8
+ size 67108794
train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f58ef8373782ede9c203360ab0bf4df575efaa32fc41d6860ee24572ab29c75
+ size 67108522
train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3cc9eb0b460242f2e2b5e5a8a3f897cf6c074720c2b3cef7444fe5900613dfd
+ size 67107393
train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12c4570c56bed342c7b47bf222e740cdcad1460e2b9cd2e5eefc22d98a7f0cfe
+ size 67106800
train/arxiv/arxiv_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79960d7bbdf5d9810da9cd0934a61e473da15f9ca4ec22928294fbdb76a37767
+ size 43226688
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1062-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108016, "hashes": {}}, "samples": 44033, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48063500, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10542146, "hashes": {}}, "samples": 7151, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7567180, "hashes": {}}}], "version": 2}
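
The index.json files are MosaicML Streaming (MDS) shard indexes: for each shard they record the column layout (an id string plus a uint16 input_ids array), zstd compression, the sample count, and raw versus compressed byte sizes. Below is a small sketch that totals samples and bytes from one of these indexes, assuming it has been downloaded locally; the path is a placeholder.

```python
import json

# Placeholder path to one of the index.json files added in this commit.
with open("index.json") as f:
    index = json.load(f)

shards = index["shards"]
total_samples = sum(s["samples"] for s in shards)
total_raw = sum(s["raw_data"]["bytes"] for s in shards)
total_zip = sum(s["zip_data"]["bytes"] for s in shards)

print(f"{len(shards)} shards, {total_samples} samples")
print(f"raw bytes: {total_raw}, compressed (zstd) bytes: {total_zip}")
```

For split_1062 above this comes to 2 shards and 44033 + 7151 = 51184 samples.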
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1062-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37240883,
+ "num_truncated_tokens": 37211896
+ }
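
Each num_tokens.json pairs a total token count with a truncated token count for its split. Below is a quick sketch of the bookkeeping, assuming num_truncated_tokens is the number of tokens kept after truncation (an interpretation of the field name, not something stated in the file); the path is a placeholder.

```python
import json

# Placeholder path to one of the num_tokens.json files added in this commit.
with open("num_tokens.json") as f:
    stats = json.load(f)

dropped = stats["num_tokens"] - stats["num_truncated_tokens"]
print(f"total tokens:     {stats['num_tokens']}")
print(f"after truncation: {stats['num_truncated_tokens']}")
print(f"dropped:          {dropped} ({dropped / stats['num_tokens']:.4%})")
```

For split_1062 above the difference is 37240883 - 37211896 = 28987 tokens, well under 0.1% of the split.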
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11442-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107983, "hashes": {}}, "samples": 42793, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47569938, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21488310, "hashes": {}}, "samples": 13561, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15293890, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11442-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42553774,
+ "num_truncated_tokens": 42516549
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_131-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9786f83bf95c7239062676e96a3ff0f7ad6b5131bd496abc45e635bc7f629525
+ size 67107901
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13347-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108214, "hashes": {}}, "samples": 44107, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47660467, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11493085, "hashes": {}}, "samples": 7797, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8180518, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15545-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4810830022da857b251793e4111016e810fa899019caae12210df1a63e758473
+ size 20690098
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18926-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107831, "hashes": {}}, "samples": 44755, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47080993, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8574167, "hashes": {}}, "samples": 5648, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6042898, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18926-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36281261,
+ "num_truncated_tokens": 36254049
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19346-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108257, "hashes": {}}, "samples": 44077, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47843872, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11937235, "hashes": {}}, "samples": 7903, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8565676, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19346-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37914092,
+ "num_truncated_tokens": 37883925
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19369-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107996, "hashes": {}}, "samples": 43208, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47477389, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18042378, "hashes": {}}, "samples": 11459, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12767817, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19369-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40883108,
+ "num_truncated_tokens": 40848987
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19705-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 35992034,
+ "num_truncated_tokens": 35964699
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21731-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108644, "hashes": {}}, "samples": 44859, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48084396, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8095929, "hashes": {}}, "samples": 5356, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5772016, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21731-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36048395,
+ "num_truncated_tokens": 36020836
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22461-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108500, "hashes": {}}, "samples": 44481, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47932400, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8978743, "hashes": {}}, "samples": 6095, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6448262, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23053-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107813, "hashes": {}}, "samples": 43281, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47880918, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16162564, "hashes": {}}, "samples": 10578, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11578570, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23053-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39968085,
+ "num_truncated_tokens": 39934478
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23186-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c7aa85a6378ac233a1da8cf67aa333429f647d16e7ec4d46619b927e6db5161
+ size 67107166
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2458-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108827, "hashes": {}}, "samples": 43488, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47699770, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17188907, "hashes": {}}, "samples": 10958, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12136588, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2458-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40463694,
+ "num_truncated_tokens": 40429723
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25872-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:485d1b969d11dc61f4f590d174d85cd489c248b46b235ed24982fd8808be8640
+ size 67108480
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25872-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f99db9988fa4700ca81bad3dd03a1727b11096ceb017bc88f538587ff512e5af
+ size 16346849
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26387-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108460, "hashes": {}}, "samples": 45058, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47935001, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7142408, "hashes": {}}, "samples": 4666, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5079810, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26387-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 35586749,
+ "num_truncated_tokens": 35560049
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2794-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107797, "hashes": {}}, "samples": 43814, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47596540, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12447552, "hashes": {}}, "samples": 8094, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8833689, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_2794-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38171072,
+ "num_truncated_tokens": 38141152
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28543-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b7cda555a917d13ede5310ed1476e7b36a6098c2bacddebb5a2c5115fc1fd21
+ size 67108476
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30189-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108355, "hashes": {}}, "samples": 42577, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47757156, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 23428911, "hashes": {}}, "samples": 14675, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16657552, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30189-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 43496397,
+ "num_truncated_tokens": 43457377
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31108-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108397, "hashes": {}}, "samples": 42961, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47631987, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19701978, "hashes": {}}, "samples": 12526, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13986415, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31108-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41687695,
+ "num_truncated_tokens": 41652010
+ }