parquet-converter committed
Commit 6a60e4e • Parent(s): 152771d

Update parquet files

Files changed:
- .gitattributes +1 -0
- README.md +0 -31
- dataset_infos.json +0 -1
- openwebtext-10k.py +0 -89
- plain_text/openwebtext-10k-train.parquet +3 -0
- process.txt +0 -65
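This is the Hub's automated parquet-conversion commit: the dataset's Python loading script, its dataset_infos.json, its README, and the build notes are deleted, and the data itself is checked in as a single LFS-tracked parquet file under plain_text/. After this commit the dataset loads without executing any repository code. A minimal sketch of both access paths, assuming the datasets and pandas libraries are installed (the hf:// path relies on huggingface_hub's fsspec integration):

```
from datasets import load_dataset
import pandas as pd

# High-level: load_dataset now resolves to the committed parquet file
# instead of running openwebtext-10k.py.
ds = load_dataset("stas/openwebtext-10k", split="train")
print(ds.num_rows)  # expected: 10000

# Low-level: read the parquet file directly; the in-repo path is taken
# from this commit's file list.
df = pd.read_parquet(
    "hf://datasets/stas/openwebtext-10k/plain_text/openwebtext-10k-train.parquet"
)
print(len(df), list(df.columns))  # expected: 10000 ['text']
```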
.gitattributes CHANGED
@@ -14,3 +14,4 @@
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
+plain_text/openwebtext-10k-train.parquet filter=lfs diff=lfs merge=lfs -text
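The single added line registers the new parquet file with Git LFS, alongside the existing *.pb/*.pt/*.pth patterns, so a checkout without LFS materializes only a small pointer file rather than the ~30MB blob. A quick check that distinguishes a pointer from real data, as a sketch (the header string is fixed by the LFS pointer spec):

```
def is_lfs_pointer(path: str) -> bool:
    """True if `path` holds a Git LFS pointer instead of the real file."""
    header = b"version https://git-lfs.github.com/spec/v1"
    with open(path, "rb") as f:
        return f.read(len(header)) == header
```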
README.md DELETED
@@ -1,31 +0,0 @@
-10K slice of OpenWebText - An open-source replication of the WebText dataset from OpenAI.
-
-This is a small subset representing the first 10K records from the original dataset - created for testing.
-
-The full 8M-record dataset is [here](https://huggingface.co/datasets/openwebtext).
-
-```
-$ python -c "from datasets import load_dataset; ds=load_dataset('stas/openwebtext-10k'); print(ds)"
-DatasetDict({
-    train: Dataset({
-        features: ['text'],
-        num_rows: 10000
-    })
-})
-```
-
-* Records: 10,000
-* compressed size: ~15MB
-* uncompressed size: 50MB
-
-To convert to jsonlines:
-
-```
-from datasets import load_dataset
-dataset_name = "stas/openwebtext-10k"
-name = dataset_name.split('/')[-1]
-ds = load_dataset(dataset_name, split='train')
-ds.to_json(f"{name}.jsonl", orient="records", lines=True)
-```
-
-To see how this subset was created, here is the [instructions file](https://huggingface.co/datasets/stas/openwebtext-10k/blob/main/process.txt).
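The deleted README pinned down the subset's size: 10,000 records, ~15MB compressed, 50MB uncompressed. Those figures can still be checked against the parquet-backed repo; a sketch assuming the datasets library:

```
from datasets import load_dataset

ds = load_dataset("stas/openwebtext-10k", split="train")
assert ds.num_rows == 10_000  # "Records: 10,000" from the deleted README

# Rough uncompressed text size, for comparison with the README's "50MB";
# dataset_infos.json recorded 49,670,861 bytes.
n_bytes = sum(len(t.encode("utf-8")) for t in ds["text"])
print(f"{n_bytes / 1e6:.1f} MB")
```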
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"plain_text": {"description": "An open-source replication of the WebText dataset from OpenAI.\n\nThis is a small subset representing the first 10K records from the original dataset - created for testing.\n\nThe full 8M-record dataset is at https://huggingface.co/datasets/openwebtext\n", "citation": "@misc{Gokaslan2019OpenWeb,\n  title={OpenWebText Corpus},\n  author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},\n  howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},\n  year={2019}\n}\n", "homepage": "https://skylion007.github.io/OpenWebTextCorpus/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "openwebtext10k", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 49670861, "num_examples": 10000, "dataset_name": "openwebtext10k"}}, "download_checksums": {"https://cdn-datasets.huggingface.co/nlp/datasets/openwebtext/openwebtext-10k.tar.xz": {"num_bytes": 14723792, "checksum": "1dd150ffa3361ab32fa9f129d1b5ce20ac48728be16be436558f844d1761c572"}}, "download_size": 14723792, "post_processing_size": null, "dataset_size": 49670861, "size_in_bytes": 64394653}}
openwebtext-10k.py DELETED
@@ -1,89 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""The Open WebText Corpus"""
-
-
-import os
-import re
-from itertools import chain
-
-import datasets
-
-
-_CITATION = """\
-@misc{Gokaslan2019OpenWeb,
-  title={OpenWebText Corpus},
-  author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
-  howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
-  year={2019}
-}
-"""
-
-_DESCRIPTION = """\
-An open-source replication of the WebText dataset from OpenAI.
-
-This is a small subset representing the first 10K records from the original dataset - created for testing.
-
-The full 8M-record dataset is at https://huggingface.co/datasets/openwebtext
-"""
-
-_URL = "https://cdn-datasets.huggingface.co/nlp/datasets/openwebtext/openwebtext-10k.tar.xz"
-
-class Openwebtext10k(datasets.GeneratorBasedBuilder):
-    """The Open WebText dataset."""
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="plain_text",
-            description="Plain text",
-            version=datasets.Version("1.0.0"),
-        )
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features({"text": datasets.Value("string")}),
-            homepage="https://skylion007.github.io/OpenWebTextCorpus/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(_URL)
-        owt_dir = os.path.join(dl_dir, "openwebtext-10k")
-        subset_xzs = [
-            os.path.join(owt_dir, file_name)
-            for file_name in sorted(os.listdir(owt_dir))
-            if file_name.endswith("xz")  # filter out ...xz.lock
-        ]
-        ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
-        nested_txt_files = [
-            [
-                os.path.join(ex_dir, txt_file_name)
-                for txt_file_name in sorted(os.listdir(ex_dir))
-                if txt_file_name.endswith("txt")
-            ]
-            for ex_dir in ex_dirs
-        ]
-        txt_files = chain(*nested_txt_files)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
-        ]
-
-    def _generate_examples(self, txt_files):
-        """Yields examples."""
-        for idx, filepath in enumerate(txt_files):
-            with open(filepath, encoding="utf-8") as f:
-                yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
plain_text/openwebtext-10k-train.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:757c03fd89e59a557d7768effd97c9a9383ea4a1020f8334967e692566677082
+size 30271596
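What actually lands in the git history is not parquet bytes but a Git LFS pointer recording the blob's sha256 and size (30,271,596 bytes, roughly 30MB). To fetch the resolved file and confirm its row count, a sketch assuming huggingface_hub and pyarrow are installed:

```
from huggingface_hub import hf_hub_download
import pyarrow.parquet as pq

# Downloads the LFS-resolved parquet file from the dataset repo.
path = hf_hub_download(
    repo_id="stas/openwebtext-10k",
    filename="plain_text/openwebtext-10k-train.parquet",
    repo_type="dataset",
)
meta = pq.ParquetFile(path).metadata
print(meta.num_rows)  # expected: 10000
```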
process.txt DELETED
@@ -1,65 +0,0 @@
-
-# this is a small derivative from 8M-big openwebtext dataset for testing
-
-# how this build script and dataset_infos.json were generated
-
-#
-
-mkdir openwebtext-10k
-cd openwebtext-10k
-
-# data
-wget https://zenodo.org/record/3834942/files/openwebtext.tar.xz
-tar xf openwebtext.tar.xz
-cd openwebtext
-rename.pl 's|-|-00|; s|-00(\d\d\d)|-$1|; s|-00(\d\d)|-0$1|;' *xz
-
-# now open the first 30 archives
-mkdir subset
-cp urlsf_subset00-0[0-2]*_data.xz subset
-cd subset
-find . -name "*xz" -exec tar xf {} \;
-mkdir 10k
-find . -name "*txt" | sort | head -10000 | xargs mv -t 10k
-tar cfJ 10k.xz -C 10k .
-mkdir openwebtext-10k
-mv 10k.xz openwebtext-10k
-tar cfJ openwebtext-10k.tar.xz openwebtext-10k
-# the openwebtext subdir gets created on the fly
-aws s3 cp openwebtext-10k.tar.xz s3://datasets.huggingface.co/nlp/datasets/openwebtext/
-
-# script
-wget https://raw.githubusercontent.com/huggingface/datasets/master/datasets/openwebtext/openwebtext.py
-mv openwebtext.py openwebtext-10k.py
-perl -pi -e 's|openwebtext|openwebtext-10k|g' openwebtext-10k.py
-perl -pi -e 's|https://zenodo.org/record/3834942/files/|https://cdn-datasets.huggingface.co/nlp/datasets/openwebtext/|g' openwebtext-10k.py
-perl -pi -e 's|Openwebtext|Openwebtext10k|g' openwebtext-10k.py
-
-
-
-# manually check that the script is correct - edit the descriptions
-
-# create a new dataset entry on the hub
-https://huggingface.co/new-dataset
-
-# once created clone it
-git clone https://huggingface.co/datasets/stas/openwebtext-10k
-cp openwebtext-10k.py process.txt openwebtext-10k
-cd openwebtext-10k
-
-git add openwebtext-10k.py process.txt
-git commit -m "build script" openwebtext-10k.py process.txt
-git push
-
-# test and generate config file
-cd ..
-datasets-cli test ./openwebtext-10k --save_infos --all_configs
-
-# add and push the generated config
-cd openwebtext-10k
-git add dataset_infos.json
-git commit -m "add dataset_infos.json" dataset_infos.json
-git push
-
-# test that the dataset is working
-python -c "from datasets import load_dataset; ds=load_dataset('stas/openwebtext-10k'); print(ds)"
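The step in the deleted process.txt that actually defines this subset is `find . -name "*txt" | sort | head -10000`: the lexicographically first 10,000 extracted text files. The same selection expressed in Python, as an illustration (the "subset" directory name comes from the script above):

```
from pathlib import Path

# Equivalent of: find . -name "*txt" | sort | head -10000
# "subset" is the directory the extracted archives were unpacked into.
first_10k = sorted(Path("subset").rglob("*txt"))[:10_000]
print(len(first_10k))
```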