devrim committed on
Commit
173b8c5
·
verified ·
1 Parent(s): fbf021a

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -56,3 +56,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
59
+ train/partition_0.jsonl filter=lfs diff=lfs merge=lfs -text
goodwiki_long.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Devrim Cavusoglu and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Goodwiki Long Subset."""
18
+
19
+
20
+ import json
21
+
22
+ import datasets
23
+
24
logger = datasets.logging.get_logger(__name__)


# Human-readable summary surfaced in the dataset card / DatasetInfo.
_DESCRIPTION = """\
Dataset consisting of long wikipedia articles in markdown format.
"""

# Data files for each split, relative to the repository root; they are
# resolved and fetched by the download manager in `_split_generators`.
_URLS = {
    "train": ["train/partition_0.jsonl"],
    "test": ["test/partition_1.jsonl"],
}
39
+
40
+
41
class GoodWikiLongDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the GoodWiki Long dataset."""

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)

    @property
    def features(self):
        """Per-example feature schema as a plain dict of feature specs."""
        text_value = datasets.Value("string")
        return {
            "id": text_value,
            # `url` is always emitted as None by the generator, hence the
            # null-typed feature.
            "url": datasets.Value("null"),
            "title": text_value,
            "text": text_value,
            "revid": text_value,
            "description": text_value,
            "categories": datasets.Sequence(text_value),
        }
63
+
64
+
65
class GoodWikiLongDataset(datasets.GeneratorBasedBuilder):
    """GoodWiki long-article dataset builder. Version 1.0."""

    BUILDER_CONFIGS = [
        GoodWikiLongDatasetConfig(
            version=datasets.Version("1.0.0", ""), description="Goodwiki Long Articles"
        )
    ]
    BUILDER_CONFIG_CLASS = GoodWikiLongDatasetConfig

    def _info(self):
        """Return dataset metadata: description plus the feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(self.config.features),
        )

    def _split_generators(self, dl_manager):
        """Download the data files and declare the available splits.

        Fix: `_URLS` contains a "test" entry that `download_and_extract`
        fetched, but the original code only returned the TRAIN split, so the
        test partition was downloaded and then silently dropped. Both splits
        are now exposed (the repo's info.json documents a 200-instance test
        split).
        """
        data_dir = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"]}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one or more JSONL files.

        Args:
            filepath: a path or list of paths to UTF-8 JSONL files. Each line
                is a JSON object that must contain at least ``pageid``,
                ``title`` and ``markdown`` keys (the remaining keys are
                assumed to match the declared features — revid, description,
                categories).

        Normalization applied per example: ``pageid`` is renamed to ``id``,
        ``markdown`` becomes ``text`` with the title prepended as a markdown
        H1 heading, and ``url`` is always set to ``None``.
        """
        logger.info("generating examples from = %s", filepath)
        if isinstance(filepath, str):
            filepath = [filepath]
        key = 0
        for path in filepath:
            with open(path, encoding="utf-8") as data:
                for article_data in data:
                    article = json.loads(article_data)
                    article["id"] = article.pop("pageid")
                    # Prepend the title as an H1 so each text is self-contained.
                    article["text"] = "# " + article["title"] + "\n\n" + article.pop("markdown")
                    article["url"] = None
                    yield key, article
                    key += 1
info.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "n_instances": 1000,
4
+ "avg_num_words": 5092.245,
5
+ "min_char_length": 16436,
6
+ "max_char_length": 132301,
7
+ "source_dataset": {
8
+ "path": "data/goodwiki_long",
9
+ "name": null
10
+ }
11
+ },
12
+ "test": {
13
+ "n_instances": 200,
14
+ "avg_num_words": 5227.825,
15
+ "min_char_length": 16434,
16
+ "max_char_length": 115234,
17
+ "source_dataset": {
18
+ "path": "data/goodwiki_long",
19
+ "name": null
20
+ }
21
+ }
22
+ }
test/partition_0.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
train/partition_0.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db4dbf93038c83a3cc1b366bf72f164baa0221f90f4c9232ddfbbfb9c5054873
3
+ size 32131353