soldni committed on
Commit
aa618cc
•
1 Parent(s): b0b0ae5
README.md CHANGED
@@ -26,6 +26,55 @@ tags:
 pretty_name: PES2O
 size_categories:
 - 10B<n<100B
+source_datasets:
+- allenai/s2orc
+configs:
+- v1
+- v2
+
+dataset_info:
+- config_name: v1
+  features:
+  - name: added
+    dtype: string
+  - name: created
+    dtype: string
+  - name: id
+    dtype: string
+  - name: source
+    dtype: string
+  - name: text
+    dtype: string
+  - name: version
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 100145555091
+    num_examples: 67624463
+  - name: validation
+    num_bytes: 556447813
+    num_examples: 162551
+- config_name: v2
+  features:
+  - name: added
+    dtype: string
+  - name: created
+    dtype: string
+  - name: id
+    dtype: string
+  - name: source
+    dtype: string
+  - name: text
+    dtype: string
+  - name: version
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 86572382178
+    num_examples: 38811179
+  - name: validation
+    num_bytes: 556854302
+    num_examples: 161032
 ---
 
 # PES2O 🌿🎓
@@ -52,6 +101,7 @@ Each document in the dataset is a dictionary with the following fields:
 - `text`: Text of the document. Paragraphs are separated by two newlines (`\n\n`).
 - `version`: version of PES2O.
 
+------
 
 ## PES2O V1
 
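For orientation, a record from either config can be pictured as a plain Python dict. In this sketch only the field names and their string types are taken from the `dataset_info` schema added above; every value is invented for illustration:

```python
# Illustrative record; all values below are made up. Only the field names
# and their string dtypes come from the dataset_info schema in the metadata.
example = {
    "added": "2022-12-01T00:00:00.000Z",    # when the paper entered the corpus (format assumed)
    "created": "2020-06-15T00:00:00.000Z",  # publication date (format assumed)
    "id": "230295880",                      # document identifier (value assumed)
    "source": "s2orc",                      # originating subset, e.g. s2orc or s2ag
    "text": "Title\n\nFirst paragraph.\n\nSecond paragraph.",
    "version": "v2",
}
```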
@@ -111,6 +161,8 @@ Unfiltered, the corpus contains 91.1M papers and 15.5B whitespace-separated toke
 |s2ag | valid | 111,228 | 24,398,512 |
 
 
+------
+
 ## PES2O V2
 
 
@@ -118,11 +170,17 @@ Unfiltered, the corpus contains 91.1M papers and 15.5B whitespace-separated toke
 
 - *Knowledge cutoff*: 2023-01-03
 - *Number of documents*: 38.97M
-- *Number of whitespace-separated tokens**: 42,28
+- *Number of whitespace-separated tokens*: 42.01B
 
 ### Processing
 
-TODO
+PES2O V2 is largely the same as V1, but it includes additional heuristics for the s2ag subset aimed at filtering out OCR errors from abstracts.
+
+First, we check whether an abstract was obtained from Semantic Scholar sources that are likely to contain OCR'ed content. For any abstract derived from those sources, we count how often the text contains subsequences matching `\b([A-Za-z]\s)([a-z]\s)*[A-Za-z]\b`, i.e. individual alphabetic letters separated by a space. This heuristic matches cases such as `A b stra ct` (2 matching subsequences), where the OCR parser inserted erroneous spaces.
+Any abstract with more than 4 matching subsequences is removed.
+
+
+#### Statistics
 
 | Dataset | Split | # Documents | # Words |
 |:-------:|:-----:|------------:|---------------:|
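A minimal sketch of the abstract filter described in the hunk above, using the regex and threshold quoted there. The `keep_abstract` and `MAX_OCR_RUNS` names are ours, and the source-based pre-check (restricting the test to OCR-prone Semantic Scholar sources) is omitted, since it relies on metadata not shown here:

```python
import re

# Regex quoted in the Processing section: a run of single letters separated
# by spaces, the kind of artifact an OCR parser produces (e.g. "A b stra ct").
OCR_RUN = re.compile(r"\b([A-Za-z]\s)([a-z]\s)*[A-Za-z]\b")

# Threshold from the Processing section: more than 4 matches => drop.
MAX_OCR_RUNS = 4


def keep_abstract(text: str) -> bool:
    """Return True if an abstract passes the OCR-artifact check."""
    n_runs = sum(1 for _ in OCR_RUN.finditer(text))
    return n_runs <= MAX_OCR_RUNS
```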
pes2o.py DELETED
@@ -1,99 +0,0 @@
-import gzip
-import json
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_URL = "https://huggingface.co/datasets/allenai/pes2o"
-
-_VARIANTS = ["v1", "v2"]
-
-_N_SHARDS_PER_SPLIT = {  # split keys must match those used in _split_generators
-    "v1": {"train": {'s2orc': 10, 's2ag': 10}, "validation": {'s2orc': 1, 's2ag': 1}},
-    "v2": {"train": {'s2orc': 10, 's2ag': 10}, "validation": {'s2orc': 1, 's2ag': 1}},
-}
-
-_DATA_URL = "\
-https://huggingface.co/datasets/allenai/pes2o/resolve/main/\
-{name}/{subset}/{split}/{shard:05d}.json.gz\
-"
-
-_DESCRIPTION = "\
-The PES2O dataset is a collection of ~40M creative commons licensed academic \
-papers, cleaned, filtered, and formatted for pre-training of language models. \
-It is derived from the Semantic Scholar Open Research Corpus (Lo et al., 2020), \
-or S2ORC.\
-"
-
-_CITATION = ""
-
-
-class pes2o(datasets.GeneratorBasedBuilder):
-    """Pretraining Efficiently on S2ORC!"""
-
-    BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "added": datasets.Value("string"),
-                    "created": datasets.Value("string"),
-                    "id": datasets.Value("string"),
-                    "source": datasets.Value("string"),
-                    "text": datasets.Value("string"),
-                    "version": datasets.Value("string"),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_URL,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        data_urls = {}
-        for split in ["train", "validation"]:
-            n_shards = _N_SHARDS_PER_SPLIT[self.config.name][split]
-            data_urls[split] = [
-                _DATA_URL.format(
-                    name=self.config.name,
-                    split=split,
-                    subset=subset,
-                    shard=index,  # fills the {shard:05d} placeholder
-                )
-                for subset, shard_count in n_shards.items()
-                for index in range(shard_count)
-            ]
-        train_downloaded_files = dl_manager.download(
-            data_urls["train"]
-        )
-        validation_downloaded_files = dl_manager.download(
-            data_urls["validation"]
-        )
-        return [
-            datasets.SplitGenerator(
-                name=str(datasets.Split.TRAIN), gen_kwargs={
-                    "filepaths": train_downloaded_files
-                }),
-            datasets.SplitGenerator(
-                name=str(datasets.Split.VALIDATION), gen_kwargs={
-                    "filepaths": validation_downloaded_files
-                }
-            ),
-        ]
-
-    def _generate_examples(self, filepaths):
-        """This function yields the examples in the raw (text) form by
-        iterating over all the files."""
-        id_ = 0
-        for filepath in filepaths:
-            logger.info("generating examples from = %s", filepath)
-            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
-                for line in f:
-                    if line:
-                        example = json.loads(line)
-                        yield id_, example
-                        id_ += 1
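With the v1/v2 configs now declared in the README metadata, this loader script is no longer required. A minimal loading sketch, assuming a recent version of the Hugging Face `datasets` library (the streaming flag is an illustrative choice, not something this commit prescribes):

```python
from datasets import load_dataset

# Load the v2 config declared in the README metadata; streaming avoids
# downloading the full ~86 GB train split up front.
pes2o = load_dataset("allenai/pes2o", "v2", streaming=True)

for doc in pes2o["train"]:
    print(doc["id"], doc["source"], doc["text"][:80])
    break
```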
v2/{valid → validation}/s2ag/00000.json.gz RENAMED
File without changes
v2/{valid → validation}/s2orc/00000.json.gz RENAMED
File without changes