James Briggs
committed on
Commit
•
66d4de0
1
Parent(s):
e554e66
Changed data script so it takes a list of files instead of a directory
Browse files- wikipedia-bert-128.py +4 -6
wikipedia-bert-128.py
CHANGED
@@ -117,18 +117,17 @@ class WikipediaBERT128(datasets.GeneratorBasedBuilder):
|
|
117 |
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
|
118 |
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
|
119 |
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
120 |
-
data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
|
121 |
return [
|
122 |
datasets.SplitGenerator(
|
123 |
name=datasets.Split.TRAIN,
|
124 |
# These kwargs will be passed to _generate_examples
|
125 |
gen_kwargs={
|
126 |
-
"
|
127 |
},
|
128 |
),
|
129 |
]
|
130 |
|
131 |
-
def _generate_examples(self, filepath):
|
132 |
""" Yields examples as (key, example) tuples. """
|
133 |
# This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
|
134 |
# The `key` is here for legacy reason (tfds) and is not important in itself.
|
@@ -142,10 +141,8 @@ class WikipediaBERT128(datasets.GeneratorBasedBuilder):
|
|
142 |
'masked_lm_ids', # masked_lm_labels=None : label of masked tokens with padding as 0.
|
143 |
'next_sentence_labels' # next_sentence_label=None : 1 if next sentence, 0 otherwise
|
144 |
)
|
145 |
-
tfrecords = Path(filepath).glob("*.tfrecord")
|
146 |
-
|
147 |
highest_id_ = -1
|
148 |
-
for rec in tfrecords:
|
149 |
reader = tfrecord_loader(rec, None, list(TFRECORD_KEYS))
|
150 |
for id_, d in enumerate(reader, start=highest_id_+1):
|
151 |
highest_id_ = id_
|
@@ -164,3 +161,4 @@ class WikipediaBERT128(datasets.GeneratorBasedBuilder):
|
|
164 |
"labels": labels,
|
165 |
"next_sentence_label": d["next_sentence_labels"]
|
166 |
}
|
|
|
|
117 |
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
|
118 |
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
|
119 |
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
|
|
120 |
return [
|
121 |
datasets.SplitGenerator(
|
122 |
name=datasets.Split.TRAIN,
|
123 |
# These kwargs will be passed to _generate_examples
|
124 |
gen_kwargs={
|
125 |
+
"data_files": self.config.data_files["train"],
|
126 |
},
|
127 |
),
|
128 |
]
|
129 |
|
130 |
+
def _generate_examples(self, data_files):
|
131 |
""" Yields examples as (key, example) tuples. """
|
132 |
# This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
|
133 |
# The `key` is here for legacy reason (tfds) and is not important in itself.
|
|
|
141 |
'masked_lm_ids', # masked_lm_labels=None : label of masked tokens with padding as 0.
|
142 |
'next_sentence_labels' # next_sentence_label=None : 1 if next sentence, 0 otherwise
|
143 |
)
|
|
|
|
|
144 |
highest_id_ = -1
|
145 |
+
for rec in data_files:
|
146 |
reader = tfrecord_loader(rec, None, list(TFRECORD_KEYS))
|
147 |
for id_, d in enumerate(reader, start=highest_id_+1):
|
148 |
highest_id_ = id_
|
|
|
161 |
"labels": labels,
|
162 |
"next_sentence_label": d["next_sentence_labels"]
|
163 |
}
|
164 |
+
|