EricR401S committed
Commit
20a7f56
1 Parent(s): cbe92e1

third function draft

Files changed (1):
  reddit_dataset_loader.py  +33 -0
reddit_dataset_loader.py CHANGED
@@ -183,10 +183,43 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
             ),
         ]
 
+    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, filepath, split):
+        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+
+        with open(filepath, encoding="utf-8") as f:
+            for key, row in enumerate(f):
+                data = json.loads(row)
+                if self.config.name == "first_domain":
+                    # Yields examples as (key, example) tuples
+                    yield key, {
+                        "subreddit": data["subreddit"],
+                        "id": data["id"],
+                        "title": data["title"],
+                        "text": data["text"],
+                        "url": data["url"],
+                        "score": data["score"],
+                        "author": data["author"],
+                        "date": data["date"],
+                    }
+                else:
+                    yield key, {
+                        "subreddit": data["subreddit"],
+                        "id": data["id"],
+                        "title": data["title"],
+                        "text": data["text"],
+                        "url": data["url"],
+                        "score": data["score"],
+                        "author": data["author"],
+                        "date": data["date"],
+                    }
+
     # # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     # def _generate_examples(self, filepath, split):
     #     # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
     #     # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+
     #     with open(filepath, encoding="utf-8") as f:
     #         for key, row in enumerate(f):
     #             data = json.loads(row)
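
For context only (not part of this commit): the added _generate_examples reads one JSON object per line from filepath and yields the eight Reddit fields unchanged; in this draft both config branches yield the same dict, so the "first_domain" check is still a placeholder from the dataset-script template. Below is a minimal, hypothetical sketch of the JSON-lines input the method appears to expect, parsed with the same enumerate/json.loads loop; the file name and field values are illustrative assumptions, not taken from the repository.

# Sketch only: hypothetical sample of the JSONL rows the loader reads.
import json

sample_row = {
    "subreddit": "datasets",                          # hypothetical values
    "id": "abc123",
    "title": "Example post",
    "text": "Body of the post",
    "url": "https://reddit.com/r/datasets/abc123",
    "score": 42,
    "author": "some_user",
    "date": "2024-01-01",
}

# write one JSON object per line, as the loader expects
with open("sample_posts.jsonl", "w", encoding="utf-8") as f:
    f.write(json.dumps(sample_row) + "\n")

# the same parsing loop used in the committed method
with open("sample_posts.jsonl", encoding="utf-8") as f:
    for key, row in enumerate(f):
        data = json.loads(row)
        print(key, data["subreddit"], data["title"])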