|
import json
import os

import datasets
from datasets.tasks import QuestionAnsweringExtractive
|
|
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
_CITATION = """\ |
|
@article{2016arXiv160605250R, |
|
author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev}, |
|
Konstantin and {Liang}, Percy}, |
|
title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}", |
|
journal = {arXiv e-prints}, |
|
year = 2016, |
|
eid = {arXiv:1606.05250}, |
|
pages = {arXiv:1606.05250}, |
|
archivePrefix = {arXiv}, |
|
eprint = {1606.05250}, |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
Demo... |
|
""" |
|
|
|
_URL = "https://github.com/rajivmehtaflex/PythonExp/raw/main/HFDatasets/reddit_data.tar.gz" |
|
|
|
|
|
class RedditTopicsTargz(datasets.GeneratorBasedBuilder):
    """Reddit topics dataset: posts loaded from a JSON Lines file inside a tar.gz archive."""

    def _info(self):
        """Return the dataset metadata (feature schema, homepage, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sub": datasets.Value("string"),  # subreddit name
                    "title": datasets.Value("string"),
                    "selftext": datasets.Value("string"),  # post body text
                    "upvote_ratio": datasets.Value("float32"),
                    "id": datasets.Value("string"),  # Reddit post id
                    "created_utc": datasets.Value("float32"),  # epoch timestamp
                }
            ),
            # This dataset defines no (input, target) pairing.
            supervised_keys=None,
            homepage="https://github.com/rajivmehtapy/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive; expose a single TRAIN split.

        Args:
            dl_manager: `datasets.DownloadManager` used to fetch `_URL`.
        """
        path = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # os.path.join is portable, unlike manual '/' concatenation.
                gen_kwargs={"filepath": os.path.join(path, "reddit_data.jsonl")},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield `(key, example)` pairs parsed from a JSON Lines file.

        Args:
            filepath: path to the extracted ``reddit_data.jsonl``.

        Yields:
            ``(index, dict)`` tuples whose dicts match the features
            declared in `_info`.
        """
        logger.info("Generating examples from %s", filepath)
        with open(filepath, encoding="utf-8") as fp:
            for idx, line in enumerate(fp):
                # Skip blank lines (e.g. a trailing newline) that would
                # otherwise crash json.loads.
                if not line.strip():
                    continue
                yield idx, json.loads(line)