|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json |
|
import datasets |
|
from itertools import product |
|
|
|
|
|
_DESCRIPTION = """ |
|
Intro Programming. A dataset of open student submissions to programming assignments. |
|
|
|
""" |
|
|
|
_DUBLIN_URLS = { |
|
"data": { |
|
"train": f"./data/dublin_data_train.jsonl", |
|
"test": f"./data/dublin_data_test.jsonl", |
|
}, |
|
"repair": { |
|
"train": f"./data/dublin_repair_train.jsonl", |
|
"test": f"./data/dublin_repair_test.jsonl", |
|
} |
|
} |
|
|
|
_SINGAPORE_URLS = { |
|
"data": { |
|
"train": f"./data/singapore_data_train.jsonl", |
|
}, |
|
"repair": { |
|
"train": f"./data/singapore_repair_train.jsonl", |
|
} |
|
} |
|
|
|
_URLS = { |
|
"dublin": _DUBLIN_URLS, |
|
"singapore": _SINGAPORE_URLS |
|
} |
|
|
|
class IntroProgConfig(datasets.BuilderConfig): |
|
""" BuilderConfig for StaQC.""" |
|
|
|
def __init__(self, **kwargs): |
|
"""BuilderConfig for StaQC. |
|
Args: |
|
**kwargs: keyword arguments forwarded to super. |
|
|
|
""" |
|
super(IntroProgConfig, self).__init__(**kwargs) |
|
|
|
|
|
class IntroProg(datasets.GeneratorBasedBuilder): |
|
|
|
VERSION = datasets.Version("1.0.0") |
|
|
|
|
|
|
|
tasks = [("data", "Submissions to the programming assignments."), |
|
("repair", "Buggy programs and ground truth repair(s)."),] |
|
|
|
|
|
sources = ["dublin", "singapore"] |
|
|
|
BUILDER_CONFIGS = [] |
|
for (task, description), source in product(tasks, sources): |
|
BUILDER_CONFIGS.append( |
|
IntroProgConfig( |
|
name=f"{source}_{task}", |
|
description=description, |
|
version=VERSION, |
|
) |
|
) |
|
|
|
|
|
def _info(self): |
|
|
|
features = datasets.Features({ |
|
"submission_id": datasets.Value("int32"), |
|
"func_code": datasets.Value("string"), |
|
|
|
|
|
"assignment_id": datasets.Value("string"), |
|
"func_name": datasets.Value("string"), |
|
"description": datasets.Value(dtype='string'), |
|
"test": datasets.Value(dtype='string'), |
|
}) |
|
|
|
if self.config.name.split("_")[1] == "repair": |
|
features["annotation"] = datasets.Value("string") |
|
if self.config.name.split("_")[1] == "bug": |
|
features["comments"] = datasets.Value("string") |
|
|
|
if self.config.name.split("_")[0] == "dublin": |
|
features["user"] = datasets.Value("string") |
|
features["academic_year"] = datasets.Value('int32') |
|
|
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
supervised_keys=None, |
|
) |
|
|
|
|
|
def _split_generators(self, dl_manager: datasets.DownloadManager): |
|
source, task = self.config.name.split("_") |
|
urls = _URLS[source][task] |
|
downloaded_files = dl_manager.download_and_extract(urls) |
|
|
|
splits = [] |
|
for name, files in downloaded_files.items(): |
|
splits.append(datasets.SplitGenerator(name=name, gen_kwargs={"filepath": files})) |
|
|
|
return splits |
|
|
|
def _generate_examples(self, filepath): |
|
with open(filepath, "r") as f: |
|
lines = f.read().splitlines() |
|
for key, line in enumerate(lines): |
|
d = json.loads(line) |
|
d = {k:v for k, v in d.items() if k in self.info.features} |
|
yield key, d |