# intro_prog / intro_prog.py
# koutch's picture
# Upload 8 files
# 2a00acc
# raw
# history blame
# 4.11 kB
# coding=utf-8
# Copyright 2023 Charles Koutcheme
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import datasets
from itertools import product
_DESCRIPTION = """
Intro Programming. A dataset of open student submissions to programming assignments.
"""
_DUBLIN_URLS = {
"data": {
"train": f"./data/dublin_data_train.jsonl",
"test": f"./data/dublin_data_test.jsonl",
},
"repair": {
"train": f"./data/dublin_repair_train.jsonl",
"test": f"./data/dublin_repair_test.jsonl",
}
}
_SINGAPORE_URLS = {
"data": {
"train": f"./data/singapore_data_train.jsonl",
},
"repair": {
"train": f"./data/singapore_repair_train.jsonl",
}
}
_URLS = {
"dublin": _DUBLIN_URLS,
"singapore": _SINGAPORE_URLS
}
class IntroProgConfig(datasets.BuilderConfig):
    """BuilderConfig for the IntroProg dataset.

    Thin wrapper around ``datasets.BuilderConfig``; it exists so the
    builder can declare one named configuration per (source, task) pair.
    (The original docstrings said "StaQC" — a copy-paste from another
    dataset script.)
    """

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to
                ``datasets.BuilderConfig`` (e.g. ``name``,
                ``description``, ``version``).
        """
        # Zero-argument super() — identical behavior to the explicit
        # two-argument Python 2 style form.
        super().__init__(**kwargs)
class IntroProg(datasets.GeneratorBasedBuilder):
    """Builder for the IntroProg dataset of student programming submissions.

    One configuration exists per (source, task) pair, named
    ``"<source>_<task>"`` (e.g. ``"dublin_repair"``).
    """

    VERSION = datasets.Version("1.0.0")

    # Tasks currently exposed. A "bug" task (buggy programs and bug
    # categories) is anticipated below in _info() but not yet shipped.
    # A "metadata" split with the full metadata is also planned.
    tasks = [
        ("data", "Submissions to the programming assignments."),
        ("repair", "Buggy programs and ground truth repair(s)."),
    ]
    # ("bug", "Buggy programs and bug categories.")
    sources = ["dublin", "singapore"]

    # NOTE: kept as a class-body loop on purpose — a comprehension here
    # could not reference the class attribute VERSION (class scope is
    # not visible inside comprehension bodies).
    BUILDER_CONFIGS = []
    for (task, description), source in product(tasks, sources):
        BUILDER_CONFIGS.append(
            IntroProgConfig(
                name=f"{source}_{task}",
                description=description,
                version=VERSION,
            )
        )

    def _info(self):
        """Describe the feature schema of the selected configuration.

        Returns:
            datasets.DatasetInfo: description plus the feature dict,
            extended with task- and source-specific columns.
        """
        # Config names are "<source>_<task>"; split once instead of
        # re-splitting for every feature check.
        source, task = self.config.name.split("_")
        features = datasets.Features({
            "submission_id": datasets.Value("int32"),
            "func_code": datasets.Value("string"),
            # assignment information
            "assignment_id": datasets.Value("string"),
            "func_name": datasets.Value("string"),
            "description": datasets.Value("string"),
            "test": datasets.Value("string"),
        })
        # Task-specific extra columns.
        if task == "repair":
            features["annotation"] = datasets.Value("string")
        if task == "bug":
            features["comments"] = datasets.Value("string")
        # The Dublin source additionally records author and year.
        if source == "dublin":
            features["user"] = datasets.Value("string")
            features["academic_year"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download this config's files and declare one split per file."""
        source, task = self.config.name.split("_")
        urls = _URLS[source][task]
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(name=name, gen_kwargs={"filepath": files})
            for name, files in downloaded_files.items()
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one JSONL file.

        Keys are 0-based line numbers; each example keeps only the
        columns declared in ``self.info.features``.
        """
        # Stream line by line instead of reading the whole file into
        # memory; explicit UTF-8 since submissions may contain
        # non-ASCII text.
        with open(filepath, "r", encoding="utf-8") as f:
            for key, line in enumerate(f):
                record = json.loads(line)
                # Drop any columns not declared for this config.
                yield key, {k: v for k, v in record.items()
                            if k in self.info.features}