# coding=utf-8
# Copyright 2023 Charles Koutcheme and the original authors of the datasets
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import datasets
from itertools import product
_DUBLIN_DESCRIPTION = """
The Dublin programming dataset is a dataset composed of students' submissions
to introductory programming assignments at the University of Dublin.
Students submitted these programs for multiple programming courses over the duration of three academic years."""
_SINGAPORE_DESCRIPTION = """
This dataset contains 2442 correct and 1783 buggy program attempts by 361 undergraduate students crediting
an introduction to Python programming course at NUS (National University of Singapore).
"""
_NEW_CALEDONIA_DESCRIPTION = """
The NewCaledonia dataset includes the programs submitted in 2020 by a group of 60 students from the University of New Caledonia,
on a programming training platform. This plateform were developed and made available by the Computer Science department from the Orléans'
Technological Institute (University of Orléans, France). This release contains a subset of the assignments.
"""
_DUBLIN_HOMEPAGE = """https://figshare.com/articles/dataset/_5_Million_Python_Bash_Programming_Submissions_for_5_Courses_Grades_for_Computer-Based_Exams_over_3_academic_years_/12610958"""
_SINGAPORE_HOMEPAGE = """https://github.com/githubhuyang/refactory"""
_NEW_CALEDONIA_HOMEPAGE = """https://github.com/GCleuziou/code2aes2vec/tree/master/Datasets"""
_DUBLIN_CITATION = """
@inproceedings{azcona2019user2code2vec,
title={user2code2vec: Embeddings for Profiling Students Based on Distributional Representations of Source Code},
author={Azcona, David and Arora, Piyush and Hsiao, I-Han and Smeaton, Alan},
booktitle={Proceedings of the 9th International Learning Analytics & Knowledge Conference (LAK’19)},
year={2019},
organization={ACM}
}
@inproceedings{DBLP:conf/edm/CleuziouF21,
author = {Guillaume Cleuziou and
Fr{\'{e}}d{\'{e}}ric Flouvat},
editor = {Sharon I{-}Han Hsiao and
Shaghayegh (Sherry) Sahebi and
Fran{\c{c}}ois Bouchet and
Jill{-}J{\^{e}}nn Vie},
title = {Learning student program embeddings using abstract execution traces},
booktitle = {Proceedings of the 14th International Conference on Educational Data
Mining, {EDM} 2021, virtual, June 29 - July 2, 2021},
publisher = {International Educational Data Mining Society},
year = {2021},
timestamp = {Wed, 09 Mar 2022 16:47:22 +0100},
biburl = {https://dblp.org/rec/conf/edm/CleuziouF21.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_SINGAPORE_CITATION = """
@inproceedings{yang2019refactory,
title={Re-factoring based Program Repair applied to Programming Assignments},
author={Hu, Yang and Ahmed, Umair Z. and Mechtaev, Sergey and Leong, Ben and Roychoudhury, Abhik},
booktitle={2019 34th IEEE/ACM International Conference on Automated Software Engineering (ASE)},
pages={388--398},
year={2019},
organization={IEEE/ACM}
}
"""
_NEW_CALEDONIA_CITATION = """
@inproceedings{DBLP:conf/edm/CleuziouF21,
author = {Guillaume Cleuziou and
Fr{\'{e}}d{\'{e}}ric Flouvat},
editor = {Sharon I{-}Han Hsiao and
Shaghayegh (Sherry) Sahebi and
Fran{\c{c}}ois Bouchet and
Jill{-}J{\^{e}}nn Vie},
title = {Learning student program embeddings using abstract execution traces},
booktitle = {Proceedings of the 14th International Conference on Educational Data
Mining, {EDM} 2021, virtual, June 29 - July 2, 2021},
publisher = {International Educational Data Mining Society},
year = {2021},
timestamp = {Wed, 09 Mar 2022 16:47:22 +0100},
biburl = {https://dblp.org/rec/conf/edm/CleuziouF21.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DESCRIPTION = """
Intro Programming. A dataset of student submissions to programming assignments.
"""
_DUBLIN_URLS = {
"metadata": {
"train": "./data/dublin_metadata_train.jsonl",
"test": "./data/dublin_metadata_test.jsonl"
},
"data": {
"train": f"./data/dublin_data_train.jsonl",
"test": f"./data/dublin_data_test.jsonl",
},
"repair": {
"train": f"./data/dublin_repair_train.jsonl",
"test": f"./data/dublin_repair_test.jsonl",
}
}
_SINGAPORE_URLS = {
"metadata": {
"train": "./data/singapore_metadata_train.jsonl",
},
"data": {
"train": f"./data/singapore_data_train.jsonl",
},
"repair": {
"train": f"./data/singapore_repair_train.jsonl",
}
}
_NEW_CALEDONIA_URLS = {
"metadata": {
"train": "./data/newcaledonia_metadata_train.jsonl",
},
"data": {
"train": f"./data/newcaledonia_data_train.jsonl",
},
}
_URLS = {
"dublin": _DUBLIN_URLS,
"singapore": _SINGAPORE_URLS,
"newcaledonia": _NEW_CALEDONIA_URLS,
}
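# Each of the URL dicts above maps a task name to per-split JSONL files. When
# this script runs as a `datasets` loading script, the DownloadManager resolves
# these repository-relative paths to local files at load time.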
class IntroProgConfig(datasets.BuilderConfig):
""" BuilderConfig for IntroProg."""
def __init__(self, **kwargs):
"""BuilderConfig for IntroProg.
Args:
**kwargs: keyword arguments forwarded to super.
"""
        super().__init__(**kwargs)
class IntroProg(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("2.12.0")
# splits "data", "repair", "bugs"
# also add here the "metadata" split which will also contain the full metadata
tasks = [("metadata", "Information about the programming assignments."),
("data", "Submissions to the programming assignments."),
("repair", "Buggy programs and ground truth repair(s)."),]
# ("bug", "Buggy programs and bug categories.")]
sources = ["dublin", "singapore"]
configurations = list(product(tasks, sources))
configurations.append((tasks[0], "newcaledonia"))
configurations.append((tasks[1], "newcaledonia"))
BUILDER_CONFIGS = []
    for (task, description), source in configurations:
        BUILDER_CONFIGS.append(
            IntroProgConfig(
                name=f"{source}_{task}",
                version=VERSION,
                description=description,
            )
        )
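    # The resulting configuration names are:
    #   dublin_metadata, singapore_metadata, dublin_data, singapore_data,
    #   dublin_repair, singapore_repair, newcaledonia_metadata, newcaledonia_data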
def _info(self):
source, task = self.config.name.split("_")
if source == "dublin":
description = _DUBLIN_DESCRIPTION
citation = _DUBLIN_CITATION
homepage = _DUBLIN_HOMEPAGE
elif source == "singapore":
            description = _SINGAPORE_DESCRIPTION
citation = _SINGAPORE_CITATION
homepage = _SINGAPORE_HOMEPAGE
elif source == "newcaledonia":
description = _NEW_CALEDONIA_DESCRIPTION
citation = _NEW_CALEDONIA_CITATION
homepage = _NEW_CALEDONIA_HOMEPAGE
else:
description = ""
citation = ""
homepage = ""
main_features = datasets.Features({
"submission_id": datasets.Value("int32"),
"func_code": datasets.Value("string"),
# assignment information
"assignment_id": datasets.Value("string"),
"func_name": datasets.Value("string"),
"description": datasets.Value(dtype='string'),
"test": datasets.Value(dtype='string'),
})
if task == "data":
features = main_features
features["correct"] = datasets.Value(dtype="bool")
if source == "dublin":
features["user"] = datasets.Value("string")
features["academic_year"] = datasets.Value('int32')
                features["date"] = datasets.Value("timestamp[s]")
elif task == "metadata":
# metadata information
features = datasets.Features({
"assignment_id": datasets.Value("string"),
"func_name": datasets.Value("string"),
"reference_solution": datasets.Value("string"),
"description": datasets.Value("string"),
"test": datasets.Value("string"),
})
elif task == "repair":
features = main_features
features["annotation"] = datasets.Value("string")
if source == "dublin":
features["user"] = datasets.Value("string")
features["academic_year"] = datasets.Value('int32')
                features["date"] = datasets.Value("timestamp[s]")
elif task == "bug":
features = main_features
features["comments"] = datasets.Value("string")
return datasets.DatasetInfo(
description=description,
citation=citation,
homepage=homepage,
features=features,
supervised_keys=None,
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
source, task = self.config.name.split("_")
urls = _URLS[source][task]
downloaded_files = dl_manager.download_and_extract(urls)
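        # Build one split per entry in the URL dict: always "train", plus
        # "test" where the source provides it (currently only Dublin).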
splits = []
for name, files in downloaded_files.items():
splits.append(datasets.SplitGenerator(name=name, gen_kwargs={"filepath": files}))
return splits
def _generate_examples(self, filepath):
with open(filepath, "r") as f:
lines = f.read().splitlines()
for key, line in enumerate(lines):
d = json.loads(line)
d = {k:v for k, v in d.items() if k in self.info.features}
yield key, d
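# A minimal usage sketch (assumes the `datasets` library is installed and that
# this script is loaded together with its ./data/*.jsonl files, locally or from
# the hub; the repository path below is illustrative, and recent versions of
# `datasets` may additionally require trust_remote_code=True):
#
#     from datasets import load_dataset
#     ds = load_dataset("path/to/intro_prog", "dublin_data")
#     print(ds["train"][0]["func_code"])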