Version 2: added MBPP
Files changed:
- MultiPL-E.py (+24 −6)
- README.md (+5 −3)
- dataset_infos.json (+0 −0)
MultiPL-E.py CHANGED

@@ -23,10 +23,13 @@ _CITATION = """\
 _DESCRIPTION = """\
 MultiPL-E is a dataset for evaluating large language models for code \
 generation that supports 18 programming languages. It takes the OpenAI \
-"HumanEval" Python benchmarks and uses little compilers to translate them \
-to other languages. It is easy to add support for new languages …
+"HumanEval" and the MBPP Python benchmarks and uses little compilers to \
+translate them to other languages. It is easy to add support for new languages \
+and benchmarks.
 """
 
+_SRCDATA = [ "humaneval", "mbpp" ]
+
 _LANGUAGES = [
     "cpp", "cs", "d", "go", "java", "jl", "js", "lua", "php", "pl", "py", "r",
     "rb", "rkt", "rs", "scala", "sh", "swift", "ts"
@@ -39,29 +42,44 @@ class MultiPLEBuilderConfig(datasets.BuilderConfig):
 
     def __init__(
         self,
+        srcdata,
         language,
         variation,
         **kwargs,
     ):
         self.language = language
         self.variation = variation
-        …
+        self.srcdata = srcdata
+        name = f"{srcdata}-{language}"
+        if variation != "reworded":
+            name = f"{name}-{variation}"
         kwargs["name"] = name
         super(MultiPLEBuilderConfig, self).__init__(**kwargs)
 
+def _is_interesting(srcdata: str, variation: str):
+    if srcdata == "humaneval":
+        return True
+    if srcdata == "mbpp":
+        # MBPP does not have doctests, so these are the only interesting
+        # variations
+        return variation in [ "keep", "reworded" ]
+
 class MultiPLE(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIG_CLASS = MultiPLEBuilderConfig
 
     BUILDER_CONFIGS = [
         MultiPLEBuilderConfig(
+            srcdata=srcdata,
             language=language,
             variation=variation,
-            version=datasets.Version("…
+            version=datasets.Version("2.0.0"))
+        for srcdata in _SRCDATA
         for language in _LANGUAGES
         for variation in _VARIATIONS
+        if _is_interesting(srcdata, variation)
     ]
 
-    DEFAULT_CONFIG_NAME = "cpp…
+    DEFAULT_CONFIG_NAME = "humaneval-cpp"
 
     def _info(self):
         return datasets.DatasetInfo(
@@ -85,7 +103,7 @@ class MultiPLE(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager: datasets.DownloadManager):
         files = dl_manager.download(
-            f"https://raw.githubusercontent.com/nuprl/MultiPL-E/…
+            f"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/{self.config.srcdata}-{self.config.language}-{self.config.variation}.json"
         )
         return [
             datasets.SplitGenerator(
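Taken together, each config is now named {srcdata}-{language}, with a -{variation} suffix appended only for non-default variations ("reworded" is the default and is dropped from the name). Below is a minimal sketch of the names the comprehension above generates. _VARIATIONS is not shown in this diff, so the four values used here are an assumption for illustration, and the config_name helper is hypothetical, written only to mirror the __init__ logic:

# Sketch: reproduce the config-name generation from the hunk above.
_SRCDATA = ["humaneval", "mbpp"]
_LANGUAGES = ["cpp", "lua"]  # abbreviated; the full list appears above
_VARIATIONS = ["keep", "transform", "reworded", "remove"]  # assumed values

def _is_interesting(srcdata: str, variation: str):
    if srcdata == "humaneval":
        return True
    if srcdata == "mbpp":
        # MBPP has no doctests, so only these variations are kept
        return variation in ["keep", "reworded"]

def config_name(srcdata: str, language: str, variation: str) -> str:
    # Mirrors MultiPLEBuilderConfig.__init__ above
    name = f"{srcdata}-{language}"
    if variation != "reworded":  # "reworded" is the default variation
        name = f"{name}-{variation}"
    return name

print([config_name(s, lang, v)
       for s in _SRCDATA
       for lang in _LANGUAGES
       for v in _VARIATIONS
       if _is_interesting(s, v)])
# ['humaneval-cpp-keep', 'humaneval-cpp-transform', 'humaneval-cpp',
#  'humaneval-cpp-remove', 'humaneval-lua-keep', 'humaneval-lua-transform',
#  'humaneval-lua', 'humaneval-lua-remove',
#  'mbpp-cpp-keep', 'mbpp-cpp', 'mbpp-lua-keep', 'mbpp-lua']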
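Note that the prompt file downloaded by _split_generators always spells out the variation, even when the config name omits the default "reworded". A small sketch of the URL construction, using the commit hash pinned in the hunk above; prompt_url is a hypothetical helper mirroring the f-string in the diff:

# Sketch: the raw-GitHub URL that _split_generators builds for a config.
COMMIT = "1f21818a0f3265fd0a41c3954e30aab47f34063a"  # pinned in the diff above

def prompt_url(srcdata: str, language: str, variation: str) -> str:
    return (f"https://raw.githubusercontent.com/nuprl/MultiPL-E/{COMMIT}"
            f"/prompts/{srcdata}-{language}-{variation}.json")

# The default humaneval-cpp config (variation "reworded") fetches:
print(prompt_url("humaneval", "cpp", "reworded"))
# https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cpp-reworded.json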
README.md CHANGED

@@ -16,6 +16,7 @@ size_categories:
 source_datasets:
 - original
 - extended|openai_humaneval
+- extended|mbpp
 tags: []
 task_categories: []
 task_ids: []
@@ -34,8 +35,9 @@ task_ids: []
 
 MultiPL-E is a dataset for evaluating large language models for code
 generation that supports 18 programming languages. It takes the OpenAI
-"HumanEval" Python benchmarks and uses little compilers to translate them
-to other languages. It is easy to add support for new languages …
+"HumanEval" and the MBPP Python benchmarks and uses little compilers to
+translate them to other languages. It is easy to add support for new languages
+and benchmarks.
 
 ## Example
 
@@ -50,7 +52,7 @@ LANG = "lua"
 MODEL_NAME = "Salesforce/codegen-350M-multi"
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).half().cuda()
-problems = datasets.load_dataset("nuprl/MultiPL-E", LANG)
+problems = datasets.load_dataset("nuprl/MultiPL-E", f"humaneval-{LANG}")
 
 def stop_at_stop_token(decoded_string, problem):
     """
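Because config names now carry the source benchmark, the MBPP prompts for the same language load analogously; a one-line sketch, relying on the mbpp-{language} names generated in MultiPL-E.py:

# "reworded" is the default variation, so no suffix is needed:
problems = datasets.load_dataset("nuprl/MultiPL-E", f"mbpp-{LANG}")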
dataset_infos.json CHANGED

The diff for this file is too large to render. See raw diff.