ndupuis committed
Commit
60ab0f0
1 Parent(s): 1b033fc

Delete loading script

Files changed (1)
  1. qiskit_humaneval.py +0 -92
qiskit_humaneval.py DELETED
@@ -1,92 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" qiskit_humaneval dataset"""
-
-import json
-import datasets
-import os
-import requests
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@misc{2406.14712,
-Author = {Sanjay Vishwakarma and Francis Harkins and Siddharth Golecha and Vishal Sharathchandra Bajpe and Nicolas Dupuis and Luca Buratti and David Kremer and Ismael Faro and Ruchir Puri and Juan Cruz-Benito},
-Title = {Qiskit HumanEval: An Evaluation Benchmark For Quantum Code Generative Models},
-Year = {2024},
-Eprint = {arXiv:2406.14712},
-}
-"""
-
-_DESCRIPTION = """\
-qiskit_humaneval is a dataset for evaluating LLM's at writing Qiskit code.
-"""
-
-_HOMEPAGE = "https://github.com/qiskit-community/qiskit-human-eval"
-
-_LICENSE = "apache-2.0"
-
-_URL = "https://raw.githubusercontent.com/qiskit-community/qiskit-human-eval/"\
-    "refs/heads/main/dataset/dataset_qiskit_test_human_eval.json"
-
-class QiskitHumanEval(datasets.GeneratorBasedBuilder):
-    """ qiskit_humaneval dataset
-
-    0.1.0: first version of the dataset
-    """
-
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                'task_id': datasets.Value('string'),
-                'prompt': datasets.Value('string'),
-                'canonical_solution': datasets.Value('string'),
-                'test': datasets.Value('string'),
-                'entry_point': datasets.Value('string'),
-                'difficulty_scale': datasets.Value('string')
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager):
-        filepath = dl_manager.download_and_extract(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": filepath,
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        with open(filepath, 'r', encoding="UTF-8") as in_json:
-            for row in json.load(in_json):
-                id_ = row['task_id']
-                yield id_, {
-                    'task_id': row['task_id'],
-                    'prompt': row['prompt'],
-                    'canonical_solution': row['canonical_solution'],
-                    'test': row['test'],
-                    'entry_point': row['entry_point'],
-                    'difficulty_scale': row['difficulty_scale']
-                }