Modalities: Text
Formats: parquet
Languages: code
Libraries: Datasets, pandas
albertvillanova committed
Commit d5478a4
1 parent: fdeb807

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (f18c1083190b63c04aa75a523272a99a94784863)
- Delete loading script (946dd8dff11a705b83836687d2051d8429c23844)
- Delete loading script auxiliary file (c16948ddc82f70bbe3ca778dc91382d71b334cd7)
- Delete loading script auxiliary file (f9b0bc0e4d8417416690684915dcdfe3627729ab)
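With this change the train/validation/test splits are served directly from the Parquet files added below, so the dataset loads without executing any repository code. A minimal sketch of loading it with the datasets library, assuming the repository id simply matches the dataset card name:

from datasets import load_dataset

# Repository id assumed from the dataset card title; adjust if the dataset
# lives under a namespace on the Hub.
ds = load_dataset("code_x_glue_cc_code_to_code_trans")

print(ds)                      # DatasetDict with train/validation/test splits
print(ds["train"][0]["java"])  # Java side of the first parallel function
print(ds["train"][0]["cs"])    # C# side of the same function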

README.md CHANGED
@@ -29,16 +29,25 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 4372657
+    num_bytes: 4372641
     num_examples: 10300
   - name: validation
-    num_bytes: 226415
+    num_bytes: 226407
     num_examples: 500
   - name: test
-    num_bytes: 418595
+    num_bytes: 418587
     num_examples: 1000
-  download_size: 4876035
-  dataset_size: 5017667
+  download_size: 2064764
+  dataset_size: 5017635
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---
 # Dataset Card for "code_x_glue_cc_code_to_code_trans"
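The new configs block maps each split to its Parquet shards under data/, which is what lets the Hub viewer and pandas read the data directly. A quick sketch of inspecting one shard with pandas, assuming the repository has been cloned or the shards downloaded locally:

import pandas as pd

# Local path assumed; the shard name comes from the data_files patterns above.
train_df = pd.read_parquet("data/train-00000-of-00001.parquet")

print(train_df.shape)           # expected (10300, 3) per the split metadata
print(list(train_df.columns))   # ['id', 'java', 'cs']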
 
code_x_glue_cc_code_to_code_trans.py DELETED
@@ -1,101 +0,0 @@
- from typing import List
-
- import datasets
-
- from .common import TrainValidTestChild
- from .generated_definitions import DEFINITIONS
-
-
- _DESCRIPTION = """The dataset is collected from several public repos, including Lucene(http://lucene.apache.org/), POI(http://poi.apache.org/), JGit(https://github.com/eclipse/jgit/) and Antlr(https://github.com/antlr/).
- We collect both the Java and C# versions of the codes and find the parallel functions. After removing duplicates and functions with the empty body, we split the whole dataset into training, validation and test sets."""
- _CITATION = """@article{DBLP:journals/corr/abs-2102-04664,
-   author    = {Shuai Lu and
-                Daya Guo and
-                Shuo Ren and
-                Junjie Huang and
-                Alexey Svyatkovskiy and
-                Ambrosio Blanco and
-                Colin B. Clement and
-                Dawn Drain and
-                Daxin Jiang and
-                Duyu Tang and
-                Ge Li and
-                Lidong Zhou and
-                Linjun Shou and
-                Long Zhou and
-                Michele Tufano and
-                Ming Gong and
-                Ming Zhou and
-                Nan Duan and
-                Neel Sundaresan and
-                Shao Kun Deng and
-                Shengyu Fu and
-                Shujie Liu},
-   title     = {CodeXGLUE: {A} Machine Learning Benchmark Dataset for Code Understanding
-                and Generation},
-   journal   = {CoRR},
-   volume    = {abs/2102.04664},
-   year      = {2021}
- }"""
-
-
- class CodeXGlueCcCodeToCodeTransImpl(TrainValidTestChild):
-     _DESCRIPTION = _DESCRIPTION
-     _CITATION = _CITATION
-
-     _FEATURES = {
-         "id": datasets.Value("int32"),  # Index of the sample
-         "java": datasets.Value("string"),  # The java version of the code
-         "cs": datasets.Value("string"),  # The C# version of the code
-     }
-
-     def generate_urls(self, split_name):
-         for key in "cs", "java":
-             yield key, f"{split_name}.java-cs.txt.{key}"
-
-     def _generate_examples(self, split_name, file_paths):
-         """This function returns the examples in the raw (text) form."""
-         # Open each file (one for java, and one for c#)
-         files = {k: open(file_paths[k], encoding="utf-8") for k in file_paths}
-
-         id_ = 0
-         while True:
-             # Read a single line from each file
-             entries = {k: files[k].readline() for k in file_paths}
-
-             empty = self.check_empty(entries)
-             if empty:
-                 # We are done: end of files
-                 return
-
-             entries["id"] = id_
-             yield id_, entries
-             id_ += 1
-
-
- CLASS_MAPPING = {
-     "CodeXGlueCcCodeToCodeTrans": CodeXGlueCcCodeToCodeTransImpl,
- }
-
-
- class CodeXGlueCcCodeToCodeTrans(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIG_CLASS = datasets.BuilderConfig
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
-     ]
-
-     def _info(self):
-         name = self.config.name
-         info = DEFINITIONS[name]
-         if info["class_name"] in CLASS_MAPPING:
-             self.child = CLASS_MAPPING[info["class_name"]](info)
-         else:
-             raise RuntimeError(f"Unknown python class for dataset configuration {name}")
-         ret = self.child._info()
-         return ret
-
-     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-         return self.child._split_generators(dl_manager=dl_manager)
-
-     def _generate_examples(self, split_name, file_paths):
-         return self.child._generate_examples(split_name, file_paths)
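Before this commit, the builder above produced examples by pairing the parallel Java and C# text files line by line. A standalone sketch of that pairing, with hypothetical local file names following the generate_urls() pattern:

def iter_pairs(java_path, cs_path):
    # Yield one {id, java, cs} record per aligned line, mirroring the deleted
    # _generate_examples logic. Note: unlike check_empty(), zip() silently
    # stops at the shorter file instead of raising on a length mismatch.
    with open(java_path, encoding="utf-8") as fj, open(cs_path, encoding="utf-8") as fc:
        for idx, (java_line, cs_line) in enumerate(zip(fj, fc)):
            yield {"id": idx, "java": java_line.rstrip("\n"), "cs": cs_line.rstrip("\n")}


# Hypothetical file names; the real ones were downloaded by the loading script.
for record in iter_pairs("train.java-cs.txt.java", "train.java-cs.txt.cs"):
    print(record)
    break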
common.py DELETED
@@ -1,75 +0,0 @@
- from typing import List
-
- import datasets
-
-
- # Citation, taken from https://github.com/microsoft/CodeXGLUE
- _DEFAULT_CITATION = """@article{CodeXGLUE,
-   title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
-   year={2020},}"""
-
-
- class Child:
-     _DESCRIPTION = None
-     _FEATURES = None
-     _CITATION = None
-     SPLITS = {"train": datasets.Split.TRAIN}
-     _SUPERVISED_KEYS = None
-
-     def __init__(self, info):
-         self.info = info
-
-     def homepage(self):
-         return self.info["project_url"]
-
-     def _info(self):
-         # This is the description that will appear on the datasets page.
-         return datasets.DatasetInfo(
-             description=self.info["description"] + "\n\n" + self._DESCRIPTION,
-             features=datasets.Features(self._FEATURES),
-             homepage=self.homepage(),
-             citation=self._CITATION or _DEFAULT_CITATION,
-             supervised_keys=self._SUPERVISED_KEYS,
-         )
-
-     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-         SPLITS = self.SPLITS
-         _URL = self.info["raw_url"]
-         urls_to_download = {}
-         for split in SPLITS:
-             if split not in urls_to_download:
-                 urls_to_download[split] = {}
-
-             for key, url in self.generate_urls(split):
-                 if not url.startswith("http"):
-                     url = _URL + "/" + url
-                 urls_to_download[split][key] = url
-
-         downloaded_files = {}
-         for k, v in urls_to_download.items():
-             downloaded_files[k] = dl_manager.download(v)
-
-         return [
-             datasets.SplitGenerator(
-                 name=SPLITS[k],
-                 gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
-             )
-             for k in SPLITS
-         ]
-
-     def check_empty(self, entries):
-         all_empty = all([v == "" for v in entries.values()])
-         all_non_empty = all([v != "" for v in entries.values()])
-
-         if not all_non_empty and not all_empty:
-             raise RuntimeError("Parallel data files should have the same number of lines.")
-
-         return all_empty
-
-
- class TrainValidTestChild(Child):
-     SPLITS = {
-         "train": datasets.Split.TRAIN,
-         "valid": datasets.Split.VALIDATION,
-         "test": datasets.Split.TEST,
-     }
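In the deleted layout, _split_generators built its download map by joining the config's raw_url with the relative names yielded by generate_urls(). A short sketch of the URLs that produced for one split, using the raw_url defined in generated_definitions.py below:

RAW_URL = "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/code-to-code-trans/data"

def urls_for(split_name):
    # Mirrors generate_urls() plus the prefixing done in _split_generators().
    return {key: f"{RAW_URL}/{split_name}.java-cs.txt.{key}" for key in ("cs", "java")}

print(urls_for("train"))
# {'cs': '<RAW_URL>/train.java-cs.txt.cs', 'java': '<RAW_URL>/train.java-cs.txt.java'}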
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f85e7a4685883fd9d95c0a1cb743e8ca2f17553b59a4f7b8967e515e7488b94
+ size 169864
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:670d745b939f8f065e207558306d10200ad07e2de02e1b533070ad3b1957cb56
+ size 1804099
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7074b5b5e913273e03153931f39fdfcfbacd9db9aa85d4dc0f7af4b77c50e311
+ size 90801
generated_definitions.py DELETED
@@ -1,12 +0,0 @@
- DEFINITIONS = {
-     "default": {
-         "class_name": "CodeXGlueCcCodeToCodeTrans",
-         "dataset_type": "Code-Code",
-         "description": "CodeXGLUE code-to-code-trans dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/code-to-code-trans",
-         "dir_name": "code-to-code-trans",
-         "name": "default",
-         "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/code-to-code-trans",
-         "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/code-to-code-trans/data",
-         "sizes": {"test": 1000, "train": 10300, "validation": 500},
-     }
- }