NamCyan committed on
Commit d32cc51
1 Parent(s): 3a51cad
Files changed (1):
  1. the-vault-function.py +67 -58

the-vault-function.py CHANGED
@@ -7,17 +7,17 @@ import datasets
 
 _REPO_NAME = 'Fsoft-AIC/the-vault'
 
-_LANG_TO_EXTENSION = {
-    "Python": [".py"],
-    "C": [".c", ".h"],
-    "C#": [".cs"],
-    "C++": [".cpp", ".hpp", ".c++", ".h++", ".cc", ".hh", ".C", ".H"],
-    "Go": [".go"],
-    "Java": [".java"],
-    "JavaScript": [".js"],
-    "PHP": [".php", ".php3", ".php4", ".php5", ".phps", ".phpt"],
-    "Ruby": [".rb"],
-    "Rust": [".rs"],
+_LANG_TO_TEXT = {
+    "python": "python",
+    "c": "c",
+    "c#": "c_sharp",
+    "c++": "cpp",
+    "go": "go",
+    "java": "java",
+    "javascript": "javascript",
+    "php": "php",
+    "ruby": "ruby",
+    "rust": "rust",
 }
 
 
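This first hunk swaps the extension tables for a fixed mapping from lowercase language names to the text identifiers used in data file names. A minimal sketch of how a user-facing name resolves through it (table abbreviated; to_text_id is an illustrative helper, not part of the script):

```python
# Sketch only: _LANG_TO_TEXT as introduced in this commit (abbreviated);
# to_text_id is an illustrative helper, not part of the script.
_LANG_TO_TEXT = {
    "python": "python",
    "c#": "c_sharp",
    "c++": "cpp",
}

def to_text_id(language: str) -> str:
    # Config languages are lowercased on entry, so "C++" and "c++" agree.
    return _LANG_TO_TEXT[language.lower()]

assert to_text_id("C#") == "c_sharp"
assert to_text_id("C++") == "cpp"
```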
@@ -26,20 +26,22 @@ _DESCRIPTION = """The Vault"""
 _HOMEPAGE = "https://huggingface.co/Fsoft-AIC"
 
 
-_EXTENSION_TO_LANG = {}
-for lang in _LANG_TO_EXTENSION:
-    for extension in _LANG_TO_EXTENSION[lang]:
-        _EXTENSION_TO_LANG[extension] = lang
+_TEXT_TO_LANG = {}
+for lang in _LANG_TO_TEXT:
+    text = _LANG_TO_TEXT[lang]
+    _TEXT_TO_LANG[text] = lang
 
 
 
-_LANG_CONFIGS = ["all"] + list(_LANG_TO_EXTENSION.keys())
+_LANG_CONFIGS = ["all"] + list(_TEXT_TO_LANG.keys())
 
 num_shard_split = {
-    'train/small': 2,
-    'train/medium': 4
+    'train/small/python': 1,
+    'train/medium/python': 1,
+    'train/small/c': 1,
+    'train/medium/c': 1
 }
-_SPLIT_CONFIGS = ["all"] + list(num_shard_split.keys())
+_SPLIT_CONFIGS = ["all", "train/small", "train/medium"]
 
 class TheVaultFunctionConfig(datasets.BuilderConfig):
     """BuilderConfig for The Vault dataset."""
@@ -56,8 +58,8 @@ class TheVaultFunctionConfig(datasets.BuilderConfig):
             **kwargs,
         )
 
-        languages = set(languages)
-        split_set = set(split_set)
+        languages = set([lang.lower() for lang in languages])
+        split_set = set([split.lower() for split in split_set])
 
         assert all([language in _LANG_CONFIGS for language in languages]), f"languages {languages} contains language not in {_LANG_CONFIGS}."
         assert all([split in _SPLIT_CONFIGS for split in split_set]), "split_set {} contains element not in {}.".format(split_set, _SPLIT_CONFIGS)
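Config inputs are now lowercased before validation, so names are accepted case-insensitively. A sketch of the assert pattern with this commit's split configs:

```python
# Sketch of the validation pattern above, using this commit's split configs.
_SPLIT_CONFIGS = ["all", "train/small", "train/medium"]

split_set = set([split.lower() for split in ["Train/Small"]])
assert all([split in _SPLIT_CONFIGS for split in split_set]), \
    "split_set {} contains element not in {}.".format(split_set, _SPLIT_CONFIGS)
```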
@@ -88,13 +90,15 @@ class TheVaultFunction(datasets.GeneratorBasedBuilder):
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
-            features=datasets.Features({"original_string": datasets.Value("string"),
+            features=datasets.Features({
+                # "original_string": datasets.Value("string"),
                 "original_docstring": datasets.Value("string"),
                 "code": datasets.Value("string"),
                 "docstring": datasets.Value("string"),
                 "code_tokens": datasets.Value("string"),
                 "docstring_tokens": datasets.Value("string"),
                 "short_docstring": datasets.Value("string"),
+                "short_docstring_tokens": datasets.Value("string"),
                 "comment": datasets.Value("string"),
                 "return_type": datasets.Value("string"),
                 "identifier": datasets.Value("string"),
@@ -104,7 +108,6 @@ class TheVaultFunction(datasets.GeneratorBasedBuilder):
             }),
             supervised_keys=None,
             homepage=_HOMEPAGE,
-            license="Multiple: see the 'license' field of each sample.",
 
         )
 
@@ -113,24 +116,29 @@ class TheVaultFunction(datasets.GeneratorBasedBuilder):
 
         generators = []
         split_set = self.config.split_set
+        languages = self.config.languages
         if "all" in split_set:
-            split_set = list(num_shard_split.keys())
+            split_set = _SPLIT_CONFIGS[1:]
+
+        if "all" in languages:
+            languages = _LANG_CONFIGS[1:]
 
         for split in split_set:
-            num_shards = num_shard_split[split]
-            data_files = [
-                f"data/{split}-{_index:05d}-of-{num_shards:05d}.parquet"
-                for _index in range(num_shards)
-            ]
-            files = dl_manager.download(data_files)
-            generators.append(
-                datasets.SplitGenerator(
-                    name=split.replace("/", "_"),
-                    gen_kwargs={
-                        "files": files,
-                    },
-                ),
-            )
+            for language in languages:
+                num_shards = num_shard_split[f"{split}/{language}"]
+                data_files = [
+                    f"data/{split}/{language}-{_index:05d}-of-{num_shards:05d}.parquet"
+                    for _index in range(num_shards)
+                ]
+                files = dl_manager.download(data_files)
+                generators.append(
+                    datasets.SplitGenerator(
+                        name=split.replace("/", "_"),
+                        gen_kwargs={
+                            "files": files,
+                        },
+                    ),
+                )
         return generators
 
     def _generate_examples(self, files):
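Shard paths now embed both the split and the language. A sketch of the file list produced for one pair, plus the flattened name the SplitGenerator is registered under:

```python
# Sketch: parquet shard paths requested for one (split, language) pair.
split, language, num_shards = "train/small", "python", 1
data_files = [
    f"data/{split}/{language}-{_index:05d}-of-{num_shards:05d}.parquet"
    for _index in range(num_shards)
]
assert data_files == ["data/train/small/python-00000-of-00001.parquet"]
# SplitGenerator names flatten the slash:
assert split.replace("/", "_") == "train_small"
```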
@@ -143,30 +151,31 @@ class TheVaultFunction(datasets.GeneratorBasedBuilder):
                 for row_index in range(pa_table.num_rows):
                     row = pa_table.slice(row_index, 1).to_pydict()
 
-                    lang = row['language'][0]
+                    # lang = row['language'][0]
 
-                    if self.config.filter_languages and not lang in self.config.languages:
-                        continue
+                    # if self.config.filter_languages and not lang in self.config.languages:
+                    #     continue
 
                     yield key, {
-                        "original_string": datasets.Value("string"),
-                        "original_docstring": datasets.Value("string"),
-                        "code": datasets.Value("string"),
-                        "docstring": datasets.Value("string"),
-                        "code_tokens": datasets.Value("string"),
-                        "docstring_tokens": datasets.Value("string"),
-                        "short_docstring": datasets.Value("string"),
-                        "comment": datasets.Value("string"),
-                        "return_type": datasets.Value("string"),
-                        "identifier": datasets.Value("string"),
-                        "repo": datasets.Value("string"),
-                        "path": datasets.Value("string"),
-                        "language": datasets.Value("string"),
+                        "repo": row['repo'][0],
+                        "path": row['path'][0],
+                        "language": row['language'][0],
+                        "identifier": row['identifier'][0],
+                        "return_type": row['return_type'][0],
+                        # "original_string": row['original_string'][0],
+                        "original_docstring": row['original_docstring'][0],
+                        "docstring": row['docstring'][0],
+                        "docstring_tokens": row['docstring_tokens'][0],
+                        "code": row['code'][0],
+                        "code_tokens": row['code_tokens'][0],
+                        "short_docstring": row['short_docstring'][0],
+                        "short_docstring_tokens": row['short_docstring_tokens'][0],
+                        "comment": row['comment'][0]
                     }
                     key += 1
 
 
-def lang_from_name(name):
-    for extension in _EXTENSION_TO_LANG:
-        if name.endswith(extension):
-            return _EXTENSION_TO_LANG[extension]
+# def lang_from_name(name):
+#     for extension in _EXTENSION_TO_LANG:
+#         if name.endswith(extension):
+#             return _EXTENSION_TO_LANG[extension]
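For context, a sketch of the pyarrow read loop that surrounds this hunk; only the shard path is illustrative:

```python
import pyarrow as pa
import pyarrow.parquet as pq

# Sketch: iterate a parquet shard row by row, as _generate_examples does.
parquet_file = pq.ParquetFile("data/train/small/python-00000-of-00001.parquet")
key = 0
for record_batch in parquet_file.iter_batches():
    pa_table = pa.Table.from_batches([record_batch])
    for row_index in range(pa_table.num_rows):
        # to_pydict() maps each column to a one-element list, hence row[...][0].
        row = pa_table.slice(row_index, 1).to_pydict()
        print(key, row["identifier"][0])
        key += 1
```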
 
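Putting the pieces together, a hedged usage sketch for this revision of the loading script (keyword names mirror TheVaultFunctionConfig; data availability under _REPO_NAME is assumed):

```python
from datasets import load_dataset

# Usage sketch only: split_set/languages are forwarded to
# TheVaultFunctionConfig, which lowercases and validates them.
ds = load_dataset(
    "Fsoft-AIC/the-vault",
    split_set=["train/small"],
    languages=["Python"],
)
```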